mirror of
https://github.com/isc-projects/bind9.git
synced 2026-04-28 01:28:05 -04:00
Rewrite the statschannel traffic tests to pytest
(cherry picked from commit 1202fd912a)
This commit is contained in:
parent
09a1c051ec
commit
75e76b072a
8 changed files with 358 additions and 172 deletions
|
|
@ -26,4 +26,4 @@ rm -f ns2/Kmanykeys* ns2/manykeys.*.id
|
|||
rm -f ns2/*.db.signed* ns2/dsset-*. ns2/*.jbk
|
||||
rm -f ns2/dnssec.db.signed* ns2/dsset-dnssec.
|
||||
rm -f ns3/*.db
|
||||
rm -rf /.cache /__pycache__
|
||||
rm -rf ./.cache ./__pycache__
|
||||
|
|
|
|||
|
|
@ -9,8 +9,9 @@
|
|||
# information regarding copyright ownership.
|
||||
############################################################################
|
||||
|
||||
import pytest
|
||||
import os
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line(
|
||||
|
|
@ -22,11 +23,17 @@ def pytest_configure(config):
|
|||
config.addinivalue_line(
|
||||
"markers", "xml: mark tests that need xml.etree to function"
|
||||
)
|
||||
config.addinivalue_line(
|
||||
"markers", "dnspython: mark tests that need dnspython to function"
|
||||
)
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(config, items):
|
||||
# pylint: disable=unused-argument,unused-import,too-many-branches
|
||||
# pylint: disable=import-outside-toplevel
|
||||
# Test for requests module
|
||||
skip_requests = pytest.mark.skip(reason="need requests module to run")
|
||||
skip_requests = pytest.mark.skip(
|
||||
reason="need requests module to run")
|
||||
try:
|
||||
import requests # noqa: F401
|
||||
except ModuleNotFoundError:
|
||||
|
|
@ -34,7 +41,8 @@ def pytest_collection_modifyitems(config, items):
|
|||
if "requests" in item.keywords:
|
||||
item.add_marker(skip_requests)
|
||||
# Test for json module
|
||||
skip_json = pytest.mark.skip(reason="need json module to run")
|
||||
skip_json = pytest.mark.skip(
|
||||
reason="need json module to run")
|
||||
try:
|
||||
import json # noqa: F401
|
||||
except ModuleNotFoundError:
|
||||
|
|
@ -42,33 +50,58 @@ def pytest_collection_modifyitems(config, items):
|
|||
if "json" in item.keywords:
|
||||
item.add_marker(skip_json)
|
||||
# Test for xml module
|
||||
skip_xml = pytest.mark.skip(reason="need xml module to run")
|
||||
skip_xml = pytest.mark.skip(
|
||||
reason="need xml module to run")
|
||||
try:
|
||||
import xml # noqa: F401
|
||||
import xml.etree.ElementTree # noqa: F401
|
||||
except ModuleNotFoundError:
|
||||
for item in items:
|
||||
if "xml" in item.keywords:
|
||||
item.add_marker(skip_xml)
|
||||
# Test if JSON statistics channel was enabled
|
||||
no_jsonstats = pytest.mark.skip(reason="need JSON statistics to be enabled")
|
||||
no_jsonstats = pytest.mark.skip(
|
||||
reason="need JSON statistics to be enabled")
|
||||
if os.getenv("HAVEJSONSTATS") is None:
|
||||
for item in items:
|
||||
if "json" in item.keywords:
|
||||
item.add_marker(no_jsonstats)
|
||||
# Test if XML statistics channel was enabled
|
||||
no_xmlstats = pytest.mark.skip(reason="need XML statistics to be enabled")
|
||||
no_xmlstats = pytest.mark.skip(
|
||||
reason="need XML statistics to be enabled")
|
||||
if os.getenv("HAVEXMLSTATS") is None:
|
||||
for item in items:
|
||||
if "xml" in item.keywords:
|
||||
item.add_marker(no_xmlstats)
|
||||
# Test for dnspython module
|
||||
skip_dnspython = pytest.mark.skip(
|
||||
reason="need dnspython module to run")
|
||||
try:
|
||||
import dns.query # noqa: F401
|
||||
except ModuleNotFoundError:
|
||||
for item in items:
|
||||
if "dnspython" in item.keywords:
|
||||
item.add_marker(skip_dnspython)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def statsport(request):
|
||||
port = os.getenv("EXTRAPORT1")
|
||||
# pylint: disable=unused-argument
|
||||
env_port = os.getenv("EXTRAPORT1")
|
||||
if port is None:
|
||||
port = 5301
|
||||
env_port = 5301
|
||||
else:
|
||||
port = int(port)
|
||||
env_port = int(env_port)
|
||||
|
||||
return port
|
||||
return env_port
|
||||
|
||||
|
||||
@pytest.fixture
def port(request):
    # pylint: disable=unused-argument
    """Return the DNS query port taken from $PORT, defaulting to 5300.

    BUG fix: the original tested ``if port is None`` — ``port`` is the
    fixture function itself and is never None, so the default branch was
    unreachable and a missing $PORT crashed on ``int(None)``.  Test the
    environment value instead.
    """
    env_port = os.getenv("PORT")
    if env_port is None:
        env_port = 5300
    else:
        env_port = int(env_port)

    return env_port
|
||||
|
|
|
|||
95
bin/tests/system/statschannel/generic.py
Normal file
95
bin/tests/system/statschannel/generic.py
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
############################################################################
|
||||
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
#
|
||||
# See the COPYRIGHT file distributed with this work for additional
|
||||
# information regarding copyright ownership.
|
||||
############################################################################
|
||||
|
||||
import helper
|
||||
|
||||
|
||||
def test_zone_timers_primary(fetch_zones, load_timers, **kwargs):
|
||||
|
||||
statsip = kwargs['statsip']
|
||||
statsport = kwargs['statsport']
|
||||
zonedir = kwargs['zonedir']
|
||||
|
||||
zones = fetch_zones(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers(zone, True)
|
||||
mtime = helper.zone_mtime(zonedir, name)
|
||||
helper.check_zone_timers(loaded, expires, refresh, mtime)
|
||||
|
||||
|
||||
def test_zone_timers_secondary(fetch_zones, load_timers, **kwargs):
|
||||
|
||||
statsip = kwargs['statsip']
|
||||
statsport = kwargs['statsport']
|
||||
zonedir = kwargs['zonedir']
|
||||
|
||||
zones = fetch_zones(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers(zone, False)
|
||||
mtime = helper.zone_mtime(zonedir, name)
|
||||
helper.check_zone_timers(loaded, expires, refresh, mtime)
|
||||
|
||||
|
||||
def test_zone_with_many_keys(fetch_zones, load_zone, **kwargs):
|
||||
|
||||
statsip = kwargs['statsip']
|
||||
statsport = kwargs['statsport']
|
||||
|
||||
zones = fetch_zones(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
name = load_zone(zone)
|
||||
if name == 'manykeys':
|
||||
helper.check_manykeys(name)
|
||||
|
||||
|
||||
def test_traffic(fetch_traffic, **kwargs):
    """Check the traffic-size histograms after UDP and TCP queries.

    After each query the expected histogram is advanced and compared
    against a fresh fetch from the statistics channel.  Required kwargs:
    ``statsip``, ``statsport`` and ``port`` (the DNS query port).
    """
    statsip = kwargs['statsip']
    statsport = kwargs['statsport']
    port = kwargs['port']

    # Seed the expected counters from the server's current state.
    data = fetch_traffic(statsip, statsport)
    exp = helper.create_expected(data)

    # Same four probes as the original shell test: short/long answers
    # over UDP, then short/long answers over TCP.
    scenarios = [
        ("short.example.", "udp"),
        ("long.example.", "udp"),
        ("short.example.", "tcp"),
        ("long.example.", "tcp"),
    ]
    for qname, proto in scenarios:
        msg = helper.create_msg(qname, "TXT")
        helper.update_expected(
            exp, "dns-{}-requests-sizes-received-ipv4".format(proto), msg)
        if proto == "udp":
            ans = helper.udp_query(statsip, port, msg)
        else:
            ans = helper.tcp_query(statsip, port, msg)
        helper.update_expected(
            exp, "dns-{}-responses-sizes-sent-ipv4".format(proto), ans)
        data = fetch_traffic(statsip, statsport)

        helper.check_traffic(data, exp)
|
||||
|
|
@ -9,8 +9,16 @@
|
|||
# information regarding copyright ownership.
|
||||
############################################################################
|
||||
|
||||
import os
|
||||
import os.path
|
||||
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import dns.message
|
||||
import dns.query
|
||||
import dns.rcode
|
||||
|
||||
# ISO datetime format without msec
|
||||
fmt = '%Y-%m-%dT%H:%M:%SZ'
|
||||
|
||||
|
|
@ -21,15 +29,18 @@ now = datetime.utcnow().replace(microsecond=0)
|
|||
dayzero = datetime.utcfromtimestamp(0).replace(microsecond=0)
|
||||
|
||||
|
||||
TIMEOUT = 10
|
||||
|
||||
|
||||
# Generic helper functions
def check_expires(expires, min_time, max_time):
    """Assert that the expiry timestamp lies within [min_time, max_time]."""
    assert min_time <= expires <= max_time
|
||||
|
||||
|
||||
def check_refresh(refresh, min_time, max_time):
    """Assert that the refresh timestamp lies within [min_time, max_time]."""
    assert min_time <= refresh <= max_time
|
||||
|
||||
|
||||
def check_loaded(loaded, expected):
|
||||
|
|
@ -47,12 +58,96 @@ def check_zone_timers(loaded, expires, refresh, loaded_exp):
|
|||
check_loaded(loaded, loaded_exp)
|
||||
|
||||
|
||||
def zone_mtime(zonedir, name):
|
||||
import os
|
||||
import os.path
|
||||
from datetime import datetime
|
||||
#
# The output is gibberish, but at least make sure it does not crash.
#
def check_manykeys(name, zone=None):
    # pylint: disable=unused-argument
    """Sanity check for the many-keys zone; only the zone name is verified."""
    assert name == "manykeys"
|
||||
|
||||
|
||||
def zone_mtime(zonedir, name):
    """Return the mtime of <zonedir>/<name>.db as a UTC datetime.

    Microseconds are stripped so the value compares cleanly against the
    second-granularity timestamps in the statistics channel.  Returns
    ``dayzero`` when the zone file does not exist.
    """
    try:
        si = os.stat(os.path.join(zonedir, "{}.db".format(name)))
    except FileNotFoundError:
        return dayzero

    # NOTE(review): a second, unconditional os.stat() call after the
    # try/except was removed — it defeated the FileNotFoundError guard.
    mtime = datetime.utcfromtimestamp(si.st_mtime).replace(microsecond=0)

    return mtime
|
||||
|
||||
|
||||
def zone_keyid(nameserver, zone, key):
    """Read the key ID recorded for *zone*'s *key* in *nameserver*'s dir."""
    path = f'{nameserver}/{zone}.{key}.id'
    with open(path) as idfile:
        keyid = idfile.read().strip()
    # Echo the ID so it shows up in the pytest log on failure.
    print(f'{zone}-{key} ID: {keyid}')
    return keyid
|
||||
|
||||
|
||||
def create_msg(qname, qtype):
    """Build a DNSSEC-aware EDNS0 query message for *qname*/*qtype*."""
    return dns.message.make_query(qname, qtype, want_dnssec=True,
                                  use_edns=0, payload=4096)
|
||||
|
||||
|
||||
def udp_query(ip, port, msg):
    """Send *msg* over UDP and return the response (must be NOERROR)."""
    response = dns.query.udp(msg, ip, TIMEOUT, port=port)
    assert response.rcode() == dns.rcode.NOERROR

    return response
|
||||
|
||||
|
||||
def tcp_query(ip, port, msg):
    """Send *msg* over TCP and return the response (must be NOERROR)."""
    response = dns.query.tcp(msg, ip, TIMEOUT, port=port)
    assert response.rcode() == dns.rcode.NOERROR

    return response
|
||||
|
||||
|
||||
def create_expected(data):
    """Seed the expected traffic histograms from a server snapshot.

    Returns a dict mapping each of the eight traffic categories to a
    defaultdict(int) of size-bucket counters, pre-loaded with *data*.
    """
    categories = [
        "dns-tcp-requests-sizes-received-ipv4",
        "dns-tcp-responses-sizes-sent-ipv4",
        "dns-tcp-requests-sizes-received-ipv6",
        "dns-tcp-responses-sizes-sent-ipv6",
        "dns-udp-requests-sizes-received-ipv4",
        "dns-udp-requests-sizes-received-ipv6",
        "dns-udp-responses-sizes-sent-ipv4",
        "dns-udp-responses-sizes-sent-ipv6",
    ]
    expected = {category: defaultdict(int) for category in categories}

    for category, buckets in data.items():
        for bucket, count in buckets.items():
            expected[category][bucket] += count

    return expected
|
||||
|
||||
|
||||
def update_expected(expected, key, msg):
    """Bump the 16-byte size-bucket counter under *key* for *msg*'s wire size."""
    wire_len = len(msg.to_wire())
    # Buckets are half-open 16-byte ranges, labelled e.g. "32-47".
    low = wire_len - wire_len % 16

    expected[key]["{}-{}".format(low, low + 15)] += 1
|
||||
|
||||
|
||||
def check_traffic(data, expected):
    """Assert the observed traffic histograms match the expected ones."""
    def canonical(value):
        # Recursively sort dict items / list elements so the comparison
        # is insensitive to key order.
        if isinstance(value, dict):
            return sorted((key, canonical(val)) for key, val in value.items())
        if isinstance(value, list):
            return sorted(canonical(item) for item in value)
        return value

    data_c = canonical(data)
    expected_c = canonical(expected)

    # Both sides must cover exactly the eight traffic categories.
    assert len(data_c) == 8
    assert len(expected_c) == 8
    assert len(data) == len(data_c)
    assert len(expected) == len(expected_c)

    assert data_c == expected_c
|
||||
|
|
|
|||
|
|
@ -10,24 +10,37 @@
|
|||
# information regarding copyright ownership.
|
||||
############################################################################
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from helper import fmt, zone_mtime, check_zone_timers, dayzero
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
import generic
|
||||
from helper import fmt
|
||||
|
||||
|
||||
# JSON helper functions
def fetch_zones_json(statsip, statsport):
    """Fetch the per-zone statistics from the JSON statistics channel."""
    url = "http://{}:{}/json/v1/zones".format(statsip, statsport)
    resp = requests.get(url)
    assert resp.status_code == 200

    return resp.json()["views"]["_default"]["zones"]
|
||||
|
||||
|
||||
def load_timers_from_json(zone, primary=True):
|
||||
def fetch_traffic_json(statsip, statsport):
    """Fetch the traffic histograms from the JSON statistics channel."""
    url = "http://{}:{}/json/v1/traffic".format(statsip, statsport)
    resp = requests.get(url)
    assert resp.status_code == 200

    return resp.json()["traffic"]
|
||||
|
||||
|
||||
def load_timers_json(zone, primary=True):
|
||||
|
||||
name = zone['name']
|
||||
|
||||
# Check if the primary zone timer exists
|
||||
|
|
@ -49,27 +62,39 @@ def load_timers_from_json(zone, primary=True):
|
|||
return (name, loaded, expires, refresh)
|
||||
|
||||
|
||||
def load_zone_json(zone):
    """Return the zone name from a JSON zone entry."""
    return zone['name']
|
||||
|
||||
|
||||
@pytest.mark.json
|
||||
@pytest.mark.requests
|
||||
def test_zone_timers_primary_json(statsport):
|
||||
statsip = "10.53.0.1"
|
||||
zonedir = "ns1"
|
||||
|
||||
zones = fetch_json(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers_from_json(zone, True)
|
||||
mtime = zone_mtime(zonedir, name)
|
||||
check_zone_timers(loaded, expires, refresh, mtime)
|
||||
generic.test_zone_timers_primary(fetch_zones_json, load_timers_json,
|
||||
statsip="10.53.0.1", statsport=statsport,
|
||||
zonedir="ns1")
|
||||
|
||||
|
||||
@pytest.mark.json
|
||||
@pytest.mark.requests
|
||||
def test_zone_timers_secondary_json(statsport):
|
||||
statsip = "10.53.0.3"
|
||||
generic.test_zone_timers_secondary(fetch_zones_json, load_timers_json,
|
||||
statsip="10.53.0.3", statsport=statsport,
|
||||
zonedir="ns3")
|
||||
|
||||
zones = fetch_json(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers_from_json(zone, False)
|
||||
check_zone_timers(loaded, expires, refresh, dayzero)
|
||||
@pytest.mark.json
|
||||
@pytest.mark.requests
|
||||
def test_zone_with_many_keys_json(statsport):
|
||||
generic.test_zone_with_many_keys(fetch_zones_json, load_zone_json,
|
||||
statsip="10.53.0.2", statsport=statsport)
|
||||
|
||||
|
||||
@pytest.mark.json
|
||||
@pytest.mark.requests
|
||||
@pytest.mark.dnspython
|
||||
def test_traffic_json(port, statsport):
|
||||
generic.test_traffic(fetch_traffic_json,
|
||||
statsip="10.53.0.2", statsport=statsport,
|
||||
port=port)
|
||||
|
|
|
|||
|
|
@ -10,15 +10,18 @@
|
|||
# information regarding copyright ownership.
|
||||
############################################################################
|
||||
|
||||
import pytest
|
||||
import xml.etree.ElementTree as ET
|
||||
from datetime import datetime
|
||||
from helper import fmt, zone_mtime, check_zone_timers, dayzero
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
import generic
|
||||
from helper import fmt
|
||||
|
||||
|
||||
# XML helper functions
|
||||
def fetch_xml(statsip, statsport):
|
||||
import xml.etree.ElementTree as ET
|
||||
import requests
|
||||
def fetch_zones_xml(statsip, statsport):
|
||||
|
||||
r = requests.get("http://{}:{}/xml/v3/zones".format(statsip, statsport))
|
||||
assert r.status_code == 200
|
||||
|
|
@ -35,7 +38,38 @@ def fetch_xml(statsip, statsport):
|
|||
return default_view.find('zones').findall('zone')
|
||||
|
||||
|
||||
def load_timers_from_xml(zone, primary=True):
|
||||
def fetch_traffic_xml(statsip, statsport):
    """Fetch the XML traffic statistics and flatten them.

    Returns a dict keyed like the JSON stats, e.g.
    "dns-udp-requests-sizes-received-ipv4", mapping size buckets to
    counter values.
    """
    def counters_to_dict(node):
        # One <counter name="..."> child per size bucket.
        return {c.attrib['name']: int(c.text) for c in node.findall("counter")}

    resp = requests.get(
        "http://{}:{}/xml/v3/traffic".format(statsip, statsport))
    assert resp.status_code == 200

    root = ET.fromstring(resp.text)

    traffic = {}
    for ip in ["ipv4", "ipv6"]:
        for proto in ["udp", "tcp"]:
            proto_root = root.find("traffic").find(ip).find(proto)
            for counters in proto_root.findall("counters"):
                # Requests and responses share the element layout; only
                # the "type" attribute distinguishes them.
                if counters.attrib['type'] == "request-size":
                    key = "dns-{}-requests-sizes-received-{}".format(proto, ip)
                else:
                    key = "dns-{}-responses-sizes-sent-{}".format(proto, ip)

                traffic[key] = counters_to_dict(counters)

    return traffic
|
||||
|
||||
|
||||
def load_timers_xml(zone, primary=True):
|
||||
|
||||
name = zone.attrib['name']
|
||||
|
||||
loaded_el = zone.find('loaded')
|
||||
|
|
@ -58,27 +92,39 @@ def load_timers_from_xml(zone, primary=True):
|
|||
return (name, loaded, expires, refresh)
|
||||
|
||||
|
||||
def load_zone_xml(zone):
    """Return the zone name from an XML <zone> element."""
    return zone.attrib['name']
|
||||
|
||||
|
||||
@pytest.mark.xml
|
||||
@pytest.mark.requests
|
||||
def test_zone_timers_primary_xml(statsport):
|
||||
statsip = "10.53.0.1"
|
||||
zonedir = "ns1"
|
||||
|
||||
zones = fetch_xml(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers_from_xml(zone, True)
|
||||
mtime = zone_mtime(zonedir, name)
|
||||
check_zone_timers(loaded, expires, refresh, mtime)
|
||||
generic.test_zone_timers_primary(fetch_zones_xml, load_timers_xml,
|
||||
statsip="10.53.0.1", statsport=statsport,
|
||||
zonedir="ns1")
|
||||
|
||||
|
||||
@pytest.mark.xml
|
||||
@pytest.mark.requests
|
||||
def test_zone_timers_secondary_xml(statsport):
|
||||
statsip = "10.53.0.3"
|
||||
generic.test_zone_timers_secondary(fetch_zones_xml, load_timers_xml,
|
||||
statsip="10.53.0.3", statsport=statsport,
|
||||
zonedir="ns3")
|
||||
|
||||
zones = fetch_xml(statsip, statsport)
|
||||
|
||||
for zone in zones:
|
||||
(name, loaded, expires, refresh) = load_timers_from_xml(zone, False)
|
||||
check_zone_timers(loaded, expires, refresh, dayzero)
|
||||
@pytest.mark.xml
|
||||
@pytest.mark.requests
|
||||
def test_zone_with_many_keys_xml(statsport):
|
||||
generic.test_zone_with_many_keys(fetch_zones_xml, load_zone_xml,
|
||||
statsip="10.53.0.2", statsport=statsport)
|
||||
|
||||
|
||||
@pytest.mark.xml
|
||||
@pytest.mark.requests
|
||||
@pytest.mark.dnspython
|
||||
def test_traffic_xml(port, statsport):
|
||||
generic.test_traffic(fetch_traffic_xml,
|
||||
statsip="10.53.0.2", statsport=statsport,
|
||||
port=port)
|
||||
|
|
|
|||
109
bin/tests/system/statschannel/tests.sh
Executable file → Normal file
109
bin/tests/system/statschannel/tests.sh
Executable file → Normal file
|
|
@ -46,21 +46,6 @@ if [ ! "$PERL_JSON" -a ! "$PERL_XML" ]; then
|
|||
fi
|
||||
|
||||
|
||||
gettraffic() {
|
||||
sleep 1
|
||||
echo_i "... using $1"
|
||||
case $1 in
|
||||
xml) path='xml/v3/traffic' ;;
|
||||
json) path='json/v1/traffic' ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
file=`$PERL fetch.pl -p ${EXTRAPORT1} $path`
|
||||
cp $file $file.$1.$2
|
||||
$PERL traffic-${1}.pl $file 2>/dev/null | sort > traffic.out.$2
|
||||
result=$?
|
||||
return $result
|
||||
}
|
||||
|
||||
getzones() {
|
||||
sleep 1
|
||||
echo_i "... using $1"
|
||||
|
|
@ -87,81 +72,6 @@ loadkeys_on() {
|
|||
|
||||
status=0
|
||||
n=1
|
||||
ret=0
|
||||
echo_i "fetching traffic size data ($n)"
|
||||
if [ $PERL_XML ]; then
|
||||
gettraffic xml x$n || ret=1
|
||||
cmp traffic.out.x$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
gettraffic json j$n || ret=1
|
||||
cmp traffic.out.j$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
ret=0
|
||||
echo_i "fetching traffic size data after small UDP query ($n)"
|
||||
$DIGCMD short.example txt > dig.out.$n || ret=1
|
||||
if [ $PERL_XML ]; then
|
||||
gettraffic xml x$n || ret=1
|
||||
cmp traffic.out.x$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
gettraffic json j$n || ret=1
|
||||
cmp traffic.out.j$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
ret=0
|
||||
n=`expr $n + 1`
|
||||
echo_i "fetching traffic size data after large UDP query ($n)"
|
||||
$DIGCMD long.example txt > dig.out.$n || ret=1
|
||||
if [ $PERL_XML ]; then
|
||||
gettraffic xml x$n || ret=1
|
||||
cmp traffic.out.x$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
gettraffic json j$n || ret=1
|
||||
cmp traffic.out.j$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
ret=0
|
||||
echo_i "fetching traffic size data after small TCP query ($n)"
|
||||
$DIGCMD +tcp short.example txt > dig.out.$n || ret=1
|
||||
if [ $PERL_XML ]; then
|
||||
gettraffic xml x$n || ret=1
|
||||
cmp traffic.out.x$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
gettraffic json j$n || ret=1
|
||||
cmp traffic.out.j$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
ret=0
|
||||
echo_i "fetching traffic size data after large TCP query ($n)"
|
||||
$DIGCMD +tcp long.example txt > dig.out.$n || ret=1
|
||||
if [ $PERL_XML ]; then
|
||||
gettraffic xml x$n || ret=1
|
||||
cmp traffic.out.x$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
gettraffic json j$n || ret=1
|
||||
cmp traffic.out.j$n traffic.expect.$n || ret=1
|
||||
fi
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
ret=0
|
||||
echo_i "checking consistency between named.stats and xml/json ($n)"
|
||||
rm -f ns2/named.stats
|
||||
|
|
@ -360,25 +270,6 @@ if [ $ret != 0 ]; then echo_i "failed"; fi
|
|||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
||||
# 4. Test a zone with more than four keys.
|
||||
zone="manykeys"
|
||||
ksk8_id=`cat ns2/$zone.ksk8.id`
|
||||
zsk8_id=`cat ns2/$zone.zsk8.id`
|
||||
ksk13_id=`cat ns2/$zone.ksk13.id`
|
||||
zsk13_id=`cat ns2/$zone.zsk13.id`
|
||||
ksk14_id=`cat ns2/$zone.ksk14.id`
|
||||
zsk14_id=`cat ns2/$zone.zsk14.id`
|
||||
|
||||
ret=0
|
||||
echo_i "fetch zone stats data for a zone with many keys ($n)"
|
||||
# Fetch and check the dnssec sign statistics.
|
||||
if [ $PERL_XML ]; then
|
||||
getzones xml $zone x$n || ret=1
|
||||
fi
|
||||
if [ $PERL_JSON ]; then
|
||||
getzones json $zone j$n || ret=1
|
||||
fi
|
||||
# The output is gibberish, but at least make sure it does not crash.
|
||||
if [ $ret != 0 ]; then echo_i "failed"; fi
|
||||
status=`expr $status + $ret`
|
||||
n=`expr $n + 1`
|
||||
|
|
|
|||
|
|
@ -1045,6 +1045,7 @@
|
|||
./bin/tests/system/statschannel/clean.sh SH 2015,2016,2017,2018,2019,2020
|
||||
./bin/tests/system/statschannel/conftest.py PYTHON 2020
|
||||
./bin/tests/system/statschannel/fetch.pl PERL 2015,2016,2018,2019,2020
|
||||
./bin/tests/system/statschannel/generic.py PYTHON 2020
|
||||
./bin/tests/system/statschannel/helper.py PYTHON 2020
|
||||
./bin/tests/system/statschannel/mem-xml.pl PERL 2017,2018,2019,2020
|
||||
./bin/tests/system/statschannel/ns2/sign.sh SH 2019,2020
|
||||
|
|
|
|||
Loading…
Reference in a new issue