Commit 69a7e745 authored by Alexandre's avatar Alexandre
Browse files

Merge branch 'master' into doh-option-header

parents 5dcb7d06 a69be136
......@@ -60,10 +60,14 @@ Possible options, besides `--dot`:
* --key KEYINBASE64: authenticates a DoT resolver with its public
key. Example: `homer.py --key "62lKu9HsDVbyiPenApnc4sfmSYTHOVfFgL3pyB+cBL4=" --dot 145.100.185.15 IN NS`
* --check: Run a set of tests (see below)
* --pipelining: on DoT, send several requests even before getting the
reply to the first one (may increase performance when you have
several requests)
* --multistreams: (DoH) Uses HTTP/2 streams (requires the --file option)
* --file INPUT_FILE: provide an input file with a list of domain names to query
(read the first line only, use --repeat N to read up to N lines of the file)
* --repeat N: repeat a test N times or read up to N lines of a file
* --no-display-results: do not output DNS response
### Check
......@@ -129,7 +133,7 @@ When repeating tests, you can add a delay between tests, with `--delay
N` or `-d N`, where N is the (possibly fractional) number of seconds
to wait.
### Mulitstreams
### Multistreams
When using Homer with DoH, the option `--multistreams` can be used
to specify that you want to take advantage of the HTTP/2 streams
......@@ -147,7 +151,6 @@ For example :
In order to focus on the time per request, you can suppress the
output by using the option `--no-display-results`.
This option only works with `--multistreams`.
Two modes are available. By default each response is read,
checked and displayed as soon as it is received.
......@@ -162,16 +165,18 @@ and [CURLINFO_PRETRANSFER_TIME](https://curl.haxx.se/libcurl/c/curl_easy_getinfo
```
% ./homer.py --multistreams --file input_file --repeat 5 --no-display-results --time https://doh.powerdns.org
0 36.165 ms 44.773 ms 8.608 ms
1 0.142 ms 8.580 ms 8.438 ms
3 0.095 ms 9.223 ms 9.128 ms
2 0.103 ms 10.282 ms 10.179 ms
4 0.104 ms 10.068 ms 9.964 ms
0 (200) 41.995 ms 51.409 ms 9.414 ms
1 (200) 0.156 ms 8.648 ms 8.492 ms
2 (200) 0.121 ms 8.494 ms 8.373 ms
3 (200) 0.120 ms 11.185 ms 11.065 ms
4 (200) 0.103 ms 11.922 ms 11.819 ms
Total elapsed time: 0.07 seconds (9.26 ms/request)
OK
Total elapsed time: 0.07 seconds (9.83 ms/request)
```
Finally note that when using multistreams an extra DNS request is sent
to initiate the connection. This request asks for the root NS.
### Monitoring with Nagios, Icinga, or similar software
......
......@@ -53,6 +53,8 @@ delay = None
forceIPv4 = False
forceIPv6 = False
connectTo = None
pipelining = False
max_in_flight = 20
multistreams = False
sync = False
display_results = True
......@@ -85,6 +87,9 @@ DOH_HEAD = 2
mandatory_levels = {"legal": 30, "necessary": 20, "nicetohave": 10}
TIMEOUT_CONN = 2
TIMEOUT_READ = 1
SLEEP_TIMEOUT = 0.5
MAX_DURATION = 10
def error(msg=None, exit=True):
if msg is None:
......@@ -103,7 +108,51 @@ def error(msg=None, exit=True):
def usage(msg=None):
if msg:
print(msg,file=sys.stderr)
print("Usage: %s [--dot] url-or-servername domain-name [DNS type]" % sys.argv[0], file=sys.stderr)
print("Usage: %s [options] url-or-servername [domain-name [DNS type]]" % sys.argv[0], file=sys.stderr)
print("""Options
-t --dot Use DoT (by default use DoH)
-P --post --POST Use HTTP POST method for all transfers (DoH only)
-e --head --HEAD Use HTTP HEAD method for all transfers (DoH only)
-r --repeat <N> Perform N times the query. If used with -f, read up to
<N> rows of the <file>.
-d --delay <T> Time to wait in seconds between each synchronous
request (only with --repeat)
-f --file <file> Read domain names from <file>, one per row with an
optional DNS type
--check Perform a set of predefined tests.
--mandatory-level <level>
Define the <level> of test to perform (only with
--check)
Available <level> : legal, necessary, nicetohave
--multistreams Use HTTP/2 streams, needs an input file with -f
(DoH only)
--sync Process received queries synchronously (only with
--multistreams)
--no-display-results
Disable output of DNS response (only with
--multistreams)
--time Display the time elapsed for the query (only with
--multistreams)
--dnssec Request DNSSEC data (signatures)
--noedns Disable EDNS, default is to indicate EDNS support
--ecs Send ECS to authoritative servers, default is to
refuse it
--key <key> Authenticate a DoT resolver with its public <key> in
base64 (DoT only)
--nosni Do not perform SNI (DoT only)
-V --vhost <vhost> Use a specific virtual host
-k --insecure Do not check the certificate
-4 --v4only Force IPv4 resolution of url-or-servername
-6 --v6only Force IPv6 resolution of url-or-servername
-v --verbose Make the program more talkative
--debug Make the program even more talkative than -v
-h --help Print this message
url-or-servername The URL or domain name of the DoT/DoH server
domain-name The domain name to resolve, not required if -f is
provided
DNS type The DNS record type to resolve, default AAAA
""", file=sys.stderr)
print("See the README.md for more details.", file=sys.stderr)
def is_valid_hostname(name):
......@@ -227,11 +276,9 @@ def timeout_connection(signum, frame):
class TimeoutConnectionError(Exception):
pass
class CustomException(Exception):
pass
class Request:
def __init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec):
if no_ecs:
......@@ -243,6 +290,7 @@ class Request:
use_edns=use_edns, want_dnssec=want_dnssec, options=options)
self.message.flags |= dns.flags.AD # Ask for validation
self.ok = True
self.i = 0 # request's number on the connection (default to the first)
def trunc_data(self):
self.data = self.message.to_wire()
......@@ -255,6 +303,8 @@ class Request:
class RequestDoT(Request):
def check_response(self, debug=False):
if self.response is None:
raise Exception("No reply received")
ok = self.ok
if not self.rcode:
self.ok = False
......@@ -267,10 +317,15 @@ class RequestDoT(Request):
return False
return self.ok
def store_response(self, rcode, response, size):
self.rcode = True
self.response = response
self.response_size = size
class RequestDoH(Request):
def __init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec):
Request.__init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec)
Request.__init__(self, qname, qtype=qtype, use_edns=edns, want_dnssec=dnssec)
self.message.id = 0 # DoH requests that
self.post = False
self.head = False
......@@ -352,7 +407,7 @@ class Connection:
class ConnectionDoT(Connection):
def __init__(self, server, servername=None, connect=None, forceIPv4=False, forceIPv6=False,
verbose=verbose, debug=debug, insecure=insecure):
pipelining=pipelining, verbose=verbose, debug=debug, insecure=insecure):
Connection.__init__(self, server, servername=servername, connect=connect,
forceIPv4=forceIPv4, forceIPv6=forceIPv6, dot=True,
verbose=verbose, debug=debug, insecure=insecure)
......@@ -379,7 +434,16 @@ class ConnectionDoT(Connection):
error(f'Could not connect to "{server}"')
else:
print(f'Could not connect to "{server}" on {connect}')
self.pipelining = pipelining
if pipelining:
self.all_requests = [] # Currently, we load everything in memory
# since we want to keep everything,
# anyway. May be in the future, if we don't
# want to keep individual results, we'll use
# an iterator to fill a smaller table.
# all_requests is indexed by its rank in the input file.
self.pending = {} # pending is indexed by the query ID, and its
# maximum size is max_in_flight.
def connect(self, addr, sock_family):
signal.alarm(TIMEOUT_CONN)
......@@ -442,6 +506,8 @@ class ConnectionDoT(Connection):
if key_string != key:
error("Key error: expected \"%s\", got \"%s\"" % (key, key_string))
signal.alarm(0)
if pipelining:
self.sock.settimeout(TIMEOUT_READ)
return True
def end(self):
......@@ -454,23 +520,69 @@ class ConnectionDoT(Connection):
length = len(data)
self.session.send(length.to_bytes(2, byteorder='big') + data)
def receive_data(self, request, dump=False):
buf = self.session.recv(2)
request.response_size = int.from_bytes(buf, byteorder='big')
buf = self.session.recv(request.response_size)
def receive_data(self, dump=False):
try:
buf = self.session.recv(2)
except OpenSSL.SSL.WantReadError:
return (False, None, None)
size = int.from_bytes(buf, byteorder='big')
buf = self.session.recv(size)
if dump:
dump_data(buf, 'data recv')
request.response = dns.message.from_wire(buf)
request.rcode = True
response = dns.message.from_wire(buf)
return (True, response, size)
def send_and_receive(self, request, dump=False):
self.send_data(request.data, dump=dump)
self.receive_data(request, dump=dump)
rcode, response, size = self.receive_data(dump=dump)
request.store_response(rcode, response, size)
def do_test(self, request, synchronous=True):
self.send_and_receive(request)
request.check_response(self.debug)
self.send_data(request.data)
if synchronous:
rcode, response, size = self.receive_data()
request.store_response(rcode, response, size)
request.check_response(self.debug)
def pipelining_add_request(self, request):
self.all_requests.append({'request': request, 'response': None}) # No answer yet
def pipelining_fill_pending(self, index):
if index < len(self.all_requests):
request = self.all_requests[index]['request']
id = request.message.id
# TODO check there is no duplicate in IDs
self.pending[id] = (False, index, request)
self.do_test(request, synchronous = False)
def pipelining_init_pending(self, max_in_flight):
for i in range(0, max_in_flight):
if i == len(self.all_requests):
break
self.pipelining_fill_pending(i)
return i
def read_result(self, connection, requests):
rcode, response, size = self.receive_data() # TODO can raise
# OpenSSL.SSL.ZeroReturnError
# if the
# conenction was
# closed
if not rcode:
if display_results:
print("TIMEOUT")
return None
id = response.id
if id not in requests:
raise Exception("Received response for ID %s which is unexpected" % id)
over, rank, request = requests[id]
self.all_requests[rank]['response'] = (rcode, response, size)
requests[id] = (True, rank, request)
if display_results:
print()
print(response)
# TODO a timeout if some responses are lost?
return id
def create_handle(connection, header=doh_header_default):
def reset_opt_default(handle):
......@@ -536,6 +648,7 @@ class ConnectionDoH(Connection):
if self.multistreams:
self.multi = self.create_multi()
self.all_handles = []
self.finished = { 'http': {} }
else:
self.curl_handle = create_handle(self)
......@@ -544,6 +657,23 @@ class ConnectionDoH(Connection):
multi.setopt(pycurl.M_MAX_HOST_CONNECTIONS, 1)
return multi
def init_multi(self):
# perform a first query alone
# to establish the connection and hence avoid starting
# the transfer of all the other queries simultaneously
# query the root NS because this should not impact the resover cache
if verbose:
print("Establishing multistreams connection...")
request = create_request('.', qtype='NS', dot=False)
try:
self.do_test(request, synchronous=False)
except (OpenSSL.SSL.Error, CustomException) as e:
ok = False
error(e)
self.perform_multi(silent=True)
self.all_handles = []
self.finished = { 'http': {} }
def end(self):
if not self.multistreams:
self.curl_handle.close()
......@@ -558,7 +688,7 @@ class ConnectionDoH(Connection):
h.close()
self.multi.remove_handle(h)
def perform_multi(self):
def perform_multi(self, silent=False):
while 1:
ret, num_handles = self.multi.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
......@@ -572,13 +702,13 @@ class ConnectionDoH(Connection):
if not sync:
n, handle_pass, handle_fail = self.multi.info_read()
for handle in handle_pass:
self.read_result_handle(handle)
self.read_result_handle(handle, silent=silent)
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
if not sync:
n, handle_pass, handle_fail = self.multi.info_read()
for handle in handle_pass:
self.read_result_handle(handle)
self.read_result_handle(handle, silent=silent)
def send(self, handle):
handle.buffer = io.BytesIO()
......@@ -609,15 +739,16 @@ class ConnectionDoH(Connection):
self.send(handle)
self.receive(handle)
def read_result_handle(self, handle):
def read_result_handle(self, handle, silent=False):
self.receive(handle)
handle.request.check_response()
if show_time:
print(f'{handle.request.i:3d}', end=' ')
print(f'{handle.pretime * 1000:8.3f} ms', end=' ')
print(f'{handle.time * 1000:8.3f} ms', end=' ')
print(f'{(handle.time - handle.pretime) * 1000:8.3f} ms')
if display_results:
if not silent and show_time:
self.print_time(handle)
try:
self.finished['http'][handle.request.rcode] += 1
except KeyError:
self.finished['http'][handle.request.rcode] = 1
if not silent and display_results:
print("Return code %s (%.2f ms):" % (handle.request.rcode,
(handle.time - handle.pretime) * 1000))
print(f"{handle.request.response}\n")
......@@ -628,6 +759,13 @@ class ConnectionDoH(Connection):
for handle in self.all_handles:
self.read_result_handle(handle)
def print_time(self, handle):
print(f'{handle.request.i:3d}', end=' ')
print(f'({handle.request.rcode})', end=' ')
print(f'{handle.pretime * 1000:8.3f} ms', end=' ')
print(f'{handle.time * 1000:8.3f} ms', end=' ')
print(f'{(handle.time - handle.pretime) * 1000:8.3f} ms')
def do_test(self, request, synchronous=True):
if synchronous:
handle = self.curl_handle
......@@ -663,7 +801,9 @@ def print_result(connection, request, prefix=None, display_err=True):
size = request.response_size
if (dot and rcode) or (not dot and rcode == 200):
if not monitoring:
if not check or verbose:
if not dot and show_time:
connection.print_time(connection.curl_handle)
if display_results and (not check or verbose):
print(msg)
else:
if expect is not None and expect not in str(request.response):
......@@ -701,9 +841,9 @@ def print_result(connection, request, prefix=None, display_err=True):
def create_request(qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec, dot=dot, trunc=False):
if dot:
request = RequestDoT(qname, rtype, use_edns, want_dnssec)
request = RequestDoT(qname, qtype, use_edns, want_dnssec)
else:
request = RequestDoH(qname, rtype, use_edns, want_dnssec)
request = RequestDoH(qname, qtype, use_edns, want_dnssec)
if trunc:
request.trunc_data()
else:
......@@ -770,6 +910,9 @@ def run_check_default(connection):
return ok
def run_check_mime(connection, header):
# change the MIME value and see what happens
# based on the RFC only application/dns-message must be supported, any
# other MIME type can be also supported, but nothing is said on that
if dot:
return True
ok = True
......@@ -795,6 +938,10 @@ def run_check_mime(connection, header):
return ok
def run_check_trunc(connection):
# send truncated DNS request to the server and expect a HTTP return code
# either equal to 200 or in the 400 range
# in case the server answers with 200, look for a FORMERR error in the DNS
# response
ok = True
test_name = 'Test truncated data'
if verbose:
......@@ -830,7 +977,11 @@ def run_check_trunc(connection):
else:
if dot:
ok = False
else: # a 400 response's status is acceptable
else: # only a 400 range HTTP code is acceptable
# if we send garbage to the server, it seems reasonable that it
# does not fail, which means we don't accept a 500 range HTTP
# error code (even so it means the server failed to process the
# input data)
ok = (request.rcode >= 400 and request.rcode < 500)
print_result(connection, request, prefix=test_name, display_err=not ok)
if verbose:
......@@ -870,11 +1021,13 @@ if not monitoring:
message = None
try:
optlist, args = getopt.getopt (sys.argv[1:], "hvPkeV:r:f:d:t46H:",
["help", "verbose", "debug", "dot", "head",
"insecure", "POST", "vhost=", "multistreams",
["help", "verbose", "debug", "dot",
"head", "HEAD", "post", "POST",
"insecure", "vhost=", "multistreams",
"pipelining", "max-in-flight=", "key=",
"dnssec", "noedns", "ecs", "nosni",
"sync", "no-display-results", "time",
"dnssec", "noedns", "ecs", "repeat=", "file=", "delay=",
"key=", "nosni",
"file=", "repeat=", "delay=",
"v4only", "v6only",
"header=", "check", "mandatory-level="])
for option, value in optlist:
......@@ -927,28 +1080,47 @@ if not monitoring:
ifile = value
elif option == "--key":
key = value
elif option == "-4" or option == "v4only":
elif option == "-4" or option == "--v4only":
forceIPv4 = True
elif option == "-6" or option == "v6only":
elif option == "-6" or option == "--v6only":
forceIPv6 = True
elif option == "--pipelining":
pipelining = True
elif option == "--max-in-flight":
max_in_flight = int(value)
if max_in_flight <= 0:
error("--max_in_flight but be > 0")
if max_in_flight >= 65536:
error("Because of a limit of the DNS protocol (the size of the query ID) --max_in_flight must be < 65 536")
elif option == "--check":
check = True
display_results = False
elif option == "--no-display-results":
display_results = False
elif option == "--mandatory-level":
mandatory_level = value
elif option == "--header" or option == "-H":
doh_header.append(value)
else:
error("Unknown option %s" % option)
except getopt.error as reason:
except (getopt.error, ValueError) as reason:
usage(reason)
sys.exit(1)
if delay is not None and multistreams:
error("--delay makes no sense with multistreams")
if tests <= 1 and delay is not None:
error("--delay makes no sense if there is no repetition")
if post and head:
usage("POST or HEAD but not both")
if not dot and pipelining:
usage("Pipelining is only accepted for DoT")
sys.exit(1)
if dot and (post or head):
usage("POST or HEAD makes non sense for DoT")
sys.exit(1)
if post and head:
usage("POST or HEAD but not both")
sys.exit(1)
if pipelining and ifile is None:
usage("Pipelining requires an input file")
sys.exit(1)
if check and multistreams:
usage("--check and --multistreams are not compatible")
......@@ -962,11 +1134,8 @@ if not monitoring:
if sync and not multistreams:
usage("--sync cannot be used without --multistreams")
sys.exit(1)
if not display_results and not multistreams:
usage("--no-display-results cannot be used without --multistreams")
sys.exit(1)
if show_time and not multistreams:
usage("--time cannot be used without --multistreams")
if show_time and dot:
usage("--time cannot be used with --dot")
sys.exit(1)
if not edns and not no_ecs:
usage("ECS requires EDNS")
......@@ -1048,9 +1217,6 @@ else: # Monitoring plugin
if dot and (post or head):
print("POST or HEAD makes no sense for DoT")
sys.exit(STATE_UNKNOWN)
if dot and multistreams:
print("Multi-streams makes no sense for DoT")
sys.exit(STATE_UNKNOWN)
if dot and path:
print("URL path makes no sense for DoT")
sys.exit(STATE_UNKNOWN)
......@@ -1111,7 +1277,7 @@ for connectTo in ip_set:
if dot:
conn = ConnectionDoT(url, servername=extracheck, connect=connectTo, verbose=verbose,
debug=debug, forceIPv4=forceIPv4, forceIPv6=forceIPv6,
insecure=insecure)
pipelining=pipelining, insecure=insecure)
else:
conn = ConnectionDoH(url, servername=extracheck, connect=connectTo, verbose=verbose,
debug=debug, forceIPv4=forceIPv4, forceIPv6=forceIPv6,
......@@ -1132,6 +1298,8 @@ for connectTo in ip_set:
if ifile is not None:
input = open(ifile)
if not check:
if multistreams:
conn.init_multi()
for i in range (0, tests):
if tests > 1 and (verbose or display_results):
print("\nTest %i" % i)
......@@ -1143,54 +1311,73 @@ for connectTo in ip_set:
request.head = head
request.post = post
request.header = doh_header
try:
conn.do_test(request, synchronous = not multistreams)
except (OpenSSL.SSL.Error, CustomException) as e:
ok = False
error(e)
break
if not multistreams:
if not print_result(conn, request):
if not pipelining:
try:
conn.do_test(request, synchronous = not multistreams)
except (OpenSSL.SSL.Error, CustomException) as e:
ok = False
if tests > 1 and i == 0:
if multistreams: # do the first query alone
# to establish the connection and hence avoid starting
# the transfer of all the other queries
conn.perform_multi()
conn.first_handle = conn.all_handles[0]
start2 = time.time()
if delay is not None:
time.sleep(delay)
error(e)
break
if not multistreams:
if not print_result(conn, request):
ok = False
if tests > 1 and i == 0:
start2 = time.time()
if delay is not None:
time.sleep(delay)
else: # We do pipelining
conn.pipelining_add_request(request)
if multistreams:
conn.perform_multi()
if sync:
conn.read_results()
if dot and pipelining:
print("")
done = 0
current = conn.pipelining_init_pending(max_in_flight)
while done < tests:
if time.time() > start + MAX_DURATION: # if we send thousands of requests
# MAX_DURATION will be reached
# need to increase MAX_DURATION based
# on the number of queries
# or to define a relation such as
# f(tests) = MAX_DURATION
print("Elapsed time too long, %i requests never got a reply" % (tests-done))
ok = False
break
id = conn.read_result(conn, conn.pending)
if id is None: # Probably a timeout
time.sleep(SLEEP_TIMEOUT)
continue
done += 1
over, rank, request = conn.pending[id]
if not over:
error("Internal error, request %i should be over" % id)
if current < len(conn.all_requests):
conn.pipelining_fill_pending(current)
current += 1
else:
ok = run_check(conn) and ok # need to run run_check first
stop = time.time()
if tests > 1 and not multistreams:
extra = " , %.2f ms/request if we ignore the first one" % ((stop-start2)*1000/(tests-1))
if tests > 1 and not pipelining and not multistreams:
extra = ", %.2f ms/request if we ignore the first one" % ((stop-start2)*1000/(tests-1))
else:
extra = ""
if not monitoring and (not check or verbose):
time_tot = stop - start
if multistreams:
time_per_request = sum(handle.time - handle.pretime for handle in conn.all_handles) / tests * 1000
else:
time_per_request = time_tot / tests * 1000
time_per_request = time_tot / tests * 1000
print("\nTotal elapsed time: %.2f seconds (%.2f ms/request%s)" % (time_tot, time_per_request, extra))
if multistreams and verbose:
for rcode, n in conn.finished['http'].items():