Commit a69be136 authored by alex's avatar alex
Browse files

Merge branch 'small-improvements-and-fixes' into 'master'

Small improvements and fixes

* fix qtype usage
* send an independent first request with multistreams
* fix average time with multistreams
* add comments

See merge request bortzmeyer/homer!19
parents cf1d18f4 92eb630c
...@@ -132,7 +132,7 @@ When repeating tests, you can add a delay between tests, with `--delay ...@@ -132,7 +132,7 @@ When repeating tests, you can add a delay between tests, with `--delay
N` or `-d N`, where N is the (possibly fractional) number of seconds N` or `-d N`, where N is the (possibly fractional) number of seconds
to wait. to wait.
### Mulitstreams ### Multistreams
When using Homer with DoH, the option `--multistreams` can be used When using Homer with DoH, the option `--multistreams` can be used
to specify that you want to take advantage of the HTTP/2 streams to specify that you want to take advantage of the HTTP/2 streams
...@@ -173,6 +173,9 @@ and [CURLINFO_PRETRANSFER_TIME](https://curl.haxx.se/libcurl/c/curl_easy_getinfo ...@@ -173,6 +173,9 @@ and [CURLINFO_PRETRANSFER_TIME](https://curl.haxx.se/libcurl/c/curl_easy_getinfo
Total elapsed time: 0.07 seconds (9.83 ms/request) Total elapsed time: 0.07 seconds (9.83 ms/request)
``` ```
Finally, note that when using multistreams, an extra DNS request is sent
to establish the connection. This request asks for the root NS.
### Monitoring with Nagios, Icinga, or similar software ### Monitoring with Nagios, Icinga, or similar software
......
...@@ -323,7 +323,7 @@ class RequestDoT(Request): ...@@ -323,7 +323,7 @@ class RequestDoT(Request):
class RequestDoH(Request): class RequestDoH(Request):
def __init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec): def __init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec):
Request.__init__(self, qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec) Request.__init__(self, qname, qtype=qtype, use_edns=edns, want_dnssec=dnssec)
self.message.id = 0 # DoH requests that self.message.id = 0 # DoH requests that
self.post = False self.post = False
self.head = False self.head = False
...@@ -645,6 +645,7 @@ class ConnectionDoH(Connection): ...@@ -645,6 +645,7 @@ class ConnectionDoH(Connection):
if self.multistreams: if self.multistreams:
self.multi = self.create_multi() self.multi = self.create_multi()
self.all_handles = [] self.all_handles = []
self.finished = { 'http': {} }
else: else:
self.curl_handle = create_handle(self) self.curl_handle = create_handle(self)
...@@ -653,6 +654,23 @@ class ConnectionDoH(Connection): ...@@ -653,6 +654,23 @@ class ConnectionDoH(Connection):
multi.setopt(pycurl.M_MAX_HOST_CONNECTIONS, 1) multi.setopt(pycurl.M_MAX_HOST_CONNECTIONS, 1)
return multi return multi
def init_multi(self):
    """Prime the multistreams connection with one standalone query.

    Performing a single query by itself establishes the connection
    before the real test queries are queued, and hence avoids starting
    the transfer of all the other queries simultaneously.  The priming
    query asks for the root NS because this should not impact the
    resolver cache.  The handle list and HTTP status counters are reset
    afterwards so this warm-up query is excluded from the statistics.
    """
    if verbose:
        print("Establishing multistreams connection...")
    # dot=False: multistreams is only meaningful for DoH
    request = create_request('.', qtype='NS', dot=False)
    try:
        self.do_test(request, synchronous=False)
    except (OpenSSL.SSL.Error, CustomException) as e:
        # Dropped the original "ok = False" here: that local was
        # assigned but never read anywhere in this method.
        error(e)
    # silent=True: do not print timing/results for the warm-up query
    self.perform_multi(silent=True)
    # Discard the warm-up handle and its HTTP status count so they do
    # not pollute the measurements of the actual test queries.
    self.all_handles = []
    self.finished = { 'http': {} }
def end(self): def end(self):
if not self.multistreams: if not self.multistreams:
self.curl_handle.close() self.curl_handle.close()
...@@ -667,7 +685,7 @@ class ConnectionDoH(Connection): ...@@ -667,7 +685,7 @@ class ConnectionDoH(Connection):
h.close() h.close()
self.multi.remove_handle(h) self.multi.remove_handle(h)
def perform_multi(self): def perform_multi(self, silent=False):
while 1: while 1:
ret, num_handles = self.multi.perform() ret, num_handles = self.multi.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM: if ret != pycurl.E_CALL_MULTI_PERFORM:
...@@ -681,13 +699,13 @@ class ConnectionDoH(Connection): ...@@ -681,13 +699,13 @@ class ConnectionDoH(Connection):
if not sync: if not sync:
n, handle_pass, handle_fail = self.multi.info_read() n, handle_pass, handle_fail = self.multi.info_read()
for handle in handle_pass: for handle in handle_pass:
self.read_result_handle(handle) self.read_result_handle(handle, silent=silent)
if ret != pycurl.E_CALL_MULTI_PERFORM: if ret != pycurl.E_CALL_MULTI_PERFORM:
break break
if not sync: if not sync:
n, handle_pass, handle_fail = self.multi.info_read() n, handle_pass, handle_fail = self.multi.info_read()
for handle in handle_pass: for handle in handle_pass:
self.read_result_handle(handle) self.read_result_handle(handle, silent=silent)
def send(self, handle): def send(self, handle):
handle.buffer = io.BytesIO() handle.buffer = io.BytesIO()
...@@ -718,16 +736,16 @@ class ConnectionDoH(Connection): ...@@ -718,16 +736,16 @@ class ConnectionDoH(Connection):
self.send(handle) self.send(handle)
self.receive(handle) self.receive(handle)
def read_result_handle(self, handle): def read_result_handle(self, handle, silent=False):
self.receive(handle) self.receive(handle)
handle.request.check_response() handle.request.check_response()
if show_time: if not silent and show_time:
self.print_time(handle) self.print_time(handle)
try: try:
self.finished['http'][handle.request.rcode] += 1 self.finished['http'][handle.request.rcode] += 1
except KeyError: except KeyError:
self.finished['http'][handle.request.rcode] = 1 self.finished['http'][handle.request.rcode] = 1
if display_results: if not silent and display_results:
print("Return code %s (%.2f ms):" % (handle.request.rcode, print("Return code %s (%.2f ms):" % (handle.request.rcode,
(handle.time - handle.pretime) * 1000)) (handle.time - handle.pretime) * 1000))
print(f"{handle.request.response}\n") print(f"{handle.request.response}\n")
...@@ -820,9 +838,9 @@ def print_result(connection, request, prefix=None, display_err=True): ...@@ -820,9 +838,9 @@ def print_result(connection, request, prefix=None, display_err=True):
def create_request(qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec, dot=dot, trunc=False): def create_request(qname, qtype=rtype, use_edns=edns, want_dnssec=dnssec, dot=dot, trunc=False):
if dot: if dot:
request = RequestDoT(qname, rtype, use_edns, want_dnssec) request = RequestDoT(qname, qtype, use_edns, want_dnssec)
else: else:
request = RequestDoH(qname, rtype, use_edns, want_dnssec) request = RequestDoH(qname, qtype, use_edns, want_dnssec)
if trunc: if trunc:
request.trunc_data() request.trunc_data()
else: else:
...@@ -889,6 +907,9 @@ def run_check_default(connection): ...@@ -889,6 +907,9 @@ def run_check_default(connection):
return ok return ok
def run_check_mime(connection, accept="application/dns-message", content_type="application/dns-message"): def run_check_mime(connection, accept="application/dns-message", content_type="application/dns-message"):
# change the MIME value and see what happens
# based on the RFC only application/dns-message must be supported, any
# other MIME type can be also supported, but nothing is said on that
if dot: if dot:
return True return True
ok = True ok = True
...@@ -917,6 +938,10 @@ def run_check_mime(connection, accept="application/dns-message", content_type="a ...@@ -917,6 +938,10 @@ def run_check_mime(connection, accept="application/dns-message", content_type="a
return ok return ok
def run_check_trunc(connection): def run_check_trunc(connection):
# send truncated DNS request to the server and expect a HTTP return code
# either equal to 200 or in the 400 range
# in case the server answers with 200, look for a FORMERR error in the DNS
# response
ok = True ok = True
test_name = 'Test truncated data' test_name = 'Test truncated data'
if verbose: if verbose:
...@@ -952,7 +977,11 @@ def run_check_trunc(connection): ...@@ -952,7 +977,11 @@ def run_check_trunc(connection):
else: else:
if dot: if dot:
ok = False ok = False
else: # a 400 response's status is acceptable else: # only a 400 range HTTP code is acceptable
# if we send garbage to the server, it seems reasonable that it
# does not fail, which means we don't accept a 500 range HTTP
# error code (even so it means the server failed to process the
# input data)
ok = (request.rcode >= 400 and request.rcode < 500) ok = (request.rcode >= 400 and request.rcode < 500)
print_result(connection, request, prefix=test_name, display_err=not ok) print_result(connection, request, prefix=test_name, display_err=not ok)
if verbose: if verbose:
...@@ -1263,7 +1292,7 @@ for connectTo in ip_set: ...@@ -1263,7 +1292,7 @@ for connectTo in ip_set:
input = open(ifile) input = open(ifile)
if not check: if not check:
if multistreams: if multistreams:
conn.finished = { 'http': {} } conn.init_multi()
for i in range (0, tests): for i in range (0, tests):
if tests > 1 and (verbose or display_results): if tests > 1 and (verbose or display_results):
print("\nTest %i" % i) print("\nTest %i" % i)
...@@ -1285,11 +1314,6 @@ for connectTo in ip_set: ...@@ -1285,11 +1314,6 @@ for connectTo in ip_set:
if not print_result(conn, request): if not print_result(conn, request):
ok = False ok = False
if tests > 1 and i == 0: if tests > 1 and i == 0:
if multistreams: # do the first query alone
# to establish the connection and hence avoid starting
# the transfer of all the other queries
conn.perform_multi()
conn.first_handle = conn.all_handles[0]
start2 = time.time() start2 = time.time()
if delay is not None: if delay is not None:
time.sleep(delay) time.sleep(delay)
...@@ -1333,10 +1357,7 @@ for connectTo in ip_set: ...@@ -1333,10 +1357,7 @@ for connectTo in ip_set:
extra = "" extra = ""
if not monitoring and (not check or verbose): if not monitoring and (not check or verbose):
time_tot = stop - start time_tot = stop - start
if multistreams: time_per_request = time_tot / tests * 1000
time_per_request = sum(handle.time - handle.pretime for handle in conn.all_handles) / tests * 1000
else:
time_per_request = time_tot / tests * 1000
print("\nTotal elapsed time: %.2f seconds (%.2f ms/request%s)" % (time_tot, time_per_request, extra)) print("\nTotal elapsed time: %.2f seconds (%.2f ms/request%s)" % (time_tot, time_per_request, extra))
if multistreams and verbose: if multistreams and verbose:
for rcode, n in conn.finished['http'].items(): for rcode, n in conn.finished['http'].items():
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment