From: Ondrej Lichtner <olichtne@redhat.com>
This extends the format_res_data method to support lists. This is very useful for test modules that return multiple entries of result data, for example Netperf with multiple runs.
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 lnst/Common/NetTestCommand.py | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/lnst/Common/NetTestCommand.py b/lnst/Common/NetTestCommand.py
index 7385e47..409076b 100644
--- a/lnst/Common/NetTestCommand.py
+++ b/lnst/Common/NetTestCommand.py
@@ -332,6 +332,13 @@ class NetTestCommandGeneric(object):
             if type(value) == dict:
                 formatted_data += level*4*" " + str(key) + ":\n"
                 formatted_data += self.format_res_data(value, level+1)
+            if type(value) == list:
+                formatted_data += level*4*" " + str(key) + ":\n"
+                for i in range(0, len(value)):
+                    formatted_data += (level+1)*4*" " +\
+                                      "item %d:" % (i+1) + "\n"
+                    formatted_data += self.format_res_data(value[i],
+                                                           level+2)
             else:
                 formatted_data += level*4*" " + str(key) + ":" + \
                                   (max_key_len-len(key))*" " + \
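For a quick illustration of what the new branch renders, here is a standalone sketch of the extended formatting logic. It is simplified: the max_key_len alignment from the real method is omitted, elif keeps the branches exclusive, and the input dict is made up for the example.

    def format_res_data(data, level=0):
        formatted = ""
        for key, value in data.items():
            if type(value) == dict:
                formatted += level*4*" " + str(key) + ":\n"
                formatted += format_res_data(value, level+1)
            elif type(value) == list:
                # new: render each list entry as an indented "item N:" block
                formatted += level*4*" " + str(key) + ":\n"
                for i in range(0, len(value)):
                    formatted += (level+1)*4*" " + "item %d:\n" % (i+1)
                    formatted += format_res_data(value[i], level+2)
            else:
                formatted += level*4*" " + str(key) + ": " + str(value) + "\n"
        return formatted

    print(format_res_data({"runs": [{"rate": 940.0}, {"rate": 955.0}]}))
    # runs:
    #     item 1:
    #         rate: 940.0
    #     item 2:
    #         rate: 955.0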
From: Ondrej Lichtner <olichtne@redhat.com>
The first argument of the super() call should be the current (child) class, not the parent class. This patch fixes the issue.
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 test_modules/Netperf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 8d375f3..959bcd9 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -19,7 +19,7 @@ class Netperf(TestGeneric):
                        "SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
 
     def __init__(self, command):
-        super(TestGeneric, self).__init__(command)
+        super(Netperf, self).__init__(command)
 
         self._role = self.get_mopt("role")
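A minimal sketch of the bug being fixed, with illustrative class names rather than the real LNST ones: passing the parent class as the first argument makes super() start the method lookup after the parent, so the parent's __init__ is skipped.

    class Base(object):
        def __init__(self, command):
            self.command = command

    class Child(Base):
        def __init__(self, command):
            # correct: lookup starts after Child in the MRO, so Base.__init__ runs
            super(Child, self).__init__(command)
            # wrong: super(Base, self).__init__(command) would start the lookup
            # after Base, i.e. at object, and Base.__init__ would never run

    c = Child("netperf -H 192.168.1.1")
    print(c.command)  # netperf -H 192.168.1.1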
From: Ondrej Lichtner <olichtne@redhat.com>
The Netperf module now takes an additional option "num_parallel" that indicates how many netperf clients should be run in parallel. When the commands finish, their results are summed into a single result that is reported back. Using this feature automatically disables the confidence feature, because the combination doesn't make sense. It is recommended to use this feature in conjunction with the parameter "runs", which will run this setup several times and compute the standard deviation.
This patch is inspired by super_netperf: https://github.com/borkmann/stuff/blob/master/super_netperf
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 test_modules/Netperf.py | 67 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 13 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 959bcd9..ecd8b7f 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -35,6 +35,7 @@ class Netperf(TestGeneric):
         self._bind = self.get_opt("bind", opt_type="addr")
         self._family = self.get_opt("family")
         self._cpu_util = self.get_opt("cpu_util")
+        self._num_parallel = int(self.get_opt("num_parallel", default=1))
 
         self._runs = self.get_opt("runs", default=1)
         if self._runs > 1 and self._confidence is not None:
@@ -85,7 +86,7 @@ class Netperf(TestGeneric):
                          "can use other tests, but test result may not be correct.")
         cmd += " -t %s" % self._testname
 
-        if self._confidence is not None:
+        if self._confidence is not None and self._num_parallel <= 1:
            """
            confidence level that Netperf should try to achieve
            """
@@ -104,6 +105,15 @@ class Netperf(TestGeneric):
            """
            custom options for netperf
            """
            cmd += " %s" % self._netperf_opts
+
+        if self._num_parallel > 1:
+            """
+            wait 1 second before starting the data transfer
+            taken from the super_netperf script, can be removed if it
+            doesn't make sense
+            """
+            cmd += " -s 1"
+
         # Print only relevant output
         cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
@@ -219,6 +229,25 @@ class Netperf(TestGeneric):
         return {"rate": threshold_rate,
                 "unit": threshold_unit_type}
 
+    def _sum_results(self, first, second):
+        result = {}
+
+        #add rates
+        if first["unit"] == second["unit"]:
+            result["unit"] = first["unit"]
+            result["rate"] = first["rate"] + second["rate"]
+
+        # netperf measures the complete cpu utilization of the machine,
+        # so both second and first should be +- the same number
+        if "LOCAL_CPU_UTIL" in first and "LOCAL_CPU_UTIL" in second:
+            result["LOCAL_CPU_UTIL"] = first["LOCAL_CPU_UTIL"]
+
+        if "REMOTE_CPU_UTIL" in first and "REMOTE_CPU_UTIL" in second:
+            result["REMOTE_CPU_UTIL"] = first["REMOTE_CPU_UTIL"]
+
+        #ignoring confidence because it doesn't make sense to sum those
+        return result
+
     def _run_server(self, cmd):
         logging.debug("running as server...")
         server = ShellProcess(cmd)
@@ -240,18 +269,30 @@ class Netperf(TestGeneric):
         for i in range(1, self._runs+1):
             if self._runs > 1:
                 logging.info("Netperf starting run %d" % i)
-            client = ShellProcess(cmd)
-            try:
-                ret_code = client.wait()
-                rv += ret_code
-            except OSError as e:
-                if e.errno == errno.EINTR:
-                    client.kill()
-
-            output = client.read_nonblocking()
-
-            if ret_code == 0:
-                results.append(self._parse_output(output))
+            clients = []
+            client_results = []
+            for i in range(0, self._num_parallel):
+                clients.append(ShellProcess(cmd))
+
+            for client in clients:
+                try:
+                    ret_code = client.wait()
+                    rv += ret_code
+                except OSError as e:
+                    if e.errno == errno.EINTR:
+                        client.kill()
+
+                if ret_code == 0:
+                    output = client.read_nonblocking()
+                    client_results.append(self._parse_output(output))
+
+            if len(client_results) > 0:
+                #accumulate all the parallel results into one
+                result = client_results[0]
+                for res in client_results[1:]:
+                    result = self._sum_results(result, res)
+
+                results.append(result)
                 rates.append(results[-1]["rate"])
 
         if results > 1:
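The aggregation in miniature, as a sketch using the standard subprocess module in place of LNST's ShellProcess; the netperf command line and the THROUGHPUT= parsing are assumptions for illustration, not taken from the module:

    import subprocess

    num_parallel = 4
    # -s 1 delays the data transfer so all clients start roughly together
    cmd = "netperf -H 192.168.1.1 -t TCP_STREAM -s 1 -- -k THROUGHPUT"

    # start every client first so they actually run concurrently ...
    clients = [subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
               for _ in range(num_parallel)]

    # ... then collect the outputs and sum the individual rates into one result
    total_rate = 0.0
    for client in clients:
        out, _ = client.communicate()
        if client.returncode == 0:
            for line in out.decode().splitlines():
                if line.startswith("THROUGHPUT="):
                    total_rate += float(line.split("=", 1)[1])

    print("aggregate rate: %.2f" % total_rate)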
From: Ondrej Lichtner <olichtne@redhat.com>
When the Netperf client is run multiple times, we used to report the standard deviation as just the rate deviation and create a confidence interval around the mean value from it. Instead, we should report the standard deviation separately and compute the interval as a multiple of the standard deviation. For now it is set to 2 times the standard deviation, based on the 68-95-99.7 rule, because that seems comparable to the intervals reported when -I99 is set for the Netperf client. But we should investigate this more...
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 test_modules/Netperf.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index ecd8b7f..10c993b 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -304,7 +304,10 @@ class Netperf(TestGeneric):
         rate = 0.0
 
         if len(rates) > 1:
-            rate_deviation = std_deviation(rates)
+            # setting deviation to 2xstd_deviation because of the 68-95-99.7
+            # rule this seems comparable to the -I 99 netperf setting
+            res_data["std_deviation"] = std_deviation(rates)
+            rate_deviation = 2*res_data["std_deviation"]
         elif len(rates) == 1 and self._confidence is not None:
             result = results[0]
             rate_deviation = rate * (result["confidence"][1] / 100)
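A worked example of the new reporting; std_deviation below is a stand-in for the helper the module imports, and the rates are made up:

    import math

    def std_deviation(values):
        # stand-in for the std_deviation helper used by Netperf.py
        mean = sum(values) / len(values)
        return math.sqrt(sum((v - mean)**2 for v in values) / len(values))

    rates = [940.0, 955.0, 948.0, 951.0]   # Mbit/s over 4 runs
    rate = sum(rates) / len(rates)         # 948.5
    sd = std_deviation(rates)              # 5.5
    rate_deviation = 2 * sd                # 11.0
    # per the 68-95-99.7 rule, ~95% of runs should fall within
    # rate +- rate_deviation, i.e. roughly 937.5 .. 959.5 Mbit/s
    print("%.1f +- %.1f (std_deviation %.1f)" % (rate, rate_deviation, sd))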
2015-11-16 14:27 GMT+01:00 <olichtne@redhat.com>:
> This extends the format_res_data method to support lists. This is very
> useful for test modules that return multiple entries of result data, for
> example Netperf with multiple runs.
>
> [...]
Ack to series.
Acked-by: Jiri Prochazka <jprochaz@redhat.com>
On Mon, Nov 16, 2015 at 02:27:06PM CET, olichtne@redhat.com wrote:
> This extends the format_res_data method to support lists. This is very
> useful for test modules that return multiple entries of result data, for
> example Netperf with multiple runs.
>
> [...]
Ack to series.
Acked-by: Jan Tluka <jtluka@redhat.com>