From: Ondrej Lichtner olichtne@redhat.com
The function is generic enough and useful enough to move it into the lnst.RecipeCommon.Perf.Results module instead of having it as a helper method for flow average evaluation.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Evaluators/BaselineFlowAverageEvaluator.py | 17 +++++++---------- lnst/RecipeCommon/Perf/Results.py | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 3f49ab0..ae53bed 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineFlowAverageEvaluator(BaseEvaluator): @@ -30,16 +31,16 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False result_text.append("No baseline found for this flow") else: - generator_diff = _result_averages_difference( - result.generator_results, - baseline.generator_results) + generator_diff = result_averages_difference( + result.generator_results, baseline.generator_results + ) result_text.append( "Generator average is {:.2f}% different from the baseline generator average" .format(generator_diff))
- receiver_diff = _result_averages_difference( - result.receiver_results, - baseline.receiver_results) + receiver_diff = result_averages_difference( + result.receiver_results, baseline.receiver_results + ) result_text.append( "Receiver average is {:.2f}% different from the baseline receiver average" .format(receiver_diff)) @@ -51,7 +52,3 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False
recipe.add_result(comparison_result, "\n".join(result_text)) - - -def _result_averages_difference(a, b): - return 100 - ((a.average / b.average)*100) diff --git a/lnst/RecipeCommon/Perf/Results.py b/lnst/RecipeCommon/Perf/Results.py index 4591447..e0d80aa 100644 --- a/lnst/RecipeCommon/Perf/Results.py +++ b/lnst/RecipeCommon/Perf/Results.py @@ -144,3 +144,8 @@ class ParallelPerfResult(PerfResult, PerfList): return self[0].unit else: return None + +def result_averages_difference(a, b): + if a is None or b is None: + return None + return 100 - ((a.average / b.average) * 100)
From: Ondrej Lichtner olichtne@redhat.com
The evaluator is based on how reporting works for CPU measurements - separating the individual cpus by the host machine - and uses a comparison method based on how flow average evaluation implements it.
v2: * fixed zero division comparison error * fixed error when baseline not found
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Evaluators/BaselineCPUAverageEvaluator.py | 63 +++++++++++++++---- 1 file changed, 51 insertions(+), 12 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py index de0b83d..1bee283 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseCPUMeasurement import ( CPUMeasurementResults, AggregatedCPUMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator): @@ -13,24 +14,62 @@ class BaselineCPUAverageEvaluator(BaseEvaluator): self._pass_difference = pass_difference
def evaluate_results(self, recipe, results): - for result in results: - baseline = self.get_baseline(recipe, result) - self._compare_result_with_baseline(recipe, result, baseline) + for host_results in self._divide_results_by_host(results).values(): + self._evaluate_host_results(recipe, host_results)
def get_baseline(self, recipe, result): return None
- def _compare_result_with_baseline(self, recipe, result, baseline): + def _divide_results_by_host(self, results): + results_by_host = {} + for result in results: + if result.host not in results_by_host: + results_by_host[result.host] = [] + results_by_host[result.host].append(result) + return results_by_host + + def _evaluate_host_results(self, recipe, host_results): comparison_result = True result_text = [ - "CPU Baseline average evaluation".format(), - "Configured {}% difference as acceptable".format(self._pass_difference), + "CPU Baseline average evaluation for Host {hostid}:".format( + hostid=host_results[0].host.hostid + ), + "Configured {diff}% difference as acceptable".format( + diff=self._pass_difference + ), ] - if baseline is None: - comparison_result = False - result_text.append("No baseline found for this CPU measurement") - else: - result_text.append("I don't know how to compare CPU averages yet!!!") - comparison_result = False + pairs = [ + (result, self.get_baseline(recipe, result)) + for result in host_results + ] + for result, baseline in pairs: + if baseline is None: + result_text.append( + "CPU {cpuid}: no baseline found for ".format( + cpuid=result.cpu + ) + ) + else: + try: + difference = result_averages_difference( + result.utilization, baseline.utilization + ) + + if abs(difference) > self._pass_difference: + comparison_result = False + + result_text.append( + "CPU {cpuid}: utilization {diff:.2f}% {direction} than baseline".format( + cpuid=result.cpu, + diff=abs(difference), + direction="higher" if difference >= 0 else "lower", + ) + ) + except ZeroDivisionError: + result_text.append( + "CPU {cpuid}: zero division by baseline".format( + cpuid=result.cpu + ) + )
recipe.add_result(comparison_result, "\n".join(result_text))
From: Ondrej Lichtner olichtne@redhat.com
PEP8 best practice prefers absolute imports over relative ones.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Perf/Evaluators/BaselineCPUAverageEvaluator.py | 6 +++--- .../Perf/Evaluators/BaselineFlowAverageEvaluator.py | 6 +++--- lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py | 4 ++-- lnst/RecipeCommon/Perf/Evaluators/__init__.py | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py index 1bee283..d0bd5d2 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py @@ -1,12 +1,12 @@ from __future__ import division
-from .BaseEvaluator import BaseEvaluator +from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
-from ..Measurements.BaseCPUMeasurement import ( +from lnst.RecipeCommon.Perf.Measurements.BaseCPUMeasurement import ( CPUMeasurementResults, AggregatedCPUMeasurementResults, ) -from ..Results import result_averages_difference +from lnst.RecipeCommon.Perf.Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator): diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index ae53bed..36f25ba 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -1,12 +1,12 @@ from __future__ import division
-from .BaseEvaluator import BaseEvaluator +from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
-from ..Measurements.BaseFlowMeasurement import ( +from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) -from ..Results import result_averages_difference +from lnst.RecipeCommon.Perf.Results import result_averages_difference
class BaselineFlowAverageEvaluator(BaseEvaluator): diff --git a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py index 5ee9853..72b543d 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/NonzeroFlowEvaluator.py @@ -1,6 +1,6 @@ -from .BaseEvaluator import BaseEvaluator +from lnst.RecipeCommon.Perf.Evaluators.BaseEvaluator import BaseEvaluator
-from ..Measurements.BaseFlowMeasurement import ( +from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) diff --git a/lnst/RecipeCommon/Perf/Evaluators/__init__.py b/lnst/RecipeCommon/Perf/Evaluators/__init__.py index 9186ba7..cf68eb8 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/__init__.py +++ b/lnst/RecipeCommon/Perf/Evaluators/__init__.py @@ -1,4 +1,4 @@ -from .NonzeroFlowEvaluator import NonzeroFlowEvaluator -from .BaselineFlowAverageEvaluator import BaselineFlowAverageEvaluator +from lnst.RecipeCommon.Perf.Evaluators.NonzeroFlowEvaluator import NonzeroFlowEvaluator +from lnst.RecipeCommon.Perf.Evaluators.BaselineFlowAverageEvaluator import BaselineFlowAverageEvaluator
-from .BaselineCPUAverageEvaluator import BaselineCPUAverageEvaluator +from lnst.RecipeCommon.Perf.Evaluators.BaselineCPUAverageEvaluator import BaselineCPUAverageEvaluator
From: Ondrej Lichtner olichtne@redhat.com
Adding a new parameter to the CPU evaluator - evaluation_filter. This Parameter expects a dictionary where: key - represents a hostid as accessed through Host.hostid property value - list of strings representing cpu names
This dictionary will be used to pre-filter the results to evaluate. This can be helpful when dealing with machines that have many cores and only some are strictly used during the performance test. All the other cores might be idling, or the scheduler assigns them tasks differently between runs, which can lead to confusing evaluations.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Evaluators/BaselineCPUAverageEvaluator.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py index d0bd5d2..5521c0b 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py @@ -10,16 +10,32 @@ from lnst.RecipeCommon.Perf.Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator): - def __init__(self, pass_difference): + def __init__(self, pass_difference, evaluation_filter=None): self._pass_difference = pass_difference + self._evaluation_filter = evaluation_filter
def evaluate_results(self, recipe, results): - for host_results in self._divide_results_by_host(results).values(): + filtered_results = self._filter_results(results) + + for host_results in self._divide_results_by_host(filtered_results).values(): self._evaluate_host_results(recipe, host_results)
def get_baseline(self, recipe, result): return None
+ def _filter_results(self, results): + if self._evaluation_filter is None: + return results + + filtered = [] + for result in results: + if ( + result.host.hostid in self._evaluation_filter + and result.cpu in self._evaluation_filter[result.host.hostid] + ): + filtered.append(result) + return filtered + def _divide_results_by_host(self, results): results_by_host = {} for result in results:
On Tue, Apr 16, 2019 at 10:57:17AM +0200, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
The function is generic enough and useful enough to move it into the lnst.RecipeCommon.Perf.Results module instead of having it as a helper method for flow average evaluation.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com
.../Evaluators/BaselineFlowAverageEvaluator.py | 17 +++++++---------- lnst/RecipeCommon/Perf/Results.py | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 3f49ab0..ae53bed 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineFlowAverageEvaluator(BaseEvaluator): @@ -30,16 +31,16 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False result_text.append("No baseline found for this flow") else:
generator_diff = _result_averages_difference(
result.generator_results,
baseline.generator_results)
generator_diff = result_averages_difference(
result.generator_results, baseline.generator_results
) result_text.append( "Generator average is {:.2f}% different from the baseline generator average" .format(generator_diff))
receiver_diff = _result_averages_difference(
result.receiver_results,
baseline.receiver_results)
receiver_diff = result_averages_difference(
result.receiver_results, baseline.receiver_results
) result_text.append( "Receiver average is {:.2f}% different from the baseline receiver average" .format(receiver_diff))
@@ -51,7 +52,3 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False
recipe.add_result(comparison_result, "\n".join(result_text))
-def _result_averages_difference(a, b):
- return 100 - ((a.average / b.average)*100)
diff --git a/lnst/RecipeCommon/Perf/Results.py b/lnst/RecipeCommon/Perf/Results.py index 4591447..e0d80aa 100644 --- a/lnst/RecipeCommon/Perf/Results.py +++ b/lnst/RecipeCommon/Perf/Results.py @@ -144,3 +144,8 @@ class ParallelPerfResult(PerfResult, PerfList): return self[0].unit else: return None
+def result_averages_difference(a, b):
- if a is None or b is None:
return None
- return 100 - ((a.average / b.average) * 100)
-- 2.21.0
pushed.
-Ondrej
Tue, Apr 16, 2019 at 10:57:17AM CEST, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
The function is generic enough and useful enough to move it into the lnst.RecipeCommon.Perf.Results module instead of having it as a helper method for flow average evaluation.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com
.../Evaluators/BaselineFlowAverageEvaluator.py | 17 +++++++---------- lnst/RecipeCommon/Perf/Results.py | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 3f49ab0..ae53bed 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) +from ..Results import result_averages_difference
^^^^^^^^^ Do you mind adding full path of the import? Besides that the patch looks ok.
-Jan
class BaselineFlowAverageEvaluator(BaseEvaluator): @@ -30,16 +31,16 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False result_text.append("No baseline found for this flow") else:
generator_diff = _result_averages_difference(
result.generator_results,
baseline.generator_results)
generator_diff = result_averages_difference(
result.generator_results, baseline.generator_results
) result_text.append( "Generator average is {:.2f}% different from the baseline generator average" .format(generator_diff))
receiver_diff = _result_averages_difference(
result.receiver_results,
baseline.receiver_results)
receiver_diff = result_averages_difference(
result.receiver_results, baseline.receiver_results
) result_text.append( "Receiver average is {:.2f}% different from the baseline receiver average" .format(receiver_diff))
@@ -51,7 +52,3 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False
recipe.add_result(comparison_result, "\n".join(result_text))
-def _result_averages_difference(a, b):
- return 100 - ((a.average / b.average)*100)
diff --git a/lnst/RecipeCommon/Perf/Results.py b/lnst/RecipeCommon/Perf/Results.py index 4591447..e0d80aa 100644 --- a/lnst/RecipeCommon/Perf/Results.py +++ b/lnst/RecipeCommon/Perf/Results.py @@ -144,3 +144,8 @@ class ParallelPerfResult(PerfResult, PerfList): return self[0].unit else: return None
+def result_averages_difference(a, b):
- if a is None or b is None:
return None
- return 100 - ((a.average / b.average) * 100)
-- 2.21.0 _______________________________________________ LNST-developers mailing list -- lnst-developers@lists.fedorahosted.org To unsubscribe send an email to lnst-developers-leave@lists.fedorahosted.org Fedora Code of Conduct: https://getfedora.org/code-of-conduct.html List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines List Archives: https://lists.fedorahosted.org/archives/list/lnst-developers@lists.fedorahos...
Tue, Apr 16, 2019 at 11:16:26AM CEST, jtluka@redhat.com wrote:
Tue, Apr 16, 2019 at 10:57:17AM CEST, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
The function is generic enough and useful enough to move it into the lnst.RecipeCommon.Perf.Results module instead of having it as a helper method for flow average evaluation.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com
.../Evaluators/BaselineFlowAverageEvaluator.py | 17 +++++++---------- lnst/RecipeCommon/Perf/Results.py | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 3f49ab0..ae53bed 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) +from ..Results import result_averages_difference
^^^^^^^^^
Do you mind adding full path of the import? Besides that the patch looks ok.
-Jan
Sorry for the noise, this is fixed in a follow-up patch.
lnst-developers@lists.fedorahosted.org