From: Ondrej Lichtner olichtne@redhat.com
The function is generic enough and useful enough to move it into the lnst.RecipeCommon.Perf.Results module instead of having it as a helper method for flow average evaluation.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Evaluators/BaselineFlowAverageEvaluator.py | 17 +++++++---------- lnst/RecipeCommon/Perf/Results.py | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 3f49ab0..ae53bed 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseFlowMeasurement import ( FlowMeasurementResults, AggregatedFlowMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineFlowAverageEvaluator(BaseEvaluator): @@ -30,16 +31,16 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False result_text.append("No baseline found for this flow") else: - generator_diff = _result_averages_difference( - result.generator_results, - baseline.generator_results) + generator_diff = result_averages_difference( + result.generator_results, baseline.generator_results + ) result_text.append( "Generator average is {:.2f}% different from the baseline generator average" .format(generator_diff))
- receiver_diff = _result_averages_difference( - result.receiver_results, - baseline.receiver_results) + receiver_diff = result_averages_difference( + result.receiver_results, baseline.receiver_results + ) result_text.append( "Receiver average is {:.2f}% different from the baseline receiver average" .format(receiver_diff)) @@ -51,7 +52,3 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = False
recipe.add_result(comparison_result, "\n".join(result_text)) - - -def _result_averages_difference(a, b): - return 100 - ((a.average / b.average)*100) diff --git a/lnst/RecipeCommon/Perf/Results.py b/lnst/RecipeCommon/Perf/Results.py index 4591447..e0d80aa 100644 --- a/lnst/RecipeCommon/Perf/Results.py +++ b/lnst/RecipeCommon/Perf/Results.py @@ -144,3 +144,8 @@ class ParallelPerfResult(PerfResult, PerfList): return self[0].unit else: return None + +def result_averages_difference(a, b): + if a is None or b is None: + return None + return 100 - ((a.average / b.average) * 100)
From: Ondrej Lichtner olichtne@redhat.com
The evaluator is based on how reporting works for CPU measurements - separating the individual cpus by the host machine. And uses a comparison method based on how flow average evaluation implements it.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../Evaluators/BaselineCPUAverageEvaluator.py | 52 ++++++++++++++----- 1 file changed, 40 insertions(+), 12 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py index de0b83d..e9c6bca 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseCPUMeasurement import ( CPUMeasurementResults, AggregatedCPUMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator): @@ -13,24 +14,51 @@ class BaselineCPUAverageEvaluator(BaseEvaluator): self._pass_difference = pass_difference
def evaluate_results(self, recipe, results): - for result in results: - baseline = self.get_baseline(recipe, result) - self._compare_result_with_baseline(recipe, result, baseline) + for host_results in self._divide_results_by_host(results).values(): + self._evaluate_host_results(recipe, host_results)
def get_baseline(self, recipe, result): return None
- def _compare_result_with_baseline(self, recipe, result, baseline): + def _divide_results_by_host(self, results): + results_by_host = {} + for result in results: + if result.host not in results_by_host: + results_by_host[result.host] = [] + results_by_host[result.host].append(result) + return results_by_host + + def _evaluate_host_results(self, recipe, host_results): comparison_result = True result_text = [ - "CPU Baseline average evaluation".format(), - "Configured {}% difference as acceptable".format(self._pass_difference), + "CPU Baseline average evaluation for Host {}:".format( + host_results[0].host.hostid + ), + "Configured {}% difference as acceptable".format( + self._pass_difference + ), + ] + pairs = [ + (result, self.get_baseline(recipe, result)) + for result in host_results ] - if baseline is None: - comparison_result = False - result_text.append("No baseline found for this CPU measurement") - else: - result_text.append("I don't know how to compare CPU averages yet!!!") - comparison_result = False + for result, baseline in pairs: + difference = result_averages_difference( + result.utilization, + baseline.utilization if baseline is not None else None, + ) + if difference is None: + result_text.append( + "CPU {cpuid}: no baseline found".format(cpuid=result.cpu) + ) + else: + if abs(difference) > self._pass_difference: + comparison_result = False + result_text.append( + "CPU {cpuid}: utilization {diff:.2f}% {direction} than baseline".format( + cpuid=result.cpu, + diff=abs(difference), + direction="higher" if difference >= 0 else "lower", + ) + )
recipe.add_result(comparison_result, "\n".join(result_text))
On Fri, Apr 12, 2019 at 02:44:10PM +0200, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
The evaluator is based on how reporting works for CPU measurements - separating the individual cpus by the host machine. And uses a comparison method based on how flow average evaluation implements it.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com
.../Evaluators/BaselineCPUAverageEvaluator.py | 52 ++++++++++++++----- 1 file changed, 40 insertions(+), 12 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py index de0b83d..e9c6bca 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py @@ -6,6 +6,7 @@ from ..Measurements.BaseCPUMeasurement import ( CPUMeasurementResults, AggregatedCPUMeasurementResults, ) +from ..Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator): @@ -13,24 +14,51 @@ class BaselineCPUAverageEvaluator(BaseEvaluator): self._pass_difference = pass_difference
def evaluate_results(self, recipe, results):
for result in results:
baseline = self.get_baseline(recipe, result)
self._compare_result_with_baseline(recipe, result, baseline)
for host_results in self._divide_results_by_host(results).values():
self._evaluate_host_results(recipe, host_results)
def get_baseline(self, recipe, result): return None
- def _compare_result_with_baseline(self, recipe, result, baseline):
- def _divide_results_by_host(self, results):
results_by_host = {}
for result in results:
if result.host not in results_by_host:
results_by_host[result.host] = []
results_by_host[result.host].append(result)
return results_by_host
- def _evaluate_host_results(self, recipe, host_results): comparison_result = True result_text = [
"CPU Baseline average evaluation".format(),
"Configured {}% difference as acceptable".format(self._pass_difference),
"CPU Baseline average evaluation for Host {}:".format(
host_results[0].host.hostid
),
"Configured {}% difference as acceptable".format(
self._pass_difference
),
]
pairs = [
(result, self.get_baseline(recipe, result))
for result in host_results ]
if baseline is None:
comparison_result = False
result_text.append("No baseline found for this CPU measurement")
else:
result_text.append("I don't know how to compare CPU averages yet!!!")
comparison_result = False
for result, baseline in pairs:
difference = result_averages_difference(
result.utilization,
baseline.utilization if baseline is not None else None,
)
if difference is None:
result_text.append(
"CPU {cpuid}: no baseline found".format(cpuid=result.cpu)
)
else:
if abs(difference) > self._pass_difference:
comparison_result = False
result_text.append(
"CPU {cpuid}: utilization {diff:.2f}% {direction} than baseline".format(
cpuid=result.cpu,
diff=abs(difference),
direction="higher" if difference >= 0 else "lower",
)
) recipe.add_result(comparison_result, "\n".join(result_text))
-- 2.21.0
Looks like there are a couple of issues with this patch that showed up when running on real hardware with real measurement numbers, which didn't show up when running on a VM.
Will send a v2...
-Ondrej
lnst-developers@lists.fedorahosted.org