From: Ondrej Lichtner <olichtne@redhat.com>
This commit refactors the code that calculates the averages difference and formats the descriptive text. At the same time it enables comparing the other two metrics generated for FlowMeasurements - generator_cpu_stats and receiver_cpu_stats.
Signed-off-by: Ondrej Lichtner olichtne@redhat.com --- .../BaselineFlowAverageEvaluator.py | 41 ++++++++++--------- 1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 36f25ba..b05849c 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -25,30 +25,31 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = True result_text = [ "Flow {} Baseline average evaluation".format(result.flow), - "Configured {}% difference as acceptable".format(self._pass_difference), + "Configured {}% difference as acceptable".format( + self._pass_difference + ), ] if baseline is None: comparison_result = False result_text.append("No baseline found for this flow") else: - generator_diff = result_averages_difference( - result.generator_results, baseline.generator_results - ) - result_text.append( - "Generator average is {:.2f}% different from the baseline generator average" - .format(generator_diff)) - - receiver_diff = result_averages_difference( - result.receiver_results, baseline.receiver_results - ) - result_text.append( - "Receiver average is {:.2f}% different from the baseline receiver average" - .format(receiver_diff)) - - if ( - abs(generator_diff) > self._pass_difference - or abs(receiver_diff) > self._pass_difference - ): - comparison_result = False + for i in ["generator_results", "generator_cpu_stats", "receiver_results", "receiver_cpu_stats"]: + comparison, text = self._average_diff_comparison( + name="{} average".format(i), + target=getattr(result, i), + baseline=getattr(baseline, i), + ) + result_text.append(text) + comparison_result = comparison_result and comparison
recipe.add_result(comparison_result, "\n".join(result_text)) + + def _average_diff_comparison(self, name, target, baseline): + difference = result_averages_difference(target, baseline) + result_text = "New {name} is {diff:.2f}% {direction} from the baseline {name}".format( + name=name, + diff=abs(difference), + direction="higher" if difference >= 0 else "lower", + ) + comparison = abs(difference) <= self._pass_difference + return comparison, result_text
On Thu, Apr 18, 2019 at 01:55:03PM +0200, olichtne@redhat.com wrote:
From: Ondrej Lichtner olichtne@redhat.com
This code refactors the code that takes care of calculating the averages difference and formatting the descriptive text. At the same time I enabled comparing the other two metrics generated for FlowMeasurements - generator_cpu_stats and receiver_cpu_stats.
Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
.../BaselineFlowAverageEvaluator.py | 41 ++++++++++--------- 1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py index 36f25ba..b05849c 100644 --- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py +++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py @@ -25,30 +25,31 @@ class BaselineFlowAverageEvaluator(BaseEvaluator): comparison_result = True result_text = [ "Flow {} Baseline average evaluation".format(result.flow),
"Configured {}% difference as acceptable".format(self._pass_difference),
"Configured {}% difference as acceptable".format(
self._pass_difference
), ] if baseline is None: comparison_result = False result_text.append("No baseline found for this flow") else:
generator_diff = result_averages_difference(
result.generator_results, baseline.generator_results
)
result_text.append(
"Generator average is {:.2f}% different from the baseline generator average"
.format(generator_diff))
receiver_diff = result_averages_difference(
result.receiver_results, baseline.receiver_results
)
result_text.append(
"Receiver average is {:.2f}% different from the baseline receiver average"
.format(receiver_diff))
if (
abs(generator_diff) > self._pass_difference
or abs(receiver_diff) > self._pass_difference
):
comparison_result = False
for i in ["generator_results", "generator_cpu_stats", "receiver_results", "receiver_cpu_stats"]:
comparison, text = self._average_diff_comparison(
name="{} average".format(i),
target=getattr(result, i),
baseline=getattr(baseline, i),
)
result_text.append(text)
comparison_result = comparison_result and comparison recipe.add_result(comparison_result, "\n".join(result_text))
def _average_diff_comparison(self, name, target, baseline):
    """Compare a measured result against its baseline by average difference.

    Computes the percentage difference between *target* and *baseline*
    via result_averages_difference and renders it as a human-readable
    sentence.

    :param name: label used in the generated description text
    :param target: measured results to evaluate
    :param baseline: baseline results to compare against
    :return: tuple of (passed, description) where *passed* is True when
        the absolute difference is within self._pass_difference percent
    """
    difference = result_averages_difference(target, baseline)
    magnitude = abs(difference)
    # a non-negative difference is reported as "higher", negative as "lower"
    direction = "higher" if difference >= 0 else "lower"
    result_text = (
        "New {name} is {diff:.2f}% {direction} from the baseline {name}"
        .format(name=name, diff=magnitude, direction=direction)
    )
    passed = magnitude <= self._pass_difference
    return passed, result_text
-- 2.21.0
pushed
-Ondrej
lnst-developers@lists.fedorahosted.org