* fixed simple_netperf.py broken after rebase * udp_size change in simple_netperf.py moved to proper commit
Kamil Jerabek (3): Netperf: add option udp_size regression_tests: add udp_size to netperf tests regression_tests: skip unnecessary offload option for udp testing
recipes/regression_tests/phase1/3_vlans.py | 105 ++++++----- .../regression_tests/phase1/3_vlans_over_bond.py | 105 ++++++----- recipes/regression_tests/phase1/bonding_test.py | 97 ++++++---- recipes/regression_tests/phase1/simple_netperf.py | 101 +++++----- .../phase1/virtual_bridge_2_vlans_over_bond.py | 17 +- .../phase1/virtual_bridge_vlan_in_guest.py | 17 +- .../phase1/virtual_bridge_vlan_in_host.py | 17 +- .../regression_tests/phase2/3_vlans_over_team.py | 107 ++++++----- recipes/regression_tests/phase2/team_test.py | 209 ++++++++++++--------- ...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_guest.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_host.py | 115 +++++++----- test_modules/Netperf.py | 11 ++ 13 files changed, 662 insertions(+), 469 deletions(-)
This option sets the UDP datagram size for the netperf test on the client. If this option is not explicitly specified, the default value from netperf is used.
The option accepts an integer value describing the datagram size; it can also be suffixed with one of G/M/K/g/m/k.
In this commit I also added this option to the regression_tests simple_netperf test.
Signed-off-by: Kamil Jerabek kjerabek@redhat.com --- test_modules/Netperf.py | 11 +++++++++++ 1 file changed, 11 insertions(+)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py index 6950bb8..9f98d23 100644 --- a/test_modules/Netperf.py +++ b/test_modules/Netperf.py @@ -39,6 +39,7 @@ class Netperf(TestGeneric): self._cpu_util = self.get_opt("cpu_util") self._num_parallel = int(self.get_opt("num_parallel", default=1)) self._runs = self.get_opt("runs", default=1) + self._udp_size = self.get_opt("udp_size") self._debug = int_it(self.get_opt("debug", default=0))
self._threshold = self._parse_threshold(self.get_opt("threshold")) @@ -141,6 +142,16 @@ class Netperf(TestGeneric): else: cmd += " -- %s" % self._testoptions
+ if self._udp_size is not None: + """ + udp packets will have this size + """ + if self._is_omni() or self._testoptions: + cmd += " -m %s" % self._udp_size + else: + cmd += " -- -m %s" % self._udp_size + + elif self._role == "server": cmd = "netserver -D" if self._bind is not None:
This commit adds a udp_size parameter to all udp Netperf clients in our regression_tests/phase1-2 directories. The value of the parameter can be controlled via alias "nperf_udp_size" that I've added to all recipes. If the alias is not specified, Netperf uses its default value (the size of the output buffer); otherwise the value specified in this alias is used.
Signed-off-by: Kamil Jerabek kjerabek@redhat.com --- recipes/regression_tests/phase1/3_vlans.py | 13 +++++++++++++ .../regression_tests/phase1/3_vlans_over_bond.py | 13 +++++++++++++ recipes/regression_tests/phase1/bonding_test.py | 13 +++++++++++++ recipes/regression_tests/phase1/simple_netperf.py | 11 +++++++++++ .../phase1/virtual_bridge_2_vlans_over_bond.py | 13 +++++++++++++ .../phase1/virtual_bridge_vlan_in_guest.py | 13 +++++++++++++ .../phase1/virtual_bridge_vlan_in_host.py | 13 +++++++++++++ .../regression_tests/phase2/3_vlans_over_team.py | 13 +++++++++++++ recipes/regression_tests/phase2/team_test.py | 21 +++++++++++++++++++++ ...al_ovs_bridge_2_vlans_over_active_backup_bond.py | 13 +++++++++++++ .../phase2/virtual_ovs_bridge_vlan_in_guest.py | 13 +++++++++++++ .../phase2/virtual_ovs_bridge_vlan_in_host.py | 13 +++++++++++++ 12 files changed, 162 insertions(+)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py index 8144815..a508697 100644 --- a/recipes/regression_tests/phase1/3_vlans.py +++ b/recipes/regression_tests/phase1/3_vlans.py @@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -166,6 +167,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + for setting in offload_settings: #apply offload setting dev_features = "" @@ -245,6 +250,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.add_tag(product_name) @@ -301,6 +310,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.set_tag(product_name) diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py index 41f2b95..6c036f2 100644 --- a/recipes/regression_tests/phase1/3_vlans_over_bond.py +++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py @@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -167,6 +168,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + for setting in offload_settings: #apply offload setting dev_features = "" @@ -244,6 +249,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.add_tag(product_name) @@ -300,6 +309,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.add_tag(product_name) diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py index 39e7df8..4bbf573 100644 --- a/recipes/regression_tests/phase1/bonding_test.py +++ b/recipes/regression_tests/phase1/bonding_test.py @@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -176,6 +177,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ctl.wait(15)
for setting in offload_settings: @@ -222,6 +227,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -276,6 +285,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py index fe9d96b..8d9fb1d 100644 --- a/recipes/regression_tests/phase1/simple_netperf.py +++ b/recipes/regression_tests/phase1/simple_netperf.py @@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -147,6 +148,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel" : nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel" : nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ctl.wait(15)
for setting in offload_settings: @@ -198,6 +203,9 @@ for setting in offload_settings: for offload in setting: result_udp.set_parameter(offload[0], offload[1])
+ if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -249,6 +257,9 @@ for setting in offload_settings: for offload in setting: result_udp.set_parameter(offload[0], offload[1])
+ if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py index 37f703f..fd421ef 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py +++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py @@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment) @@ -188,6 +189,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ping_mod_bad = ctl.get_module("IcmpPing", options={ "addr" : g4_guestnic.get_ip(0), @@ -304,6 +309,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -368,6 +377,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py index b4b5c6c..62fc800 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py +++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py @@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment) @@ -171,6 +172,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + # configure mtu h1.get_interface("nic").set_mtu(mtu) h1.get_interface("tap").set_mtu(mtu) @@ -240,6 +245,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -301,6 +310,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py index 1ebdd91..ec43321 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py +++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py @@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment) @@ -170,6 +171,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + # configure mtu h1.get_interface("nic").set_mtu(mtu) h1.get_interface("tap").set_mtu(mtu) @@ -240,6 +245,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -301,6 +310,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py index e9cae83..a157c45 100644 --- a/recipes/regression_tests/phase2/3_vlans_over_team.py +++ b/recipes/regression_tests/phase2/3_vlans_over_team.py @@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -167,6 +168,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + for setting in offload_settings: #apply offload setting dev_features = "" @@ -244,6 +249,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.add_tag(product_name) @@ -300,6 +309,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) result_udp.add_tag(product_name) diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py index 1aa0d0f..fc6ed08 100644 --- a/recipes/regression_tests/phase2/team_test.py +++ b/recipes/regression_tests/phase2/team_test.py @@ -42,6 +42,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment) @@ -175,6 +176,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ctl.wait(15)
for setting in offload_settings: @@ -223,6 +228,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server', "testmachine1") result_udp.set_parameter('netperf_client', "testmachine2") result_udp.add_tag(product_name) @@ -280,6 +289,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server', "testmachine1") result_udp.set_parameter('netperf_client', "testmachine2") result_udp.add_tag(product_name) @@ -382,6 +395,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server', "testmachine2") result_udp.set_parameter('netperf_client', "testmachine1") result_udp.add_tag(product_name) @@ -439,6 +456,10 @@ for setting in offload_settings: 'redhat_release']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.set_parameter('netperf_server', "testmachine2") result_udp.set_parameter('netperf_client', "testmachine1") result_udp.add_tag(product_name) diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py index d795714..affc6f5 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py +++ 
b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py @@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment) @@ -187,6 +188,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ping_mod_bad = ctl.get_module("IcmpPing", options={ "addr" : g4_guestnic.get_ip(0), @@ -283,6 +288,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -347,6 +356,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py index c96e2a6..5b7ca94 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py +++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py @@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment) @@ -168,6 +169,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ctl.wait(15)
for setting in offload_settings: @@ -228,6 +233,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -289,6 +298,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py index eaa1cab..6acb1b1 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py +++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py @@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode") nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel")) nperf_debug = ctl.get_alias("nperf_debug") nperf_max_dev = ctl.get_alias("nperf_max_dev") +nperf_udp_size = ctl.get_alias("nperf_udp_size") pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment) @@ -167,6 +168,10 @@ if nperf_mode == "multi": netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel}) netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None: + netperf_cli_udp.update_options({"udp_size" : nperf_udp_size}) + netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size}) + ctl.wait(15)
for setting in offload_settings: @@ -227,6 +232,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded") @@ -289,6 +298,10 @@ for setting in offload_settings: r'host\d+..*tap\d*.devname']) for offload in setting: result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + result_udp.add_tag(product_name) if nperf_mode == "multi": result_udp.add_tag("multithreaded")
The gro offload does not have any impact on udp testing. Previously, two equivalent combinations of offloads were therefore tested for udp. This commit skips that one unnecessary combination, which also reduces the total testing time.
Signed-off-by: Kamil Jerabek kjerabek@redhat.com --- recipes/regression_tests/phase1/3_vlans.py | 114 +++++------ .../regression_tests/phase1/3_vlans_over_bond.py | 114 +++++------ recipes/regression_tests/phase1/bonding_test.py | 106 +++++----- recipes/regression_tests/phase1/simple_netperf.py | 102 +++++----- .../phase1/virtual_bridge_2_vlans_over_bond.py | 4 +- .../phase1/virtual_bridge_vlan_in_guest.py | 4 +- .../phase1/virtual_bridge_vlan_in_host.py | 4 +- .../regression_tests/phase2/3_vlans_over_team.py | 110 ++++++----- recipes/regression_tests/phase2/team_test.py | 220 +++++++++++---------- ...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 118 +++++------ .../phase2/virtual_ovs_bridge_vlan_in_guest.py | 118 +++++------ .../phase2/virtual_ovs_bridge_vlan_in_host.py | 118 +++++------ 12 files changed, 576 insertions(+), 556 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py index a508697..1b68889 100644 --- a/recipes/regression_tests/phase1/3_vlans.py +++ b/recipes/regression_tests/phase1/3_vlans.py @@ -242,34 +242,35 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,34 +303,35 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.set_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp ipv6 + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.set_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py index 6c036f2..0d5afad 100644 --- a/recipes/regression_tests/phase1/3_vlans_over_bond.py +++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py @@ -241,34 +241,35 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
@@ -301,34 +302,35 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp ipv6 + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py index 4bbf573..44c60a0 100644 --- a/recipes/regression_tests/phase1/bonding_test.py +++ b/recipes/regression_tests/phase1/bonding_test.py @@ -219,32 +219,33 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
@@ -277,32 +278,33 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp ipv6 + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py index 8d9fb1d..60a731e 100644 --- a/recipes/regression_tests/phase1/simple_netperf.py +++ b/recipes/regression_tests/phase1/simple_netperf.py @@ -194,31 +194,32 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter("num_parallel", nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter("num_parallel", nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp) srv_proc.intr()
if ipv in [ 'ipv6', 'both' ]: @@ -248,31 +249,32 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter("num_parallel", nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + # prepare PerfRepo result for udp ipv6 + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter("num_parallel", nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp) srv_proc.intr()
# reset offload states diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py index fd421ef..a2d5399 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py +++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py @@ -297,7 +297,7 @@ for setting in offload_settings: result_tcp.set_comment(pr_comment) perf_api.save_result(result_tcp)
- if enable_udp_perf is not None: + if enable_udp_perf is not None and ("gro", "off") not in setting: # prepare PerfRepo result for udp result_udp = perf_api.new_result("udp_ipv4_id", "udp_ipv4_result", @@ -366,7 +366,7 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - if enable_udp_perf is not None: + if enable_udp_perf is not None and ("gro", "off") not in setting: result_udp = perf_api.new_result("udp_ipv6_id", "udp_ipv6_result", hash_ignore=['kernel_release', diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py index 62fc800..66620f7 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py +++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py @@ -234,7 +234,7 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - if enable_udp_perf is not None: + if enable_udp_perf is not None and ("gro", "off") not in setting: result_udp = perf_api.new_result("udp_ipv4_id", "udp_ipv4_result", hash_ignore=['kernel_release', @@ -299,7 +299,7 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - if enable_udp_perf is not None: + if enable_udp_perf is not None and ("gro", "off") not in setting: result_udp = perf_api.new_result("udp_ipv6_id", "udp_ipv6_result", hash_ignore=['kernel_release', diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py index ec43321..2160e0b 100644 --- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py +++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py @@ -234,7 +234,7 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - if enable_udp_perf is not None: + if enable_udp_perf is not None and ("gro", "off") not in setting: result_udp = perf_api.new_result("udp_ipv4_id", "udp_ipv4_result", hash_ignore=['kernel_release', @@ -299,7 +299,7 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - if enable_udp_perf: + if enable_udp_perf and ("gro", "off") not in setting: result_udp = perf_api.new_result("udp_ipv6_id", "udp_ipv6_result", hash_ignore=['kernel_release', diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py index a157c45..5936347 100644 --- a/recipes/regression_tests/phase2/3_vlans_over_team.py +++ b/recipes/regression_tests/phase2/3_vlans_over_team.py @@ -242,33 +242,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,33 +303,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) - result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server_on_vlan', vlans[0]) + result_udp.set_parameter('netperf_client_on_vlan', vlans[0]) + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py index fc6ed08..014194d 100644 --- a/recipes/regression_tests/phase2/team_test.py +++ b/recipes/regression_tests/phase2/team_test.py @@ -221,33 +221,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server', "testmachine1") - result_udp.set_parameter('netperf_client', "testmachine2") - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server', "testmachine1") + result_udp.set_parameter('netperf_client', "testmachine2") + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr() if ipv in [ 'ipv6', 'both' ]: @@ -282,33 +283,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server', "testmachine1") - result_udp.set_parameter('netperf_client', "testmachine2") - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*5) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server', "testmachine1") + result_udp.set_parameter('netperf_client', "testmachine2") + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*5) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
@@ -388,33 +390,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server', "testmachine2") - result_udp.set_parameter('netperf_client', "testmachine1") - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = m1.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server', "testmachine2") + result_udp.set_parameter('netperf_client', "testmachine1") + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = m1.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr() if ipv in [ 'ipv6', 'both' ]: @@ -449,33 +452,34 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.set_parameter('netperf_server', "testmachine2") - result_udp.set_parameter('netperf_client', "testmachine1") - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = m1.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.set_parameter('netperf_server', "testmachine2") + result_udp.set_parameter('netperf_client', "testmachine1") + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = m1.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py index affc6f5..897b173 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py +++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py @@ -277,35 +277,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = g3.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = g3.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr() if ipv in [ 'ipv6', 'both' ]: @@ -345,35 +346,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = g3.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = g3.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py index 5b7ca94..f084f37 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py +++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py @@ -222,35 +222,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = h2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = h2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr() if ipv in [ 'ipv6', 'both' ]: @@ -287,35 +288,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = h2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = h2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py index 6acb1b1..31e57af 100644 --- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py +++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py @@ -221,35 +221,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp - result_udp = perf_api.new_result("udp_ipv4_id", - "udp_ipv4_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp, baseline) - - udp_res_data = h2.run(netperf_cli_udp, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv4_id", + "udp_ipv4_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp, baseline) + + udp_res_data = h2.run(netperf_cli_udp, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
@@ -287,35 +288,36 @@ for setting in offload_settings: perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6 - result_udp = perf_api.new_result("udp_ipv6_id", - "udp_ipv6_result", - hash_ignore=[ - 'kernel_release', - 'redhat_release', - r'guest\d+.hostname', - r'guest\d+..*hwaddr', - r'host\d+..*tap\d*.hwaddr', - r'host\d+..*tap\d*.devname']) - for offload in setting: - result_udp.set_parameter(offload[0], offload[1]) - - if nperf_udp_size is not None: - result_udp.set_parameter("nperf_udp_size", nperf_udp_size) - - result_udp.add_tag(product_name) - if nperf_mode == "multi": - result_udp.add_tag("multithreaded") - result_udp.set_parameter('num_parallel', nperf_num_parallel) - - baseline = perf_api.get_baseline_of_result(result_udp) - netperf_baseline_template(netperf_cli_udp6, baseline) - - udp_res_data = h2.run(netperf_cli_udp6, - timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) - - netperf_result_template(result_udp, udp_res_data) - result_udp.set_comment(pr_comment) - perf_api.save_result(result_udp) + if ("gro", "off") not in setting: + result_udp = perf_api.new_result("udp_ipv6_id", + "udp_ipv6_result", + hash_ignore=[ + 'kernel_release', + 'redhat_release', + r'guest\d+.hostname', + r'guest\d+..*hwaddr', + r'host\d+..*tap\d*.hwaddr', + r'host\d+..*tap\d*.devname']) + for offload in setting: + result_udp.set_parameter(offload[0], offload[1]) + + if nperf_udp_size is not None: + result_udp.set_parameter("nperf_udp_size", nperf_udp_size) + + result_udp.add_tag(product_name) + if nperf_mode == "multi": + result_udp.add_tag("multithreaded") + result_udp.set_parameter('num_parallel', nperf_num_parallel) + + baseline = perf_api.get_baseline_of_result(result_udp) + netperf_baseline_template(netperf_cli_udp6, baseline) + + udp_res_data = h2.run(netperf_cli_udp6, + timeout = (netperf_duration + nperf_reserve)*nperf_max_runs) + + netperf_result_template(result_udp, udp_res_data) + result_udp.set_comment(pr_comment) + perf_api.save_result(result_udp)
server_proc.intr()
Tue, Aug 16, 2016 at 04:09:54PM CEST, kjerabek@redhat.com wrote:
- fixed simple_netperf.py broken after rebase
- udp_size change in simple_netperf.py moved to proper commit
Having the patchset cover letter subject as "changes of v5" is wrong. The subject of the cover letter should remain the same; the changelog should be at the end of the cover letter.
Kamil Jerabek (3): Netperf: add option udp_size regression_tests: add udp_size to netperf tests regression_tests: skip unnecessary offload option for udp testing
recipes/regression_tests/phase1/3_vlans.py | 105 ++++++----- .../regression_tests/phase1/3_vlans_over_bond.py | 105 ++++++----- recipes/regression_tests/phase1/bonding_test.py | 97 ++++++---- recipes/regression_tests/phase1/simple_netperf.py | 101 +++++----- .../phase1/virtual_bridge_2_vlans_over_bond.py | 17 +- .../phase1/virtual_bridge_vlan_in_guest.py | 17 +- .../phase1/virtual_bridge_vlan_in_host.py | 17 +- .../regression_tests/phase2/3_vlans_over_team.py | 107 ++++++----- recipes/regression_tests/phase2/team_test.py | 209 ++++++++++++--------- ...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_guest.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_host.py | 115 +++++++----- test_modules/Netperf.py | 11 ++ 13 files changed, 662 insertions(+), 469 deletions(-)
-- 2.5.5 _______________________________________________ LNST-developers mailing list lnst-developers@lists.fedorahosted.org https://lists.fedorahosted.org/admin/lists/lnst-developers@lists.fedorahosted.org
On Tue, Aug 16, 2016 at 04:09:54PM +0200, Kamil Jerabek wrote:
- fixed simple_netperf.py broken after rebase
- udp_size change in simple_netperf.py moved to proper commit
Kamil Jerabek (3): Netperf: add option udp_size regression_tests: add udp_size to netperf tests regression_tests: skip unnecessary offload option for udp testing
recipes/regression_tests/phase1/3_vlans.py | 105 ++++++----- .../regression_tests/phase1/3_vlans_over_bond.py | 105 ++++++----- recipes/regression_tests/phase1/bonding_test.py | 97 ++++++---- recipes/regression_tests/phase1/simple_netperf.py | 101 +++++----- .../phase1/virtual_bridge_2_vlans_over_bond.py | 17 +- .../phase1/virtual_bridge_vlan_in_guest.py | 17 +- .../phase1/virtual_bridge_vlan_in_host.py | 17 +- .../regression_tests/phase2/3_vlans_over_team.py | 107 ++++++----- recipes/regression_tests/phase2/team_test.py | 209 ++++++++++++--------- ...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_guest.py | 115 +++++++----- .../phase2/virtual_ovs_bridge_vlan_in_host.py | 115 +++++++----- test_modules/Netperf.py | 11 ++ 13 files changed, 662 insertions(+), 469 deletions(-)
-- 2.5.5 _______________________________________________ LNST-developers mailing list lnst-developers@lists.fedorahosted.org https://lists.fedorahosted.org/admin/lists/lnst-developers@lists.fedorahosted.org
thanks, pushed to master.
-Ondrej
lnst-developers@lists.fedorahosted.org