These tests are guest-to-guest version of existing virtual *vlan-in-host and
*vlan-in-guest tests of guest-to-baremetal topology.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
.../virtual_bridge_vlan_in_guest_mirrored.README | 87 ++++
.../virtual_bridge_vlan_in_guest_mirrored.py | 466 ++++++++++++++++++++
.../virtual_bridge_vlan_in_guest_mirrored.xml | 110 +++++
.../virtual_bridge_vlan_in_host_mirrored.README | 87 ++++
.../phase1/virtual_bridge_vlan_in_host_mirrored.py | 464 ++++++++++++++++++++
.../virtual_bridge_vlan_in_host_mirrored.xml | 111 +++++
...irtual_ovs_bridge_vlan_in_guest_mirrored.README | 87 ++++
.../virtual_ovs_bridge_vlan_in_guest_mirrored.py | 469 +++++++++++++++++++++
.../virtual_ovs_bridge_vlan_in_guest_mirrored.xml | 104 +++++
...virtual_ovs_bridge_vlan_in_host_mirrored.README | 88 ++++
.../virtual_ovs_bridge_vlan_in_host_mirrored.py | 464 ++++++++++++++++++++
.../virtual_ovs_bridge_vlan_in_host_mirrored.xml | 98 +++++
12 files changed, 2635 insertions(+)
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
create mode 100644 recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
create mode 100644 recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
new file mode 100644
index 0000000..e3f9ba0
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +----+ | | +----+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
+| |br0| | | |br0| |
+| +-+-+ host1 | | host2 +-+-+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ |VLAN10 |VLAN10
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device
+ One tap device
+ One bridge device, bridging ethernet and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device
+ One tap device
+ One bridge device, bridging ethernet and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device with one VLAN subinterface
+Guest #2 description:
+ One ethernet device with one VLAN subinterface
+Test name:
+ virtual_bridge_vlan_in_guest_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's VLAN10 and guest2's VLAN10
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + between guest1's VLAN10 and guest2's VLAN10
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_bridge_vlan_in_guest_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
new file mode 100644
index 0000000..3d7dc1e
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.py
@@ -0,0 +1,466 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+mtu = ctl.get_alias("mtu")
+
+g1_vlan10 = g1.get_interface("vlan10")
+g2_vlan10 = g2.get_interface("vlan10")
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_vlan10.get_ip(0),
+ "count" : 100,
+ "iface" : g1_vlan10.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_vlan10.get_ip(1),
+ "count" : 100,
+                           "iface" : g1_vlan10.get_devname(),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_vlan10.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_vlan10.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+# configure mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g1.get_interface("vlan10").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("vlan10").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
new file mode 100644
index 0000000..9d91555
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest_mirrored.xml
@@ -0,0 +1,110 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_bridge_vlan_in_guest_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.1/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.2/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_bridge_vlan_in_guest_mirrored.py" />
+</lnstrecipe>
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
new file mode 100644
index 0000000..14cdf71
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| VLAN10 | | | | VLAN10 |
+| +----+ | | +----+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
+| |br0| | | |br0| |
+| +-+-+ host1 | | host2 +-+-+ |
+| | | | | |
+| +-+-+ | | +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ | |
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One bridge device, bridging VLAN and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One bridge device, bridging VLAN and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device
+Guest #2 description:
+ One ethernet device
+Test name:
+ virtual_bridge_vlan_in_host_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's NIC and guest2's NIC
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + between guest1's NIC and guest2's NIC
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_bridge_vlan_in_host_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
new file mode 100644
index 0000000..5a06fcd
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.py
@@ -0,0 +1,464 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+mtu = ctl.get_alias("mtu")
+
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_guestnic.get_ip(0),
+ "count" : 100,
+ "iface" : g1_guestnic.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_guestnic.get_ip(1),
+ "count" : 100,
+ "iface" : g1_guestnic.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_guestnic.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_guestnic.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+# configure mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("vlan10").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("vlan10").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=['kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
new file mode 100644
index 0000000..4973caf
--- /dev/null
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host_mirrored.xml
@@ -0,0 +1,111 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5"/>
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_bridge_vlan_in_host_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="nic" />
+ </slaves>
+ </vlan>
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="vlan10" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.1/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="nic" />
+ </slaves>
+ </vlan>
+ <bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="vlan10" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.2/24</address>
+ </addresses>
+ </bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_bridge_vlan_in_host_mirrored.py" />
+</lnstrecipe>
+
+
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
new file mode 100644
index 0000000..9db35be
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.README
@@ -0,0 +1,87 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|------+ +------|nic|------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +----+ | | +----+ |
+| | | | | |
+| +-+---------+ | | +---------+-+ |
+| | ovs_bridge| | | |ovs_bridge | |
+| +-+---------+ | | +---------+-+ |
+| | | | | |
+| +-+-+ host1 | | host2 +-+-+ |
++-|tap|-----------+ +-----------|tap|-+
+ +-+-+ +-+-+
+ | |
+ |VLAN10 |VLAN10
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device
+ One tap device
+ One Open vSwitch bridge device, bridging ethernet and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device
+ One tap device
+ One Open vSwitch bridge device, bridging ethernet and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device with one VLAN subinterface
+Guest #2 description:
+ One ethernet device with one VLAN subinterface
+Test name:
+ virtual_ovs_bridge_vlan_in_guest_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's VLAN10 and guest2's VLAN10
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + between guest1's VLAN10 and guest2's VLAN10
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_ovs_bridge_vlan_in_guest_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
new file mode 100644
index 0000000..2203cdc
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.py
@@ -0,0 +1,469 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2, g2], pr_user_comment)
+
+g1_vlan10 = g1.get_interface("vlan10")
+g2_vlan10 = g2.get_interface("vlan10")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_vlan10.get_ip(0),
+ "count" : 100,
+ "iface" : g1_vlan10.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+ options={
+ "addr" : g2_vlan10.get_ip(1),
+ "count" : 100,
+ "iface" : g1_vlan10.get_ip(1),
+ "interval" : 0.1
+ })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_vlan10.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_vlan10.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_vlan10.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_vlan10.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+#set mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("br").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g1.get_interface("vlan10").set_mtu(mtu)
+
+g2.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("vlan10").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+            baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+            sctp_res_data = g2.run(netperf_cli_sctp6,
+                timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
new file mode 100644
index 0000000..45e1808
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest_mirrored.xml
@@ -0,0 +1,104 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5" />
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_ovs_bridge_vlan_in_guest_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1" />
+ <ovs_bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2" />
+ <ovs_bridge id="br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio" />
+ </params>
+ </eth>
+ <vlan id="vlan10">
+ <options>
+ <option name="vlan_tci" value="{$vlan10_tag}" />
+ </options>
+ <slaves>
+ <slave id="guestnic" />
+ </slaves>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </vlan>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_ovs_bridge_vlan_in_guest_mirrored.py" />
+</lnstrecipe>
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
new file mode 100644
index 0000000..bb6846d
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.README
@@ -0,0 +1,88 @@
+Topology:
+
+ +----------+
+ | |
+ +-----------------+ switch +-----------------+
+ | | | |
+ | +----------+ |
+ | |
+ +-+-+ +-+-+
++------|nic|-------+ +------|nic|-------+
+| +-+-+ | | +-+-+ |
+| | | | | |
+| +------+-------+ | | +------+-------+ |
+| | vlan10 | | | | vlan10 | |
+| | | | | | | |
+| | ovs_bridge | | | | ovs_bridge | |
+| | | | | | | |
+| +-+------------+ | | +-----------+--+ |
+| | | | | |
+| +-+-+ host1 | | host2 +-+-+ |
++-|tap|------------+ +-----------|tap|--+
+ +-+-+ +-+-+
+ | |
+ | |
+ | |
+ +-+-+ +-+-+
++-|nic|--+ +-|nic|--+
+| +---+ | | +---+ |
+| guest1 | | guest2 |
+| | | |
++--------+ +--------+
+
+Number of hosts: 4
+Host #1 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One Open vSwitch bridge device, bridging VLAN and tap devices
+ Host for guest1 virtual machine
+Host #2 description:
+ One ethernet device with one VLAN subinterface
+ One tap device
+ One Open vSwitch bridge device, bridging VLAN and tap devices
+ Host for guest2 virtual machine
+Guest #1 description:
+ One ethernet device
+Guest #2 description:
+ One ethernet device
+Test name:
+ virtual_ovs_bridge_vlan_in_host_mirrored.py
+Test description:
+ Ping:
+ + count: 100
+ + interval: 0.1s
+ + between guest1's NIC and guest2's NIC
+ Netperf:
+ + duration: 60s
+ + TCP_STREAM and UDP_STREAM
+ + between guest1's NIC and guest2's NIC
+
+PerfRepo integration:
+ First, preparation in PerfRepo is required - you need to create Test objects
+ through the web interface that properly describe the individual Netperf
+ tests that this recipe runs. Don't forget to also add appropriate metrics.
+ For these Netperf tests it's always:
+ * throughput
+ * throughput_min
+ * throughput_max
+ * throughput_deviation
+
+ After that, to enable support for PerfRepo you need to create the file
+ virtual_ovs_bridge_vlan_in_host_mirrored.mapping and define the following id mappings:
+ tcp_ipv4_id -> to store ipv4 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ tcp_ipv6_id -> to store ipv6 TCP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv4_id -> to store ipv4 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+ udp_ipv6_id -> to store ipv6 UDP_STREAM Netperf test results, maps to TestUid of a PerfRepo Test object
+
+ To enable result comparison against baselines you need to create a Report in
+ PerfRepo that will store the baseline. Set up the Report to only contain results
+ with the same hash tag and then add a new mapping to the mapping file, with
+ this format:
+ <some_hash> = <report_id>
+
+ The hash value is automatically generated during test execution and added
+ to each result stored in PerfRepo. To get the Report id you need to open
+ that report in your browser and find it in the URL.
+
+ When running this recipe you should also define the 'product_name' alias
+ (e.g. RHEL7) in order to tag the result object in PerfRepo.
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
new file mode 100644
index 0000000..407ddb6
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.py
@@ -0,0 +1,464 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import netperf_baseline_template
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+from lnst.RecipeCommon.Offloads import parse_offloads
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+h1 = ctl.get_host("host1")
+g1 = ctl.get_host("guest1")
+
+h2 = ctl.get_host("host2")
+g2 = ctl.get_host("guest2")
+
+g1.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+g2.sync_resources(modules=["IcmpPing", "Icmp6Ping", "Netperf"])
+
+# ------
+# TESTS
+# ------
+
+mtu = ctl.get_alias("mtu")
+ipv = ctl.get_alias("ipv")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_mode = ctl.get_alias("nperf_mode")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_msg_size = ctl.get_alias("nperf_msg_size")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+offloads_alias = ctl.get_alias("offloads")
+nperf_protocols = ctl.get_alias("nperf_protocols")
+
+sctp_default_msg_size = "16K"
+
+if offloads_alias is not None:
+ offloads, offload_settings = parse_offloads(offloads_alias)
+else:
+ offloads = ["gro", "gso", "tso", "rx", "tx"]
+ offload_settings = [ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "off"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "off"), ("tso", "off"), ("tx", "on"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "off"), ("tx", "off"), ("rx", "on")],
+ [("gro", "on"), ("gso", "on"), ("tso", "on"), ("tx", "on"), ("rx", "off")]]
+
+pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
+
+g1_guestnic = g1.get_interface("guestnic")
+g2_guestnic = g2.get_interface("guestnic")
+h1_nic = h1.get_interface("nic")
+h2_nic = h2.get_interface("nic")
+
+h1.run("service irqbalance stop")
+h2.run("service irqbalance stop")
+
+# this will pin devices irqs to cpu #0
+for m, d in [ (h1, h1_nic), (h2, h2_nic) ]:
+ pin_dev_irqs(m, d, 0)
+
+ping_mod = ctl.get_module("IcmpPing",
+ options={
+ "addr" : g2_guestnic.get_ip(0),
+ "count" : 100,
+ "iface" : g1_guestnic.get_devname(),
+ "interval" : 0.1
+ })
+
+ping_mod6 = ctl.get_module("Icmp6Ping",
+                           options={
+                               "addr" : g2_guestnic.get_ip(1),
+                               "count" : 100,
+                               "iface" : g1_guestnic.get_devname(),
+                               "interval" : 0.1
+                           })
+
+netperf_srv = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(0)
+ })
+
+netperf_srv6 = ctl.get_module("Netperf",
+ options={
+ "role" : "server",
+ "bind" : g1_guestnic.get_ip(1),
+ "netperf_opts" : " -6",
+ })
+
+p_opts = "-L %s" % (g2_guestnic.get_ip(0))
+p_opts6 = "-L %s -6" % (g2_guestnic.get_ip(1))
+
+netperf_cli_tcp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" : g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_tcp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_udp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(0),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+netperf_cli_sctp6 = ctl.get_module("Netperf",
+ options={
+ "role" : "client",
+ "netperf_server" :
+ g1_guestnic.get_ip(1),
+ "duration" : netperf_duration,
+ "testname" : "SCTP_STREAM",
+ "confidence" : nperf_confidence,
+ "cpu_util" : nperf_cpu_util,
+ "runs" : nperf_max_runs,
+ "netperf_opts" : p_opts6,
+ "msg_size" : sctp_default_msg_size,
+ "debug" : nperf_debug,
+ "max_deviation" : nperf_max_dev
+ })
+
+if nperf_mode == "multi":
+ netperf_cli_tcp.unset_option("confidence")
+ netperf_cli_udp.unset_option("confidence")
+ netperf_cli_sctp.unset_option("confidence")
+ netperf_cli_tcp6.unset_option("confidence")
+ netperf_cli_udp6.unset_option("confidence")
+ netperf_cli_sctp6.unset_option("confidence")
+
+ netperf_cli_tcp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+ netperf_cli_sctp6.update_options({"num_parallel": nperf_num_parallel})
+
+ # we have to use multiqueue qdisc to get appropriate data
+ h1.run("tc qdisc replace dev %s root mq" % h1_nic.get_devname())
+ h2.run("tc qdisc replace dev %s root mq" % h2_nic.get_devname())
+
+if nperf_msg_size is not None:
+ netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size})
+ netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size})
+
+#set mtu
+h1.get_interface("nic").set_mtu(mtu)
+h1.get_interface("tap").set_mtu(mtu)
+h1.get_interface("ovs_br").set_mtu(mtu)
+
+h2.get_interface("nic").set_mtu(mtu)
+h2.get_interface("tap").set_mtu(mtu)
+h2.get_interface("ovs_br").set_mtu(mtu)
+
+g1.get_interface("guestnic").set_mtu(mtu)
+g2.get_interface("guestnic").set_mtu(mtu)
+
+ctl.wait(15)
+
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+ g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+ h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
+ if ipv in [ 'ipv4', 'both' ]:
+ g1.run(ping_mod)
+
+ server_proc = g1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv4_id",
+ "sctp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_sctp)
+ netperf_baseline_template(netperf_cli_sctp, baseline)
+ sctp_res_data = g2.run(netperf_cli_sctp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ g1.run(ping_mod6)
+
+ server_proc = g1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ if nperf_protocols.find("tcp") > -1:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_tcp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = g2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ if nperf_protocols.find("udp") > -1 and ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_msg_size is not None:
+ result_udp.set_parameter("nperf_msg_size", nperf_msg_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+ # for SCTP only gso offload on/off
+ if (nperf_protocols.find("sctp") > -1 and
+ (len([val for val in setting if val[1] == 'off']) == 0 or
+ ('gso', 'off') in setting)):
+ result_sctp = perf_api.new_result("sctp_ipv6_id",
+ "sctp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_sctp.set_parameter(offload[0], offload[1])
+
+ result_sctp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_sctp.add_tag("multithreaded")
+ result_sctp.set_parameter("num_parallel", nperf_num_parallel)
+
+            baseline = perf_api.get_baseline_of_result(result_sctp)
+            netperf_baseline_template(netperf_cli_sctp6, baseline)
+            sctp_res_data = g2.run(netperf_cli_sctp6,
+                timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_sctp, sctp_res_data)
+ result_sctp.set_comment(pr_comment)
+ perf_api.save_result(result_sctp)
+
+ server_proc.intr()
+
+#reset offload states
+dev_features = ""
+for offload in offloads:
+ dev_features += " %s %s" % (offload, "on")
+h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
+g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
+h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+g2.run("ethtool -K %s %s" % (g2_guestnic.get_devname(), dev_features))
+
+h1.run("service irqbalance start")
+h2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
new file mode 100644
index 0000000..9e22de8
--- /dev/null
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host_mirrored.xml
@@ -0,0 +1,98 @@
+<lnstrecipe>
+ <define>
+ <alias name="ipv" value="both" />
+ <alias name="netperf_duration" value="60" />
+ <alias name="nperf_reserve" value="20" />
+ <alias name="nperf_confidence" value="99,5" />
+ <alias name="nperf_max_runs" value="5" />
+ <alias name="nperf_mode" value="default"/>
+ <alias name="nperf_num_parallel" value="2"/>
+ <alias name="nperf_debug" value="0"/>
+ <alias name="nperf_max_dev" value="20%"/>
+ <alias name="nperf_protocols" value="tcp udp sctp"/>
+ <alias name="mtu" value="1500" />
+ <alias name="mapping_file" value="virtual_ovs_bridge_vlan_in_host_mirrored.mapping" />
+ <alias name="vlan10_net" value="192.168.10"/>
+ <alias name="vlan10_tag" value="10"/>
+ <alias name="driver" value="ixgbe"/>
+ </define>
+ <network>
+ <host id="host1">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest1"/>
+ <ovs_bridge id="ovs_br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <vlan tag="{$vlan10_tag}">
+ <slaves>
+ <slave id="tap"/>
+ </slaves>
+ </vlan>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest1">
+ <interfaces>
+ <eth id="guestnic" label="to_guest1">
+ <params>
+ <param name="driver" value="virtio"/>
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.10/24</address>
+ <address>fc00:0:0:10::10/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+
+ <host id="host2">
+ <params>
+ <param name="machine_type" value="baremetal"/>
+ </params>
+ <interfaces>
+ <eth id="nic" label="to_switch">
+ <params>
+ <param name="driver" value="{$driver}" />
+ </params>
+ </eth>
+ <eth id="tap" label="to_guest2"/>
+ <ovs_bridge id="ovs_br">
+ <slaves>
+ <slave id="tap" />
+ <slave id="nic" />
+ </slaves>
+ <vlan tag="{$vlan10_tag}">
+ <slaves>
+ <slave id="tap"/>
+ </slaves>
+ </vlan>
+ </ovs_bridge>
+ </interfaces>
+ </host>
+ <host id="guest2">
+ <interfaces>
+ <eth id="guestnic" label="to_guest2">
+ <params>
+ <param name="driver" value="virtio"/>
+ </params>
+ <addresses>
+ <address>{$vlan10_net}.11/24</address>
+ <address>fc00:0:0:10::11/64</address>
+ </addresses>
+ </eth>
+ </interfaces>
+ </host>
+ </network>
+
+ <task python="virtual_ovs_bridge_vlan_in_host_mirrored.py" />
+</lnstrecipe>
--
2.7.4