In order to make it easier to add more PvP tests, refactor common PvP functionality into a base class from which all PvP Recipes (including current OvSDpdkPvPRecipe) can inherit.
Adrian Moreno (3): lnst.Recipes.ENRT: add BasePvPRecipe lnst.Recipes.ENRT: refactor OvSDpdkRecipe lnst.Recipes.ENRT: add VhostNetPvPRecipe
lnst/Recipes/ENRT/BasePvPRecipe.py | 283 +++++++++++++++++++++++++ lnst/Recipes/ENRT/OvS_DPDK_PvP.py | 228 +++++--------------- lnst/Recipes/ENRT/VhostNetPvPRecipe.py | 283 +++++++++++++++++++++++++ 3 files changed, 616 insertions(+), 178 deletions(-) create mode 100644 lnst/Recipes/ENRT/BasePvPRecipe.py create mode 100644 lnst/Recipes/ENRT/VhostNetPvPRecipe.py
It contains the common functionality that will help develop more PvP tests using virtual interfaces in the guest.
This patch does not add any functionality but just refactors what the OvsDpdkPvP recipe was already doing.
Signed-off-by: Adrian Moreno amorenoz@redhat.com --- lnst/Recipes/ENRT/BasePvPRecipe.py | 283 +++++++++++++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100644 lnst/Recipes/ENRT/BasePvPRecipe.py
diff --git a/lnst/Recipes/ENRT/BasePvPRecipe.py b/lnst/Recipes/ENRT/BasePvPRecipe.py new file mode 100644 index 0000000..34fa2e3 --- /dev/null +++ b/lnst/Recipes/ENRT/BasePvPRecipe.py @@ -0,0 +1,283 @@ +import xml.etree.ElementTree as ET +from enum import Enum + +from lnst.Common.LnstError import LnstError +from lnst.Common.Parameters import Param, IntParam, StrParam +from lnst.Common.IpAddress import ipaddress +from lnst.RecipeCommon.Ping import PingTestAndEvaluate +from lnst.Tests import Ping + +from lnst.RecipeCommon.Perf.Recipe import Recipe as PerfRecipe +from lnst.RecipeCommon.LibvirtControl import LibvirtControl +from lnst.RecipeCommon.Perf.Measurements import StatCPUMeasurement + +VirtioType = Enum('VirtType', 'VHOST_USER, VHOST_NET') + + +class VirtioDevice(object): + """ + Virtio Device + """ + def __init__(self, virt_type=None, hwaddr="", config=None): + if not isinstance(virt_type, (VirtioType, None)): + raise LnstError('Wrong virtio type') + self.type = virt_type # The virtio type + self.hwaddr = hwaddr # The MAC address of the device + self.config = config # Type-specific configuration + + +class BasePvPTestConf(object): + class BaseHostConf(object): + def __init__(self): + self.host = None + self.nics = [] + + class BaseGuestConf(BaseHostConf): + def __init__(self): + super(BasePvPTestConf.BaseGuestConf, self).__init__() + self.name = "" + self.virtctl = None + self.virtio_devs = [] # Array of VirtDevices + + def __init__(self, generator, dut, guest): + self.generator = generator + self.dut = dut + self.guest = guest + + +class BasePvPRecipe(PingTestAndEvaluate, PerfRecipe): + """ + Base PvP Recipe: + TODO: Describe stages and configurations + """ + + driver = StrParam(mandatory=True) + + trex_dir = StrParam(mandatory=True) + + """ + Guest configuration parameters + """ + guest_name = StrParam(mandatory=True) + guest_cpus = StrParam(mandatory=True) + guest_emulatorpin_cpu = StrParam(mandatory=True) + guest_mem_size = IntParam(default=16777216) + + 
""" + Packet generator + """ + + """ + Perf tool configuration parameters + """ + cpu_perf_tool = Param(default=StatCPUMeasurement) + + perf_duration = IntParam(default=60) + perf_iterations = IntParam(default=5) + perf_msg_size = IntParam(default=64) + perf_streams = IntParam(default=1) + + nr_hugepages = IntParam(default=13000) + # TODO: Allow 1G hugepages as well + + def warmup(self, ping_config): + """ Generate warmup pings + This ensures any in-between switches learn the corresponding MAC addresses + Args: + ping_config: array of tuples containing [OriginHost, OriginDevice, DestDevice]. + """ + try: + self.warmup_configuration(ping_config) + self.warmup_pings(ping_config) + finally: + self.warmup_deconfiguration(ping_config) + + def warmup_configuration(self, ping_config): + if len(ping_config) > 255: + raise LnstError("Too many warmup elements.") + for i, elem in enumerate(ping_config): + orig = elem[1] + dest = elem[2] + + orig.ip_add(ipaddress('192.168.{}.1/24'.format(i))) + dest.ip_add(ipaddress('192.168.{}.2/24'.format(i))) + + orig.up() + dest.up() + + def warmup_pings(self, ping_config): + jobs = [] + for i, elem in enumerate(ping_config): + host = elem[0] + orig = elem[1] + dest = elem[2] + jobs.append(host.run(Ping(interface=orig.ips[0], dst=dest.ips[0]))) + + for job in jobs: + job.wait() + + # TODO eval + + def warmup_deconfiguration(self, ping_config): + for i, elem in enumerate(ping_config): + orig = elem[1] + dest = elem[2] + + orig.ip_flush() + dest.ip_flush() + + def base_dpdk_configuration(self, dpdk_host_cfg): + """ Base DPDK configuration in a host + Args: + dpdk_host_cfg: An instance of BaseHostConf + """ + host = dpdk_host_cfg.host + + for nic in dpdk_host_cfg.nics: + nic.enable_readonly_cache() + + # TODO service should be a host method + host.run("service irqbalance stop") + + # This will pin all irqs to cpu #0 + self._pin_irqs(host, 0) + host.run("echo -n {} /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" + 
.format(self.params.nr_hugepages)) + + host.run("modprobe vfio-pci") + for nic in dpdk_host_cfg.nics: + host.run("driverctl set-override {} vfio-pci".format(nic.bus_info)) + + def base_dpdk_deconfiguration(self, dpdk_host_cfg, service_list=[]): + """ Undo Base DPDK configuration in a host + Args: + dpdk_host_cfg: An instance of BaseHostConf + service_list: list of services using dpdk that might stop driverctl + from being able to unset-override the host's interfaces. + They will get restarted. + """ + host = dpdk_host_cfg.host + # TODO service should be a host method + host.run("service irqbalance start") + for nic in dpdk_host_cfg.nics: + job = host.run("driverctl unset-override {}".format(nic.bus_info), + bg=True) + for service in service_list: + host.run("systemctl restart {}". format(service)) + + if not job.wait(10): + job.kill() + + """ + Guest Management + """ + def init_guest_virtctl(self, host_conf, guest_conf): + """ + Initialize Libvirt Control + Args: + host_conf: An instance of BaseHostConf with the host info + guest_conf: An instance of BaseGuestConf with the guest info + """ + host = host_conf.host + + guest_conf.name = self.params.guest_name + guest_conf.virtctl = host.init_class(LibvirtControl) + + def shutdown_guest(self, guest_conf): + """ Shutdown a guest + Args: + guest_conf: An instance of BaseGuestConf with the guest info + """ + virtctl = guest_conf.virtctl + if virtctl: + virtctl.vm_shutdown(guest_conf.name) + self.ctl.wait_for_condition(lambda: + not virtctl.is_vm_running(guest_conf.name)) + + def init_guest_xml(self, guest_conf): + """ Initialize the guest XML configuration with some basic values + Args: + guest_conf: An instance of BaseGuestConf with the guest info + """ + virtctl = guest_conf.virtctl + guest_xml = ET.fromstring(virtctl.vm_XMLDesc(guest_conf.name)) + guest_conf.libvirt_xml = guest_xml + + cputune = ET.SubElement(guest_xml, "cputune") + for i, cpu_id in enumerate(self.params.guest_cpus.split(',')): + 
ET.SubElement(cputune, "vcpupin", vcpu=str(i), cpuset=str(cpu_id)) + + ET.SubElement(cputune, + "emulatorpin", + cpuset=str(self.params.guest_emulatorpin_cpu)) + + return guest_xml + + def create_guest(self, host_conf, guest_conf): + """ Create a guest + Args: + host_conf: The host_conf (instance of BaseHostConf) + guest_conf: The host_conf (instance of BaseGuestConf) + """ + host = host_conf.host + virtctl = guest_conf.virtctl + guest_xml = guest_conf.libvirt_xml + + str_xml = ET.tostring(guest_xml, encoding='utf8', method='xml') + virtctl.createXML(str_xml.decode('utf8')) + + guest_ip_job = host.run("gethostip -d {}".format(guest_conf.name)) + guest_ip = guest_ip_job.stdout.strip() + if not guest_ip: + raise LnstError("Could not determine guest's IP address") + + guest = self.ctl.connect_host(guest_ip, timeout=60, machine_id="guest1") + guest_conf.host = guest + + for i, vnic in enumerate(guest_conf.virtio_devs): + if not vnic.hwaddr: + raise LnstError("Virtio NIC HW Address not configured") + guest.map_device("eth{}".format(i), dict(hwaddr=vnic.hwaddr)) + device = getattr(guest, "eth{}".format(i)) + guest_conf.nics.append(device) + + return guest + + def pvp_test(self, config): + """ Perform the PvP test + Args: + config: An instance of BasePvPTestConf + """ + try: + self.test_wide_configuration(config) + + perf_config = self.generate_perf_config(config) + result = self.perf_test(perf_config) + self.perf_report_and_evaluate(result) + finally: + self.test_wide_deconfiguration(config) + + def _pin_irqs(self, host, cpu): + mask = 1 << cpu + host.run("MASK={:x}; " + "for i in `ls -d /proc/irq/[0-9]*` ; " + "do echo $MASK > ${{i}}/smp_affinity ; " + "done".format(mask)) + + """ + Methods to be overridden + """ + def generate_perf_config(self, config): + """ Generate the perf configuration + Args: + config: The global test configuration + Returns: + An instance of Perf.Recipe.RecipeConf + """ + pass + + def test_wide_deconfiguration(self, config): + pass + + def 
test_wide_configuration(self, config): + pass
Inherit common functionality from BasePvPRecipe
Signed-off-by: Adrian Moreno amorenoz@redhat.com --- lnst/Recipes/ENRT/OvS_DPDK_PvP.py | 228 +++++++----------------------- 1 file changed, 50 insertions(+), 178 deletions(-)
diff --git a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py index fd9e25f..cd6161d 100644 --- a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py +++ b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py @@ -3,6 +3,8 @@ import time import signal import xml.etree.ElementTree as ET
+from lnst.Recipes.ENRT.BasePvPRecipe import BasePvPTestConf, BasePvPRecipe +from lnst.Recipes.ENRT.BasePvPRecipe import VirtioDevice, VirtioType from lnst.Controller import HostReq, DeviceReq, RecipeParam from lnst.Common.Logs import log_exc_traceback from lnst.Common.Parameters import Param, IntParam, StrParam, BoolParam @@ -21,33 +23,26 @@ from lnst.RecipeCommon.LibvirtControl import LibvirtControl
from lnst.Recipes.ENRT.BaseEnrtRecipe import EnrtConfiguration
-class PvPTestConf(object): - class HostConf(object): +class OVSPvPTestConf(BasePvPTestConf): + class DUTConf(BasePvPTestConf.BaseHostConf): def __init__(self): - self.host = None - self.nics = [] - - class DUTConf(HostConf): - def __init__(self): - super(PvPTestConf.DUTConf, self).__init__() + super(OVSPvPTestConf.DUTConf, self).__init__() self.trex_path = "" self.dpdk_ports = None self.vm_ports = None
- class GuestConf(HostConf): + class GuestConf(BasePvPTestConf.BaseGuestConf): def __init__(self): - super(PvPTestConf.GuestConf, self).__init__() - self.name = "" - self.virtctl = None + super(OVSPvPTestConf.GuestConf, self).__init__() self.testpmd = None - self.vhost_nics = None
def __init__(self): - self.generator = self.HostConf() + self.generator = self.BasePvPTestConf.BaseHostConf() self.dut = self.DUTConf() self.guest = self.GuestConf()
-class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): + +class OvSDPDKPvPRecipe(BasePvPRecipe): m1 = HostReq() m1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver")) m1.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver")) @@ -56,26 +51,13 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): m2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver")) m2.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver"))
- driver = StrParam(mandatory=True) - - trex_dir = StrParam(mandatory=True) - - guest_name = StrParam(mandatory=True) - guest_cpus = StrParam(mandatory=True) - guest_emulatorpin_cpu = StrParam(mandatory=True) guest_dpdk_cores = StrParam(mandatory=True) guest_testpmd_cores = StrParam(mandatory=True) - guest_mem_size = IntParam(default=16777216)
- host1_dpdk_cores = StrParam(mandatory=True) host2_pmd_cores = StrParam(mandatory=True) host2_l_cores = StrParam(mandatory=True) - nr_hugepages = IntParam(default=13000) socket_mem = IntParam(default=2048)
- dev_intr_cpu = IntParam(default=0) - - cpu_perf_tool = Param(default=StatCPUMeasurement)
perf_duration = IntParam(default=60) @@ -85,62 +67,24 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): #doesn't do anything for now... perf_streams = IntParam(default=1)
- perf_usr_comment = StrParam(default="") - def test(self): self.check_dependencies() - self.warmup() - self.pvp_test() + ping_config = self.gen_ping_config() + self.warmup(ping_config) + + config = OVSPvPTestConf() + self.pvp_test(config)
def check_dependencies(self): pass
- def warmup(self): - try: - self.warmup_configuration() - self.warmup_pings() - finally: - self.warmup_deconfiguration() - - def warmup_configuration(self): - m1, m2 = self.matched.m1, self.matched.m2 - m1.eth0.ip_add(ipaddress("192.168.1.1/24")) - m1.eth1.ip_add(ipaddress("192.168.1.3/24")) - - m2.eth0.ip_add(ipaddress("192.168.1.2/24")) - m2.eth1.ip_add(ipaddress("192.168.1.4/24")) - - def warmup_pings(self): - m1, m2 = self.matched.m1, self.matched.m2 - - jobs = [] - jobs.append(m1.run(Ping(interface=m1.eth0.ips[0], dst=m2.eth0.ips[0]), bg=True)) - jobs.append(m1.run(Ping(interface=m1.eth1.ips[0], dst=m2.eth1.ips[0]), bg=True)) - jobs.append(m2.run(Ping(interface=m2.eth0.ips[0], dst=m1.eth0.ips[0]), bg=True)) - jobs.append(m2.run(Ping(interface=m2.eth1.ips[0], dst=m1.eth1.ips[0]), bg=True)) - - for job in jobs: - job.wait() - #TODO eval - - def warmup_deconfiguration(self): - m1, m2 = self.matched.m1, self.matched.m2 - m1.eth0.ip_flush() - m1.eth1.ip_flush() - - m2.eth0.ip_flush() - m2.eth1.ip_flush() - - def pvp_test(self): - try: - config = PvPTestConf() - self.test_wide_configuration(config) - - perf_config = self.generate_perf_config(config) - result = self.perf_test(perf_config) - self.perf_report_and_evaluate(result) - finally: - self.test_wide_deconfiguration(config) + def gen_ping_config(self): + return [ + (self.matched.m1, self.matched.m1.eth0, self.matched.m2.eth0), + (self.matched.m1, self.matched.m1.eth1, self.matched.m2.eth1), + (self.matched.m2, self.matched.m2.eth0, self.matched.m1.eth0), + (self.matched.m2, self.matched.m2.eth1, self.matched.m2.eth1) + ]
def test_wide_configuration(self, config): config.generator.host = self.matched.m1 @@ -190,14 +134,14 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): pci_addr=dst_nic.bus_info, ip_addr=dst_nic.ips[0]) flows.append(PerfFlow( - type = "pvp_loop_rate", - generator = config.generator.host, - generator_bind = src_bind, - receiver = config.dut.host, - receiver_bind = dst_bind, - msg_size = self.params.perf_msg_size, - duration = self.params.perf_duration, - parallel_streams = self.params.perf_streams, + type="pvp_loop_rate", + generator=config.generator.host, + generator_bind=src_bind, + receiver=config.dut.host, + receiver_bind=dst_bind, + msg_size=self.params.perf_msg_size, + duration=self.params.perf_duration, + parallel_streams=self.params.perf_streams, cpupin=None))
return PerfRecipeConf( @@ -224,12 +168,12 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): config.dut.host.run("ovs-vsctl del-br br0") config.dut.host.run("service openvswitch restart")
- self.base_dpdk_deconfiguration(config.dut) + self.base_dpdk_deconfiguration(config.dut, ["openvswitch"]) except: log_exc_traceback()
try: - #returning the guest to the original running state + # returning the guest to the original running state self.shutdown_guest(config.guest) config.guest.virtctl.vm_start(config.guest.name) except: @@ -244,37 +188,6 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): except: log_exc_traceback()
- def base_dpdk_configuration(self, dpdk_host_cfg): - host = dpdk_host_cfg.host - - for nic in dpdk_host_cfg.nics: - nic.enable_readonly_cache() - - #TODO service should be a host method - host.run("service irqbalance stop") - - # this will pin all irqs to cpu #0 - self._pin_irqs(host, 0) - host.run("echo -n {} /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" - .format(self.params.nr_hugepages)) - - host.run("modprobe vfio-pci") - for nic in dpdk_host_cfg.nics: - host.run("driverctl set-override {} vfio-pci".format(nic.bus_info)) - - def base_dpdk_deconfiguration(self, dpdk_host_cfg): - host = dpdk_host_cfg.host - #TODO service should be a host method - host.run("service irqbalance start") - for nic in dpdk_host_cfg.nics: - job = host.run("driverctl unset-override {}".format(nic.bus_info), - bg=True) - if isinstance(dpdk_host_cfg, PvPTestConf.DUTConf): - host.run("systemctl restart openvswitch") - - if not job.wait(10): - job.kill() - def ovs_dpdk_bridge_configuration(self, host_conf): host = host_conf.host host.run("systemctl enable openvswitch") @@ -288,8 +201,8 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): .format(self.params.host2_l_cores)) host.run("systemctl restart openvswitch")
- #TODO use an actual OvS Device object - #TODO config.dut.nics.append(CachedRemoteDevice(m2.ovs)) + # TODO use an actual OvS Device object + # TODO config.dut.nics.append(CachedRemoteDevice(m2.ovs)) host.run("ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev")
host_conf.dpdk_ports = [] @@ -301,29 +214,23 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): host_conf.dpdk_ports.append( ("dpdk{}".format(i), "1{}".format(i)))
- def init_guest_virtctl(self, host_conf, guest_conf): - host = host_conf.host - - guest_conf.name = self.params.guest_name - guest_conf.virtctl = host.init_class(LibvirtControl) - - def shutdown_guest(self, guest_conf): - virtctl = guest_conf.virtctl - virtctl.vm_shutdown(guest_conf.name) - self.ctl.wait_for_condition(lambda: - not virtctl.is_vm_running(guest_conf.name)) - def configure_guest_xml(self, host_conf, guest_conf): - virtctl = guest_conf.virtctl - guest_xml = ET.fromstring(virtctl.vm_XMLDesc(guest_conf.name)) - guest_conf.libvirt_xml = guest_xml + # Initialize guest XML + guest_xml = self.init_guest_xml(guest_conf)
- guest_conf.vhost_nics = [] - vhosts = guest_conf.vhost_nics + guest_conf.virtio_devs = [] for i, nic in enumerate(host_conf.nics): - path = self._xml_add_vhostuser_dev( - guest_xml, "vhost_nic{i}".format(i=i), nic.hwaddr) - vhosts.append((path, nic.hwaddr)) + path = self._xml_add_vhostuser_dev(guest_xml, + "vhost_nic{i}".format(i=i), + nic.hwaddr) + + virtio_dev = VirtioDevice(VirtioType.VHOST_USER, + str(nic.hwaddr), + config={ + "path": path + } + ) + guest_conf.virtio_devs.append(virtio_dev)
cpu = guest_xml.find("cpu") numa = ET.SubElement(cpu, 'numa') @@ -331,14 +238,6 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): memory=str(self.params.guest_mem_size), unit='KiB', memAccess='shared')
- cputune = ET.SubElement(guest_xml, "cputune") - for i, cpu_id in enumerate(self.params.guest_cpus.split(',')): - ET.SubElement(cputune, "vcpupin", vcpu=str(i), cpuset=str(cpu_id)) - - ET.SubElement(cputune, - "emulatorpin", - cpuset=str(self.params.guest_emulatorpin_cpu)) - memoryBacking = ET.SubElement(guest_xml, "memoryBacking") hugepages = ET.SubElement(memoryBacking, "hugepages") ET.SubElement(hugepages, "page", size="2", unit="M", nodeset="0") @@ -348,13 +247,13 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): def ovs_dpdk_bridge_vm_configuration(self, host_conf, guest_conf): host = host_conf.host host_conf.vm_ports = [] - for i, nic in enumerate(guest_conf.vhost_nics): + for i, vhuser_nic in enumerate(guest_conf.virtio_devs): host.run( "ovs-vsctl add-port br0 guest_nic{i} -- " "set interface guest_nic{i} type=dpdkvhostuserclient " "ofport_request=2{i} " "options:vhost-server-path={path}".format( - i=i, path=nic[0])) + i=i, path=vhuser_nic.config.get("path"))) host_conf.vm_ports.append( ("guest_nic{}".format(i), "2{}".format(i)))
@@ -367,27 +266,6 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): host.run("ovs-ofctl add-flow br0 in_port={},action={}" .format(vm_port[1], dpdk_port[1]))
- def create_guest(self, host_conf, guest_conf): - host = host_conf.host - virtctl = guest_conf.virtctl - guest_xml = guest_conf.libvirt_xml - - str_xml = ET.tostring(guest_xml, encoding='utf8', method='xml') - virtctl.createXML(str_xml.decode('utf8')) - - - guest_ip_job = host.run("gethostip -d {}".format(guest_conf.name)) - guest_ip = guest_ip_job.stdout.strip() - - guest = self.ctl.connect_host(guest_ip, timeout=60, machine_id="guest1") - guest_conf.host = guest - - for i, nic in enumerate(guest_conf.vhost_nics): - guest.map_device("eth{}".format(i), dict(hwaddr=nic[1])) - device = getattr(guest, "eth{}".format(i)) - guest_conf.nics.append(device) - return guest - def guest_vfio_modprobe(self, guest_conf): guest = guest_conf.host guest.run("modprobe -r vfio_iommu_type1") @@ -418,9 +296,3 @@ class OvSDPDKPvPRecipe(PingTestAndEvaluate, PerfRecipe): path=vhost_server_path, mode='server') return vhost_server_path
- def _pin_irqs(self, host, cpu): - mask = 1 << cpu - host.run("MASK={:x}; " - "for i in `ls -d /proc/irq/[0-9]*` ; " - "do echo $MASK > ${{i}}/smp_affinity ; " - "done".format(cpu))
Inherit common functionality from BasePvPRecipe
Use a bridge-based forwarding plane both in guest and host. In the future it may be worth adding more forwarding planes (tc, xdp, etc.).
In this case the traffic flows from one trex interface to the other making the bridge in the guest do the switching, so the destination IP address of the TRex flows is the generator's.
Use taskset to pin the vhost-net kernel threads.
Signed-off-by: Adrian Moreno amorenoz@redhat.com --- lnst/Recipes/ENRT/VhostNetPvPRecipe.py | 283 +++++++++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100644 lnst/Recipes/ENRT/VhostNetPvPRecipe.py
diff --git a/lnst/Recipes/ENRT/VhostNetPvPRecipe.py b/lnst/Recipes/ENRT/VhostNetPvPRecipe.py new file mode 100644 index 0000000..780e22f --- /dev/null +++ b/lnst/Recipes/ENRT/VhostNetPvPRecipe.py @@ -0,0 +1,283 @@ +import xml.etree.ElementTree as ET + +from lnst.Recipes.ENRT.BasePvPRecipe import BasePvPTestConf, BasePvPRecipe +from lnst.Recipes.ENRT.BasePvPRecipe import VirtioDevice, VirtioType +from lnst.Controller import HostReq, DeviceReq, RecipeParam + +from lnst.Common.Logs import log_exc_traceback +from lnst.Common.Parameters import Param, StrParam, ParamError +from lnst.Common.IpAddress import ipaddress +from lnst.Devices import BridgeDevice + +from lnst.RecipeCommon.Perf.Recipe import RecipeConf as PerfRecipeConf +from lnst.RecipeCommon.Perf.Measurements import Flow as PerfFlow +from lnst.RecipeCommon.Perf.Measurements import TRexFlowMeasurement +from lnst.RecipeCommon.Perf.Measurements import StatCPUMeasurement + + +class VhostPvPTestConf(BasePvPTestConf): + def __init__(self): + self.generator = self.BaseHostConf() + self.dut = self.BaseHostConf() + self.guest = self.BaseGuestConf() + + +class VhostNetPvPRecipe(BasePvPRecipe): + generator_req = HostReq() + generator_req.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver")) + generator_req.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver")) + + host_req = HostReq(with_guest="yes") + host_req.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver")) + host_req.eth1 = DeviceReq(label="net1", driver=RecipeParam("driver")) + + vhost_cpus = StrParam(mandatory=True) # The CPUs used by vhost-net kernel threads + + # TODO: Study the possibility of adding more forwarding engines + # like xdp or tc + guest_fwd = StrParam(default='bridge') + host_fwd = StrParam(default='bridge') + + guest_macs = Param(default=['02:fa:fe:fa:fe:01', '02:fa:fe:fa:fe:02']) + + generator_dpdk_cores = StrParam(mandatory=True) + + cpu_perf_tool = Param(default=StatCPUMeasurement) + + def test(self): + self.check_params() 
+ self.warmup(self.gen_ping_config()) + + config = VhostPvPTestConf() + self.pvp_test(config) + + def check_params(self): + # Check emulatorpin range contains vhost cores + emulator_min, emulator_max = self.params.guest_emulatorpin_cpu.split('-') + vhost_cpus = self.params.vhost_cpus.split(',') + for vcpu in vhost_cpus: + if vcpu > emulator_max or vcpu < emulator_min: + raise ParamError("Emulator pin must contain vhost cpus") + + def gen_ping_config(self): + return [ + (self.matched.generator_req, + self.matched.generator_req.eth0, + self.matched.host_req.eth0), + (self.matched.generator_req, + self.matched.generator_req.eth1, + self.matched.host_req.eth1), + (self.matched.host_req, + self.matched.host_req.eth0, + self.matched.generator_req.eth0), + (self.matched.host_req, + self.matched.host_req.eth1, + self.matched.host_req.eth1) + ] + + def test_wide_configuration(self, config): + + config.generator.host = self.matched.generator_req + config.generator.nics.append(self.matched.generator_req.eth0) + config.generator.nics.append(self.matched.generator_req.eth1) + + self.matched.generator_req.eth0.ip_add(ipaddress("192.168.1.1/24")) + self.matched.generator_req.eth1.ip_add(ipaddress("192.168.1.2/24")) + self.matched.generator_req.eth0.up() + self.matched.generator_req.eth1.up() + + self.base_dpdk_configuration(config.generator) + + config.dut.host = self.matched.host_req + config.dut.nics.append(self.matched.host_req.eth0) + config.dut.nics.append(self.matched.host_req.eth1) + self.matched.host_req.eth0.up() + self.matched.host_req.eth1.up() + + self.host_forwarding_configuration(config.dut) + + self.init_guest_virtctl(config.dut, config.guest) + self.shutdown_guest(config.guest) + self.configure_guest_xml(config.dut, config.guest) + + self.create_guest(config.dut, config.guest) + self.guest_forwarding(config.guest) + + self.host_forwarding_vm_configuration(config.dut, config.guest) + + return config + + def generate_perf_config(self, config): + flows = [] + for i 
in range(0, min(len(config.generator.nics), + len(config.guest.nics))): + src_nic = config.generator.nics[i] + src_ip = src_nic.ips[0] + dst_nic = config.guest.nics[i] + dst_ip = config.generator.nics[((i + 1) % len(config.generator.nics))].ips[0] + + src_bind = dict(mac_addr=src_nic.hwaddr, + pci_addr=src_nic.bus_info, + ip_addr=src_ip) + dst_bind = dict(mac_addr=dst_nic.hwaddr, + pci_addr=dst_nic.bus_info, + ip_addr=dst_ip) + flows.append(PerfFlow(type="pvp_loop_rate", + generator=config.generator.host, + generator_bind=src_bind, + receiver=config.guest.host, + receiver_bind=dst_bind, + msg_size=self.params.perf_msg_size, + duration=self.params.perf_duration, + parallel_streams=self.params.perf_streams, + cpupin=None) + ) + + return PerfRecipeConf( + measurements=[ + self.params.cpu_perf_tool([config.generator.host, + config.dut.host, + config.guest.host]), + TRexFlowMeasurement(flows, self.params.trex_dir) + ], + iterations=self.params.perf_iterations) + + def test_wide_deconfiguration(self, config): + try: + self.guest_deconfigure(config.guest) + except: + log_exc_traceback() + + try: + self.host_forwarding_vm_deconfiguration(config.dut, config.guest) + except: + log_exc_traceback() + + try: + self.host_forwarding_deconfiguration(config.dut) + except: + log_exc_traceback() + + try: + self.base_dpdk_deconfiguration(config.generator) + except: + log_exc_traceback() + + try: + #returning the guest to the original running state + self.shutdown_guest(config.guest) + if config.guest.virtctl: + config.guest.virtctl.vm_start(config.guest.name) + except: + log_exc_traceback() + + try: + config.generator.host.run("service irqbalance start") + except: + log_exc_traceback() + + def host_forwarding_vm_configuration(self, host_conf, guest_conf): + """ + VM - specific forwarding configuration + Pin vhost-net kernel threads to the cpus specfied by vhost_cpus param + """ + # Get a comma separated list of the vhost-net kernel threads' PIDs + vhost_pids = host_conf.host.run( + 
""" ps --ppid 2 | grep "vhost-$(pidof qemu-kvm)" """ + """ | awk '{if (length(pidstring) == 0) { """ + """ pidstring=$1 """ + """ } else { """ + """ pidstring = sprintf("%s,%s", pidstring, $1) """ + """ }}; """ + """ END{ print pidstring }'""") + for pid, cpu in zip(vhost_pids.stdout.strip().split(','), + self.params.vhost_cpus.split(',')): + mask = 1 << int(cpu) + host_conf.host.run('taskset -p {:x} {}'.format(mask, pid)) + + def host_forwarding_vm_deconfiguration(self, host_conf, guest_conf): + """ + VM - specific forwarding deconfiguration + """ + pass + + def host_forwarding_configuration(self, host_conf): + if (self.params.host_fwd == 'bridge'): + host_conf.bridges = [] + host_conf.host.br0 = BridgeDevice() + host_conf.host.br1 = BridgeDevice() + + host_conf.host.br0.slave_add(host_conf.nics[0]) + host_conf.host.br1.slave_add(host_conf.nics[1]) + + host_conf.host.br0.up() + host_conf.host.br1.up() + + host_conf.bridges.append(host_conf.host.br0) + host_conf.bridges.append(host_conf.host.br1) + + else: + # TBD + return + + def host_forwarding_deconfiguration(self, host_conf): + if (self.params.host_fwd == 'bridge'): + if host_conf.host.br0: + host_conf.host.br0.slave_del( + host_conf.nics[0]) + if host_conf.host.br1: + host_conf.host.br1.slave_del( + host_conf.nics[1]) + else: + # TBD + return + + def configure_guest_xml(self, host_conf, guest_conf): + guest_xml = self.init_guest_xml(guest_conf) + + virtctl = guest_conf.virtctl + guest_xml = ET.fromstring(virtctl.vm_XMLDesc(guest_conf.name)) + guest_conf.libvirt_xml = guest_xml + + guest_conf.virtio_devs = [] + for i, nic in enumerate(host_conf.nics): + self._xml_add_vhostnet_dev(guest_xml, + "vhostnet-{i}".format(i=i), + host_conf.bridges[i], + self.params.guest_macs[i]) + + vhost_device = VirtioDevice(VirtioType.VHOST_NET, + self.params.guest_macs[i], + config={ + "bridge": host_conf.bridges[i] + } + ) + guest_conf.virtio_devs.append(vhost_device) + + return guest_xml + + def guest_forwarding(self, 
guest_conf): + guest = guest_conf.host + if (self.params.guest_fwd == 'bridge'): + guest.bridge = BridgeDevice() + guest.bridge.name = 'guestbr0' + for nic in guest_conf.nics: + guest.bridge.slave_add(nic) + nic.up() + + guest.run("echo 1 > /proc/sys/net/ipv4/ip_forward") + + def guest_deconfigure(self, guest_conf): + if guest_conf.host: + guest_conf.host.run("echo 0 > /proc/sys/net/ipv4/ip_forward") + + def _xml_add_vhostnet_dev(self, guest_xml, name, bridge, mac_addr): + devices = guest_xml.find("devices") + + interface = ET.SubElement(devices, 'interface', type='bridge') + ET.SubElement(interface, 'source', bridge=str(bridge.name)) + ET.SubElement(interface, 'mac', address=str(mac_addr)) + ET.SubElement(interface, 'model', type='virtio') + ET.SubElement(interface, 'driver', name='vhost') + # TODO: Add driver suboptions + return guest_xml
On Fri, Oct 25, 2019 at 10:00:35AM +0200, Adrian Moreno wrote:
In order to make it easier to add more PvP tests, refactor common PvP functionality into a base class from which all PvP Recipes (including current OvSDpdkPvPRecipe) can inherit.
Adrian Moreno (3): lnst.Recipes.ENRT: add BasePvPRecipe lnst.Recipes.ENRT: refactor OvSDpdkRecipe lnst.Recipes.ENRT: add VhostNetPvPRecipe
lnst/Recipes/ENRT/BasePvPRecipe.py | 283 +++++++++++++++++++++++++ lnst/Recipes/ENRT/OvS_DPDK_PvP.py | 228 +++++--------------- lnst/Recipes/ENRT/VhostNetPvPRecipe.py | 283 +++++++++++++++++++++++++ 3 files changed, 616 insertions(+), 178 deletions(-) create mode 100644 lnst/Recipes/ENRT/BasePvPRecipe.py create mode 100644 lnst/Recipes/ENRT/VhostNetPvPRecipe.py
-- 2.21.0
Looks good, pushing upstream, thanks.
-Ondrej
lnst-developers@lists.fedorahosted.org