F5Networks / f5-openstack-agent

The F5 Agent for OpenStack Neutron allows you to deploy BIG-IP services in an OpenStack environment.
http://clouddocs.f5.com/products/openstack/agent/latest
Apache License 2.0
14 stars 38 forks source link

Discovered in test test_snat_common_network.py: mismatch in the number of SNAT pool members created #864

Closed ssorenso closed 7 years ago

ssorenso commented 7 years ago
Traceback:
___________________________ test_snat_common_network ___________________________

bigip = <neutronless.testlib.bigip_client.BigIpClient object at 0x7f821209acd0>
services = [{'healthmonitors': [], 'listeners': [], 'loadbalancer': {'admin_state_up': True, 'description': '', 'gre_vteps': [], ...True, 'description': '', 'gre_vteps': [], 'id': '8856d03c-86f8-4533-93bc-31cfb3a1956b', ...}, 'members': [], ...}, ...]
icd_config = {'AGENT': '<oslo_config.cfg.GroupAttr object at 0x3b137d0>', 'OVS': '<oslo_config.cfg.GroupAttr object at 0x3ae9410>', 'advertise_mtu': False, 'advertised_tunnel_types': ['vxlan'], ...}
icontrol_driver = <f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver.iControlDriver object at 0x7f821211fc90>

    def test_snat_common_network(bigip, services, icd_config, icontrol_driver):
        """Test creating and deleting SNAT pools with common network listener.

        The test procedure is:
            - Assume a shared (common) network
            - Assume a separate non-shared tenant network
            - Create load balancer/listener on shared network
            - Expect that a SNAT pool is created in the tenant partition with a
              /Common member for LB subnet
            - Add pool and member, with member on separate tenant network.
            - Expect that the same SNAT pool now has an additional SNAT member for
              the pool member, referenced to member subnet.
            - Delete member and expect that SNAT pool only has member for original
              LB
            - Delete everything else and expect all network objects and tenant
              folder are deleted.
        """
        env_prefix = icd_config['environment_prefix']
        service_iter = iter(services)
        validator = ResourceValidator(bigip, env_prefix)

        # create loadbalancer
        service = service_iter.next()
        lb_reader = LoadbalancerReader(service)
        folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
        icontrol_driver._common_service_handler(service)
        assert bigip.folder_exists(folder)

        # validate SNAT pool created in tenant partition with one member for LB
        expected_members = []
        snat_pool_name = folder
        snat_pool_folder = folder
        subnet_id = service['loadbalancer']['vip_subnet_id']
        lb_snat_name = '/Common/snat-traffic-group-local-only-{0}_0'.\
            format(subnet_id)
        expected_members.append(lb_snat_name)
        validator.assert_snatpool_valid(
            snat_pool_name, snat_pool_folder, expected_members)

        # create listener
        service = service_iter.next()
        listener = service['listeners'][0]
        icontrol_driver._common_service_handler(service)
        validator.assert_virtual_valid(listener, folder)

        # create pool
        service = service_iter.next()
        pool = service['pools'][0]
        icontrol_driver._common_service_handler(service)
        validator.assert_pool_valid(pool, folder)

        # create member
        service = service_iter.next()
        icontrol_driver._common_service_handler(service)

        # validate that SNAT pool now has two SNAT members, one for LB and one for
        # the pool member
        member = service['members'][0]
        member_subnet_id = member['subnet_id']
        member_snat_name = '/{0}/snat-traffic-group-local-only-{1}_0'.\
            format(folder, member_subnet_id)
        expected_members.append(member_snat_name)
        validator.assert_snatpool_valid(
>           snat_pool_name, snat_pool_folder, expected_members)

../neutronless/loadbalancer/test_snat_common_network.py:138: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <neutronless.testlib.resource_validator.ResourceValidator object at 0x7f8212150690>
name = 'TEST_97210b7b76294cc9ba0a023a75f6d097'
folder = 'TEST_97210b7b76294cc9ba0a023a75f6d097'
members = ['/Common/snat-traffic-group-local-only-4dc7caad-f9a9-4050-914e-b60eb6cf8ef7_0', '/TEST_97210b7b76294cc9ba0a023a75f6d097/snat-traffic-group-local-only-9f663c20-37b6-4a5a-aa8d-d0575e61c059_0']

    def assert_snatpool_valid(self, name, folder, members):
        snatpool = self.bigip.get_resource(
            ResourceType.snatpool, name, partition=folder)

        # check snatpool exists and has same number of expected members
        assert snatpool
>       assert len(snatpool.members) == len(members)
E       AssertionError

../neutronless/testlib/resource_validator.py:221: AssertionError
----------------------------- Captured stderr call -----------------------------
ERROR:f5_openstack_agent.lbaasv2.drivers.bigip.l2_service:400 Unexpected Error: Bad Request for uri: https://10.190.24.195:443/mgmt/tm/net/tunnels/tunnel/
Text: u'{"code":400,"message":"010716ce:3: VXLAN tunnels must have unique VNIs - the specified VNI(91) conflicts with VNI of tunnel /Common/tunnel-vxlan-91.","errorStack":[],"apiError":3}'
Traceback (most recent call last):
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py", line 351, in _assure_device_network_vxlan
    self.network_helper.create_multipoint_tunnel(bigip, payload)
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/.tox/singlebigip/local/lib/python2.7/site-packages/oslo_log/helpers.py", line 67, in wrapper
    return method(*args, **kwargs)
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py", line 135, in create_multipoint_tunnel
    obj = t.create(**payload)
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/.tox/singlebigip/local/lib/python2.7/site-packages/f5/bigip/resource.py", line 974, in create
    return self._create(**kwargs)
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/.tox/singlebigip/local/lib/python2.7/site-packages/f5/bigip/resource.py", line 941, in _create
    response = session.post(_create_uri, json=kwargs, **requests_params)
  File "/usr/local/lib/python2.7/dist-packages/icontrol/session.py", line 272, in wrapper
    raise iControlUnexpectedHTTPError(error_message, response=response)
iControlUnexpectedHTTPError: 400 Unexpected Error: Bad Request for uri: https://10.190.24.195:443/mgmt/tm/net/tunnels/tunnel/
Text: u'{"code":400,"message":"010716ce:3: VXLAN tunnels must have unique VNIs - the specified VNI(91) conflicts with VNI of tunnel /Common/tunnel-vxlan-91.","errorStack":[],"apiError":3}'
ERROR:f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver:Prep-network exception: icontrol_driver: Failed to create vxlan tunnel: tunnel-vxlan-91
ERROR:f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver:Failed to create vxlan tunnel: tunnel-vxlan-91
Traceback (most recent call last):
  File "/var/jenkins/workspace/12.1.1-overcloud_smoke/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py", line 1233, in _common_service_handler
    raise error
VXLANCreationException: Failed to create vxlan tunnel: tunnel-vxlan-91
--------------------------- Captured stdout teardown ---------------------------

Deleting folder on test exit: TEST_97210b7b76294cc9ba0a023a75f6d097

Test instance: 12.1.1-overcloud_smoke.58.consoleText

OpenStack Release

Mitaka

Description

It's not clear at this time whether this is a cleanup issue, as test_f5_common_networks.py is a test that uses a service object similar to this one's. Perhaps creating new UUIDs for each of the objects used will eliminate this error.

Agent Version

Mitaka - 323189b9db8e3298832a1c7b3623ce33adc1a632

Operating System

Smoke tests (nightly)

OpenStack Release

mitaka

Deployment

Undercloud neutronless

ssorenso commented 7 years ago

Another instance in nightly:

dflanigan commented 7 years ago

PR #865 is in master. The PR used the keyword WIP rather than Fixes (or another keyword that would auto-close this issue), so the issue was not closed automatically.