Historical context:
In LBaaSv1, the agent manager ran a periodic task every 30 seconds to update pool and member stats:
def collect_stats(self, context):
    if not self.plugin_rpc:
        return
    pool_services = copy.deepcopy(self.cache.services)
    for pool_id in pool_services:
        service = pool_services[pool_id]
        if self.agent_host == service.agent_host:
            try:
                LOG.debug("collecting stats for pool %s" % service.pool_id)
                stats = self.lbdriver.get_stats(
                    self.plugin_rpc.get_service_by_pool_id(
                        service.pool_id,
                        self.conf.f5_global_routed_mode
                    )
                )
                if stats:
                    self.plugin_rpc.update_pool_stats(service.pool_id,
                                                      stats)
            except Exception as e:
                LOG.exception(_('Error updating stats: ' + str(e.message)))
                self.needs_resync = True
You can also force a get_stats call through the plugin, as sketched below.
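For example, a minimal sketch using python-neutronclient (the credentials, endpoint, and pool UUID are placeholders; depending on the plugin driver, the response may come from the stats the agent last pushed via update_pool_stats rather than a live query to the BIG-IP):

    # Minimal sketch: request pool stats through the Neutron LBaaSv1 API.
    # The endpoint/credentials and pool UUID below are placeholders.
    from neutronclient.v2_0 import client

    neutron = client.Client(username='admin',
                            password='PASSWORD',
                            tenant_name='admin',
                            auth_url='http://controller:5000/v2.0')

    # GET /lb/pools/<pool_id>/stats
    pool_id = 'POOL-UUID'
    print(neutron.retrieve_pool_stats(pool_id))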
Also: when we do get stats, the older iControl driver wouldn't just collect the connection stats; it would also get the monitor state of all pool members so that their state would be updated in Neutron to DOWN or ACTIVE. This is the expected use of this attribute according to the reference implementation.
@is_connected
def get_stats(self, service):
    """Get service stats"""
    # use pool stats because the pool_id identifies
    # the service definition...
    stats = {}
    stats[lb_const.STATS_IN_BYTES] = 0
    stats[lb_const.STATS_OUT_BYTES] = 0
    stats[lb_const.STATS_ACTIVE_CONNECTIONS] = 0
    stats[lb_const.STATS_TOTAL_CONNECTIONS] = 0
    # add a members stats return dictionary
    members = {}
    for hostbigip in self.get_all_bigips():
        # It appears that stats are collected for pools in a pending
        # delete state, which means that if those messages are queued
        # (or delayed) it can result in the processing of a stats
        # request after the pool and tenant are long gone. Check if
        # the tenant exists.
        if not service['pool'] or not hostbigip.system.folder_exists(
                bigip_interfaces.OBJ_PREFIX + service['pool']['tenant_id']):
            return None
        pool = service['pool']
        pool_stats = hostbigip.pool.get_statistics(
            name=pool['id'],
            folder=pool['tenant_id'],
            config_mode=self.conf.icontrol_config_mode)
        if 'STATISTIC_SERVER_SIDE_BYTES_IN' in pool_stats:
            stats[lb_const.STATS_IN_BYTES] += \
                pool_stats['STATISTIC_SERVER_SIDE_BYTES_IN']
            stats[lb_const.STATS_OUT_BYTES] += \
                pool_stats['STATISTIC_SERVER_SIDE_BYTES_OUT']
            stats[lb_const.STATS_ACTIVE_CONNECTIONS] += \
                pool_stats['STATISTIC_SERVER_SIDE_CURRENT_CONNECTIONS']
            stats[lb_const.STATS_TOTAL_CONNECTIONS] += \
                pool_stats['STATISTIC_SERVER_SIDE_TOTAL_CONNECTIONS']
            # are there members to update status
            if 'members' in service:
                # only query BIG-IP pool members if they are
                # not in a state indicating provisioning or an
                # error provisioning the pool member
                some_members_require_status_update = False
                update_if_status = [plugin_const.ACTIVE,
                                    plugin_const.DOWN,
                                    plugin_const.INACTIVE]
                if PLUGIN_CREATED_FLAG not in update_if_status:
                    update_if_status.append(PLUGIN_CREATED_FLAG)
                for member in service['members']:
                    if member['status'] in update_if_status:
                        some_members_require_status_update = True
                # do we have members that are in a
                # state to update their status
                if some_members_require_status_update:
                    # query pool members on each BIG-IP
                    monitor_states = \
                        hostbigip.pool.get_members_monitor_status(
                            name=pool['id'],
                            folder=pool['tenant_id'],
                            config_mode=self.conf.icontrol_config_mode
                        )
                    for member in service['members']:
                        if member['status'] in update_if_status:
                            # create the entry for this
                            # member in the return status
                            # dictionary set to INACTIVE
                            if not member['id'] in members:
                                members[member['id']] = \
                                    {'status': plugin_const.INACTIVE}
                            # check if it is down or up by monitor
                            # and update the status
                            for state in monitor_states:
                                # matched the pool member
                                # by address and port number
                                if member['address'] == \
                                        strip_domain_address(
                                            state['addr']) and \
                                        int(member['protocol_port']) == \
                                        int(state['port']):
                                    # if the monitor says member is up
                                    if state['state'] == \
                                            'MONITOR_STATUS_UP' or \
                                            state['state'] == \
                                            'MONITOR_STATUS_UNCHECKED':
                                        # set ACTIVE as long as the
                                        # status was not set to 'DOWN'
                                        # on another BIG-IP
                                        if members[member['id']][
                                                'status'] != 'DOWN':
                                            if member['admin_state_up']:
                                                members[member['id']][
                                                    'status'] = \
                                                    plugin_const.ACTIVE
                                            else:
                                                members[member['id']][
                                                    'status'] = \
                                                    plugin_const.INACTIVE
                                    else:
                                        members[member['id']]['status'] = \
                                            plugin_const.DOWN
    stats['members'] = members
    return stats
The BIG-IP pool member state (up or down) is not reflected in the LBaaS member's status attribute.
Per the reference implementation the following behavior is expected:
BIG-IP pool member state Unknown = LBaaS pool member status ACTIVE (neutron.plugins.common.constants)
BIG-IP pool member state UP = LBaaS pool member status ACTIVE (neutron.plugins.common.constants)
BIG-IP pool member state DOWN = LBaaS pool member status DOWN (neutron.plugins.common.constants)
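A minimal sketch of that mapping, mirroring the MONITOR_STATUS_UP / MONITOR_STATUS_UNCHECKED checks in get_stats() above (the helper name member_status is hypothetical):

    from neutron.plugins.common import constants as plugin_const

    def member_status(monitor_state, admin_state_up=True):
        """Map a BIG-IP monitor state to the LBaaS member status.

        Hypothetical helper: Unknown (MONITOR_STATUS_UNCHECKED) and
        MONITOR_STATUS_UP map to ACTIVE; any other monitor state maps
        to DOWN, matching the else branch in get_stats() above.
        """
        if not admin_state_up:
            # administratively disabled members stay INACTIVE
            return plugin_const.INACTIVE
        if monitor_state in ('MONITOR_STATUS_UP',
                             'MONITOR_STATUS_UNCHECKED'):
            return plugin_const.ACTIVE
        return plugin_const.DOWN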
In LBaaSv1 we polled for the pool member state every 30 seconds to avoid a callback from BIG-IP to the agent when a state changed.
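As a rough sketch, wiring up such a poll with oslo.service's fixed-interval looping call could look like the following (the actual agent manager uses Neutron's periodic task machinery; start_stats_polling is a hypothetical wrapper):

    from oslo_service import loopingcall

    def start_stats_polling(manager, context, interval=30):
        # Hypothetical wrapper: invoke the agent manager's collect_stats()
        # every `interval` seconds instead of waiting for a BIG-IP callback.
        poller = loopingcall.FixedIntervalLoopingCall(
            manager.collect_stats, context)
        poller.start(interval=interval)
        return poller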
Agent Version
All
Operating System
All
OpenStack Release
All
Description
Have a BIG-IP pool member transition from Unknown or Up to Down: there is no indication in Neutron that the pool member status is anything but ACTIVE.
Deployment
All deployments