From: tierno
Date: Tue, 16 Jun 2020 15:29:47 +0000 (+0000)
Subject: fixing flake8 tests
X-Git-Tag: release-v8.0-start^0
X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FRO.git;a=commitdiff_plain;h=1ec592d80c7f07874b08a14984deb21fddb31441

fixing flake8 tests

Change-Id: Id3db9e940d07fb67a81e727f310900a9eb92e18d
Signed-off-by: tierno
---

diff --git a/Dockerfile-local b/Dockerfile-local
index 97d804a1..88fdfc84 100644
--- a/Dockerfile-local
+++ b/Dockerfile-local
@@ -32,11 +32,10 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get -y install python3-neutronclient pyt
     && mv /tmp/libzenohc.so /usr/local/lib/libzenohc.so \
     && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U jsonrpclib-pelix cvprac \
     "osm-im @ git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im" "azure==4.0.0" boto \
-    untangle pyone "oca @ git+https://github.com/python-oca/python-oca#egg=oca" \
+    pyone "oca @ git+https://github.com/python-oca/python-oca#egg=oca" \
     pyangbind sphinx zenoh==0.3.0 yaks==0.3.0.post1 fog05-sdk==0.2.0 fog05==0.2.0
-
 # DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
 # TODO py3 DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:rocky && apt-get update && apt-get install -y python3-networking-l2gw \
diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py
index 8e34091e..f340f413 100644
--- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py
+++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py
@@ -38,7 +38,7 @@ class AristaSDNConfigLet:
     def __init__(self, topology=_VXLAN_MLAG):
         self.topology = topology
 
-    _basic_int ="""
+    _basic_int = """
 interface {interface}
 !! service: {uuid}
   switchport
@@ -71,18 +71,18 @@ interface {interface}
     def getEline_passthrough(self, uuid, interface, vlan_id, index):
         return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel")
 
-    _basic_vlan ="""
+    _basic_vlan = """
 vlan {vlan}
 !! service: {service} {vlan} {uuid}
    name {service}{vlan}
    trunk group {service}{vlan}
 """
-    _basic_mlag =""" trunk group MLAGPEER
+    _basic_mlag = """ trunk group MLAGPEER
 """
-    _basic_vxlan ="""interface VXLAN1
+    _basic_vxlan = """interface VXLAN1
    VXLAN vlan {vlan} vni {vni}
 """
-    _basic_end ="!"
+    _basic_end = "!"
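Throughout this commit the same small set of flake8 fixes recurs: whitespace around operators (E225), "is None" / "is not None" instead of "==" comparisons with None (E711), "not in" instead of "not x in y" (E713), no bare "except:" clauses (E722), and long statements wrapped under the line-length limit (E501). The sketch below only illustrates those patterns on simplified code; the client object and its post() call are stand-ins, not an API from this repository:

    from osm_ro_plugin.sdnconn import SdnConnectorError


    def delete_service(client, serv_type, service_uuid, supported_types):
        # Illustrative stand-in for the per-plugin delete logic, not code from the patch.
        if client is None:                        # E711: compare to None with "is"
            raise SdnConnectorError("Not connected", 500)
        if serv_type not in supported_types:      # E713: use "not in"
            raise SdnConnectorError("Service type not supported", 400)
        try:
            client.post("/service/" + service_uuid, get_response=False)
        except Exception as e:                    # E722: never use a bare "except:"
            raise SdnConnectorError(
                "Could not delete service id:{} ({})".format(service_uuid, e), 500)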
_configLet_VLAN = _basic_vlan + _basic_end _configLet_VXLAN = _basic_vlan + _basic_vxlan + _basic_end @@ -123,7 +123,6 @@ router bgp {bgp} loopback=loopback0, vni=vni_id) - def getElan_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp): return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELAN") diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py index ca8e58f2..4708a7c6 100644 --- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py +++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py @@ -42,7 +42,7 @@ from requests import RequestException from cvprac.cvp_client import CvpClient from cvprac.cvp_api import CvpApi -from cvprac.cvp_client_errors import CvpLoginError, CvpSessionLogOutError, CvpApiError +from cvprac.cvp_client_errors import CvpLoginError, CvpSessionLogOutError, CvpApiError from cvprac import __version__ as cvprac_version from osm_rosdn_arista_cloudvision.aristaConfigLet import AristaSDNConfigLet @@ -135,7 +135,6 @@ class AristaSdnConnector(SdnConnectorBase): _VLAN_MLAG = "VLAN-MLAG" _VXLAN_MLAG = "VXLAN-MLAG" - def __init__(self, wim, wim_account, config=None, logger=None): """ @@ -195,8 +194,8 @@ class AristaSdnConnector(SdnConnectorBase): raise SdnConnectorError(message="Unable to load switches from CVP", http_code=500) from e self.logger.debug("Using topology {} in Arista Leaf switches: {}".format( - self.topology, - self.delete_keys_from_dict(self.switches, ('passwd',)))) + self.topology, + self.delete_keys_from_dict(self.switches, ('passwd',)))) self.clC = AristaSDNConfigLet(self.topology) def __load_topology(self): @@ -255,7 +254,7 @@ class AristaSdnConnector(SdnConnectorBase): self.switches[cs].update(cs_content) # Load the rest of the data - if self.client == None: + if self.client is None: self.client = self.__connect() self.__load_inventory() if not self.switches: @@ -348,7 +347,7 @@ class AristaSdnConnector(SdnConnectorBase): for testing the access to CloudVision API """ try: - if self.client == None: + if self.client is None: self.client = self.__connect() result = self.client.api.get_cvp_info() self.logger.debug(result) @@ -406,7 +405,7 @@ class AristaSdnConnector(SdnConnectorBase): http_code=500) self.__get_Connection() - if conn_info == None: + if conn_info is None: raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid), http_code=500) @@ -535,13 +534,13 @@ class AristaSdnConnector(SdnConnectorBase): self.logger.info("Service with uuid {} created.". 
format(service_uuid)) s_uid, s_connInf = self.__processConnection( - service_uuid, - service_type, - connection_points, - kwargs) + service_uuid, + service_type, + connection_points, + kwargs) try: self.__addMetadata(s_uid, service_type, s_connInf['vlan_id']) - except Exception as e: + except Exception: pass return (s_uid, s_connInf) @@ -626,8 +625,9 @@ class AristaSdnConnector(SdnConnectorBase): processed_connection_points += switches for switch in switches: if not interface: - raise SdnConnectorError(message="Connection point switch port empty for switch_dpid {}".format(switch_id), - http_code=406) + raise SdnConnectorError( + message="Connection point switch port empty for switch_dpid {}".format(switch_id), + http_code=406) # it should be only one switch where the mac is attached if encap_type == 'dot1q': # SRIOV configLet for Leaf switch mac's attached to @@ -760,9 +760,9 @@ class AristaSdnConnector(SdnConnectorBase): continue cl = cls_perSw[s] res = self.__device_modify( - device_to_update=s, - new_configlets=cl, - delete=toDelete_in_cvp) + device_to_update=s, + new_configlets=cl, + delete=toDelete_in_cvp) if "errorMessage" in str(res): raise Exception(str(res)) self.logger.info("Device {} modify result {}".format(s, res)) @@ -773,10 +773,10 @@ class AristaSdnConnector(SdnConnectorBase): t_id, self.__SEPARATOR) self.client.api.add_note_to_configlet( - cls_perSw[s][0]['key'], - note_msg) + cls_perSw[s][0]['key'], + note_msg) cls_perSw[s][0]['note'] = note_msg - tasks = { t_id : {'workOrderId': t_id} } + tasks = {t_id: {'workOrderId': t_id}} self.__exec_task(tasks, self.__EXC_TASK_EXEC_WAIT) # with just one configLet assigned to a device, # delete all if there are errors in next loops @@ -793,7 +793,7 @@ class AristaSdnConnector(SdnConnectorBase): allLeafModified) except Exception as e: self.logger.error("Exception rolling back in updating connection: {}". 
- format(e), exc_info=True) + format(e), exc_info=True) raise ex def __rollbackConnection(self, @@ -824,7 +824,7 @@ class AristaSdnConnector(SdnConnectorBase): self.__configlet_modify(cls_perSw[s], delete=True) def __exec_task(self, tasks, tout=10): - if self.taskC == None: + if self.taskC is None: self.__connect() data = self.taskC.update_all_tasks(tasks).values() self.taskC.task_action(data, tout, 'executed') @@ -833,15 +833,14 @@ class AristaSdnConnector(SdnConnectorBase): """ Updates the devices (switches) adding or removing the configLet, the tasks Id's associated to the change are returned """ - self.logger.info('Enter in __device_modify delete: {}'.format( - delete)) + self.logger.info('Enter in __device_modify delete: {}'.format(delete)) updated = [] changed = False # Task Ids that have been identified during device actions newTasks = [] if (len(new_configlets) == 0 or - device_to_update == None or + device_to_update is None or len(device_to_update) == 0): data = {'updated': updated, 'tasks': newTasks} return [changed, data] @@ -857,14 +856,14 @@ class AristaSdnConnector(SdnConnectorBase): if try_device['hostname'] not in device_to_update: continue dev_cvp_configlets = self.client.api.get_configlets_by_device_id( - try_device['systemMacAddress']) + try_device['systemMacAddress']) # self.logger.debug(dev_cvp_configlets) try_device['deviceSpecificConfiglets'] = [] for cvp_configlet in dev_cvp_configlets: if int(cvp_configlet['containerCount']) == 0: try_device['deviceSpecificConfiglets'].append( - {'name': cvp_configlet['name'], - 'key': cvp_configlet['key']}) + {'name': cvp_configlet['name'], + 'key': cvp_configlet['key']}) # self.logger.debug(device) device = try_device break @@ -900,42 +899,41 @@ class AristaSdnConnector(SdnConnectorBase): try: if delete and len(cl_toDel) > 0: r = self.client.api.remove_configlets_from_device( - 'OSM', - up_device['device'], - cl_toDel, - create_task=True) + 'OSM', + up_device['device'], + cl_toDel, + create_task=True) dev_action = r self.logger.debug("remove_configlets_from_device {} {}".format(dev_action, cl_toDel)) elif len(cl_toAdd) > 0: r = self.client.api.apply_configlets_to_device( - 'OSM', - up_device['device'], - cl_toAdd, - create_task=True) + 'OSM', + up_device['device'], + cl_toAdd, + create_task=True) dev_action = r self.logger.debug("apply_configlets_to_device {} {}".format(dev_action, cl_toAdd)) except Exception as error: errorMessage = str(error) msg = "errorMessage: Device {} Configlets couldnot be updated: {}".format( - up_device['hostname'], errorMessage) + up_device['hostname'], errorMessage) raise SdnConnectorError(msg) from error else: if "errorMessage" in str(dev_action): m = "Device {} Configlets update fail: {}".format( - up_device['name'], dev_action['errorMessage']) + up_device['name'], dev_action['errorMessage']) raise SdnConnectorError(m) else: changed = True if 'taskIds' in str(dev_action): # Fix 1030 SDN-ARISTA Key error note when deploy a NS if not dev_action['data']['taskIds']: - raise SdnConnectorError("No taskIds found: Device {} Configlets couldnot be updated".format( - up_device['hostname'])) + raise SdnConnectorError("No taskIds found: Device {} Configlets could not be updated".format( + up_device['hostname'])) for taskId in dev_action['data']['taskIds']: - updated.append({up_device['hostname']: - "Configlets-{}".format( - taskId)}) + updated.append({ + up_device['hostname']: "Configlets-{}".format(taskId)}) newTasks.append(taskId) else: updated.append({up_device['hostname']: @@ -951,7 +949,7 @@ class 
AristaSdnConnector(SdnConnectorBase): :return: data: dict of module actions and taskIDs ''' self.logger.info('Enter in __configlet_modify delete:{}'.format( - delete)) + delete)) # Compare configlets against cvp_facts-configlets changed = False @@ -1006,27 +1004,27 @@ class AristaSdnConnector(SdnConnectorBase): if to_delete: operation = 'delete' resp = self.client.api.delete_configlet( - configlet['data']['name'], - configlet['data']['key']) + configlet['data']['name'], + configlet['data']['key']) elif to_update: operation = 'update' resp = self.client.api.update_configlet( - configlet['config'], - configlet['data']['key'], - configlet['data']['name'], - wait_task_ids=True) + configlet['config'], + configlet['data']['key'], + configlet['data']['name'], + wait_task_ids=True) elif to_create: operation = 'create' resp = self.client.api.add_configlet( - configlet['name'], - configlet['config']) + configlet['name'], + configlet['config']) else: operation = 'checked' resp = 'checked' except Exception as error: errorMessage = str(error).split(':')[-1] message = "Configlet {} cannot be {}: {}".format( - cl['name'], operation, errorMessage) + cl['name'], operation, errorMessage) if to_delete: deleted.append({configlet['name']: message}) elif to_update: @@ -1039,7 +1037,7 @@ class AristaSdnConnector(SdnConnectorBase): else: if "error" in str(resp).lower(): message = "Configlet {} cannot be deleted: {}".format( - cl['name'], resp['errorMessage']) + cl['name'], resp['errorMessage']) if to_delete: deleted.append({configlet['name']: message}) elif to_update: @@ -1073,7 +1071,7 @@ class AristaSdnConnector(SdnConnectorBase): if len(configlet) > 0: configlet['devices'] = [] applied_devices = self.client.api.get_applied_devices( - configlet['name']) + configlet['name']) for device in applied_devices['data']: configlet['devices'].append(device['hostName']) @@ -1113,7 +1111,7 @@ class AristaSdnConnector(SdnConnectorBase): http_code=500) self.__get_Connection() - if conn_info == None: + if conn_info is None: raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid), http_code=500) c_info = None @@ -1246,7 +1244,7 @@ class AristaSdnConnector(SdnConnectorBase): raise SdnConnectorError(message='Unable to perform operation, missing or empty connection information', http_code=500) - if connection_points == None: + if connection_points is None: return None self.__get_Connection() @@ -1261,10 +1259,10 @@ class AristaSdnConnector(SdnConnectorBase): kwargs=kwargs) s_uid, s_connInf = self.__processConnection( - service_uuid, - service_type, - connection_points, - kwargs) + service_uuid, + service_type, + connection_points, + kwargs) self.logger.info("Service with uuid {} configuration updated". format(s_uid)) return s_connInf @@ -1281,8 +1279,8 @@ class AristaSdnConnector(SdnConnectorBase): # TODO check if there are pending task, and cancel them before restoring self.__updateConnection(cls_currentPerSw) except Exception as e: - self.logger.error("Unable to restore configuration in service {} after an error in the configuration updated: {}". 
- format(service_uuid, str(e))) + self.logger.error("Unable to restore configuration in service {} after an error in the configuration" + " updated: {}".format(service_uuid, str(e))) if self.raiseException: raise ex raise SdnConnectorError(message=str(ex), @@ -1442,7 +1440,7 @@ class AristaSdnConnector(SdnConnectorBase): invoking the version retrival as test """ try: - if self.client == None: + if self.client is None: self.client = self.__connect() self.client.api.get_cvp_info() except (CvpSessionLogOutError, RequestException) as e: @@ -1569,8 +1567,9 @@ class AristaSdnConnector(SdnConnectorBase): break if found: break - if peer == None: - self.logger.error('No Peer device found for device {} with MLAG address {}'.format(device_id, mlagSystemId)) + if peer is None: + self.logger.error('No Peer device found for device {} with MLAG address {}'.format(device_id, + mlagSystemId)) else: self.logger.debug('Peer MLAG for device {} - value {}'.format(device_id, peer)) return peer @@ -1612,7 +1611,7 @@ class AristaSdnConnector(SdnConnectorBase): return True def delete_keys_from_dict(self, dict_del, lst_keys): - if dict_del == None: + if dict_del is None: return dict_del dict_copy = {k: v for k, v in dict_del.items() if k not in lst_keys} for k, v in dict_copy.items(): diff --git a/RO-SDN-arista_cloudvision/tox.ini b/RO-SDN-arista_cloudvision/tox.ini index d534123c..564d2919 100644 --- a/RO-SDN-arista_cloudvision/tox.ini +++ b/RO-SDN-arista_cloudvision/tox.ini @@ -15,7 +15,7 @@ [tox] envlist = flake8 -toxworkdir={toxinidir}/.tox +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py b/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py index 27f739f3..423ceff3 100755 --- a/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py +++ b/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py @@ -30,7 +30,7 @@ import logging import paramiko import requests import struct -import sys +# import sys from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError @@ -66,7 +66,7 @@ class DpbSshInterface(): ro is restarted """ self._check_connection() - if data == None: + if data is None: data = {} url_ext_info = url_params.split('/') for i in range(0, len(url_ext_info)): @@ -321,9 +321,9 @@ class DpbConnector(SdnConnectorBase): try: self.__post(self.__ACTIONS_MAP.get("RELEASE"), "/service/"+service_uuid, get_response=False) - except: + except Exception as e: raise SdnConnectorError( - "Could not delete service id:{} (could be an issue with the DPB)".format(service_uuid), 500) + "Could not delete service id:{} (could be an issue with the DPB): {}".format(service_uuid, e), 500) self.logger.debug( "Deleted connectivity service id:{}".format(service_uuid)) return None @@ -371,6 +371,6 @@ class DpbConnector(SdnConnectorBase): return conn_info def __check_service(self, serv_type, points, kwargs): - if not serv_type in self.__SUPPORTED_SERV_TYPES: + if serv_type not in self.__SUPPORTED_SERV_TYPES: raise SdnConnectorError("Service type no supported", 400) # Future: BW Checks here diff --git a/RO-SDN-dpb/tox.ini b/RO-SDN-dpb/tox.ini index 1faf015a..bae20e2e 100644 --- a/RO-SDN-dpb/tox.ini +++ b/RO-SDN-dpb/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-dynpac/requirements.txt b/RO-SDN-dynpac/requirements.txt index 37366789..0a169e4e 100644 --- a/RO-SDN-dynpac/requirements.txt +++ b/RO-SDN-dynpac/requirements.txt @@ -14,4 +14,4 @@ ## requests 
-git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin +osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin diff --git a/RO-SDN-dynpac/tox.ini b/RO-SDN-dynpac/tox.ini index a1e866ab..2bd23ea3 100644 --- a/RO-SDN-dynpac/tox.ini +++ b/RO-SDN-dynpac/tox.ini @@ -14,14 +14,13 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] +usedevelop = True basepython = python3 install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} -# deps = -r{toxinidir}/test-requirements.txt -commands=python3 -m unittest discover -v [testenv:flake8] basepython = python3 diff --git a/RO-SDN-floodlight_openflow/tox.ini b/RO-SDN-floodlight_openflow/tox.ini index e95d02e2..bee1be4a 100644 --- a/RO-SDN-floodlight_openflow/tox.ini +++ b/RO-SDN-floodlight_openflow/tox.ini @@ -15,7 +15,7 @@ [tox] envlist = flake8 -toxworkdir={homedir}/.tox +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-ietfl2vpn/tox.ini b/RO-SDN-ietfl2vpn/tox.ini index 040210ce..23c8f530 100644 --- a/RO-SDN-ietfl2vpn/tox.ini +++ b/RO-SDN-ietfl2vpn/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py index 6ef91af2..7822cf73 100644 --- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py +++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py @@ -148,10 +148,10 @@ class UnderlayApi: # Aux methods to avoid code duplication of name conventions def get_vpg_name(self, switch_id, switch_port): - return "{}_{}".format(switch_id, switch_port).replace(":","_") + return "{}_{}".format(switch_id, switch_port).replace(":", "_") def get_vmi_name(self, switch_id, switch_port, vlan): - return "{}_{}-{}".format(switch_id, switch_port, vlan).replace(":","_") + return "{}_{}-{}".format(switch_id, switch_port, vlan).replace(":", "_") # Virtual network operations diff --git a/RO-SDN-juniper_contrail/tox.ini b/RO-SDN-juniper_contrail/tox.ini index dfa3fde4..4ecb427d 100644 --- a/RO-SDN-juniper_contrail/tox.ini +++ b/RO-SDN-juniper_contrail/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py b/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py index 329cb662..ed1bca4b 100644 --- a/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py +++ b/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py @@ -32,12 +32,12 @@ import json import requests import base64 import logging -from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnException, OpenflowConnConnectionException, \ - OpenflowConnUnexpectedResponse, OpenflowConnAuthException, OpenflowConnNotFoundException, \ - OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented +from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse +# OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException, +# OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented __author__ = "Pablo Montes, Alfonso Tierno" -__date__ = "$28-oct-2014 12:07:15$" +__date__ = "$28-oct-2014 12:07:15$" class 
OfConnOdl(OpenflowConn): @@ -77,7 +77,7 @@ class OfConnOdl(OpenflowConn): self.dpid = str(params["of_dpid"]) self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16)) if params and params.get("of_user"): - of_password=params.get("of_password", "") + of_password = params.get("of_password", "") self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8")) self.auth = self.auth.decode() self.headers['authorization'] = 'Basic ' + self.auth @@ -90,7 +90,7 @@ class OfConnOdl(OpenflowConn): """ Obtain a a list of switches or DPID detected by this controller :return: list length, and a list where each element a tuple pair (DPID, IP address) - Raise an OpenflowconnConnectionException exception if fails with text_error + Raise an OpenflowConnConnectionException exception if fails with text_error """ try: of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes", @@ -98,27 +98,27 @@ class OfConnOdl(OpenflowConn): error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200: self.logger.warning("get_of_switches " + error_text) - raise OpenflowconnUnexpectedResponse("Error get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse("Error get_of_switches " + error_text) self.logger.debug("get_of_switches " + error_text) info = of_response.json() if not isinstance(info, dict): self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info)) - raise OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") + raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") nodes = info.get('nodes') if type(nodes) is not dict: self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s", str(type(info))) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes', not found or not a dict." + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes', not found or not a dict." " Wrong version?") node_list = nodes.get('node') if type(node_list) is not list: self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', " "not found or not a list: %s", str(type(node_list))) - raise OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found " + raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found " "or not a list. Wrong version?") switch_list = [] @@ -127,7 +127,7 @@ class OfConnOdl(OpenflowConn): if node_id is None: self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s", str(node)) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " "Wrong version?") if node_id == 'controller-config': @@ -137,28 +137,29 @@ class OfConnOdl(OpenflowConn): if node_ip_address is None: self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:" "ip-address', not found: %s", str(node)) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" "'flow-node-inventory:ip-address', not found. 
Wrong version?") node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16) - switch_list.append((':'.join(a+b for a,b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address)) + switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), + node_ip_address)) return switch_list except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) def obtain_port_correspondence(self): """ Obtain the correspondence between physical and openflow port names :return: dictionary: with physical name as key, openflow name as value, - Raise a OpenflowconnConnectionException expection in case of failure + Raise a OpenflowConnConnectionException expection in case of failure """ try: of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes", @@ -166,26 +167,26 @@ class OfConnOdl(OpenflowConn): error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200: self.logger.warning("obtain_port_correspondence " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("obtain_port_correspondence " + error_text) info = of_response.json() if not isinstance(info, dict): self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info)) - raise OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") + raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") nodes = info.get('nodes') if not isinstance(nodes, dict): self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', " "not found or not a dict: %s", str(type(nodes))) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. " "Wrong version?") node_list = nodes.get('node') if not isinstance(node_list, list): self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', " "not found or not a list: %s", str(type(node_list))) - raise OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found or not a list." + raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found or not a list." " Wrong version?") for node in node_list: @@ -193,7 +194,7 @@ class OfConnOdl(OpenflowConn): if node_id is None: self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', " "not found: %s", str(node)) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " "Wrong version?") if node_id == 'controller-config': @@ -209,7 +210,7 @@ class OfConnOdl(OpenflowConn): if not isinstance(node_connector_list, list): self.logger.error("obtain_port_correspondence. 
Unexpected response at " "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node)) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'node-connector', " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'node-connector', " "not found or not a list. Wrong version?") for node_connector in node_connector_list: @@ -220,7 +221,7 @@ class OfConnOdl(OpenflowConn): if node_ip_address is None: self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:" "'flow-node-inventory:ip-address', not found: %s", str(node)) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" + raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" "'flow-node-inventory:ip-address', not found. Wrong version?") # If we found the appropriate dpid no need to continue in the for loop @@ -231,12 +232,12 @@ class OfConnOdl(OpenflowConn): except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) def get_of_rules(self, translate_of_ports=True): """ @@ -252,7 +253,7 @@ class OfConnOdl(OpenflowConn): (vlan, None/int): for stripping/setting a vlan tag (out, port): send to this port switch: DPID, all - Raise a OpenflowconnConnectionException exception in case of failure + Raise a OpenflowConnConnectionException exception in case of failure """ @@ -271,7 +272,7 @@ class OfConnOdl(OpenflowConn): elif of_response.status_code != 200: self.logger.warning("get_of_rules " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("get_of_rules " + error_text) @@ -279,13 +280,13 @@ class OfConnOdl(OpenflowConn): if not isinstance(info, dict): self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info)) - raise OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") + raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") table = info.get('flow-node-inventory:table') if not isinstance(table, list): self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', " "not a list: %s", str(type(table))) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table', not a list. " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table', not a list. " "Wrong version?") flow_list = table[0].get('flow') @@ -295,7 +296,7 @@ class OfConnOdl(OpenflowConn): if not isinstance(flow_list, list): self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a " "list: %s", str(type(flow_list))) - raise OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table'[0]:'flow', " + raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table'[0]:'flow', " "not a list. 
Wrong version?") # TODO translate ports according to translate_of_ports parameter @@ -303,10 +304,10 @@ class OfConnOdl(OpenflowConn): rules = [] # Response list for flow in flow_list: if not ('id' in flow and 'match' in flow and 'instructions' in flow and - 'instruction' in flow['instructions'] and - 'apply-actions' in flow['instructions']['instruction'][0] and - 'action' in flow['instructions']['instruction'][0]['apply-actions']): - raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elements are " + 'instruction' in flow['instructions'] and + 'apply-actions' in flow['instructions']['instruction'][0] and + 'action' in flow['instructions']['instruction'][0]['apply-actions']): + raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements are " "missing. Wrong version?") flow['instructions']['instruction'][0]['apply-actions']['action'] @@ -319,7 +320,7 @@ class OfConnOdl(OpenflowConn): if 'in-port' in flow['match']: in_port = flow['match']['in-port'] if in_port not in self.ofi2pp: - raise OpenflowconnUnexpectedResponse("Error: Ingress port {} is not in switch port list". + raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not in switch port list". format(in_port)) if translate_of_ports: @@ -330,7 +331,7 @@ class OfConnOdl(OpenflowConn): if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \ 'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \ 'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \ - flow['match']['vlan-match']['vlan-id']['vlan-id-present'] == True: + flow['match']['vlan-match']['vlan-id']['vlan-id-present'] is True: rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id'] if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] \ @@ -348,12 +349,12 @@ class OfConnOdl(OpenflowConn): for instruction in instructions: if 'output-action' in instruction: if 'output-node-connector' not in instruction['output-action']: - raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elementa " + raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elementa " "are missing. Wrong version?") out_port = instruction['output-action']['output-node-connector'] if out_port not in self.ofi2pp: - raise OpenflowconnUnexpectedResponse("Error: Output port {} is not in switch port list". + raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in switch port list". format(out_port)) if translate_of_ports: @@ -368,7 +369,7 @@ class OfConnOdl(OpenflowConn): if not ('vlan-match' in instruction['set-field'] and 'vlan-id' in instruction['set-field']['vlan-match'] and 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']): - raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elements " + raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements " "are missing. 
Wrong version?") actions[instruction['order']] = ('vlan', @@ -383,18 +384,18 @@ class OfConnOdl(OpenflowConn): except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) def del_flow(self, flow_name): """ Delete an existing rule :param flow_name: flow_name, this is the rule name - :return: Raise a OpenflowconnConnectionException expection in case of failure + :return: Raise a OpenflowConnConnectionException expection in case of failure """ try: @@ -403,14 +404,14 @@ class OfConnOdl(OpenflowConn): error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200: self.logger.warning("del_flow " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("del_flow OK " + error_text) return None except requests.exceptions.RequestException as e: # raise an exception in case of contection error error_text = type(e).__name__ + ": " + str(e) self.logger.error("del_flow " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) def new_flow(self, data): """ @@ -424,7 +425,7 @@ class OfConnOdl(OpenflowConn): actions: list of actions, composed by a pair tuples with these posibilities: ('vlan', None/int): for stripping/setting a vlan tag ('out', port): send to this port - :return: Raise a OpenflowconnConnectionException exception in case of failure + :return: Raise a OpenflowConnConnectionException exception in case of failure """ try: @@ -446,7 +447,7 @@ class OfConnOdl(OpenflowConn): if not data['ingress_port'] in self.pp2ofi: error_text = 'Error. 
Port ' + data['ingress_port'] + ' is not present in the switch' self.logger.warning("new_flow " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) flow['match']['in-port'] = self.pp2ofi[data['ingress_port']] if data.get('dst_mac'): flow['match']['ethernet-match'] = { @@ -487,13 +488,13 @@ class OfConnOdl(OpenflowConn): new_action['output-action'] = {} if not action[1] in self.pp2ofi: error_msg = 'Port ' + action[1] + ' is not present in the switch' - raise OpenflowconnUnexpectedResponse(error_msg) + raise OpenflowConnUnexpectedResponse(error_msg) new_action['output-action']['output-node-connector'] = self.pp2ofi[action[1]] else: - error_msg = "Unknown item '%s' in action list".format(action[0]) + error_msg = "Unknown item '{}' in action list".format(action[0]) self.logger.error("new_flow " + error_msg) - raise OpenflowconnUnexpectedResponse(error_msg) + raise OpenflowConnUnexpectedResponse(error_msg) actions.append(new_action) order += 1 @@ -504,7 +505,7 @@ class OfConnOdl(OpenflowConn): error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200: self.logger.warning("new_flow " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("new_flow OK " + error_text) return None @@ -512,12 +513,12 @@ class OfConnOdl(OpenflowConn): # raise an exception in case of contection error error_text = type(e).__name__ + ": " + str(e) self.logger.error("new_flow " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) def clear_all_flows(self): """ Delete all existing rules - :return: Raise a OpenflowconnConnectionException expection in case of failure + :return: Raise a OpenflowConnConnectionException expection in case of failure """ try: of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id + @@ -525,10 +526,10 @@ class OfConnOdl(OpenflowConn): error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200 and of_response.status_code != 404: # HTTP_Not_Found self.logger.warning("clear_all_flows " + error_text) - raise OpenflowconnUnexpectedResponse(error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("clear_all_flows OK " + error_text) return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("clear_all_flows " + error_text) - raise OpenflowconnConnectionException(error_text) + raise OpenflowConnConnectionException(error_text) diff --git a/RO-SDN-odl_openflow/tox.ini b/RO-SDN-odl_openflow/tox.ini index 77b9ba07..68ba259d 100644 --- a/RO-SDN-odl_openflow/tox.ini +++ b/RO-SDN-odl_openflow/tox.ini @@ -15,7 +15,7 @@ [tox] envlist = flake8 -toxworkdir={toxinidir}/.tox +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py index fb09327c..ef32be27 100644 --- a/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py +++ b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py @@ -22,23 +22,22 @@ # contact with: alaitz.mendiola@ehu.eus or alaitz.mendiola@gmail.com ## -''' +""" ImplementS the pluging for the Open Network Operating System (ONOS) openflow controller. 
It creates the class OF_conn to create dataplane connections with static rules based on packet destination MAC address -''' - -__author__="Alaitz Mendiola" -__date__ ="$22-nov-2016$" - +""" import json import requests import base64 import logging -from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnException, OpenflowConnConnectionException, \ - OpenflowConnUnexpectedResponse, OpenflowConnAuthException, OpenflowConnNotFoundException, \ - OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented +from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse +# OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException, \ +# OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented + +__author__ = "Alaitz Mendiola" +__date__ = "$22-nov-2016$" class OfConnOnos(OpenflowConn): @@ -71,24 +70,24 @@ class OfConnOnos(OpenflowConn): # internal variables self.name = "onosof" - self.headers = {'content-type':'application/json','accept':'application/json',} + self.headers = {'content-type': 'application/json', 'accept': 'application/json'} - self.auth="None" - self.pp2ofi={} # From Physical Port to OpenFlow Index - self.ofi2pp={} # From OpenFlow Index to Physical Port + self.auth = "None" + self.pp2ofi = {} # From Physical Port to OpenFlow Index + self.ofi2pp = {} # From OpenFlow Index to Physical Port self.dpid = str(params["of_dpid"]) self.id = 'of:'+str(self.dpid.replace(':', '')) # TODO This may not be straightforward if params.get("of_user"): - of_password=params.get("of_password", "") + of_password = params.get("of_password", "") self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8")) self.auth = self.auth.decode() self.headers['authorization'] = 'Basic ' + self.auth self.logger = logging.getLogger('openmano.sdnconn.onosof') - #self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) ) + # self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) ) self.logger.debug("onosof plugin initialized") self.ip_address = None @@ -120,7 +119,7 @@ class OfConnOnos(OpenflowConn): "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s", str(type(node_list))) raise OpenflowConnUnexpectedResponse("Unexpected response, at 'devices', not found " - "or not a list. Wrong version?") + "or not a list. Wrong version?") switch_list = [] for node in node_list: @@ -129,7 +128,7 @@ class OfConnOnos(OpenflowConn): self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s", str(node)) raise OpenflowConnUnexpectedResponse("Unexpected response at 'device':'id', " - "not found . Wrong version?") + "not found . Wrong version?") node_ip_address = node.get('annotations').get('managementAddress') if node_ip_address is None: @@ -178,7 +177,7 @@ class OfConnOnos(OpenflowConn): "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s", str(node_connector_list)) raise OpenflowConnUnexpectedResponse("Unexpected response at 'ports', not found or not " - "a list. Wrong version?") + "a list. Wrong version?") for node_connector in node_connector_list: if node_connector['port'] != "local": @@ -191,7 +190,7 @@ class OfConnOnos(OpenflowConn): "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s", str(self.id)) raise OpenflowConnUnexpectedResponse("Unexpected response at 'managementAddress', " - "not found. 
Wrong version?") + "not found. Wrong version?") self.ip_address = node_ip_address # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi @@ -247,7 +246,7 @@ class OfConnOnos(OpenflowConn): if type(info) != dict: self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info)) raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. " - "Wrong version?") + "Wrong version?") flow_list = info.get('flows') @@ -258,15 +257,15 @@ class OfConnOnos(OpenflowConn): "get_of_rules. Unexpected response at 'flows', not a list: %s", str(type(flow_list))) raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. " - "Wrong version?") + "Wrong version?") rules = [] # Response list for flow in flow_list: - if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \ - 'instructions' in flow['treatment'] and 'criteria' in \ - flow['selector']): + if not ('id' in flow and 'selector' in flow and 'treatment' in flow and + 'instructions' in flow['treatment'] and 'criteria' in + flow['selector']): raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more " - "elements are missing. Wrong version?") + "elements are missing. Wrong version?") rule = dict() rule['switch'] = self.dpid @@ -277,9 +276,9 @@ class OfConnOnos(OpenflowConn): if criteria['type'] == 'IN_PORT': in_port = str(criteria['port']) if in_port != "CONTROLLER": - if not in_port in self.ofi2pp: + if in_port not in self.ofi2pp: raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not " - "in switch port list".format(in_port)) + "in switch port list".format(in_port)) if translate_of_ports: in_port = self.ofi2pp[in_port] rule['ingress_port'] = in_port @@ -295,19 +294,19 @@ class OfConnOnos(OpenflowConn): if instruction['type'] == "OUTPUT": out_port = str(instruction['port']) if out_port != "CONTROLLER": - if not out_port in self.ofi2pp: + if out_port not in self.ofi2pp: raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in " - "switch port list".format(out_port)) + "switch port list".format(out_port)) if translate_of_ports: out_port = self.ofi2pp[out_port] - actions.append( ('out', out_port) ) + actions.append(('out', out_port)) if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP": - actions.append( ('vlan', 'None') ) + actions.append(('vlan', 'None')) if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID": - actions.append( ('vlan', instruction['vlanId']) ) + actions.append(('vlan', instruction['vlanId'])) rule['actions'] = actions rules.append(rule) @@ -371,12 +370,12 @@ class OfConnOnos(OpenflowConn): # Build the dictionary with the flow rule information for ONOS flow = dict() - #flow['id'] = data['name'] + # flow['id'] = data['name'] flow['tableId'] = 0 flow['priority'] = data.get('priority') flow['timeout'] = 0 flow['isPermanent'] = "true" - flow['appId'] = 10 # FIXME We should create an appId for OSM + flow['appId'] = 10 # FIXME We should create an appId for OSM flow['selector'] = dict() flow['selector']['criteria'] = list() @@ -410,9 +409,9 @@ class OfConnOnos(OpenflowConn): for action in data['actions']: new_action = dict() - if action[0] == "vlan": + if action[0] == "vlan": new_action['type'] = "L2MODIFICATION" - if action[1] == None: + if action[1] is None: new_action['subtype'] = "VLAN_POP" else: new_action['subtype'] = "VLAN_ID" @@ -420,7 +419,7 @@ class OfConnOnos(OpenflowConn): elif action[0] == 'out': new_action['type'] = "OUTPUT" if not 
action[1] in self.pp2ofi: - error_msj = 'Port '+ action[1] + ' is not present in the switch' + error_msj = 'Port ' + action[1] + ' is not present in the switch' raise OpenflowConnUnexpectedResponse(error_msj) new_action['port'] = self.pp2ofi[action[1]] else: @@ -433,7 +432,7 @@ class OfConnOnos(OpenflowConn): self.headers['content-type'] = 'application/json' path = self.url + "flows/" + self.id self.logger.debug("new_flow post: {}".format(flow)) - of_response = requests.post(path, headers=self.headers, data=json.dumps(flow) ) + of_response = requests.post(path, headers=self.headers, data=json.dumps(flow)) error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 201: diff --git a/RO-SDN-onos_openflow/tox.ini b/RO-SDN-onos_openflow/tox.ini index a9f86882..61778145 100644 --- a/RO-SDN-onos_openflow/tox.ini +++ b/RO-SDN-onos_openflow/tox.ini @@ -15,7 +15,7 @@ [tox] envlist = flake8 -toxworkdir={toxinidir}/.tox +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py b/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py index 200e4b08..ced63d28 100644 --- a/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py +++ b/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py @@ -60,8 +60,8 @@ class OnosVpls(SdnConnectorBase): except Exception as e: if onos_config_req: status_code = onos_config_req.status_code - self.logger.exception('Error checking credentials') - raise SdnConnectorError('Error checking credentials', http_code=status_code) + self.logger.exception('Error checking credentials: {}'.format(e)) + raise SdnConnectorError('Error checking credentials: {}'.format(e), http_code=status_code) def get_connectivity_service_status(self, service_uuid, conn_info=None): try: @@ -354,17 +354,17 @@ class OnosVpls(SdnConnectorBase): if port_name in onos_config['ports'] and 'interfaces' in onos_config['ports'][port_name]: for interface in onos_config['ports'][port_name]['interfaces']: if interface['name'] == port['service_endpoint_id']: - #self.logger.debug("interface with same name and port exits") + # self.logger.debug("interface with same name and port exits") # interface already exists TODO ¿check vlan? ¿delete and recreate? 
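One part of the ODL OpenFlow hunks above is functional rather than cosmetic: the module imported the correctly spelled exception classes but raised misspelled ones such as OpenflowconnUnexpectedResponse, names that are never defined, so those error paths would most likely have surfaced as a NameError instead of the intended exception. A reduced sketch of the corrected request/error pattern, using only names the module actually imports (the fetch_nodes helper is illustrative, not part of the patch):

    import requests

    from osm_ro_plugin.openflow_conn import (OpenflowConnConnectionException,
                                             OpenflowConnUnexpectedResponse)


    def fetch_nodes(url, headers):
        # Reduced sketch of the plugins' request handling; real code does more validation.
        try:
            of_response = requests.get(url + "restconf/operational/opendaylight-inventory:nodes",
                                       headers=headers)
            if of_response.status_code != 200:
                raise OpenflowConnUnexpectedResponse(
                    "Openflow response {}: {}".format(of_response.status_code, of_response.text))
            return of_response.json()
        except requests.exceptions.RequestException as e:
            raise OpenflowConnConnectionException(type(e).__name__ + ": " + str(e))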
# by the moment use and do not touch - #onos_config['ports'][port_name]['interfaces'].remove(interface) + # onos_config['ports'][port_name]['interfaces'].remove(interface) break else: - #self.logger.debug("port with same name exits but not interface") + # self.logger.debug("port with same name exits but not interface") onos_config['ports'][port_name]['interfaces'].append(interface_config) created_item = (port_name, port['service_endpoint_id']) else: - #self.logger.debug("create port and interface") + # self.logger.debug("create port and interface") onos_config['ports'][port_name] = { 'interfaces': [interface_config] } @@ -383,8 +383,8 @@ if __name__ == '__main__': wim = {'wim_url': wim_url} wim_account = {'user': user, 'password': password} onos_vpls = OnosVpls(wim=wim, wim_account=wim_account, logger=logger) - #conn_service = onos_vpls.get_connectivity_service_status("4e1f4c8a-a874-425d-a9b5-955cb77178f8") - #print(conn_service) + # conn_service = onos_vpls.get_connectivity_service_status("4e1f4c8a-a874-425d-a9b5-955cb77178f8") + # print(conn_service) service_type = 'ELAN' conn_point_0 = { "service_endpoint_id": "switch1:ifz1", @@ -405,13 +405,13 @@ if __name__ == '__main__': } } connection_points = [conn_point_0, conn_point_1] - #service_uuid, conn_info = onos_vpls.create_connectivity_service(service_type, connection_points) - #print(service_uuid) - #print(conn_info) + # service_uuid, conn_info = onos_vpls.create_connectivity_service(service_type, connection_points) + # print(service_uuid) + # print(conn_info) - #conn_info = None + # conn_info = None conn_info = {"interfaces": ['switch1:ifz1', 'switch3:ifz1']} - #onos_vpls.delete_connectivity_service("70248a41-11cb-44f3-9039-c41387394a30", conn_info) + # onos_vpls.delete_connectivity_service("70248a41-11cb-44f3-9039-c41387394a30", conn_info) conn_point_0 = { "service_endpoint_id": "switch1:ifz1", @@ -441,8 +441,9 @@ if __name__ == '__main__': } } connection_points_2 = [conn_point_0, conn_point_3] - #conn_info = onos_vpls.edit_connectivity_service("c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info, connection_points_2) - #print(conn_info) + # conn_info = onos_vpls.edit_connectivity_service("c65d88be-73aa-4933-927d-57ec6bee6b41", + # conn_info, connection_points_2) + # print(conn_info) service_status = onos_vpls.get_connectivity_service_status("c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info) print("service status") diff --git a/RO-SDN-onos_vpls/tox.ini b/RO-SDN-onos_vpls/tox.ini index e72bc162..a7b17d06 100644 --- a/RO-SDN-onos_vpls/tox.ini +++ b/RO-SDN-onos_vpls/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py index fa245684..ee024d43 100644 --- a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py +++ b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py @@ -145,7 +145,8 @@ class vimconnector(vimconn.VimConnector): aws_secret_access_key=self.a_creds['aws_secret_access_key']) self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key']) - # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key']) + # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], + # aws_secret_access_key=self.a_creds['aws_secret_access_key']) # self.account_id = 
client.get_caller_identity()["Account"] except Exception as e: self.format_vimconn_exception(e) @@ -309,11 +310,13 @@ class vimconnector(vimconn.VimConnector): subnet = None vpc_id = self.vpc_id if self.vpc_data.get(vpc_id, None): - cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0] + cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - + set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0] else: vpc = self.get_tenant_list({'id': vpc_id})[0] subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block']) - cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0] + cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, + detail='cidr_block')))[0] subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block) return subnet.id, created_items except Exception as e: @@ -336,7 +339,8 @@ class vimconnector(vimconn.VimConnector): id: string => returns networks with this VIM id, this imply returns one network at most shared: boolean >= returns only networks that are (or are not) shared tenant_id: sting => returns only networks that belong to this tenant/project - ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin + state active #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status Returns the network list of dictionaries. each dictionary contains: 'id': (mandatory) VIM network id @@ -435,13 +439,13 @@ class vimconnector(vimconn.VimConnector): else: subnet_dict['status'] = 'ERROR' subnet_dict['error_msg'] = '' - except Exception as e: + except Exception: subnet_dict['status'] = 'DELETED' subnet_dict['error_msg'] = 'Network not found' finally: try: subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256) - except yaml.YAMLError as e: + except yaml.YAMLError: subnet_dict['vim_info'] = str(subnet) dict_entry[net_id] = subnet_dict return dict_entry @@ -479,16 +483,16 @@ class vimconnector(vimconn.VimConnector): flavor = None for key, values in self.flavor_info.items(): if (values["ram"], values["cpus"], values["disk"]) == ( - flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): flavor = (key, values) break elif (values["ram"], values["cpus"], values["disk"]) >= ( - flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): if not flavor: flavor = (key, values) else: if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= ( - values["ram"], values["cpus"], values["disk"]): + values["ram"], values["cpus"], values["disk"]): flavor = (key, values) if flavor: return flavor[0] @@ -499,17 +503,21 @@ class vimconnector(vimconn.VimConnector): def new_image(self, image_dict): """ Adds a tenant image to VIM Params: image_dict - name (string) - The name of the AMI. Valid only for EBS-based images. - description (string) - The description of the AMI. - image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI’s. - architecture (string) - The architecture of the AMI. 
Valid choices are: * i386 * x86_64 - kernel_id (string) - The ID of the kernel with which to launch the instances - root_device_name (string) - The root device name (e.g. /dev/sdh) - block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. - virtualization_type (string) - The virutalization_type of the image. Valid choices are: * paravirtual * hvm - sriov_net_support (string) - Advanced networking support. Valid choices are: * simple - snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. Mutually exclusive with block_device_map, requires root_device_name - delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving volumes behind after instance termination is not free + name (string) - The name of the AMI. Valid only for EBS-based images. + description (string) - The description of the AMI. + image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI’s. + architecture (string) - The architecture of the AMI. Valid choices are: * i386 * x86_64 + kernel_id (string) - The ID of the kernel with which to launch the instances + root_device_name (string) - The root device name (e.g. /dev/sdh) + block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + virtualization_type (string) - The virutalization_type of the image. Valid choices are: * paravirtual * hvm + sriov_net_support (string) - Advanced networking support. Valid choices are: * simple + snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. Mutually + exclusive with block_device_map, requires root_device_name + delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance + termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving + volumes behind after instance termination is not free Returns: image_id - image ID of the newly created image """ @@ -607,19 +615,23 @@ class vimconnector(vimconn.VimConnector): net_list name net_id - subnet_id from AWS - vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities + vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM + capabilities model: (optional and only have sense for type==virtual) interface model: virtio, e1000, ... mac_address: (optional) mac address to assign to this interface type: (mandatory) can be one of: virtual, in this case always connected to a network of type 'net_type=bridge' - 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it + 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a + data/ptp network ot it can created unconnected 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. - VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs - are allocated on the same physical NIC + VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other + VFs are allocated on the same physical NIC bw': (optional) only for PF/VF/VFnotShared. 
Minimal Bandwidth required for the interface in GBPS - port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing or True, it must apply the default VIM behaviour - vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this interface. 'net_list' is modified + port_security': (optional) If False it must avoid any traffic filtering at this interface. + If missing or True, it must apply the default VIM behaviour + vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this + interface. 'net_list' is modified elastic_ip - True/False to define if an elastic_ip is required cloud_config': (optional) dictionary with: key-pairs': (optional) list of strings with the public key to be inserted to the default user @@ -688,7 +700,7 @@ class vimconnector(vimconn.VimConnector): network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr), instance_id=instance.id, device_index=0) break - except: + except Exception: time.sleep(10) net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id @@ -762,7 +774,7 @@ class vimconnector(vimconn.VimConnector): interface_dict['vim_interface_id'] = interface.id interface_dict['vim_net_id'] = interface.subnet_id interface_dict['mac_address'] = interface.mac_address - if hasattr(interface, 'publicIp') and interface.publicIp != None: + if hasattr(interface, 'publicIp') and interface.publicIp is not None: interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address else: interface_dict['ip_address'] = interface.private_ip_address @@ -774,7 +786,7 @@ class vimconnector(vimconn.VimConnector): finally: try: instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256) - except yaml.YAMLError as e: + except yaml.YAMLError: # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) instance_dict['vim_info'] = str(instance) instances[instance.id] = instance_dict diff --git a/RO-VIM-aws/tox.ini b/RO-VIM-aws/tox.ini index 067b0d43..0c0e401a 100644 --- a/RO-VIM-aws/tox.ini +++ b/RO-VIM-aws/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py index dc408173..d96a64e4 100755 --- a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py +++ b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py @@ -175,7 +175,7 @@ class vimconnector(vimconn.VimConnector): try: location = self.conn.resource_groups.get(resource_group_name).location return location - except Exception as e: + except Exception: raise vimconn.VimConnNotFoundException("Location '{}' not found".format(resource_group_name)) def _get_resource_group_name_from_resource_id(self, resource_id): @@ -183,7 +183,7 @@ class vimconnector(vimconn.VimConnector): try: rg = str(resource_id.split('/')[4]) return rg - except Exception as e: + except Exception: raise vimconn.VimConnException("Unable to get resource group from invalid resource_id format '{}'". format(resource_id)) @@ -192,7 +192,7 @@ class vimconnector(vimconn.VimConnector): try: net_name = str(resource_id.split('/')[8]) return net_name - except Exception as e: + except Exception: raise vimconn.VimConnException("Unable to get azure net_name from invalid resource_id format '{}'". 
format(resource_id)) @@ -374,7 +374,8 @@ class vimconnector(vimconn.VimConnector): if mac_address: net_ifz['mac_address'] = mac_address - async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name, net_ifz) + async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name, + net_ifz) nic_data = async_nic_creation.result() created_items[nic_data.id] = True self.logger.debug('created nic name %s', nic_name) @@ -613,8 +614,6 @@ class vimconnector(vimconn.VimConnector): # image_id are several fields of the image_id image_reference = self._get_image_reference(image_id) - - try: virtual_machine = None created_items = {} @@ -629,7 +628,7 @@ class vimconnector(vimconn.VimConnector): nic_name = vm_name + '-nic-' + str(idx) vm_nic, nic_items = self._create_nic(net, nic_name, net.get('ip_address'), created_items) vm_nics.append({'id': str(vm_nic.id)}) - #net['vim_id'] = vm_nic.id + # net['vim_id'] = vm_nic.id # cloud-init configuration # cloud config @@ -853,7 +852,7 @@ class vimconnector(vimconn.VimConnector): 'disk_size_gb': disk.get('size') }) self.logger.debug("attach disk name: %s", disk_name) - async_disk_attach = self.conn_compute.virtual_machines.create_or_update( + self.conn_compute.virtual_machines.create_or_update( self.resource_group, virtual_machine.name, virtual_machine @@ -886,7 +885,7 @@ class vimconnector(vimconn.VimConnector): 'sku': sku, 'version': version } - except Exception as e: + except Exception: raise vimconn.VimConnException( "Unable to get image_reference from invalid image_id format: '{}'".format(image_id)) @@ -1109,7 +1108,7 @@ class vimconnector(vimconn.VimConnector): if not v: # skip already deleted continue - #self.logger.debug("Must delete item id: %s", item_id) + # self.logger.debug("Must delete item id: %s", item_id) # Obtain type, supported nic, disk or public ip parsed_id = azure_tools.parse_resource_id(item_id) diff --git a/RO-VIM-azure/tox.ini b/RO-VIM-azure/tox.ini index 9bc1472c..bc074293 100644 --- a/RO-VIM-azure/tox.ini +++ b/RO-VIM-azure/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py index a7018f36..21f1b9f1 100644 --- a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py +++ b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py @@ -31,19 +31,20 @@ Support config dict: for the selected hypervisor """ -__author__="Gabriele Baldoni" -__date__ ="$2-june-2020 10:35:12$" import uuid import socket import struct from osm_ro_plugin import vimconn -import json +# import json from functools import partial from fog05 import FIMAPI from fog05 import fimapi from fog05_sdk.interfaces.FDU import FDU +__author__ = "Gabriele Baldoni" +__date__ = "$2-june-2020 10:35:12$" + class vimconnector(vimconn.VimConnector): def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, @@ -129,32 +130,32 @@ class vimconnector(vimconn.VimConnector): Returns the network identifier on success or raises and exception on failure """ self.logger.debug('new_network: {}'.format(locals())) - if net_type in ['data','ptp']: + if net_type in ['data', 'ptp']: raise vimconn.VimConnNotImplemented('{} type of network not supported'.format(net_type)) net_uuid = '{}'.format(uuid.uuid4()) desc = { - 'uuid':net_uuid, - 'name':net_name, - 'net_type':'ELAN', - 'is_mgmt':False - } + 'uuid': 
net_uuid, + 'name': net_name, + 'net_type': 'ELAN', + 'is_mgmt': False + } if ip_profile is not None: ip = {} if ip_profile.get('ip_version') == 'IPv4': ip_info = {} ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count')) - dhcp_range = '{},{}'.format(ip_range[0],ip_range[1]) - ip.update({'subnet':ip_profile.get('subnet_address')}) - ip.update({'dns':ip_profile.get('dns', None)}) - ip.update({'dhcp_enable':ip_profile.get('dhcp_enabled', False)}) - ip.update({'dhcp_range': dhcp_range}) - ip.update({'gateway':ip_profile.get('gateway_address', None)}) - desc.update({'ip_configuration':ip_info}) + dhcp_range = '{},{}'.format(ip_range[0], ip_range[1]) + ip['subnet'] = ip_profile.get('subnet_address') + ip['dns'] = ip_profile.get('dns', None) + ip['dhcp_enable'] = ip_profile.get('dhcp_enabled', False) + ip['dhcp_range'] = dhcp_range + ip['gateway'] = ip_profile.get('gateway_address', None) + desc['ip_configuration'] = ip_info else: raise vimconn.VimConnNotImplemented('IPV6 network is not implemented at VIM') - desc.update({'ip_configuration':ip}) + desc['ip_configuration'] = ip self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc)) try: self.fos_api.network.add_network(desc) @@ -162,19 +163,19 @@ class vimconnector(vimconn.VimConnector): raise vimconn.VimConnConflictException("Network already exists at VIM. Error {}".format(free)) except Exception as e: raise vimconn.VimConnException("Unable to create network {}. Error {}".format(net_name, e)) - # No way from the current rest service to get the actual error, most likely it will be an already existing error - return net_uuid,{} + # No way from the current rest service to get the actual error, most likely it will be an already + # existing error + return net_uuid, {} def get_network_list(self, filter_dict={}): """Obtain tenant networks of VIM - Params: - 'filter_dict' (optional) contains entries to return only networks that matches ALL entries: - name: string => returns only networks with this name - id: string => returns networks with this VIM id, this imply returns one network at most - shared: boolean >= returns only networks that are (or are not) shared - tenant_id: sting => returns only networks that belong to this tenant/project - ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active - #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status + :param filter_dict: (optional) contains entries to return only networks that matches ALL entries: + name: string => returns only networks with this name + id: string => returns networks with this VIM id, this imply returns one network at most + shared: boolean >= returns only networks that are (or are not) shared + tenant_id: sting => returns only networks that belong to this tenant/project + (not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + (not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status Returns the network list of dictionaries. each dictionary contains: 'id': (mandatory) VIM network id 'name': (mandatory) VIM network name @@ -191,11 +192,12 @@ class vimconnector(vimconn.VimConnector): try: nets = self.fos_api.network.list() except Exception as e: - raise vimconn.VimConnConnectionException("Cannot get network list from VIM, connection error. 
Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "Cannot get network list from VIM, connection error. Error {}".format(e)) filters = [ partial(self.__name_filter, filter_name=filter_dict.get('name')), - partial(self.__id_filter,filter_id=filter_dict.get('id')) + partial(self.__id_filter, filter_id=filter_dict.get('id')) ] r1 = [] @@ -209,9 +211,9 @@ class vimconnector(vimconn.VimConnector): for n in r1: osm_net = { - 'id':n.get('uuid'), - 'name':n.get('name'), - 'status':'ACTIVE' + 'id': n.get('uuid'), + 'name': n.get('name'), + 'status': 'ACTIVE' } res.append(osm_net) return res @@ -227,7 +229,7 @@ class vimconnector(vimconn.VimConnector): Raises an exception upon error or when network is not found """ self.logger.debug('get_network: {}'.format(net_id)) - res = self.get_network_list(filter_dict={'id':net_id}) + res = self.get_network_list(filter_dict={'id': net_id}) if len(res) == 0: raise vimconn.VimConnNotFoundException("Network {} not found at VIM".format(net_id)) return res[0] @@ -240,7 +242,8 @@ class vimconnector(vimconn.VimConnector): try: self.fos_api.network.remove_network(net_id) except fimapi.FIMNotFoundException as fnfe: - raise vimconn.VimConnNotFoundException("Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe)) + raise vimconn.VimConnNotFoundException( + "Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe)) except Exception as e: raise vimconn.VimConnException("Cannot delete network {} from VIM. Error {}".format(net_id, e)) return net_id @@ -267,13 +270,9 @@ class vimconnector(vimconn.VimConnector): for n in net_list: try: osm_n = self.get_network(n) - r.update({ - osm_n.get('id'):{'status':osm_n.get('status')} - }) + r[osm_n.get('id')] = {'status': osm_n.get('status')} except vimconn.VimConnNotFoundException: - r.update({ - n:{'status':'VIM_ERROR'} - }) + r[n] = {'status': 'VIM_ERROR'} return r def get_flavor(self, flavor_id): @@ -288,7 +287,7 @@ class vimconnector(vimconn.VimConnector): raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) if r is None: raise vimconn.VimConnNotFoundException("Flavor not found at VIM") - return {'id':r.get('uuid'), 'name':r.get('name'), 'fos':r} + return {'id': r.get('uuid'), 'name': r.get('name'), 'fos': r} def get_flavor_id_from_data(self, flavor_dict): """Obtain flavor id that match the flavor description @@ -306,9 +305,11 @@ class vimconnector(vimconn.VimConnector): flvs = self.fos_api.flavor.list() except Exception as e: raise vimconn.VimConnConnectionException("VIM not reachable. 
Error {}".format(e)) - r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and x.get('ram_size_mb') == flavor_dict.get('ram') and x.get('storage_size_gb') == flavor_dict.get('disk'))] + r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and + x.get('ram_size_mb') == flavor_dict.get('ram') and + x.get('storage_size_gb') == flavor_dict.get('disk'))] if len(r) == 0: - raise vimconn.VimConnNotFoundException ( "No flavor found" ) + raise vimconn.VimConnNotFoundException("No flavor found") return r[0] def new_flavor(self, flavor_data): @@ -319,9 +320,9 @@ class vimconnector(vimconn.VimConnector): vpcus: cpus (cloud type) extended: EPA parameters - numas: #items requested in same NUMA - memory: number of 1G huge pages memory - paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads - interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa + memory: number of 1G huge pages memory + paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads + interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa - name: interface name dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC bandwidth: X Gbps; requested guarantee bandwidth @@ -333,13 +334,13 @@ class vimconnector(vimconn.VimConnector): self.logger.debug('VIM new_flavor with args: {}'.format(locals())) flv_id = '{}'.format(uuid.uuid4()) desc = { - 'uuid':flv_id, - 'name':flavor_data.get('name'), + 'uuid': flv_id, + 'name': flavor_data.get('name'), 'cpu_arch': self.arch, 'cpu_min_count': flavor_data.get('vcpus'), 'cpu_min_freq': 0, - 'ram_size_mb':float(flavor_data.get('ram')), - 'storage_size_gb':float(flavor_data.get('disk')) + 'ram_size_mb': float(flavor_data.get('ram')), + 'storage_size_gb': float(flavor_data.get('disk')) } try: self.fos_api.flavor.add(desc) @@ -355,7 +356,8 @@ class vimconnector(vimconn.VimConnector): try: self.fos_api.flavor.remove(flavor_id) except fimapi.FIMNotFoundException as fnfe: - raise vimconn.VimConnNotFoundException("Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe)) + raise vimconn.VimConnNotFoundException( + "Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe)) except Exception as e: raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) return flavor_id @@ -372,10 +374,10 @@ class vimconnector(vimconn.VimConnector): self.logger.debug('VIM new_image with args: {}'.format(locals())) img_id = '{}'.format(uuid.uuid4()) desc = { - 'name':image_dict.get('name'), - 'uuid':img_id, - 'uri':image_dict.get('location'), - 'format':image_dict.get('disk_format') + 'name': image_dict.get('name'), + 'uuid': img_id, + 'uri': image_dict.get('location'), + 'format': image_dict.get('disk_format') } try: self.fos_api.image.add(desc) @@ -395,7 +397,7 @@ class vimconnector(vimconn.VimConnector): imgs = self.fos_api.image.list() except Exception as e: raise vimconn.VimConnConnectionException("VIM not reachable. 
Error {}".format(e)) - res = [x.get('uuid') for x in imgs if x.get('uri')==path] + res = [x.get('uuid') for x in imgs if x.get('uri') == path] if len(res) == 0: raise vimconn.VimConnNotFoundException("Image with this path was not found") return res[0] @@ -420,8 +422,8 @@ class vimconnector(vimconn.VimConnector): filters = [ partial(self.__name_filter, filter_name=filter_dict.get('name')), - partial(self.__id_filter,filter_id=filter_dict.get('id')), - partial(self.__checksum_filter,filter_checksum=filter_dict.get('checksum')) + partial(self.__id_filter, filter_id=filter_dict.get('id')), + partial(self.__checksum_filter, filter_checksum=filter_dict.get('checksum')) ] r1 = [] @@ -435,65 +437,64 @@ class vimconnector(vimconn.VimConnector): for i in r1: img_info = { - 'name':i.get('name'), - 'id':i.get('uuid'), - 'checksum':i.get('checksum'), - 'location':i.get('uri'), - 'fos':i + 'name': i.get('name'), + 'id': i.get('uuid'), + 'checksum': i.get('checksum'), + 'location': i.get('uri'), + 'fos': i } r.append(img_info) return r - #raise VimConnNotImplemented( "Should have implemented this" ) + # raise VimConnNotImplemented( "Should have implemented this" ) def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, - availability_zone_index=None, availability_zone_list=None): + availability_zone_index=None, availability_zone_list=None): """Adds a VM instance to VIM - Params: - 'start': (boolean) indicates if VM must start or created in pause mode. - 'image_id','flavor_id': image and flavor VIM id to use for the VM - 'net_list': list of interfaces, each one is a dictionary with: - 'name': (optional) name for the interface. - 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual - 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities - 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... - 'mac_address': (optional) mac address to assign to this interface - 'ip_address': (optional) IP address to assign to this interface - #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided, - the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF - 'type': (mandatory) can be one of: - 'virtual', in this case always connected to a network of type 'net_type=bridge' - 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it - can created unconnected - 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. - 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs - are allocated on the same physical NIC - 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS - 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing - or True, it must apply the default VIM behaviour - After execution the method will add the key: - 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this - interface. 
'net_list' is modified - 'cloud_config': (optional) dictionary with: - 'key-pairs': (optional) list of strings with the public key to be inserted to the default user - 'users': (optional) list of users to be inserted, each item is a dict with: - 'name': (mandatory) user name, - 'key-pairs': (optional) list of strings with the public key to be inserted to the user - 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init, - or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file - 'config-files': (optional). List of files to be transferred. Each item is a dict with: - 'dest': (mandatory) string with the destination absolute path - 'encoding': (optional, by default text). Can be one of: - 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' - 'content' (mandatory): string with the content of the file - 'permissions': (optional) string with file permissions, typically octal notation '0644' - 'owner': (optional) file owner, string with the format 'owner:group' - 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) - 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with: - 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted - 'size': (mandatory) string with the size of the disk in GB - availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required - availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if - availability_zone_index is None + :param start: (boolean) indicates if VM must start or created in pause mode. + :param image_id: :param flavor_id: image and flavor VIM id to use for the VM + :param net_list: list of interfaces, each one is a dictionary with: + 'name': (optional) name for the interface. + 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual + 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities + 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... + 'mac_address': (optional) mac address to assign to this interface + 'ip_address': (optional) IP address to assign to this interface + #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided, + the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF + 'type': (mandatory) can be one of: + 'virtual', in this case always connected to a network of type 'net_type=bridge' + 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a + data/ptp network ot it can created unconnected + 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. + 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs + are allocated on the same physical NIC + 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS + 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing + or True, it must apply the default VIM behaviour + After execution the method will add the key: + 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this + interface. 
'net_list' is modified + :param cloud_config: (optional) dictionary with: + 'key-pairs': (optional) list of strings with the public key to be inserted to the default user + 'users': (optional) list of users to be inserted, each item is a dict with: + 'name': (mandatory) user name, + 'key-pairs': (optional) list of strings with the public key to be inserted to the user + 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init, + or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file + 'config-files': (optional). List of files to be transferred. Each item is a dict with: + 'dest': (mandatory) string with the destination absolute path + 'encoding': (optional, by default text). Can be one of: + 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' + 'content' (mandatory): string with the content of the file + 'permissions': (optional) string with file permissions, typically octal notation '0644' + 'owner': (optional) file owner, string with the format 'owner:group' + 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) + :param disk_list: (optional) list with additional disks to the VM. Each item is a dict with: + 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted + 'size': (mandatory) string with the size of the disk in GB + :param availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required + :param availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if + availability_zone_index is None Returns a tuple with the instance identifier and created_items or raises an exception on error created_items can be None or a dictionary where this method can include key-values that will be passed to the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc. 
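For illustration only, not part of the patch: a minimal sketch of how a caller could drive the Eclipse fog05 connector API whose new_vminstance docstring is reformatted in the hunk above. `conn` is assumed to be an already constructed osm_rovim_fos vimconnector, and the sketch assumes the usual vimconn contract that new_flavor/new_image return the new VIM identifier; every name, URL and key value below is a placeholder.

# Illustrative sketch (assumption: conn is a configured osm_rovim_fos vimconnector).
flavor_id = conn.new_flavor({'name': 'small', 'vcpus': 1, 'ram': 1024, 'disk': 10})
image_id = conn.new_image({'name': 'ubuntu', 'location': 'http://images.example/ubuntu.qcow2',
                           'disk_format': 'qcow2'})
net_id, _ = conn.new_network('mgmt-net', 'bridge')  # 'data'/'ptp' raise VimConnNotImplemented

net_list = [{'name': 'eth0', 'net_id': net_id, 'type': 'virtual', 'model': 'VIRTIO'}]
vm_id, created_items = conn.new_vminstance(
    name='test-vm', description='demo', start=True,
    image_id=image_id, flavor_id=flavor_id, net_list=net_list,
    cloud_config={'key-pairs': ['ssh-rsa AAAA... user@example']})

# On return the connector has filled net_list[0]['vim_id'] with the connection-point id,
# and created_items ('fdu_id', 'node_id', 'instance_id', ...) is what
# delete_vminstance(vm_id, created_items) later uses to offload the FDU.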
@@ -512,24 +513,24 @@ class vimconnector(vimconn.VimConnector): raise vimconn.VimConnNotFoundException("Image {} not found at VIM".format(image_id)) created_items = { - 'fdu_id':'', - 'node_id':'', - 'connection_points':[] - } + 'fdu_id': '', + 'node_id': '', + 'connection_points': [] + } fdu_desc = { - 'name':name, - 'id':fdu_uuid, - 'uuid':fdu_uuid, - 'computation_requirements':flv, - 'image':img, - 'hypervisor':self.hv, - 'migration_kind':'LIVE', - 'interfaces':[], - 'io_ports':[], - 'connection_points':[], - 'depends_on':[], - 'storage':[] + 'name': name, + 'id': fdu_uuid, + 'uuid': fdu_uuid, + 'computation_requirements': flv, + 'image': img, + 'hypervisor': self.hv, + 'migration_kind': 'LIVE', + 'interfaces': [], + 'io_ports': [], + 'connection_points': [], + 'depends_on': [], + 'storage': [] } nets = [] @@ -537,22 +538,22 @@ class vimconnector(vimconn.VimConnector): intf_id = 0 for n in net_list: cp_id = '{}'.format(uuid.uuid4()) - n.update({'vim_id':cp_id}) + n['vim_id'] = cp_id pair_id = n.get('net_id') cp_d = { - 'id':cp_id, + 'id': cp_id, 'name': cp_id, 'vld_ref': pair_id } intf_d = { - 'name':n.get('name','eth{}'.format(intf_id)), - 'is_mgmt':False, - 'if_type':'INTERNAL', - 'virtual_interface':{ - 'intf_type':n.get('model','VIRTIO'), - 'vpci':n.get('vpci','0:0:0'), - 'bandwidth':int(n.get('bw', 100)) + 'name': n.get('name', 'eth{}'.format(intf_id)), + 'is_mgmt': False, + 'if_type': 'INTERNAL', + 'virtual_interface': { + 'intf_type': n.get('model', 'VIRTIO'), + 'vpci': n.get('vpci', '0:0:0'), + 'bandwidth': int(n.get('bw', 100)) }, 'cp_id': cp_id } @@ -566,20 +567,17 @@ class vimconnector(vimconn.VimConnector): intf_id = intf_id + 1 if cloud_config is not None: - configuration = { - 'conf_type':'CLOUD_INIT' - } + configuration = {'conf_type': 'CLOUD_INIT'} if cloud_config.get('user-data') is not None: - configuration.update({'script':cloud_config.get('user-data')}) + configuration['script'] = cloud_config.get('user-data') if cloud_config.get('key-pairs') is not None: - configuration.update({'ssh_keys':cloud_config.get('key-pairs')}) + configuration['ssh_keys'] = cloud_config.get('key-pairs') if 'script' in configuration: - fdu_desc.update({'configuration':configuration}) + fdu_desc['configuration'] = configuration self.logger.debug('Eclipse fog05 FDU Descriptor: {}'.format(fdu_desc)) - fdu = FDU(fdu_desc) try: @@ -595,7 +593,8 @@ class vimconnector(vimconn.VimConnector): raise ValueError("Unable to find node for network creation") self.logger.debug('Selected node by VIM: {}'.format(selected_node)) - created_items.update({'fdu_id':fdu_uuid, 'node_id': selected_node}) + created_items['fdu_id'] = fdu_uuid + created_items['node_id'] = selected_node for cp in fdu_desc['connection_points']: nets = self.fos_api.network.list() @@ -608,17 +607,17 @@ class vimconnector(vimconn.VimConnector): self.logger.debug('Eclipse fog05 FDU Started {}'.format(instance.uuid)) - created_items.update({'instance_id': str(instance.uuid)}) + created_items['instance_id'] = str(instance.uuid) - self.fdu_node_map.update({instance.uuid: selected_node}) - self.logger.debug('new_vminstance returns: {} {}'.format( instance.uuid, created_items)) + self.fdu_node_map[instance.uuid] = selected_node + self.logger.debug('new_vminstance returns: {} {}'.format(instance.uuid, created_items)) return str(instance.uuid), created_items except fimapi.FIMAResouceExistingException as free: raise vimconn.VimConnConflictException("VM already exists at VIM. 
Error {}".format(free)) except Exception as e: raise vimconn.VimConnException("Error while instantiating VM {}. Error {}".format(name, e)) - def get_vminstance(self,vm_id): + def get_vminstance(self, vm_id): """Returns the VM instance information from VIM""" self.logger.debug('VIM get_vminstance with args: {}'.format(locals())) @@ -639,7 +638,7 @@ class vimconnector(vimconn.VimConnector): :return: None or the same vm_id. Raises an exception on fail """ self.logger.debug('FOS delete_vminstance with args: {}'.format(locals())) - fduid = created_items.get('fdu_id') + fduid = created_items.get('fdu_id') try: instance = self.fos_api.fdu.instance_info(vm_id) instance_list = self.fos_api.fdu.instance_list(instance.fdu_id) @@ -664,10 +663,10 @@ class vimconnector(vimconn.VimConnector): self.fos_api.fdu.offload(fduid) except Exception as e: - raise vimconn.VimConnException("Error on deletting VM with id {}. Error {}".format(vm_id,e)) + raise vimconn.VimConnException("Error on deleting VM with id {}. Error {}".format(vm_id, e)) return vm_id - #raise VimConnNotImplemented( "Should have implemented this" ) + # raise VimConnNotImplemented( "Should have implemented this" ) def refresh_vms_status(self, vm_list): """Get the status of the virtual machines and their interfaces/ports @@ -697,11 +696,11 @@ class vimconnector(vimconn.VimConnector): """ self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals())) fos2osm_status = { - 'DEFINE':'OTHER', - 'CONFIGURE':'INACTIVE', - 'RUN':'ACTIVE', - 'PAUSE':'PAUSED', - 'ERROR':'ERROR' + 'DEFINE': 'OTHER', + 'CONFIGURE': 'INACTIVE', + 'RUN': 'ACTIVE', + 'PAUSE': 'PAUSED', + 'ERROR': 'ERROR' } r = {} @@ -712,23 +711,23 @@ class vimconnector(vimconn.VimConnector): info = {} nid = self.fdu_node_map.get(vm) if nid is None: - r.update({vm:{ - 'status':'VIM_ERROR', - 'error_msg':'Not compute node associated for VM' - }}) + r[vm] = { + 'status': 'VIM_ERROR', + 'error_msg': 'Not compute node associated for VM' + } continue try: vm_info = self.fos_api.fdu.instance_info(vm) - except: - r.update({vm:{ - 'status':'VIM_ERROR', - 'error_msg':'unable to connect to VIM' - }}) + except Exception: + r[vm] = { + 'status': 'VIM_ERROR', + 'error_msg': 'unable to connect to VIM' + } continue if vm_info is None: - r.update({vm:{'status':'DELETED'}}) + r[vm:] = {'status': 'DELETED'} continue desc = self.fos_api.fdu.info(str(vm_info.fdu_id)) @@ -740,14 +739,14 @@ class vimconnector(vimconn.VimConnector): self.logger.debug('FOS status info {}'.format(vm_info)) self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status)) - info.update({'status':osm_status}) + info['status'] = osm_status if vm_info.get('status') == 'ERROR': - info.update({'error_msg':vm_info.get('error_code')}) + info['error_msg'] = vm_info.get('error_code') # yaml.safe_dump(json.loads(json.dumps(vm_info))) - # info.update({'vim_info':''}) + # info['vim_info'] = '' faces = [] i = 0 - for intf_name in vm_info.get('hypervisor_info').get('network',[]): + for intf_name in vm_info.get('hypervisor_info').get('network', []): intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name) face = {} face['compute_node'] = nid @@ -772,7 +771,7 @@ class vimconnector(vimconn.VimConnector): matches = [x for x in cps_d if x['id'] == cp_id] if len(matches) > 0: cpd = matches[0] - face['vim_net_id'] = cpd.get('vld_ref','') + face['vim_net_id'] = cpd.get('vld_ref', '') else: face['vim_net_id'] = '' face['vim_interface_id'] = cp_id @@ -783,8 +782,8 @@ class 
vimconnector(vimconn.VimConnector): faces.append(face) i += 1 - info.update({'interfaces':faces}) - r.update({vm:info}) + info['interfaces'] = faces + r[vm] = info self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info)) self.logger.debug('FOS refresh_vms_status res is {}'.format(r)) return r @@ -813,22 +812,26 @@ class vimconnector(vimconn.VimConnector): elif instance.get('status') == 'PAUSE': self.fos_api.fdu.resume(vm_id) else: - raise vimconn.VimConnConflictException('Cannot start from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot start from current state: {}'.format( + instance.get('status'))) elif "pause" in action_dict: if instance.get('status') == 'RUN': self.fos_api.fdu.pause(vm_id) else: - raise vimconn.VimConnConflictException('Cannot pause from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot pause from current state: {}'.format( + instance.get('status'))) elif "resume" in action_dict: if instance.get('status') == 'PAUSE': self.fos_api.fdu.resume(vm_id) else: - raise vimconn.VimConnConflictException('Cannot resume from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot resume from current state: {}'.format( + instance.get('status'))) elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict: if instance.get('status') == 'RUN': self.fos_api.fdu.stop(vm_id) else: - raise vimconn.VimConnConflictException('Cannot shutoff from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot shutoff from current state: {}'.format( + instance.get('status'))) elif "terminate" in action_dict: if instance.get('status') == 'RUN': self.fos_api.fdu.stop(vm_id) @@ -846,7 +849,8 @@ class vimconnector(vimconn.VimConnector): self.fos_api.fdu.undefine(vm_id) # self.fos_api.fdu.offload(vm_id) else: - raise vimconn.VimConnConflictException('Cannot terminate from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot terminate from current state: {}'.format( + instance.get('status'))) elif "rebuild" in action_dict: raise vimconn.VimConnNotImplemented("Rebuild not implemented") elif "reboot" in action_dict: @@ -854,6 +858,7 @@ class vimconnector(vimconn.VimConnector): self.fos_api.fdu.stop(vm_id) self.fos_api.fdu.start(vm_id) else: - raise vimconn.VimConnConflictException('Cannot reboot from current state: {}'.format(instance.get('status'))) + raise vimconn.VimConnConflictException('Cannot reboot from current state: {}'.format( + instance.get('status'))) except Exception as e: raise vimconn.VimConnConnectionException("VIM not reachable. 
Error {}".format(e)) diff --git a/RO-VIM-fos/tox.ini b/RO-VIM-fos/tox.ini index 297800b0..e25c4bd5 100644 --- a/RO-VIM-fos/tox.ini +++ b/RO-VIM-fos/tox.ini @@ -14,8 +14,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst b/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst index 27aacc74..cd155167 100755 --- a/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst +++ b/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst @@ -21,6 +21,6 @@ echo "POST INSTALL OSM-ROVIM-OPENNEBULA" #Pip packages required for opennebula connector python3 -m pip install -e git+https://github.com/python-oca/python-oca#egg=oca -python3 -m pip install untangle +# python3 -m pip install untangle python3 -m pip install pyone diff --git a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py index 00c9b025..a84b6654 100644 --- a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py +++ b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py @@ -30,13 +30,14 @@ __author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abra __date__ = "$13-dec-2017 11:09:29$" from osm_ro_plugin import vimconn import requests -import logging +# import logging import oca -import untangle +# import untangle import math import random import pyone + class vimconnector(vimconn.VimConnector): def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level="DEBUG", config={}, persistent_info={}): @@ -154,7 +155,7 @@ class vimconnector(vimconn.VimConnector): '.format(self.user, self.passwd, (str(id_user)), (str(id_group))) requests.post(self.url, params) - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): # , **vim_specific): + def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): """Adds a tenant network to VIM Params: 'net_name': name of the network @@ -200,30 +201,31 @@ class vimconnector(vimconn.VimConnector): size = int(math.pow(2, 32 - prefix)) if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None: ip_start = str(ip_profile["dhcp_start_address"]) - if ip_profile["ip_version"] == "IPv6": - ip_prefix_type = "GLOBAL_PREFIX" + # if ip_profile["ip_version"] == "IPv6": + # ip_prefix_type = "GLOBAL_PREFIX" if vlan is not None: vlan_id = vlan else: vlan_id = str(random.randint(100, 4095)) - #if "internal" in net_name: + # if "internal" in net_name: # OpenNebula not support two networks with same name random_net_name = str(random.randint(1, 1000000)) net_name = net_name + random_net_name net_id = one.vn.allocate({ - 'NAME': net_name, - 'VN_MAD': '802.1Q', - 'PHYDEV': self.config["network"]["phydev"], - 'VLAN_ID': vlan_id - }, self.config["cluster"]["id"]) - arpool = {'AR_POOL': { - 'AR': { - 'TYPE': 'IP4', - 'IP': ip_start, - 'SIZE': size - } + 'NAME': net_name, + 'VN_MAD': '802.1Q', + 'PHYDEV': self.config["network"]["phydev"], + 'VLAN_ID': vlan_id + }, self.config["cluster"]["id"]) + arpool = { + 'AR_POOL': { + 'AR': { + 'TYPE': 'IP4', + 'IP': ip_start, + 'SIZE': size } + } } one.vn.add_ar(net_id, arpool) return net_id, created_items @@ -233,14 +235,13 @@ class vimconnector(vimconn.VimConnector): def get_network_list(self, filter_dict={}): """Obtain tenant networks of VIM - 
Params: - 'filter_dict' (optional) contains entries to return only networks that matches ALL entries: - name: string => returns only networks with this name - id: string => returns networks with this VIM id, this imply returns one network at most - shared: boolean >= returns only networks that are (or are not) shared - tenant_id: sting => returns only networks that belong to this tenant/project - ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active - #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status + :params filter_dict: (optional) contains entries to return only networks that matches ALL entries: + name: string => returns only networks with this name + id: string => returns networks with this VIM id, this imply returns one network at most + shared: boolean >= returns only networks that are (or are not) shared + tenant_id: sting => returns only networks that belong to this tenant/project + (not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + (not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status Returns the network list of dictionaries. each dictionary contains: 'id': (mandatory) VIM network id 'name': (mandatory) VIM network name @@ -384,9 +385,9 @@ class vimconnector(vimconn.VimConnector): vpcus: cpus (cloud type) extended: EPA parameters - numas: #items requested in same NUMA - memory: number of 1G huge pages memory - paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads - interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa + memory: number of 1G huge pages memory + paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads + interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa - name: interface name dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC bandwidth: X Gbps; requested guarantee bandwidth @@ -472,60 +473,65 @@ class vimconnector(vimconn.VimConnector): def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None): - - """Adds a VM instance to VIM - Params: - 'start': (boolean) indicates if VM must start or created in pause mode. - 'image_id','flavor_id': image and flavor VIM id to use for the VM - 'net_list': list of interfaces, each one is a dictionary with: - 'name': (optional) name for the interface. - 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual - 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities - 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... - 'mac_address': (optional) mac address to assign to this interface - 'ip_address': (optional) IP address to assign to this interface - #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided, - the VLAN tag to be used. 
In case net_id is provided, the internal network vlan is used for tagging VF - 'type': (mandatory) can be one of: - 'virtual', in this case always connected to a network of type 'net_type=bridge' - 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it - can created unconnected - 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. - 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs - are allocated on the same physical NIC - 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS - 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing - or True, it must apply the default VIM behaviour - After execution the method will add the key: - 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this - interface. 'net_list' is modified - 'cloud_config': (optional) dictionary with: - 'key-pairs': (optional) list of strings with the public key to be inserted to the default user - 'users': (optional) list of users to be inserted, each item is a dict with: - 'name': (mandatory) user name, - 'key-pairs': (optional) list of strings with the public key to be inserted to the user - 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init, - or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file - 'config-files': (optional). List of files to be transferred. Each item is a dict with: - 'dest': (mandatory) string with the destination absolute path - 'encoding': (optional, by default text). Can be one of: - 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' - 'content' (mandatory): string with the content of the file - 'permissions': (optional) string with file permissions, typically octal notation '0644' - 'owner': (optional) file owner, string with the format 'owner:group' - 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) - 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with: - 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted - 'size': (mandatory) string with the size of the disk in GB - availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required - availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if + """ + Adds a VM instance to VIM + :param name: + :param description: + :param start: (boolean) indicates if VM must start or created in pause mode. + :param image_id: image VIM id to use for the VM + :param flavor_id: flavor VIM id to use for the VM + :param net_list: list of interfaces, each one is a dictionary with: + 'name': (optional) name for the interface. + 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual + 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM + capabilities + 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... + 'mac_address': (optional) mac address to assign to this interface + 'ip_address': (optional) IP address to assign to this interface + #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not + provided, the VLAN tag to be used. 
In case net_id is provided, the internal network vlan is + used for tagging VF + 'type': (mandatory) can be one of: + 'virtual', in this case always connected to a network of type 'net_type=bridge' + 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to + a data/ptp network ot itcan created unconnected + 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. + 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs + are allocated on the same physical NIC + 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS + 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing + or True, it must apply the default VIM behaviour + After execution the method will add the key: + 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this + interface. 'net_list' is modified + :param cloud_config: (optional) dictionary with: + 'key-pairs': (optional) list of strings with the public key to be inserted to the default user + 'users': (optional) list of users to be inserted, each item is a dict with: + 'name': (mandatory) user name, + 'key-pairs': (optional) list of strings with the public key to be inserted to the user + 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init, + or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file + 'config-files': (optional). List of files to be transferred. Each item is a dict with: + 'dest': (mandatory) string with the destination absolute path + 'encoding': (optional, by default text). Can be one of: + 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' + 'content' (mandatory): string with the content of the file + 'permissions': (optional) string with file permissions, typically octal notation '0644' + 'owner': (optional) file owner, string with the format 'owner:group' + 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) + :param disk_list: (optional) list with additional disks to the VM. Each item is a dict with: + 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted + 'size': (mandatory) string with the size of the disk in GB + :param availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV + required + :param availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if availability_zone_index is None - Returns a tuple with the instance identifier and created_items or raises an exception on error - created_items can be None or a dictionary where this method can include key-values that will be passed to - the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc. - Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same - as not present. - """ + :return: a tuple with the instance identifier and created_items or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. 
+ """ self.logger.debug( "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list))) try: @@ -593,7 +599,7 @@ class vimconnector(vimconn.VimConnector): else: vm = one.vm.info(int(vm_id)) - except pyone.OneNoExistsException as e: + except pyone.OneNoExistsException: self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted") raise vimconn.VimConnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id)) except Exception as e: @@ -667,9 +673,9 @@ class vimconnector(vimconn.VimConnector): interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), "vim_interface_id": str(net["NETWORK_ID"])} # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 - if u'IP' in net: + if 'IP' in net: interface["ip_address"] = str(net["IP"]) - if u'IP6_GLOBAL' in net: + if 'IP6_GLOBAL' in net: interface["ip_address"] = str(net["IP6_GLOBAL"]) interfaces.append(interface) else: @@ -677,11 +683,11 @@ class vimconnector(vimconn.VimConnector): interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), "vim_interface_id": str(net["NETWORK_ID"])} # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 - if u'IP' in net: + if 'IP' in net: interface["ip_address"] = str(net["IP"]) - if u'IP6_GLOBAL' in net: + if 'IP6_GLOBAL' in net: interface["ip_address"] = str(net["IP6_GLOBAL"]) interfaces.append(interface) return interfaces - except Exception as e: + except Exception: self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID)) diff --git a/RO-VIM-opennebula/requirements.txt b/RO-VIM-opennebula/requirements.txt index d3420cf9..6db93ce9 100644 --- a/RO-VIM-opennebula/requirements.txt +++ b/RO-VIM-opennebula/requirements.txt @@ -17,7 +17,7 @@ PyYAML requests netaddr -untangle +# untangle pyone git+https://github.com/python-oca/python-oca#egg=oca git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin diff --git a/RO-VIM-opennebula/tox.ini b/RO-VIM-opennebula/tox.ini index 6fb9d372..b6993f5a 100644 --- a/RO-VIM-opennebula/tox.ini +++ b/RO-VIM-opennebula/tox.ini @@ -15,8 +15,8 @@ ## [tox] -envlist = py3 -toxworkdir={homedir}/.tox +envlist = flake8 +toxworkdir={toxinidir}/../.tox [testenv] basepython = python3 diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py index 6bfc1252..f78c5b69 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py @@ -424,7 +424,7 @@ class TestSfcOperations(unittest.TestCase): # translated and returned the OpenStack result self.assertEqual(result, [ {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2', - '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], + '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], 'description': '', 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index ba8b004c..289c8278 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -18,7 +18,7 @@ # under the License. 
## -''' +""" osconnector implements all the methods to interact with openstack using the python-neutronclient. For the VNF forwarding graph, The OpenStack VIM connector calls the @@ -28,9 +28,7 @@ to the VIM connector's SFC resources as follows: - Service Function Instance (OSM) -> Port Pair (Neutron) - Service Function (OSM) -> Port Pair Group (Neutron) - Service Function Path (OSM) -> Port Chain (Neutron) -''' -__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa" -__date__ = "$22-sep-2017 23:59:59$" +""" from osm_ro_plugin import vimconn # import json @@ -51,27 +49,35 @@ import keystoneclient.v3.client as ksClient_v3 import keystoneclient.v2_0.client as ksClient_v2 from glanceclient import client as glClient import glanceclient.exc as gl1Exceptions -from cinderclient import client as cClient -from http.client import HTTPException # TODO py3 check that this base exception matches python2 httplib.HTTPException +from cinderclient import client as cClient +from http.client import HTTPException # TODO py3 check that this base exception matches python2 httplib.HTTPException from neutronclient.neutron import client as neClient from neutronclient.common import exceptions as neExceptions from requests.exceptions import ConnectionError +__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa" +__date__ = "$22-sep-2017 23:59:59$" """contain the openstack virtual machine status to openmano status""" -vmStatus2manoFormat={'ACTIVE':'ACTIVE', - 'PAUSED':'PAUSED', - 'SUSPENDED': 'SUSPENDED', - 'SHUTOFF':'INACTIVE', - 'BUILD':'BUILD', - 'ERROR':'ERROR','DELETED':'DELETED' - } -netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED' - } +vmStatus2manoFormat = {'ACTIVE': 'ACTIVE', + 'PAUSED': 'PAUSED', + 'SUSPENDED': 'SUSPENDED', + 'SHUTOFF': 'INACTIVE', + 'BUILD': 'BUILD', + 'ERROR': 'ERROR', + 'DELETED': 'DELETED' + } +netStatus2manoFormat = {'ACTIVE': 'ACTIVE', + 'PAUSED': 'PAUSED', + 'INACTIVE': 'INACTIVE', + 'BUILD': 'BUILD', + 'ERROR': 'ERROR', + 'DELETED': 'DELETED' + } supportedClassificationTypes = ['legacy_flow_classifier'] -#global var to have a timeout creating and deleting volumes +# global var to have a timeout creating and deleting volumes volume_timeout = 1800 server_timeout = 1800 @@ -90,10 +96,10 @@ class SafeDumper(yaml.SafeDumper): class vimconnector(vimconn.VimConnector): def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}): - '''using common constructor parameters. In this case + """using common constructor parameters. In this case 'url' is the keystone authorization url, 'url_admin' is not use - ''' + """ api_version = config.get('APIversion') if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'): raise vimconn.VimConnException("Invalid value '{}' for config:APIversion. " @@ -101,14 +107,14 @@ class vimconnector(vimconn.VimConnector): vim_type = config.get('vim_type') if vim_type and vim_type not in ('vio', 'VIO'): raise vimconn.VimConnException("Invalid value '{}' for config:vim_type." 
- "Allowed values are 'vio' or 'VIO'".format(vim_type)) + "Allowed values are 'vio' or 'VIO'".format(vim_type)) if config.get('dataplane_net_vlan_range') is not None: - #validate vlan ranges provided by user + # validate vlan ranges provided by user self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range') if config.get('multisegment_vlan_range') is not None: - #validate vlan ranges provided by user + # validate vlan ranges provided by user self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range') vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, @@ -153,12 +159,12 @@ class vimconnector(vimconn.VimConnector): self.config['security_groups'] = [self.config['security_groups']] self.security_groups_id = None - ####### VIO Specific Changes ######### + # ###### VIO Specific Changes ######### if self.vim_type == "VIO": self.logger = logging.getLogger('openmano.vim.vio') if log_level: - self.logger.setLevel( getattr(logging, log_level)) + self.logger.setLevel(getattr(logging, log_level)) def __getitem__(self, index): """Get individuals parameters. @@ -176,7 +182,7 @@ class vimconnector(vimconn.VimConnector): if index == 'project_domain_id': self.config["project_domain_id"] = value elif index == 'user_domain_id': - self.config["user_domain_id"] = value + self.config["user_domain_id"] = value else: vimconn.VimConnector.__setitem__(self, index, value) self.session['reload_client'] = True @@ -195,20 +201,20 @@ class vimconnector(vimconn.VimConnector): return yaml.dump(value, Dumper=SafeDumper, default_flow_style=True, width=256) except yaml.representer.RepresenterError: - self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value), - exc_info=True) - return str(value) + self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value), + exc_info=True) + return str(value) def _reload_connection(self): - '''Called before any operation, it check if credentials has changed + """Called before any operation, it check if credentials has changed Throw keystoneclient.apiclient.exceptions.AuthorizationFailure - ''' - #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) + """ + # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) if self.session['reload_client']: if self.config.get('APIversion'): self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3' else: # get from ending auth_url that end with v3 or with v2.0 - self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/") + self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/") self.session['api_version3'] = self.api_version3 if self.api_version3: if self.config.get('project_domain_id') or self.config.get('project_domain_name'): @@ -235,10 +241,12 @@ class vimconnector(vimconn.VimConnector): tenant_name=self.tenant_name, tenant_id=self.tenant_id) sess = session.Session(auth=auth, verify=self.verify) - # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX + # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River + # Titanium cloud and StarlingX region_name = self.config.get('region_name') if self.api_version3: - self.keystone = 
ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, + region_name=region_name) else: self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type) self.session['keystone'] = self.keystone @@ -251,13 +259,18 @@ class vimconnector(vimconn.VimConnector): version = self.config.get("microversion") if not version: version = "2.1" - # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX - self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name) - self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name) - self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River + # Titanium cloud and StarlingX + self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, + endpoint_type=self.endpoint_type, region_name=region_name) + self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name) + self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, + region_name=region_name) try: self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id() - except Exception as e: + except Exception: self.logger.error("Cannot get project_id from session", exc_info=True) if self.endpoint_type == "internalURL": glance_service_id = self.keystone.services.list(name="glance")[0].id @@ -276,19 +289,19 @@ class vimconnector(vimconn.VimConnector): self.security_groups_id = None # force to get again security_groups_ids next time they are needed def __net_os2mano(self, net_list_dict): - '''Transform the net openstack format to mano format - net_list_dict can be a list of dict or a single dict''' + """Transform the net openstack format to mano format + net_list_dict can be a list of dict or a single dict""" if type(net_list_dict) is dict: - net_list_=(net_list_dict,) + net_list_ = (net_list_dict,) elif type(net_list_dict) is list: - net_list_=net_list_dict + net_list_ = net_list_dict else: raise TypeError("param net_list_dict must be a list or a dictionary") for net in net_list_: if net.get('provider:network_type') == "vlan": - net['type']='data' + net['type'] = 'data' else: - net['type']='bridge' + net['type'] = 'bridge' def __classification_os2mano(self, class_list_dict): """Transform the openstack format (Flow Classifier) to mano format @@ -299,8 +312,7 @@ class vimconnector(vimconn.VimConnector): elif isinstance(class_list_dict, list): class_list_ = class_list_dict else: - raise TypeError( - "param class_list_dict must be a list or a dictionary") + raise TypeError("param class_list_dict must be a list or a dictionary") for classification in class_list_: id = classification.pop('id') name = classification.pop('name') @@ -447,13 +459,13 @@ class vimconnector(vimconn.VimConnector): self.get_network_list(filter_dict={}) def get_tenant_list(self, filter_dict={}): - '''Obtain tenants of VIM + """Obtain tenants of VIM filter_dict can contain the following keys: name: filter by tenant name id: filter by tenant uuid/id Returns the tenant 
list of dictionaries: [{'name':', 'id':', ...}, ...] - ''' + """ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() @@ -461,7 +473,7 @@ class vimconnector(vimconn.VimConnector): project_class_list = self.keystone.projects.list(name=filter_dict.get("name")) else: project_class_list = self.keystone.tenants.findall(**filter_dict) - project_list=[] + project_list = [] for project in project_class_list: if filter_dict.get('id') and filter_dict["id"] != project.id: continue @@ -471,7 +483,7 @@ class vimconnector(vimconn.VimConnector): self._format_exception(e) def new_tenant(self, tenant_name, tenant_description): - '''Adds a new tenant to openstack VIM. Returns the tenant identifier''' + """Adds a new tenant to openstack VIM. Returns the tenant identifier""" self.logger.debug("Adding a new tenant name: %s", tenant_name) try: self._reload_connection() @@ -481,11 +493,12 @@ class vimconnector(vimconn.VimConnector): else: project = self.keystone.tenants.create(tenant_name, tenant_description) return project.id - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError) as e: + except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)\ + as e: self._format_exception(e) def delete_tenant(self, tenant_id): - '''Delete a tenant from openstack VIM. Returns the old tenant identifier''' + """Delete a tenant from openstack VIM. Returns the old tenant identifier""" self.logger.debug("Deleting tenant %s from VIM", tenant_id) try: self._reload_connection() @@ -494,7 +507,8 @@ class vimconnector(vimconn.VimConnector): else: self.keystone.tenants.delete(tenant_id) return tenant_id - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError) as e: + except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)\ + as e: self._format_exception(e) def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): @@ -561,7 +575,7 @@ class vimconnector(vimconn.VimConnector): if provider_network_profile and "network-type" in provider_network_profile: network_dict["provider:network_type"] = provider_network_profile["network-type"] else: - network_dict["provider:network_type"] = self.config.get('dataplane_network_type','vlan') + network_dict["provider:network_type"] = self.config.get('dataplane_network_type', 'vlan') if vlan: network_dict["provider:segmentation_id"] = vlan else: @@ -583,7 +597,8 @@ class vimconnector(vimconn.VimConnector): segment2_dict["provider:segmentation_id"] = vlanID # else # raise vimconn.VimConnConflictException( - # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network") + # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment + # network") segment_list.append(segment2_dict) network_dict["segments"] = segment_list @@ -598,22 +613,22 @@ class vimconnector(vimconn.VimConnector): network_dict["shared"] = shared if self.config.get("disable_network_port_security"): network_dict["port_security_enabled"] = False - new_net = self.neutron.create_network({'network':network_dict}) + new_net = self.neutron.create_network({'network': network_dict}) # print new_net # create subnetwork, even if there is no profile if not ip_profile: ip_profile = {} if not ip_profile.get('subnet_address'): - #Fake subnet is required + # Fake subnet is 
required subnet_rand = random.randint(0, 255) ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand) if 'ip_version' not in ip_profile: ip_profile['ip_version'] = "IPv4" subnet = {"name": net_name+"-subnet", - "network_id": new_net["network"]["id"], - "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6, - "cidr": ip_profile['subnet_address'] - } + "network_id": new_net["network"]["id"], + "ip_version": 4 if ip_profile['ip_version'] == "IPv4" else 6, + "cidr": ip_profile['subnet_address'] + } # Gateway should be set to None if not needed. Otherwise openstack assigns one by default if ip_profile.get('gateway_address'): subnet['gateway_ip'] = ip_profile['gateway_address'] @@ -623,34 +638,35 @@ class vimconnector(vimconn.VimConnector): subnet['dns_nameservers'] = ip_profile['dns_address'].split(";") if 'dhcp_enabled' in ip_profile: subnet['enable_dhcp'] = False if \ - ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True + ip_profile['dhcp_enabled'] == "false" or ip_profile['dhcp_enabled'] is False else True if ip_profile.get('dhcp_start_address'): subnet['allocation_pools'] = [] subnet['allocation_pools'].append(dict()) subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address'] if ip_profile.get('dhcp_count'): - #parts = ip_profile['dhcp_start_address'].split('.') - #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) + # parts = ip_profile['dhcp_start_address'].split('.') + # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address'])) ip_int += ip_profile['dhcp_count'] - 1 ip_str = str(netaddr.IPAddress(ip_int)) subnet['allocation_pools'][0]['end'] = ip_str - #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) - self.neutron.create_subnet({"subnet": subnet} ) + # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) + self.neutron.create_subnet({"subnet": subnet}) if net_type == "data" and self.config.get('multisegment_support'): if self.config.get('l2gw_support'): l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ()) for l2gw in l2gw_list: - l2gw_conn = {} - l2gw_conn["l2_gateway_id"] = l2gw["id"] - l2gw_conn["network_id"] = new_net["network"]["id"] - l2gw_conn["segmentation_id"] = str(vlanID) + l2gw_conn = { + "l2_gateway_id": l2gw["id"], + "network_id": new_net["network"]["id"], + "segmentation_id": str(vlanID), + } new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn}) created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True return new_net["network"]["id"], created_items except Exception as e: - #delete l2gw connections (if any) before deleting the network + # delete l2gw connections (if any) before deleting the network for k, v in created_items.items(): if not v: # skip already deleted continue @@ -665,7 +681,7 @@ class vimconnector(vimconn.VimConnector): self._format_exception(e) def get_network_list(self, filter_dict={}): - '''Obtain tenant networks of VIM + """Obtain tenant networks of VIM Filter_dict can be: name: network name id: network uuid @@ -674,33 +690,34 @@ class vimconnector(vimconn.VimConnector): admin_state_up: boolean status: 'ACTIVE' Returns the network list of dictionaries - ''' + """ self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() filter_dict_os = filter_dict.copy() if self.api_version3 and "tenant_id" 
in filter_dict_os: - filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') #T ODO check + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') # TODO check net_dict = self.neutron.list_networks(**filter_dict_os) net_list = net_dict["networks"] self.__net_os2mano(net_list) return net_list - except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: + except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, + ConnectionError) as e: self._format_exception(e) def get_network(self, net_id): - '''Obtain details of network from VIM - Returns the network information from a network id''' + """Obtain details of network from VIM + Returns the network information from a network id""" self.logger.debug(" Getting tenant network %s from VIM", net_id) - filter_dict={"id": net_id} + filter_dict = {"id": net_id} net_list = self.get_network_list(filter_dict) - if len(net_list)==0: + if len(net_list) == 0: raise vimconn.VimConnNotFoundException("Network '{}' not found".format(net_id)) - elif len(net_list)>1: + elif len(net_list) > 1: raise vimconn.VimConnConflictException("Found more than one network with this criteria") net = net_list[0] - subnets=[] - for subnet_id in net.get("subnets", () ): + subnets = [] + for subnet_id in net.get("subnets", ()): try: subnet = self.neutron.show_subnet(subnet_id) except Exception as e: @@ -722,11 +739,11 @@ class vimconnector(vimconn.VimConnector): Returns the network identifier or raises an exception upon error or when network is not found """ self.logger.debug("Deleting network '%s' from VIM", net_id) - if created_items == None: + if created_items is None: created_items = {} try: self._reload_connection() - #delete l2gw connections (if any) before deleting the network + # delete l2gw connections (if any) before deleting the network for k, v in created_items.items(): if not v: # skip already deleted continue @@ -736,7 +753,7 @@ class vimconnector(vimconn.VimConnector): self.neutron.delete_l2_gateway_connection(k_id) except Exception as e: self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e)) - #delete VM ports attached to this networks before the network + # delete VM ports attached to this networks before the network ports = self.neutron.list_ports(network_id=net_id) for p in ports['ports']: try: @@ -750,7 +767,7 @@ class vimconnector(vimconn.VimConnector): self._format_exception(e) def refresh_nets_status(self, net_list): - '''Get the status of the networks + """Get the status of the networks Params: the list of network identifiers Returns a dictionary with: net_id: #VIM id of this network @@ -765,14 +782,14 @@ class vimconnector(vimconn.VimConnector): error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - ''' - net_dict={} + """ + net_dict = {} for net_id in net_list: net = {} try: net_vim = self.get_network(net_id) if net_vim['status'] in netStatus2manoFormat: - net["status"] = netStatus2manoFormat[ net_vim['status'] ] + net["status"] = netStatus2manoFormat[net_vim['status']] else: net["status"] = "OTHER" net["error_msg"] = "VIM status reported " + net_vim['status'] @@ -782,7 +799,7 @@ class vimconnector(vimconn.VimConnector): net['vim_info'] = self.serialize(net_vim) - if net_vim.get('fault'): #TODO + if net_vim.get('fault'): # TODO net['error_msg'] = str(net_vim['fault']) except vimconn.VimConnNotFoundException as e: self.logger.error("Exception getting net status: %s", str(e)) @@ -796,14 +813,15 @@ class vimconnector(vimconn.VimConnector): return net_dict def get_flavor(self, flavor_id): - '''Obtain flavor details from the VIM. Returns the flavor dict details''' + """Obtain flavor details from the VIM. Returns the flavor dict details""" self.logger.debug("Getting flavor '%s'", flavor_id) try: self._reload_connection() flavor = self.nova.flavors.find(id=flavor_id) - #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) + # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) return flavor.to_dict() - except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e: + except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, + ConnectionError) as e: self._format_exception(e) def get_flavor_id_from_data(self, flavor_dict): @@ -823,7 +841,7 @@ class vimconnector(vimconn.VimConnector): # numa=None extended = flavor_dict.get("extended", {}) if extended: - #TODO + # TODO raise vimconn.VimConnNotFoundException("Flavor with EPA still not implemented") # if len(numas) > 1: # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa") @@ -842,8 +860,9 @@ class vimconnector(vimconn.VimConnector): flavor_candidate_data = flavor_data if not exact_match and flavor_candidate_id: return flavor_candidate_id - raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict))) - except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e: + raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(flavor_dict)) + except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, + ConnectionError) as e: self._format_exception(e) def process_resource_quota(self, quota, prefix, extra_specs): @@ -861,37 +880,38 @@ class vimconnector(vimconn.VimConnector): extra_specs["quota:" + prefix + "_shares_share"] = quota['shares'] def new_flavor(self, flavor_data, change_name_if_used=True): - '''Adds a tenant flavor to openstack VIM - if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition + """Adds a tenant flavor to openstack VIM + if change_name_if_used is True, it will change name in case of conflict, because it is not supported name + repetition Returns the flavor identifier - ''' + """ self.logger.debug("Adding flavor '%s'", str(flavor_data)) - retry=0 - max_retries=3 + retry = 0 + max_retries = 3 name_suffix = 0 try: - name=flavor_data['name'] - while retry 1: @@ -904,31 +924,37 @@ class 
vimconnector(vimconn.VimConnector): extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}' extra_specs["vmware:latency_sensitivity_level"] = "high" for numa in numas: - #overwrite ram and vcpus - #check if key 'memory' is present in numa else use ram value at flavor + # overwrite ram and vcpus + # check if key 'memory' is present in numa else use ram value at flavor if 'memory' in numa: ram = numa['memory']*1024 - #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html + # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/ + # implemented/virt-driver-cpu-thread-pinning.html extra_specs["hw:cpu_sockets"] = 1 if 'paired-threads' in numa: vcpus = numa['paired-threads']*2 - #cpu_thread_policy "require" implies that the compute node must have an STM architecture + # cpu_thread_policy "require" implies that the compute node must have an + # STM architecture extra_specs["hw:cpu_thread_policy"] = "require" extra_specs["hw:cpu_policy"] = "dedicated" elif 'cores' in numa: vcpus = numa['cores'] - # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated + # cpu_thread_policy "prefer" implies that the host must not have an SMT + # architecture, or a non-SMT architecture will be emulated extra_specs["hw:cpu_thread_policy"] = "isolate" extra_specs["hw:cpu_policy"] = "dedicated" elif 'threads' in numa: vcpus = numa['threads'] - # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture + # cpu_thread_policy "prefer" implies that the host may or may not have an SMT + # architecture extra_specs["hw:cpu_thread_policy"] = "prefer" extra_specs["hw:cpu_policy"] = "dedicated" # for interface in numa.get("interfaces",() ): # if interface["dedicated"]=="yes": - # raise vimconn.VimConnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable) - # #TODO, add the key 'pci_passthrough:alias"="
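Editorial aside, not part of the patch: the reworded comments in the new_flavor() hunk above describe how an OSM NUMA request is turned into Nova extra_specs. A minimal sketch of that mapping, assuming the same keys the hunk uses (hw:cpu_sockets, hw:cpu_policy, hw:cpu_thread_policy); the helper name is hypothetical and memory is taken in GiB, as in the original code:

def numa_to_extra_specs(numa, ram, vcpus):
    # Hypothetical helper; mirrors the per-NUMA logic shown in the hunk above.
    extra_specs = {"hw:cpu_sockets": 1}
    if 'memory' in numa:
        ram = numa['memory'] * 1024               # GiB in the descriptor, MiB for Nova
    if 'paired-threads' in numa:
        vcpus = numa['paired-threads'] * 2        # each pair maps to two host threads
        extra_specs["hw:cpu_thread_policy"] = "require"  # host must expose SMT siblings
    elif 'cores' in numa:
        vcpus = numa['cores']
        extra_specs["hw:cpu_thread_policy"] = "isolate"  # vCPUs pinned to cores, siblings left idle
    elif 'threads' in numa:
        vcpus = numa['threads']
        extra_specs["hw:cpu_thread_policy"] = "prefer"   # use SMT siblings when available
    extra_specs["hw:cpu_policy"] = "dedicated"           # always pin when a NUMA block is given
    return ram, vcpus, extra_specs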
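Editorial aside, not part of the patch: the new_network() hunk above derives the end of the DHCP allocation pool from dhcp_start_address and dhcp_count with netaddr. A standalone illustration of that arithmetic (the address and count are made-up values):

import netaddr

start = "192.168.1.10"
count = 20
ip_int = int(netaddr.IPAddress(start))            # 3232235786 for 192.168.1.10
end = str(netaddr.IPAddress(ip_int + count - 1))  # "192.168.1.29"
# The resulting pool spans exactly 'count' addresses, the start address included.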
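Editorial aside, not part of the patch: the module docstring rewritten at the top of this file maps OSM SFC concepts onto networking-sfc resources (classification -> flow classifier, service function instance -> port pair, service function -> port pair group, service function path -> port chain). A hedged sketch of that mapping as it would look through python-neutronclient, assuming the networking-sfc extension is installed; the port UUIDs are placeholders and the call names follow the networking-sfc API rather than this connector's own methods:

# 'neutron' is assumed to be an authenticated neutronclient instance; UUIDs are placeholders.
INGRESS_PORT_ID = "11111111-1111-1111-1111-111111111111"
EGRESS_PORT_ID = "22222222-2222-2222-2222-222222222222"

pair = neutron.create_sfc_port_pair(
    {"port_pair": {"name": "sfi-1", "ingress": INGRESS_PORT_ID, "egress": EGRESS_PORT_ID}})
group = neutron.create_sfc_port_pair_group(
    {"port_pair_group": {"name": "sf-1", "port_pairs": [pair["port_pair"]["id"]]}})
classifier = neutron.create_sfc_flow_classifier(
    {"flow_classifier": {"name": "class-1", "protocol": "tcp",
                         "logical_source_port": INGRESS_PORT_ID}})
chain = neutron.create_sfc_port_chain(
    {"port_chain": {"name": "sfp-1",
                    "port_pair_groups": [group["port_pair_group"]["id"]],
                    "flow_classifiers": [classifier["flow_classifier"]["id"]]}})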