# This is not needed, because package dependency will install anyway.
# But done here in order to speed up image generation by using the cache
RUN DEBIAN_FRONTEND=noninteractive apt-get -y install python3-neutronclient python3-openstackclient \
- python3-requests python3-netaddr python3-argcomplete
+ python3-requests python3-netaddr python3-argcomplete \
+ && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U jsonrpclib-pelix cvprac \
+ "osm-im @ git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im" "azure==4.0.0" boto "fog05rest>=0.0.4" \
+ untangle pyone "oca @ git+https://github.com/python-oca/python-oca#egg=oca"
# DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
# TODO py3 DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:rocky && apt-get update && apt-get install -y python3-networking-l2gw \
# DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libssl-dev libffi-dev python-mysqldb && \
-# DEBIAN_FRONTEND=noninteractive pip2 install -U azure && \
-# DEBIAN_FRONTEND=noninteractive pip2 install -U fog05rest && \
# && DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
# DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
-# DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
-# DEBIAN_FRONTEND=noninteractive pip2 install pyone && \
-# DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
COPY . /root/RO
python3 -m pip install -e /root/RO/RO-SDN-tapi && \
python3 -m pip install -e /root/RO/RO-SDN-onos_vpls && \
python3 -m pip install -e /root/RO/RO-SDN-onos_openflow && \
+ python3 -m pip install -e /root/RO/RO-SDN-odl_openflow && \
python3 -m pip install -e /root/RO/RO-SDN-floodlight_openflow && \
+ python3 -m pip install -e /root/RO/RO-SDN-arista && \
rm -rf /root/.cache && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright Copyright 2019 ETSI
+ Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# Default target: rebuild the Debian package from a clean tree.
+all: clean package
+
+# Remove all build artifacts (sdist tarball, stdeb output, egg metadata).
+clean:
+	rm -rf dist deb_dist osm_rosdn_arista-*.tar.gz osm_rosdn_arista.egg-info .eggs
+
+# Build the python3-osm-rosdn-arista Debian package via stdeb, copying the
+# postinst hook into place before dpkg-buildpackage runs (unsigned: -uc -us).
+package:
+	python3 setup.py --command-packages=stdeb.command sdist_dsc
+	cp debian/python3-osm-rosdn-arista.postinst deb_dist/osm-rosdn-arista*/debian/
+	cd deb_dist/osm-rosdn-arista*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROSDN-ARISTA"
+
+#Pip packages required for openstack connector
+python3 -m pip install cvprac
--- /dev/null
+# -*- coding: utf-8 -*-\r
+##\r
+# Copyright 2019 Atos - CoE Telco NFV Team\r
+# All Rights Reserved.\r
+#\r
+# Contributors: Oscar Luis Peral, Atos\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License"); you may\r
+# not use this file except in compliance with the License. You may obtain\r
+# a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+# License for the specific language governing permissions and limitations\r
+# under the License.\r
+#\r
+# For those usages not covered by the Apache License, Version 2.0 please\r
+# contact with: <oscarluis.peral@atos.net>\r
+#\r
+# Neither the name of Atos nor the names of its\r
+# contributors may be used to endorse or promote products derived from\r
+# this software without specific prior written permission.\r
+#\r
+# This work has been performed in the context of Arista Telefonica OSM PoC.\r
+##\r
+\r
+\r
+class AristaSDNConfigLet:\r
+    """Factory of Arista EOS CLI snippets ("ConfigLets") used by the Arista\r
+    SDN connector to configure ports, VLANs, VXLAN and BGP for ELAN/ELINE\r
+    connectivity services.\r
+\r
+    Each getElan_*/getEline_* method renders one of the class-level string\r
+    templates with the service data and returns the resulting text; every\r
+    snippet is tagged with the service uuid in a '!! service:' comment line.\r
+    """\r
+\r
+    # Trunk-port configuration for an SR-IOV connection point.\r
+    _configLet_SRIOV = """\r
+interface {interface}\r
+   !! service: {uuid}\r
+   switchport\r
+   switchport mode trunk\r
+   switchport trunk group {service}{vlan_id}\r
+!\r
+"""\r
+\r
+    def _get_sriov(self, uuid, interface, vlan_id, s_type, index):\r
+        # 'index' is accepted for API uniformity but not used in the template\r
+        return self._configLet_SRIOV.format(uuid=uuid, interface=interface, service=s_type, vlan_id=vlan_id)\r
+\r
+    def getElan_sriov(self, uuid, interface, vlan_id, index):\r
+        return self._get_sriov(uuid, interface, vlan_id, "ELAN", index)\r
+\r
+    def getEline_sriov(self, uuid, interface, vlan_id, index):\r
+        return self._get_sriov(uuid, interface, vlan_id, "ELINE", index)\r
+\r
+    # dot1q-tunnel (QinQ) access port for a PCI passthrough connection point.\r
+    # NOTE(review): attribute keeps the original 'PASSTROUGH' spelling; both\r
+    # uses below are consistent with it.\r
+    _configLet_PASSTROUGH = """\r
+interface {interface}\r
+   !! service: {uuid}\r
+   switchport\r
+   switchport mode dot1q-tunnel\r
+   switchport access vlan {vlan_id}\r
+!\r
+"""\r
+\r
+    def _get_passthrough(self, uuid, interface, vlan_id, s_type, index):\r
+        # 's_type' and 'index' are accepted for API uniformity but unused\r
+        return self._configLet_PASSTROUGH.format(uuid=uuid, interface=interface, vlan_id=vlan_id)\r
+\r
+    def getElan_passthrough(self, uuid, interface, vlan_id, index):\r
+        return self._get_passthrough(uuid, interface, vlan_id, "ELAN", index)\r
+\r
+    def getEline_passthrough(self, uuid, interface, vlan_id, index):\r
+        return self._get_passthrough(uuid, interface, vlan_id, "ELINE", index)\r
+\r
+    # Service VLAN with its trunk groups (shared with the MLAG peer) and the\r
+    # VLAN-to-VNI mapping on the VXLAN1 interface.\r
+    _configLet_VLAN = """\r
+vlan {vlan}\r
+   !! service: {service} {vlan} {uuid}\r
+   name {service}{vlan}\r
+   trunk group {service}{vlan}\r
+   trunk group MLAGPEER\r
+\r
+interface VXLAN1\r
+   VXLAN vlan {vlan} vni {vni}\r
+!\r
+"""\r
+\r
+    def _get_vlan(self, uuid, vlan_id, vni_id, s_type):\r
+        return self._configLet_VLAN.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id)\r
+\r
+    def getElan_vlan(self, uuid, vlan_id, vni_id):\r
+        return self._get_vlan(uuid, vlan_id, vni_id, "ELAN")\r
+\r
+    def getEline_vlan(self, uuid, vlan_id, vni_id):\r
+        return self._get_vlan(uuid, vlan_id, vni_id, "ELINE")\r
+\r
+    # BGP/EVPN configuration for the service VLAN; RD is built from the\r
+    # switch Loopback0 address and the VNI, RT from the VNI alone.\r
+    _configLet_BGP = """\r
+router bgp {bgp}\r
+   vlan {vlan}\r
+      !! service: {uuid}\r
+      rd {loopback}:{vni}\r
+      route-target both {vni}:{vni}\r
+      redistribute learned\r
+!\r
+"""\r
+\r
+    def _get_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp, s_type):\r
+        # 's_type' is accepted for API uniformity but unused in the template\r
+        return self._configLet_BGP.format(uuid=uuid, bgp=bgp, vlan=vlan_id, loopback=loopback0, vni=vni_id)\r
+\r
+    def getElan_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp):\r
+        return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELAN")\r
+\r
+    def getEline_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp):\r
+        return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELINE")\r
--- /dev/null
+# -*- coding: utf-8 -*-\r
+##\r
+# Copyright 2019 Atos - CoE Telco NFV Team\r
+# All Rights Reserved.\r
+#\r
+# Contributors: Oscar Luis Peral, Atos\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License"); you may\r
+# not use this file except in compliance with the License. You may obtain\r
+# a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+# License for the specific language governing permissions and limitations\r
+# under the License.\r
+#\r
+# For those usages not covered by the Apache License, Version 2.0 please\r
+# contact with: <oscarluis.peral@atos.net>\r
+#\r
+# Neither the name of Atos nor the names of its\r
+# contributors may be used to endorse or promote products derived from\r
+# this software without specific prior written permission.\r
+#\r
+# This work has been performed in the context of Arista Telefonica OSM PoC.\r
+##\r
+\r
+from jsonrpclib import Server\r
+import socket\r
+import ssl\r
+\r
+\r
+class AristaSwitch():\r
+    """\r
+    Used to run switch commands through eAPI and check command output.\r
+\r
+    Commands are sent either over HTTPS (jsonrpclib Server built from\r
+    user/passwd/host) or over a local unix socket, when one is supplied.\r
+    """\r
+\r
+    def __init__(self, name=None, host=None, user=None, passwd=None,\r
+                 verify_ssl=False, unix_socket=None,\r
+                 logger=None):\r
+\r
+        self.host = host\r
+        self.user = user\r
+        self.passwd = passwd\r
+\r
+        self.unix_socket = unix_socket\r
+        # Local endpoint only exists when a unix socket path was supplied\r
+        self.local_ep = Server(unix_socket) \\r
+            if unix_socket is not None else None\r
+\r
+        # Credentials are embedded in the eAPI URL\r
+        s = "https://{user}:{passwd}@{host}/command-api"\r
+        self.url = s.format(user=user, passwd=passwd, host=host)\r
+        self.ep = Server(self.url)\r
+        self.verify_ssl = verify_ssl\r
+        if not self.verify_ssl:\r
+            # NOTE(review): this disables certificate verification for the\r
+            # whole process default HTTPS context, not only this connection\r
+            try:\r
+                ssl._create_default_https_context = ssl.\\r
+                    _create_unverified_context\r
+            except AttributeError:\r
+                # Old python versions do not verify certs by default\r
+                pass\r
+\r
+        self.log = logger\r
+\r
+    def _multilinestr_to_list(self, multilinestr=None):\r
+        """\r
+        Returns a list, each item being one line of a (multi)line string.\r
+        Handy for running multiple lines commands through one API call\r
+        """\r
+        mylist = \\r
+            [x.strip() for x in multilinestr.split('\n') if x.strip() != '']\r
+        return mylist\r
+\r
+    def run(self, cmds=None, timeout=10, local_run=False):\r
+        """\r
+        Runs commands through eAPI\r
+\r
+        If local_run is True eAPI call will be done using local unix socket\r
+        If local run is False eAPI call will be done using TCPIP\r
+        """\r
+        # NOTE(review): sets the process-wide default socket timeout\r
+        socket.setdefaulttimeout(timeout)\r
+\r
+        r = None\r
+\r
+        if type(cmds) is str:\r
+            run_list = self._multilinestr_to_list(cmds)\r
+\r
+        # NOTE(review): if 'cmds' is neither str nor list, 'run_list' stays\r
+        # undefined and the debug/runCmds calls below raise NameError\r
+        if type(cmds) is list:\r
+            run_list = cmds\r
+\r
+        if local_run:\r
+            ep = self.local_ep\r
+            ep_log = "local unix socket {}".format(str(self.unix_socket))\r
+        else:\r
+            ep = self.ep\r
+            ep_log = "tcpip socket {}".format(str(self.host))\r
+\r
+        self.log.debug("Calling eAPI at {} with commands {}".\r
+                       format(ep_log, str(run_list)))\r
+\r
+        try:\r
+            r = ep.runCmds(1, run_list)\r
+        except Exception as e:\r
+            self.log.error(str(e))\r
+            raise(e)\r
+\r
+        return r\r
--- /dev/null
+# -*- coding: utf-8 -*-\r
+##\r
+# Copyright 2019 Atos - CoE Telco NFV Team\r
+# All Rights Reserved.\r
+#\r
+# Contributors: Oscar Luis Peral, Atos\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License"); you may\r
+# not use this file except in compliance with the License. You may obtain\r
+# a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+# License for the specific language governing permissions and limitations\r
+# under the License.\r
+#\r
+# For those usages not covered by the Apache License, Version 2.0 please\r
+# contact with: <oscarluis.peral@atos.net>\r
+#\r
+# Neither the name of Atos nor the names of its\r
+# contributors may be used to endorse or promote products derived from\r
+# this software without specific prior written permission.\r
+#\r
+# This work has been performed in the context of Arista Telefonica OSM PoC.\r
+##\r
+import time\r
+\r
+\r
+class AristaCVPTask:\r
+    """Helper around the CloudVision (CVP) task API: queries, executes and\r
+    cancels the change-control tasks that CVP creates when a ConfigLet is\r
+    (de)assigned to a device.\r
+    """\r
+\r
+    def __init__(self, cvpClientApi):\r
+        # cvpClientApi: cvprac API object exposing the task methods used below\r
+        self.cvpClientApi = cvpClientApi\r
+\r
+    def __get_id(self, task):\r
+        return task.get("workOrderId")\r
+\r
+    def __get_state(self, task):\r
+        return task.get("workOrderUserDefinedStatus")\r
+\r
+    def __execute_task(self, task_id):\r
+        return self.cvpClientApi.execute_task(task_id)\r
+\r
+    def __cancel_task(self, task_id):\r
+        return self.cvpClientApi.cancel_task(task_id)\r
+\r
+    def __apply_state(self, task, state):\r
+        # Tag the task as handled by OSM, then execute or cancel it\r
+        t_id = self.__get_id(task)\r
+        self.cvpClientApi.add_note_to_task(t_id, "Executed by OSM")\r
+        if state == "executed":\r
+            return self.__execute_task(t_id)\r
+        elif state == "cancelled":\r
+            return self.__cancel_task(t_id)\r
+\r
+    def __actionable(self, state):\r
+        # Only 'Pending' tasks can be executed or cancelled\r
+        return state in ["Pending"]\r
+\r
+    def __terminal(self, state):\r
+        return state in ["Completed", "Cancelled"]\r
+\r
+    def __state_is_different(self, task, target):\r
+        return self.__get_state(task) != target\r
+\r
+    def update_all_tasks(self, data):\r
+        # Re-read every task in 'data' (dict keyed by task id) from CVP and\r
+        # return a new dict with the refreshed task objects\r
+        new_data = dict()\r
+        for task_id in data.keys():\r
+            res = self.cvpClientApi.get_task_by_id(task_id)\r
+            new_data[task_id] = res\r
+        return new_data\r
+\r
+    def get_pending_tasks(self):\r
+        return self.cvpClientApi.get_tasks_by_status('Pending')\r
+\r
+    def get_pending_tasks_old(self):\r
+        # Older variant: returns pending tasks reduced to a fixed field subset\r
+        taskList = []\r
+        tasksField = {'workOrderId': 'workOrderId',\r
+                      'workOrderState': 'workOrderState',\r
+                      'currentTaskName': 'currentTaskName',\r
+                      'description': 'description',\r
+                      'workOrderUserDefinedStatus':\r
+                      'workOrderUserDefinedStatus',\r
+                      'note': 'note',\r
+                      'taskStatus': 'taskStatus',\r
+                      'workOrderDetails': 'workOrderDetails'}\r
+        tasks = self.cvpClientApi.get_tasks_by_status('Pending')\r
+        # Reduce task data to required fields\r
+        for task in tasks:\r
+            taskFacts = {}\r
+            for field in task.keys():\r
+                if field in tasksField:\r
+                    taskFacts[tasksField[field]] = task[field]\r
+            taskList.append(taskFacts)\r
+        return taskList\r
+\r
+    def task_action(self, tasks, wait, state):\r
+        # Apply 'state' ("executed"/"cancelled") to every actionable task in\r
+        # 'tasks', then optionally poll up to 'wait' seconds until all touched\r
+        # tasks reach a terminal state. Returns (changed, data, warnings).\r
+        changed = False\r
+        data = dict()\r
+        warnings = list()\r
+\r
+        at = [t for t in tasks if self.__actionable(self.__get_state(t))]\r
+        actionable_tasks = at\r
+\r
+        if len(actionable_tasks) == 0:\r
+            warnings.append("No actionable tasks found on CVP")\r
+            return changed, data, warnings\r
+\r
+        for task in actionable_tasks:\r
+            if self.__state_is_different(task, state):\r
+                self.__apply_state(task, state)\r
+                changed = True\r
+                data[self.__get_id(task)] = task\r
+\r
+        if wait == 0:\r
+            return changed, data, warnings\r
+\r
+        # Poll once per second until all tasks are terminal or 'wait' expires\r
+        start = time.time()\r
+        now = time.time()\r
+        while (now - start) < wait:\r
+            data = self.update_all_tasks(data)\r
+            if all([self.__terminal(self.__get_state(t)) for t in data.values()]):\r
+                break\r
+            time.sleep(1)\r
+            now = time.time()\r
+\r
+        if wait:\r
+            for i, task in data.items():\r
+                if not self.__terminal(self.__get_state(task)):\r
+                    warnings.append("Task {} has not completed in {} seconds".\r
+                                    format(i, wait))\r
+\r
+        return changed, data, warnings\r
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2019 Atos - CoE Telco NFV Team
+# All Rights Reserved.
+#
+# Contributors: Oscar Luis Peral, Atos
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <oscarluis.peral@atos.net>
+#
+# Neither the name of Atos nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of Arista Telefonica OSM PoC.
+##
+from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError
+import re
+import socket
+# Required by compare function
+import difflib
+# Library that uses Levenshtein Distance to calculate the differences
+# between strings.
+# from fuzzywuzzy import fuzz
+
+import logging
+import uuid
+from enum import Enum
+from requests import RequestException
+
+from cvprac.cvp_client import CvpClient
+from cvprac.cvp_api import CvpApi
+from cvprac.cvp_client_errors import CvpLoginError, CvpSessionLogOutError, CvpApiError
+from cvprac import __version__ as cvprac_version
+
+from osm_rosdn_arista.aristaSwitch import AristaSwitch
+from osm_rosdn_arista.aristaConfigLet import AristaSDNConfigLet
+from osm_rosdn_arista.aristaTask import AristaCVPTask
+
+
+class SdnError(Enum):
+ UNREACHABLE = 'Unable to reach the WIM.',
+ VLAN_INCONSISTENT = \
+ 'VLAN value inconsistent between the connection points',
+ VLAN_NOT_PROVIDED = 'VLAN value not provided',
+ CONNECTION_POINTS_SIZE = \
+ 'Unexpected number of connection points: 2 expected.',
+ ENCAPSULATION_TYPE = \
+ 'Unexpected service_endpoint_encapsulation_type. \
+ Only "dotq1" is accepted.',
+ BANDWIDTH = 'Unable to get the bandwidth.',
+ STATUS = 'Unable to get the status for the service.',
+ DELETE = 'Unable to delete service.',
+ CLEAR_ALL = 'Unable to clear all the services',
+ UNKNOWN_ACTION = 'Unknown action invoked.',
+ BACKUP = 'Unable to get the backup parameter.',
+ UNSUPPORTED_FEATURE = "Unsupported feature",
+ UNAUTHORIZED = "Failed while authenticating",
+ INTERNAL_ERROR = "Internal error"
+
+
+class AristaSdnConnector(SdnConnectorBase):
+ """Arista class for the SDN connectors
+
+ Arguments:
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ config
+ The arguments of the constructor are converted to object attributes.
+ An extra property, ``service_endpoint_mapping`` is created from ``config``.
+
+ The access to Arista CloudVision is made through the API defined in
+ https://github.com/aristanetworks/cvprac
+    A connectivity service consists of creating a VLAN and associating the interfaces
+    of the connection points' MAC addresses to this VLAN in all the switches of the topology;
+    BGP is also configured for this VLAN.
+
+ The Arista Cloud Vision API workflow is the following
+ -- The switch configuration is defined as a set of switch configuration commands,
+ what is called 'ConfigLet'
+ -- The ConfigLet is associated to the device (leaf switch)
+ -- Automatically a task is associated to this activity for change control, the task
+ in this stage is in 'Pending' state
+ -- The task will be executed so that the configuration is applied to the switch.
+ -- The service information is saved in the response of the creation call
+ -- All created services identification is stored in a generic ConfigLet 'OSM_metadata'
+ to keep track of the managed resources by OSM in the Arista deployment.
+ """
+ __supported_service_types = ["ELINE (L2)", "ELINE", "ELAN"]
+ __service_types_ELAN = "ELAN"
+ __service_types_ELINE = "ELINE"
+ __ELINE_num_connection_points = 2
+ __supported_service_types = ["ELINE", "ELAN"]
+ __supported_encapsulation_types = ["dot1q"]
+ __WIM_LOGGER = 'openmano.sdnconn.arista'
+ __SERVICE_ENDPOINT_MAPPING = 'service_endpoint_mapping'
+ __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
+ __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
+ __BACKUP_PARAM = "backup"
+ __BANDWIDTH_PARAM = "bandwidth"
+ __SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
+ __MAC_PARAM = "mac"
+ __WAN_SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
+ __WAN_MAPPING_INFO_PARAM = "service_mapping_info"
+ __DEVICE_ID_PARAM = "device_id"
+ __DEVICE_INTERFACE_ID_PARAM = "device_interface_id"
+ __SW_ID_PARAM = "switch_dpid"
+ __SW_PORT_PARAM = "switch_port"
+ __VLAN_PARAM = "vlan"
+ __VNI_PARAM = "vni"
+ __SEPARATOR = '_'
+ __MANAGED_BY_OSM = '## Managed by OSM '
+ __OSM_PREFIX = "osm_"
+ __OSM_METADATA = "OSM_metadata"
+ __METADATA_PREFIX = '!## Service'
+ __EXC_TASK_EXEC_WAIT = 10
+ __ROLLB_TASK_EXEC_WAIT = 10
+ __API_REQUEST_TOUT = 60
+ __SWITCH_TAG_NAME = 'topology_type'
+ __SWITCH_TAG_VALUE = 'leaf'
+
+
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        """Initializes the Arista SDN connector and discovers the switches.
+
+        :param wim: (dict). Contains among others 'wim_url'
+        :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name',
+            'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'.
+        :param config: (dict or None): Particular information of plugin. These keys if present have a common meaning:
+            'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed.
+            'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is:
+                KEY                     meaning for WIM             meaning for SDN assist
+                --------                --------                    --------
+                device_id               pop_switch_dpid             compute_id
+                device_interface_id     pop_switch_port             compute_pci_address
+                service_endpoint_id     wan_service_endpoint_id    SDN_service_endpoint_id
+                service_mapping_info    wan_service_mapping_info   SDN_service_mapping_info
+                    contains extra information if needed. Text in Yaml format
+                switch_dpid             wan_switch_dpid             SDN_switch_dpid
+                switch_port             wan_switch_port             SDN_switch_port
+                datacenter_id           vim_account                 vim_account
+                id: (internal, do not use)
+                wim_id: (internal, do not use)
+        :param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used.
+        """
+        # Sanity-check pattern applied to 'wim_url' and switch addresses
+        self.__regex = re.compile(
+            r'^(?:http|ftp)s?://'  # http:// or https://
+            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
+            r'localhost|'  # localhost...
+            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
+            r'(?::\d+)?', re.IGNORECASE)  # optional port
+        self.raiseException = True
+        self.logger = logger or logging.getLogger(self.__WIM_LOGGER)
+        super().__init__(wim, wim_account, config, self.logger)
+        self.__wim = wim
+        self.__wim_account = wim_account
+        self.__config = config
+        if self.is_valid_destination(self.__wim.get("wim_url")):
+            self.__wim_url = self.__wim.get("wim_url")
+        else:
+            raise SdnConnectorError(message='Invalid wim_url value',
+                                    http_code=500)
+        self.__user = wim_account.get("user")
+        self.__passwd = wim_account.get("password")
+        self.client = None
+        self.cvp_inventory = None
+        self.cvp_tags = None
+        # Strip 'passwd' entries from config before logging it
+        self.logger.debug("Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".
+                          format(wim, cvprac_version, self.__user,
+                                 self.delete_keys_from_dict(config, ('passwd',))))
+        self.allDeviceFacts = []
+        self.clC = AristaSDNConfigLet()
+        self.taskC = None
+        # Discover/complete the set of leaf switches to manage
+        self.__load_switches()
+
+    def __load_switches(self):
+        """ Retrieves the switches to configure in the following order
+        1.  from incoming configuration:
+        1.1 using port mapping
+              using user and password from WIM
+              retrieving Lo0 and AS from switch
+        1.2 from 'switches' parameter,
+              if any parameter is not present
+                Lo0 and AS - it will be requested to the switch
+                usr and pass - from WIM configuration
+        2.  Looking in the CloudVision inventory if not in configuration parameters
+        2.1 using the switches with the topology_type tag set to 'leaf'
+        2.2 using the switches whose parent container is 'leaf'
+        2.3 using the switches whose hostname contains 'leaf'
+
+        All the search methods will be used
+        """
+        self.switches = {}
+        # 1.1: switches referenced by the port mapping
+        if self.__config and self.__config.get(self.__SERVICE_ENDPOINT_MAPPING):
+            for port in self.__config.get(self.__SERVICE_ENDPOINT_MAPPING):
+                switch_dpid = port.get(self.__SW_ID_PARAM)
+                if switch_dpid and switch_dpid not in self.switches:
+                    self.switches[switch_dpid] = {'passwd': self.__passwd,
+                                                  'ip': None,
+                                                  'usr': self.__user,
+                                                  'lo0': None,
+                                                  'AS': None}
+
+        # 1.2: switches given explicitly in the plugin configuration
+        if self.__config and self.__config.get('switches'):
+            # Not directly from json, complete one by one
+            config_switches = self.__config.get('switches')
+            for cs, cs_content in config_switches.items():
+                if cs not in self.switches:
+                    self.switches[cs] = {'passwd': self.__passwd, 'ip': None, 'usr': self.__user, 'lo0': None,'AS': None}
+                if cs_content:
+                    self.switches[cs].update(cs_content)
+
+        # Load the rest of the data
+        if self.client is None:
+            self.client = self.__connect()
+        self.__load_inventory()
+        # 2: fall back to CVP inventory discovery when nothing was configured
+        if not self.switches:
+            self.__get_tags(self.__SWITCH_TAG_NAME, self.__SWITCH_TAG_VALUE)
+            for device in self.allDeviceFacts:
+                # get the switches whose container parent is 'leaf',
+                # or the topology_tag is 'leaf'
+                # or the hostname contains 'leaf'
+                if ((device['serialNumber'] in self.cvp_tags) or
+                        (self.__SWITCH_TAG_VALUE in device['containerName'].lower()) or
+                        (self.__SWITCH_TAG_VALUE in device['hostname'].lower())):
+                    if not self.switches.get(device['hostname']):
+                        switch_data = {'passwd': self.__passwd,
+                                       'ip': device['ipAddress'],
+                                       'usr': self.__user,
+                                       'lo0': None,
+                                       'AS': None}
+                        self.switches[device['hostname']] = switch_data
+        if len(self.switches) == 0:
+            self.logger.error("Unable to load Leaf switches from CVP")
+            return
+
+        # self.s_api are switch objects, one for each switch in self.switches,
+        # used to make eAPI calls by using switch.py module
+        self.s_api = {}
+        for s in self.switches:
+            if not self.switches[s].get('ip'):
+                # Resolve the management IP from the CVP inventory
+                for device in self.allDeviceFacts:
+                    if device['hostname'] == s:
+                        self.switches[s]['ip'] = device['ipAddress']
+            if self.is_valid_destination(self.switches[s].get('ip')):
+                self.s_api[s] = AristaSwitch(host=self.switches[s]['ip'],
+                                             user=self.switches[s]['usr'],
+                                             passwd=self.switches[s]['passwd'],
+                                             logger=self.logger)
+            # Each switch has a different loopback address,
+            # so it's a different configLet
+            if not self.switches[s].get('lo0'):
+                inf = self.__get_switch_interface_ip(s, 'Loopback0')
+                self.switches[s]["lo0"] = inf.split('/')[0]
+            if not self.switches[s].get('AS'):
+                self.switches[s]["AS"] = self.__get_switch_asn(s)
+        self.logger.debug("Using Arista Leaf switches: {}".format(
+            self.delete_keys_from_dict(self.switches, ('passwd',))))
+
+    def __lldp_find_neighbor(self, tlv_name=None, tlv_value=None):
+        """Returns a list of dicts where a matching LLDP neighbor has been found
+        Each dict has:
+            switch -> switch name
+            interface -> switch interface
+        """
+        r = []
+        lldp_info = {}
+
+        # Get LLDP info from each switch
+        for s in self.s_api:
+            result = self.s_api[s].run("show lldp neighbors detail")
+            lldp_info[s] = result[0]["lldpNeighbors"]
+            # Look LLDP match on each interface
+            # Note that eAPI returns [] for an interface with no LLDP neighbors
+            # in the corresponding interface lldpNeighborInfo field
+            for interface in lldp_info[s]:
+                if lldp_info[s][interface]["lldpNeighborInfo"]:
+                    # Only the first neighbor entry per interface is inspected
+                    lldp_nInf = lldp_info[s][interface]["lldpNeighborInfo"][0]
+                    if tlv_name in lldp_nInf:
+                        if lldp_nInf[tlv_name] == tlv_value:
+                            r.append({"name": s, "interface": interface})
+
+        return r
+
+    def __get_switch_asn(self, switch):
+        """Returns switch ASN in default VRF
+        """
+        # eAPI 'show ip bgp summary' exposes the ASN per VRF
+        bgp_info = self.s_api[switch].run("show ip bgp summary")[0]
+        return(bgp_info["vrfs"]["default"]["asn"])
+
+    def __get_switch_po(self, switch, interface=None):
+        """Returns Port-Channels for a given interface
+        If interface is None returns a list with all PO interfaces
+        Note that if specified, interface should be exact name
+        for instance: Ethernet3 and not e3 eth3 and so on
+        """
+        po_inf = self.s_api[switch].run("show port-channel")[0]["portChannels"]
+
+        if interface:
+            # Port-Channels whose active ports include the given interface
+            r = [x for x in po_inf if interface in po_inf[x]["activePorts"]]
+        else:
+            # NOTE(review): this branch returns the full dict, while the
+            # branch above returns a list — callers must handle both shapes
+            r = po_inf
+
+        return r
+
+    def __get_switch_interface_ip(self, switch, interface=None):
+        """Returns interface primary ip in CIDR notation ("a.b.c.d/len")
+        interface should be exact name
+        for instance: Ethernet3 and not ethernet 3, e3 eth3 and so on
+        """
+        cmd = "show ip interface {}".format(interface)
+        ip_info = self.s_api[switch].run(cmd)[0]["interfaces"][interface]
+
+        ip = ip_info["interfaceAddress"]["primaryIp"]["address"]
+        mask = ip_info["interfaceAddress"]["primaryIp"]["maskLen"]
+
+        return "{}/{}".format(ip, mask)
+
+    def __check_service(self, service_type, connection_points,
+                        check_vlan=True, check_num_cp=True, kwargs=None):
+        """ Reviews the connection points elements looking for semantic errors in the incoming data
+        Raises Exception on the first violation found; 'kwargs' is currently
+        only used by the commented-out bandwidth/backup checks below.
+        """
+        if service_type not in self.__supported_service_types:
+            raise Exception("The service '{}' is not supported. Only '{}' are accepted".format(
+                service_type,
+                self.__supported_service_types))
+
+        if check_num_cp:
+            if (len(connection_points) < 2):
+                raise Exception(SdnError.CONNECTION_POINTS_SIZE)
+            # ELINE services must have exactly 2 connection points
+            if ((len(connection_points) != self.__ELINE_num_connection_points) and
+                    (service_type == self.__service_types_ELINE)):
+                raise Exception(SdnError.CONNECTION_POINTS_SIZE)
+
+        if check_vlan:
+            # All connection points must agree on one VLAN id that is not
+            # already used by another service
+            vlan_id = ''
+            for cp in connection_points:
+                enc_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM)
+                if (enc_type and
+                        enc_type not in self.__supported_encapsulation_types):
+                    raise Exception(SdnError.ENCAPSULATION_TYPE)
+                encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM)
+                cp_vlan_id = str(encap_info.get(self.__VLAN_PARAM))
+                if cp_vlan_id:
+                    if not vlan_id:
+                        vlan_id = cp_vlan_id
+                    elif vlan_id != cp_vlan_id:
+                        raise Exception(SdnError.VLAN_INCONSISTENT)
+            if not vlan_id:
+                raise Exception(SdnError.VLAN_NOT_PROVIDED)
+            if vlan_id in self.__get_srvVLANs():
+                raise Exception('VLAN {} already assigned to a connectivity service'.format(vlan_id))
+
+        # Commented out for as long as parameter isn't implemented
+        # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
+        # if not isinstance(bandwidth, int):
+        #     self.__exception(SdnError.BANDWIDTH, http_code=400)
+
+        # Commented out for as long as parameter isn't implemented
+        # backup = kwargs.get(self.__BACKUP_PARAM)
+        # if not isinstance(backup, bool):
+        #     self.__exception(SdnError.BACKUP, http_code=400)
+
+    def check_credentials(self):
+        """Retrieves the CloudVision version information, as the easiest way
+        for testing the access to CloudVision API
+
+        Raises SdnConnectorError with http_code 401 on login failure and 500
+        on any other error; the cached client is dropped in both cases.
+        """
+        try:
+            if self.client is None:
+                self.client = self.__connect()
+            result = self.client.api.get_cvp_info()
+            self.logger.debug(result)
+        except CvpLoginError as e:
+            # Invalid credentials: drop the cached client and report 401
+            self.logger.info(str(e))
+            self.client = None
+            raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+                                    http_code=401) from e
+        except Exception as ex:
+            self.client = None
+            self.logger.error(str(ex))
+            raise SdnConnectorError(message=SdnError.INTERNAL_ERROR,
+                                    http_code=500) from ex
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service established
+ Arguments:
+ service_uuid (str): UUID of the connectivity service
+ conn_info (dict or None): Information returned by the connector
+ during the service creation/edition and subsequently stored in
+ the database.
+
+ Returns:
+ dict: JSON/YAML-serializable dict that contains a mandatory key
+ ``sdn_status`` associated with one of the following values::
+
+ {'sdn_status': 'ACTIVE'}
+ # The service is up and running.
+
+ {'sdn_status': 'INACTIVE'}
+ # The service was created, but the connector
+ # cannot determine yet if connectivity exists
+ # (ideally, the caller needs to wait and check again).
+
+ {'sdn_status': 'DOWN'}
+ # Connection was previously established,
+ # but an error/failure was detected.
+
+ {'sdn_status': 'ERROR'}
+ # An error occurred when trying to create the service/
+ # establish the connectivity.
+
+ {'sdn_status': 'BUILD'}
+ # Still trying to create the service, the caller
+ # needs to wait and check again.
+
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
+ keys can be used to provide additional status explanation or
+ new information available for the connectivity service.
+
+ Raises:
+ SdnConnectorError: 401 on CloudVision login failure, 500 on
+ missing parameters or any other unexpected error.
+ """
+ try:
+ self.logger.debug("invoked get_connectivity_service_status '{}'".format(service_uuid))
+ if not service_uuid:
+ raise SdnConnectorError(message='No connection service UUID',
+ http_code=500)
+
+ self.__get_Connection()
+ if conn_info is None:
+ raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid),
+ http_code=500)
+
+ # Reuse the per-switch configLets stored at creation time when
+ # present; otherwise __get_serviceData rebuilds them from CloudVision
+ if 'configLetPerSwitch' in conn_info.keys():
+ c_info = conn_info
+ else:
+ c_info = None
+ cls_perSw = self.__get_serviceData(service_uuid,
+ conn_info['service_type'],
+ conn_info['vlan_id'],
+ c_info)
+
+ # Aggregate the CloudVision task state of every OSM-managed
+ # configLet note into a single service-level status
+ t_isCancelled = False
+ t_isFailed = False
+ t_isPending = False
+ failed_switches = []
+ for s in self.s_api:
+ if (len(cls_perSw[s]) > 0):
+ for cl in cls_perSw[s]:
+ # Fix 1030 SDN-ARISTA Key error note when deploy a NS
+ # Added protection to check that 'note' exists and additionally
+ # verify that it is managed by OSM
+ if (not cls_perSw[s][0]['config'] or
+ not cl.get('note') or
+ self.__MANAGED_BY_OSM not in cl['note']):
+ continue
+ # The task id was embedded in the configLet note as
+ # "<MANAGED_BY_OSM><SEPARATOR><task_id><SEPARATOR>##"
+ note = cl['note']
+ t_id = note.split(self.__SEPARATOR)[1]
+ result = self.client.api.get_task_by_id(t_id)
+ if result['workOrderUserDefinedStatus'] == 'Completed':
+ continue
+ elif result['workOrderUserDefinedStatus'] == 'Cancelled':
+ t_isCancelled = True
+ elif result['workOrderUserDefinedStatus'] == 'Failed':
+ t_isFailed = True
+ else:
+ t_isPending = True
+ # NOTE(review): switch is recorded for any non-Completed
+ # task (cancelled, failed or still pending) — confirm that
+ # pending switches are intended in 'failed_switches'
+ failed_switches.append(s)
+ # Priority when mixed states are found: DOWN > ERROR > BUILD
+ if t_isCancelled:
+ error_msg = 'Some works were cancelled in switches: {}'.format(str(failed_switches))
+ sdn_status = 'DOWN'
+ elif t_isFailed:
+ error_msg = 'Some works failed in switches: {}'.format(str(failed_switches))
+ sdn_status = 'ERROR'
+ elif t_isPending:
+ error_msg = 'Some works are still under execution in switches: {}'.format(str(failed_switches))
+ sdn_status = 'BUILD'
+ else:
+ error_msg = ''
+ sdn_status = 'ACTIVE'
+ sdn_info = ''
+ return {'sdn_status': sdn_status,
+ 'error_msg': error_msg,
+ 'sdn_info': sdn_info}
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except Exception as ex:
+ self.client = None
+ self.logger.error(str(ex), exc_info=True)
+ raise SdnConnectorError(message=str(ex),
+ http_code=500) from ex
+
+ def create_connectivity_service(self, service_type, connection_points,
+ **kwargs):
+ """Establish SDN/WAN connectivity between the endpoints
+ :param service_type:
+ (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
+ :param connection_points: (list): each point corresponds to
+ an entry point to be connected. For WIM: from the DC
+ to the transport network.
+ For SDN: Compute/PCI to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Each item of the list is a dict with:
+ "service_endpoint_id": (str)(uuid) Same meaning that for
+ 'service_endpoint_mapping' (see __init__)
+ In case the config attribute mapping_not_needed is True,
+ this value is not relevant. In this case
+ it will contain the string "device_id:device_interface_id"
+ "service_endpoint_encapsulation_type": None, "dot1q", ...
+ "service_endpoint_encapsulation_info": (dict) with:
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)] (present if
+ encapsulation is vxlan)
+ "mac": ...
+ "device_id": ..., same meaning that for
+ 'service_endpoint_mapping' (see __init__)
+ "device_interface_id": same meaning that for
+ 'service_endpoint_mapping' (see __init__)
+ "switch_dpid": ..., present if mapping has been found
+ for this device_id,device_interface_id
+ "switch_port": ... present if mapping has been found
+ for this device_id,device_interface_id
+ "service_mapping_info": present if mapping has
+ been found for this device_id,device_interface_id
+ :param kwargs: For future versions:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+ Other QoS might be passed as keyword arguments.
+ :return: tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established
+ connectivity service
+ - *conn_info* (dict or None): Information to be
+ stored at the database (or ``None``).
+ This information will be provided to the
+ :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ :raises: SdnConnectorError: In case of error. Nothing should be
+ created in this case.
+ Provide the parameter http_code
+ """
+ try:
+ self.logger.debug("invoked create_connectivity_service '{}' ports: {}".
+ format(service_type, connection_points))
+ self.__get_Connection()
+ # validate inputs before touching CloudVision
+ self.__check_service(service_type,
+ connection_points,
+ check_vlan=True,
+ kwargs=kwargs)
+ # the service id is generated locally, not by CloudVision
+ service_uuid = str(uuid.uuid4())
+
+ self.logger.info("Service with uuid {} created.".
+ format(service_uuid))
+ s_uid, s_connInf = self.__processConnection(
+ service_uuid,
+ service_type,
+ connection_points,
+ kwargs)
+ try:
+ self.__addMetadata(s_uid, service_type, s_connInf['vlan_id'])
+ except Exception as e:
+ # metadata bookkeeping is best-effort: a failure here must not
+ # undo the connectivity service that was just created
+ pass
+
+ return (s_uid, s_connInf)
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except SdnConnectorError as sde:
+ raise sde
+ except Exception as ex:
+ self.client = None
+ self.logger.error(str(ex), exc_info=True)
+ # raiseException is a debug aid: propagate the raw exception
+ if self.raiseException:
+ raise ex
+ raise SdnConnectorError(message=str(ex),
+ http_code=500) from ex
+
+ def __processConnection(self,
+ service_uuid,
+ service_type,
+ connection_points,
+ kwargs):
+ """
+ Invoked from creation and edit methods
+
+ Process the connection points array,
+ creating a set of configuration per switch where it has to be applied
+ for creating the configuration, the switches have to be queried for obtaining:
+ - the loopback address
+ - the BGP ASN (autonomous system number)
+ - the interface name of the MAC address to add in the connectivity service
+ Once the new configuration is ready, the __updateConnection method is invoked for applying the changes
+
+ :return: tuple (service_uuid, conn_info dict) where conn_info holds the
+ per-switch configLets and the per-switch configured/modified flags
+ :raises: the original exception from any failing step, after logging it
+ """
+ try:
+ cls_perSw = {}
+ cls_cp = {}
+ cl_bgp = {}
+ for s in self.s_api:
+ cls_perSw[s] = []
+ cls_cp[s] = []
+ vlan_processed = False
+ vlan_id = ''
+ i = 0
+ processed_connection_points = []
+ for cp in connection_points:
+ i += 1
+ encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM)
+ # the VLAN/VNI configLet is generated once, from the first
+ # connection point that carries a vlan id
+ if not vlan_processed:
+ vlan_id = str(encap_info.get(self.__VLAN_PARAM))
+ if not vlan_id:
+ continue
+ vni_id = encap_info.get(self.__VNI_PARAM)
+ if not vni_id:
+ # derive a VNI from the VLAN when none is given
+ vni_id = str(10000 + int(vlan_id))
+
+ if service_type == self.__service_types_ELAN:
+ cl_vlan = self.clC.getElan_vlan(service_uuid,
+ vlan_id,
+ vni_id)
+ else:
+ cl_vlan = self.clC.getEline_vlan(service_uuid,
+ vlan_id,
+ vni_id)
+ vlan_processed = True
+
+ encap_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM)
+ switch_id = encap_info.get(self.__SW_ID_PARAM)
+ if not switch_id:
+ # no switch given: locate it through LLDP by the endpoint MAC
+ point_mac = encap_info.get(self.__MAC_PARAM)
+ switches = self.__lldp_find_neighbor("chassisId", point_mac)
+ self.logger.debug("Found connection point for MAC {}: {}".
+ format(point_mac, switches))
+ else:
+ interface = encap_info.get(self.__SW_PORT_PARAM)
+ switches = [{'name': switch_id, 'interface': interface}]
+
+ # NOTE(review): if switch_id was provided and 'switches' is empty,
+ # 'point_mac' is unbound here and this raise would fail with
+ # NameError — confirm the empty case is only reachable via LLDP
+ if len(switches) == 0:
+ raise SdnConnectorError(message="Connection point MAC address {} not found in the switches".format(point_mac),
+ http_code=406)
+
+ # remove those connections that are equal. This happens when several sriovs are located in the same
+ # compute node interface, that is, in the same switch and interface
+ switches = [x for x in switches if x not in processed_connection_points]
+ if not switches:
+ continue
+ processed_connection_points += switches
+ for switch in switches:
+ if not switch_id:
+ # prefer the port-channel over the physical interface
+ # when the port is part of one
+ port_channel = self.__get_switch_po(switch['name'],
+ switch['interface'])
+ if len(port_channel) > 0:
+ interface = port_channel[0]
+ else:
+ interface = switch['interface']
+ if not interface:
+ raise SdnConnectorError(message="Connection point switch port empty for switch_dpid {}".format(switch_id),
+ http_code=406)
+ # it should be only one switch where the mac is attached
+ if encap_type == 'dot1q':
+ # SRIOV configLet for Leaf switch mac's attached to
+ if service_type == self.__service_types_ELAN:
+ cl_encap = self.clC.getElan_sriov(service_uuid, interface, vlan_id, i)
+ else:
+ cl_encap = self.clC.getEline_sriov(service_uuid, interface, vlan_id, i)
+ elif not encap_type:
+ # PT configLet for Leaf switch attached to the mac
+ if service_type == self.__service_types_ELAN:
+ cl_encap = self.clC.getElan_passthrough(service_uuid,
+ interface,
+ vlan_id, i)
+ else:
+ cl_encap = self.clC.getEline_passthrough(service_uuid,
+ interface,
+ vlan_id, i)
+ # accumulate per-switch connection-point snippets
+ if cls_cp.get(switch['name']):
+ cls_cp[switch['name']] = str(cls_cp[switch['name']]) + cl_encap
+ else:
+ cls_cp[switch['name']] = cl_encap
+
+ # at least 1 connection point has to be received
+ if not vlan_processed:
+ raise SdnConnectorError(message=SdnError.UNSUPPORTED_FEATURE,
+ http_code=406)
+
+ for s in self.s_api:
+ # for cl in cp_configLets:
+ # configLet name encodes switch, service type, vlan and uuid so
+ # it can later be located again (see __get_serviceConfigLets)
+ cl_name = (self.__OSM_PREFIX +
+ s +
+ self.__SEPARATOR + service_type + str(vlan_id) +
+ self.__SEPARATOR + service_uuid)
+ # apply VLAN and BGP configLet to all Leaf switches
+ if service_type == self.__service_types_ELAN:
+ cl_bgp[s] = self.clC.getElan_bgp(service_uuid,
+ vlan_id,
+ vni_id,
+ self.switches[s]['lo0'],
+ self.switches[s]['AS'])
+ else:
+ cl_bgp[s] = self.clC.getEline_bgp(service_uuid,
+ vlan_id,
+ vni_id,
+ self.switches[s]['lo0'],
+ self.switches[s]['AS'])
+
+ # switches without any connection point get an empty config,
+ # which __updateConnection interprets as "remove from switch"
+ if not cls_cp.get(s):
+ cl_config = ''
+ else:
+ cl_config = str(cl_vlan) + str(cl_bgp[s]) + str(cls_cp[s])
+
+ cls_perSw[s] = [{'name': cl_name, 'config': cl_config}]
+
+ allLeafConfigured, allLeafModified = self.__updateConnection(cls_perSw)
+
+ conn_info = {
+ "uuid": service_uuid,
+ "status": "BUILD",
+ "service_type": service_type,
+ "vlan_id": vlan_id,
+ "connection_points": connection_points,
+ "configLetPerSwitch": cls_perSw,
+ 'allLeafConfigured': allLeafConfigured,
+ 'allLeafModified': allLeafModified}
+
+ return service_uuid, conn_info
+ except Exception as ex:
+ self.logger.debug("Exception processing connection {}: {}".
+ format(service_uuid, str(ex)))
+ raise ex
+
+ def __updateConnection(self, cls_perSw):
+ """ Invoked in the creation and modification
+
+ checks if the new connection points config is:
+ - already in the Cloud Vision, the configLet is modified, and applied to the switch,
+ executing the corresponding task
+ - if it has to be removed:
+ then configuration has to be removed from the switch executing the corresponding task,
+ before trying to remove the configuration
+ - created, the configuration set is created, associated to the switch, and the associated
+ task to the configLet modification executed
+ In case of any error, rollback is executed, removing the created elements, and restoring to the
+ previous state.
+
+ :param cls_perSw: dict switch-name -> [{'name':..., 'config':...}]
+ :return: tuple (allLeafConfigured, allLeafModified), per-switch bool dicts
+ used by __rollbackConnection to know what must be undone
+ """
+ try:
+ allLeafConfigured = {}
+ allLeafModified = {}
+
+ for s in self.s_api:
+ allLeafConfigured[s] = False
+ allLeafModified[s] = False
+ cl_toDelete = []
+ for s in self.s_api:
+ toDelete_in_cvp = False
+ if not (cls_perSw.get(s) and cls_perSw[s][0].get('config')):
+ # when there is no configuration, means that there is no interface
+ # in the switch to be connected, so the configLet has to be removed from CloudVision
+ # after removing the ConfigLet from the switch if it was already there
+
+ # get config let name and key
+ # NOTE(review): if cls_perSw[s] is an empty list, cl[0] below
+ # raises IndexError — confirm callers always provide one entry
+ cl = cls_perSw[s]
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(cl[0]['name'])
+ # remove configLet
+ cl_toDelete.append(cvp_cl)
+ cl[0] = cvp_cl
+ toDelete_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ # nothing stored for this switch: nothing to remove
+ continue
+ else:
+ raise error
+ # remove configLet from device
+ else:
+ # create or update the configLet in CloudVision first
+ res = self.__configlet_modify(cls_perSw[s])
+ allLeafConfigured[s] = res[0]
+ if not allLeafConfigured[s]:
+ continue
+ cl = cls_perSw[s]
+ # then attach it to (or detach it from) the device
+ res = self.__device_modify(
+ device_to_update=s,
+ new_configlets=cl,
+ delete=toDelete_in_cvp)
+ if "errorMessage" in str(res):
+ raise Exception(str(res))
+ self.logger.info("Device {} modify result {}".format(s, res))
+ for t_id in res[1]['tasks']:
+ if not toDelete_in_cvp:
+ # tag the configLet with the task id so the status
+ # polling can find it later (see get_connectivity_service_status)
+ note_msg = "{}{}{}{}##".format(self.__MANAGED_BY_OSM,
+ self.__SEPARATOR,
+ t_id,
+ self.__SEPARATOR)
+ self.client.api.add_note_to_configlet(
+ cls_perSw[s][0]['key'],
+ note_msg)
+ cls_perSw[s][0]['note'] = note_msg
+ tasks = { t_id : {'workOrderId': t_id} }
+ self.__exec_task(tasks, self.__EXC_TASK_EXEC_WAIT)
+ # with just one configLet assigned to a device,
+ # delete all if there are errors in next loops
+ if not toDelete_in_cvp:
+ allLeafModified[s] = True
+ # configLets detached from their devices can now be deleted from CVP
+ if len(cl_toDelete) > 0:
+ self.__configlet_modify(cl_toDelete, delete=True)
+
+ return allLeafConfigured, allLeafModified
+ except Exception as ex:
+ try:
+ self.__rollbackConnection(cls_perSw,
+ allLeafConfigured,
+ allLeafModified)
+ except Exception as e:
+ self.logger.error("Exception rolling back in updating connection: {}".
+ format(e), exc_info=True)
+ # re-raise the original failure, not the rollback one
+ raise ex
+
+ def __rollbackConnection(self,
+ cls_perSw,
+ allLeafConfigured,
+ allLeafModified):
+ """ Removes the given configLet from the devices and then remove the configLets
+
+ :param cls_perSw: dict switch-name -> list of configLets applied
+ :param allLeafConfigured: dict switch-name -> bool, configLet exists in CVP
+ :param allLeafModified: dict switch-name -> bool, configLet attached to device
+ """
+ # first detach from devices (only where the attach succeeded)
+ for s in self.s_api:
+ if allLeafModified[s]:
+ try:
+ res = self.__device_modify(
+ device_to_update=s,
+ new_configlets=cls_perSw[s],
+ delete=True)
+ if "errorMessage" in str(res):
+ raise Exception(str(res))
+ tasks = dict()
+ for t_id in res[1]['tasks']:
+ tasks[t_id] = {'workOrderId': t_id}
+ self.__exec_task(tasks)
+ self.logger.info("Device {} modify result {}".format(s, res))
+ except Exception as e:
+ # best-effort: keep rolling back the remaining switches
+ self.logger.error('Error removing configlets from device {}: {}'.format(s, e))
+ pass
+ # then delete the configLets themselves from CloudVision
+ for s in self.s_api:
+ if allLeafConfigured[s]:
+ self.__configlet_modify(cls_perSw[s], delete=True)
+
+ def __exec_task(self, tasks, tout=10):
+ """ Executes the given CloudVision tasks, waiting up to 'tout' seconds
+ for them to reach the 'executed' state.
+
+ :param tasks: dict task_id -> {'workOrderId': task_id}
+ :param tout: timeout in seconds handed to the task handler
+ """
+ # __connect() also (re)creates self.taskC as a side effect
+ if self.taskC is None:
+ self.__connect()
+ data = self.taskC.update_all_tasks(tasks).values()
+ self.taskC.task_action(data, tout, 'executed')
+
+ def __device_modify(self, device_to_update, new_configlets, delete):
+ """ Updates the devices (switches) adding or removing the configLet,
+ the tasks Id's associated to the change are returned
+
+ :param device_to_update: hostname (or substring match) of the switch
+ :param new_configlets: list of configLets to attach/detach
+ :param delete: True to detach the configLets, False to attach them
+ :return: [changed(bool), {'updated': [...], 'tasks': [task_ids]}]
+ :raises: SdnConnectorError when the CloudVision update fails
+ """
+ self.logger.info('Enter in __device_modify delete: {}'.format(
+ delete))
+ updated = []
+ changed = False
+ # Task Ids that have been identified during device actions
+ newTasks = []
+
+ # nothing to do without configLets or a target device
+ if (len(new_configlets) == 0 or
+ device_to_update is None or
+ len(device_to_update) == 0):
+ data = {'updated': updated, 'tasks': newTasks}
+ return [changed, data]
+
+ self.__load_inventory()
+
+ allDeviceFacts = self.allDeviceFacts
+ # Work through Devices list adding device specific information
+ device = None
+ for try_device in allDeviceFacts:
+ # Add Device Specific Configlets
+ # self.logger.debug(device)
+ if try_device['hostname'] not in device_to_update:
+ continue
+ dev_cvp_configlets = self.client.api.get_configlets_by_device_id(
+ try_device['systemMacAddress'])
+ # self.logger.debug(dev_cvp_configlets)
+ try_device['deviceSpecificConfiglets'] = []
+ for cvp_configlet in dev_cvp_configlets:
+ # containerCount == 0 means the configLet is assigned to the
+ # device directly, not inherited from a container
+ if int(cvp_configlet['containerCount']) == 0:
+ try_device['deviceSpecificConfiglets'].append(
+ {'name': cvp_configlet['name'],
+ 'key': cvp_configlet['key']})
+ # self.logger.debug(device)
+ device = try_device
+ break
+
+ # Check assigned configlets
+ # NOTE(review): if no inventory entry matched, 'device' is still None
+ # and the subscriptions below raise TypeError — confirm the caller
+ # guarantees the hostname exists in the inventory
+ device_update = False
+ add_configlets = []
+ remove_configlets = []
+ update_devices = []
+
+ if delete:
+ for cvp_configlet in device['deviceSpecificConfiglets']:
+ for cl in new_configlets:
+ if cvp_configlet['name'] == cl['name']:
+ remove_configlets.append(cvp_configlet)
+ device_update = True
+ else:
+ for configlet in new_configlets:
+ if configlet not in device['deviceSpecificConfiglets']:
+ add_configlets.append(configlet)
+ device_update = True
+ if device_update:
+ update_devices.append({'hostname': device['hostname'],
+ 'configlets': [add_configlets,
+ remove_configlets],
+ 'device': device})
+ self.logger.info("Device to modify: {}".format(update_devices))
+
+ # NOTE(review): update_devices may be empty when nothing changed,
+ # making this indexing raise IndexError — verify intended
+ up_device = update_devices[0]
+ cl_toAdd = up_device['configlets'][0]
+ cl_toDel = up_device['configlets'][1]
+ # Update Configlets
+ try:
+ if delete and len(cl_toDel) > 0:
+ r = self.client.api.remove_configlets_from_device(
+ 'OSM',
+ up_device['device'],
+ cl_toDel,
+ create_task=True)
+ dev_action = r
+ self.logger.debug("remove_configlets_from_device {} {}".format(dev_action, cl_toDel))
+ elif len(cl_toAdd) > 0:
+ r = self.client.api.apply_configlets_to_device(
+ 'OSM',
+ up_device['device'],
+ cl_toAdd,
+ create_task=True)
+ dev_action = r
+ self.logger.debug("apply_configlets_to_device {} {}".format(dev_action, cl_toAdd))
+
+ except Exception as error:
+ errorMessage = str(error)
+ msg = "errorMessage: Device {} Configlets couldnot be updated: {}".format(
+ up_device['hostname'], errorMessage)
+ raise SdnConnectorError(msg) from error
+ else:
+ if "errorMessage" in str(dev_action):
+ # NOTE(review): up_device has no 'name' key (it was built with
+ # 'hostname'), so this format would raise KeyError — confirm
+ m = "Device {} Configlets update fail: {}".format(
+ up_device['name'], dev_action['errorMessage'])
+ raise SdnConnectorError(m)
+ else:
+ changed = True
+ if 'taskIds' in str(dev_action):
+ # Fix 1030 SDN-ARISTA Key error note when deploy a NS
+ if not dev_action['data']['taskIds']:
+ raise SdnConnectorError("No taskIds found: Device {} Configlets couldnot be updated".format(
+ up_device['hostname']))
+ for taskId in dev_action['data']['taskIds']:
+ updated.append({up_device['hostname']:
+ "Configlets-{}".format(
+ taskId)})
+ newTasks.append(taskId)
+ else:
+ updated.append({up_device['hostname']:
+ "Configlets-No_Specific_Tasks"})
+ data = {'updated': updated, 'tasks': newTasks}
+ return [changed, data]
+
+ def __configlet_modify(self, configletsToApply, delete=False):
+ ''' adds/update or delete the provided configLets
+ :param configletsToApply: list of configLets to apply
+ :param delete: flag to indicate if the configLets have to be deleted
+ from Cloud Vision Portal
+ :return: [changed(bool), data(dict)] where data holds per-operation
+ result lists under 'new', 'updated', 'deleted' and 'checked'
+ '''
+ self.logger.info('Enter in __configlet_modify delete:{}'.format(
+ delete))
+
+ # Compare configlets against cvp_facts-configlets
+ changed = False
+ checked = []
+ deleted = []
+ updated = []
+ new = []
+
+ for cl in configletsToApply:
+ found_in_cvp = False
+ to_delete = False
+ to_update = False
+ to_create = False
+ to_check = False
+ # look the configLet up in CloudVision by name; absence is normal
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(cl['name'])
+ cl['key'] = cvp_cl['key']
+ cl['note'] = cvp_cl['note']
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ pass
+ else:
+ raise error
+
+ # decide the operation for this configLet
+ if delete:
+ if found_in_cvp:
+ to_delete = True
+ configlet = {'name': cvp_cl['name'],
+ 'data': cvp_cl}
+ else:
+ if found_in_cvp:
+ cl_compare = self.__compare(cl['config'],
+ cvp_cl['config'])
+ # compare function returns a floating point number
+ # 100.0 means the stored config is identical: only verify it
+ if cl_compare[0] != 100.0:
+ to_update = True
+ configlet = {'name': cl['name'],
+ 'data': cvp_cl,
+ 'config': cl['config']}
+ else:
+ to_check = True
+ configlet = {'name': cl['name'],
+ 'key': cvp_cl['key'],
+ 'data': cvp_cl,
+ 'config': cl['config']}
+ else:
+ to_create = True
+ configlet = {'name': cl['name'],
+ 'config': cl['config']}
+ try:
+ if to_delete:
+ operation = 'delete'
+ resp = self.client.api.delete_configlet(
+ configlet['data']['name'],
+ configlet['data']['key'])
+ elif to_update:
+ operation = 'update'
+ resp = self.client.api.update_configlet(
+ configlet['config'],
+ configlet['data']['key'],
+ configlet['data']['name'],
+ wait_task_ids=True)
+ elif to_create:
+ operation = 'create'
+ resp = self.client.api.add_configlet(
+ configlet['name'],
+ configlet['config'])
+ else:
+ operation = 'checked'
+ resp = 'checked'
+ except Exception as error:
+ errorMessage = str(error).split(':')[-1]
+ message = "Configlet {} cannot be {}: {}".format(
+ cl['name'], operation, errorMessage)
+ # record the failure under the attempted operation
+ if to_delete:
+ deleted.append({configlet['name']: message})
+ elif to_update:
+ updated.append({configlet['name']: message})
+ elif to_create:
+ new.append({configlet['name']: message})
+ elif to_check:
+ checked.append({configlet['name']: message})
+
+ else:
+ if "error" in str(resp).lower():
+ # NOTE(review): this message always says "deleted" even for
+ # update/create/check failures — confirm 'operation' was meant
+ message = "Configlet {} cannot be deleted: {}".format(
+ cl['name'], resp['errorMessage'])
+ if to_delete:
+ deleted.append({configlet['name']: message})
+ elif to_update:
+ updated.append({configlet['name']: message})
+ elif to_create:
+ new.append({configlet['name']: message})
+ elif to_check:
+ checked.append({configlet['name']: message})
+ else:
+ if to_delete:
+ changed = True
+ deleted.append({configlet['name']: "success"})
+ elif to_update:
+ changed = True
+ updated.append({configlet['name']: "success"})
+ elif to_create:
+ changed = True
+ cl['key'] = resp # This key is used in API call deviceApplyConfigLet FGA
+ new.append({configlet['name']: "success"})
+ elif to_check:
+ changed = False
+ checked.append({configlet['name']: "success"})
+
+ data = {'new': new, 'updated': updated, 'deleted': deleted, 'checked': checked}
+ return [changed, data]
+
+ def __get_configletsDevices(self, configlets):
+ """ Fills in, for each switch's configLet in 'configlets', the list of
+ host names of the devices it is currently applied to (under the
+ 'devices' key), querying CloudVision.
+
+ :param configlets: dict switch-name -> configLet dict (mutated in place)
+ """
+ for s in self.s_api:
+ configlet = configlets[s]
+ # Add applied Devices
+ if len(configlet) > 0:
+ configlet['devices'] = []
+ applied_devices = self.client.api.get_applied_devices(
+ configlet['name'])
+ for device in applied_devices['data']:
+ configlet['devices'].append(device['hostName'])
+
+ def __get_serviceData(self, service_uuid, service_type, vlan_id, conn_info=None):
+ """ Returns the configLets of a connectivity service grouped per switch.
+
+ When conn_info is provided, its stored 'configLetPerSwitch' is reused;
+ otherwise the configLets are rebuilt by querying CloudVision.
+
+ :return: dict switch-name -> list of configLets
+ """
+ cls_perSw = {}
+ for s in self.s_api:
+ cls_perSw[s] = []
+ if not conn_info:
+ srv_cls = self.__get_serviceConfigLets(service_uuid,
+ service_type,
+ vlan_id)
+ self.__get_configletsDevices(srv_cls)
+ for s in self.s_api:
+ cl = srv_cls[s]
+ if len(cl) > 0:
+ # group each configLet under every device it is applied to
+ for dev in cl['devices']:
+ cls_perSw[dev].append(cl)
+ else:
+ cls_perSw = conn_info['configLetPerSwitch']
+ return cls_perSw
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """
+ Disconnect multi-site endpoints previously connected
+
+ :param service_uuid: The one returned by create_connectivity_service
+ :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service'
+ if they do not return None
+ :return: None
+ :raises: SdnConnectorException: In case of error. The parameter http_code must be filled
+ """
+ try:
+ self.logger.debug('invoked delete_connectivity_service {}'.
+ format(service_uuid))
+ if not service_uuid:
+ raise SdnConnectorError(message='No connection service UUID',
+ http_code=500)
+
+ self.__get_Connection()
+ if conn_info is None:
+ raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid),
+ http_code=500)
+ # force a rebuild from CloudVision (ignore the cached configLets)
+ c_info = None
+ cls_perSw = self.__get_serviceData(service_uuid,
+ conn_info['service_type'],
+ conn_info['vlan_id'],
+ c_info)
+ # deletion reuses the rollback path, which detaches the configLets
+ # from every device and then removes them from CloudVision
+ allLeafConfigured = {}
+ allLeafModified = {}
+ for s in self.s_api:
+ allLeafConfigured[s] = True
+ allLeafModified[s] = True
+ found_in_cvp = False
+ for s in self.s_api:
+ if cls_perSw[s]:
+ found_in_cvp = True
+ if found_in_cvp:
+ self.__rollbackConnection(cls_perSw,
+ allLeafConfigured,
+ allLeafModified)
+ else:
+ # if the service is not defined in Cloud Vision, return a 404 - NotFound error
+ raise SdnConnectorError(message='Service {} was not found in Arista Cloud Vision {}'.
+ format(service_uuid, self.__wim_url),
+ http_code=404)
+ self.__removeMetadata(service_uuid)
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except SdnConnectorError as sde:
+ raise sde
+ except Exception as ex:
+ self.client = None
+ self.logger.error(ex)
+ if self.raiseException:
+ raise ex
+ raise SdnConnectorError(message=SdnError.INTERNAL_ERROR,
+ http_code=500) from ex
+
+ def __addMetadata(self, service_uuid, service_type, vlan_id):
+ """ Adds the connectivity service to the 'OSM_metadata' configLet,
+ appending one "<prefix> <type> <vlan> <uuid>" line to its config.
+ Best-effort: any failure is logged and swallowed.
+ """
+ found_in_cvp = False
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ # metadata configLet not created yet: it will be created below
+ pass
+ else:
+ raise error
+ try:
+ new_serv = '{} {} {} {}\n'.format(self.__METADATA_PREFIX, service_type, vlan_id, service_uuid)
+
+ if found_in_cvp:
+ cl_config = cvp_cl['config'] + new_serv
+ else:
+ cl_config = new_serv
+ cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}]
+ self.__configlet_modify(cl_meta)
+ except Exception as e:
+ self.logger.error('Error in setting metadata in CloudVision from OSM for service {}: {}'.
+ format(service_uuid, str(e)))
+ pass
+
+ def __removeMetadata(self, service_uuid):
+ """ Removes the connectivity service from 'OSM_metadata' configLet,
+ rebuilding its config without the lines mentioning service_uuid.
+ Best-effort: any failure is logged and swallowed.
+ """
+ found_in_cvp = False
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ pass
+ else:
+ raise error
+ try:
+ if found_in_cvp:
+ if service_uuid in cvp_cl['config']:
+ cl_config = ''
+ # NOTE(review): kept lines are concatenated without a '\n'
+ # separator, collapsing the remaining entries onto one
+ # line — confirm whether '\n' should be re-appended here
+ for line in cvp_cl['config'].split('\n'):
+ if service_uuid in line:
+ continue
+ else:
+ cl_config = cl_config + line
+ cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}]
+ self.__configlet_modify(cl_meta)
+ except Exception as e:
+ self.logger.error('Error in removing metadata in CloudVision from OSM for service {}: {}'.
+ format(service_uuid, str(e)))
+ pass
+
+ def edit_connectivity_service(self,
+ service_uuid,
+ conn_info=None,
+ connection_points=None,
+ **kwargs):
+ """ Change an existing connectivity service.
+
+ This method's arguments and return value follow the same convention as
+ :meth:`~.create_connectivity_service`.
+
+ :param service_uuid: UUID of the connectivity service.
+ :param conn_info: (dict or None): Information previously returned
+ by last call to create_connectivity_service
+ or edit_connectivity_service
+ :param connection_points: (list): If provided, the old list of
+ connection points will be replaced.
+ :param kwargs: Same meaning that create_connectivity_service
+ :return: dict or None: Information to be updated and stored at
+ the database.
+ When ``None`` is returned, no information should be changed.
+ When an empty dict is returned, the database record will
+ be deleted.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ try:
+ self.logger.debug('invoked edit_connectivity_service for service {}. ports: {}'.format(service_uuid,
+ connection_points))
+
+ if not service_uuid:
+ raise SdnConnectorError(message='Unable to perform operation, missing or empty uuid',
+ http_code=500)
+ if not conn_info:
+ raise SdnConnectorError(message='Unable to perform operation, missing or empty connection information',
+ http_code=500)
+
+ # no new connection points: nothing to change in the database
+ if connection_points is None:
+ return None
+
+ self.__get_Connection()
+
+ # kept for restoring the previous configuration on failure
+ cls_currentPerSw = conn_info['configLetPerSwitch']
+ service_type = conn_info['service_type']
+
+ self.__check_service(service_type,
+ connection_points,
+ check_vlan=False,
+ check_num_cp=False,
+ kwargs=kwargs)
+
+ s_uid, s_connInf = self.__processConnection(
+ service_uuid,
+ service_type,
+ connection_points,
+ kwargs)
+ self.logger.info("Service with uuid {} configuration updated".
+ format(s_uid))
+ return s_connInf
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except SdnConnectorError as sde:
+ raise sde
+ except Exception as ex:
+ try:
+ # Add previous
+ # TODO check if there are pending task, and cancel them before restoring
+ # NOTE(review): if the failure happened before cls_currentPerSw
+ # was assigned, this raises NameError (caught just below) —
+ # confirm the restore is intended to be skipped in that case
+ self.__updateConnection(cls_currentPerSw)
+ except Exception as e:
+ self.logger.error("Unable to restore configuration in service {} after an error in the configuration updated: {}".
+ format(service_uuid, str(e)))
+ if self.raiseException:
+ raise ex
+ raise SdnConnectorError(message=str(ex),
+ http_code=500) from ex
+
+ def clear_all_connectivity_services(self):
+ """ Removes all connectivity services from Arista CloudVision with two steps:
+ - retrieves all the services from Arista CloudVision
+ - removes each service
+
+ :raises: SdnConnectorError: 401 on login failure, 500 otherwise
+ """
+ try:
+ self.logger.debug('invoked AristaImpl ' +
+ 'clear_all_connectivity_services')
+ self.__get_Connection()
+ s_list = self.__get_srvUUIDs()
+ for serv in s_list:
+ # build the minimal conn_info delete_connectivity_service needs
+ conn_info = {}
+ conn_info['service_type'] = serv['type']
+ conn_info['vlan_id'] = serv['vlan']
+
+ self.delete_connectivity_service(serv['uuid'], conn_info)
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except SdnConnectorError as sde:
+ raise sde
+ except Exception as ex:
+ self.client = None
+ self.logger.error(ex)
+ if self.raiseException:
+ raise ex
+ raise SdnConnectorError(message=SdnError.INTERNAL_ERROR,
+ http_code=500) from ex
+
+ def get_all_active_connectivity_services(self):
+ """ Return the uuid of all the active connectivity services with two steps:
+ - retrieves all the services from Arista CloudVision
+ - retrieves the status of each service
+
+ :return: list of uuids whose status is 'ACTIVE'
+ :raises: SdnConnectorError: 401 on login failure, 500 otherwise
+ """
+ try:
+ self.logger.debug('invoked AristaImpl {}'.format(
+ 'get_all_active_connectivity_services'))
+ self.__get_Connection()
+ s_list = self.__get_srvUUIDs()
+ result = []
+ for serv in s_list:
+ # build the minimal conn_info the status query needs
+ conn_info = {}
+ conn_info['service_type'] = serv['type']
+ conn_info['vlan_id'] = serv['vlan']
+
+ status = self.get_connectivity_service_status(serv['uuid'], conn_info)
+ if status['sdn_status'] == 'ACTIVE':
+ result.append(serv['uuid'])
+ return result
+ except CvpLoginError as e:
+ self.logger.info(str(e))
+ # drop the cached client so the next call reconnects
+ self.client = None
+ raise SdnConnectorError(message=SdnError.UNAUTHORIZED,
+ http_code=401) from e
+ except SdnConnectorError as sde:
+ raise sde
+ except Exception as ex:
+ self.client = None
+ self.logger.error(ex)
+ if self.raiseException:
+ raise ex
+ raise SdnConnectorError(message=SdnError.INTERNAL_ERROR,
+ http_code=500) from ex
+
+ def __get_serviceConfigLets(self, service_uuid, service_type, vlan_id):
+ """ Return the configLet's associated with a connectivity service,
+ There should be one, as maximum, per device (switch) for a given
+ connectivity service
+
+ :return: dict switch-name -> configLet dict (or empty list if none)
+ """
+ srv_cls = {}
+ for s in self.s_api:
+ srv_cls[s] = []
+ found_in_cvp = False
+ # must match the naming scheme used in __processConnection
+ name = (self.__OSM_PREFIX +
+ s +
+ self.__SEPARATOR + service_type + str(vlan_id) +
+ self.__SEPARATOR + service_uuid)
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(name)
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ # no configLet for this switch: leave the empty list
+ pass
+ else:
+ raise error
+ if found_in_cvp:
+ srv_cls[s] = cvp_cl
+ return srv_cls
+
+ def __get_srvVLANs(self):
+ """ Returns a list with all the VLAN id's used in the connectivity services managed
+ in the Arista CloudVision by checking the 'OSM_metadata' configLet where this
+ information is stored
+
+ :return: list of unique VLAN id strings (empty if no metadata exists)
+ """
+ found_in_cvp = False
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ pass
+ else:
+ raise error
+ s_vlan_list = []
+ if found_in_cvp:
+ lines = cvp_cl['config'].split('\n')
+ for line in lines:
+ # metadata line format: "<prefix> <type> <vlan> <uuid>"
+ if self.__METADATA_PREFIX in line:
+ s_vlan = line.split(' ')[3]
+ else:
+ continue
+ if (s_vlan is not None and
+ len(s_vlan) > 0 and
+ s_vlan not in s_vlan_list):
+ s_vlan_list.append(s_vlan)
+
+ return s_vlan_list
+
+ def __get_srvUUIDs(self):
+ """ Retrieves all the connectivity services, managed in the Arista CloudVision
+ by checking the 'OSM_metadata' configLet where this information is stored
+
+ :return: list of unique {'uuid':..., 'type':..., 'vlan':...} dicts
+ """
+ found_in_cvp = False
+ try:
+ cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
+ found_in_cvp = True
+ except CvpApiError as error:
+ if "Entity does not exist" in error.msg:
+ pass
+ else:
+ raise error
+ serv_list = []
+ if found_in_cvp:
+ lines = cvp_cl['config'].split('\n')
+ for line in lines:
+ # metadata line format: "<prefix> <type> <vlan> <uuid>"
+ if self.__METADATA_PREFIX in line:
+ line = line.split(' ')
+ serv = {'uuid': line[4], 'type': line[2], 'vlan': line[3]}
+ else:
+ continue
+ if (serv is not None and
+ len(serv) > 0 and
+ serv not in serv_list):
+ serv_list.append(serv)
+
+ return serv_list
+
+    def __get_Connection(self):
+        """ Open a connection with Arista CloudVision,
+            invoking the version retrieval as test
+        """
+        try:
+            if self.client is None:
+                self.client = self.__connect()
+            # probe the session; raises if it expired or the connection dropped
+            self.client.api.get_cvp_info()
+        except (CvpSessionLogOutError, RequestException) as e:
+            self.logger.debug("Connection error '{}'. Reconnecting".format(e))
+            # open a fresh session and probe it again so a persistent failure surfaces here
+            self.client = self.__connect()
+            self.client.api.get_cvp_info()
+
+    def __connect(self):
+        ''' Connects to CVP device using user provided credentials from initialization.
+        :return: CvpClient object with connection instantiated.
+        '''
+        client = CvpClient()
+        # split the WIM URL into protocol, host and optional port
+        protocol, _, rest_url = self.__wim_url.rpartition("://")
+        host, _, port = rest_url.partition(":")
+        if port and port.endswith("/"):
+            # drop a trailing slash before converting the port to int
+            port = int(port[:-1])
+        elif port:
+            port = int(port)
+        else:
+            # no explicit port: default to HTTPS
+            port = 443
+
+        client.connect([host],
+                       self.__user,
+                       self.__passwd,
+                       protocol=protocol or "https",
+                       port=port,
+                       connect_timeout=2)
+        client.api = CvpApi(client, request_timeout=self.__API_REQUEST_TOUT)
+        # task helper bound to this session's API handle
+        self.taskC = AristaCVPTask(client.api)
+        return client
+
+ def __compare(self, fromText, toText, lines=10):
+ """ Compare text string in 'fromText' with 'toText' and produce
+ diffRatio - a score as a float in the range [0, 1] 2.0*M / T
+ T is the total number of elements in both sequences,
+ M is the number of matches.
+ Score - 1.0 if the sequences are identical, and
+ 0.0 if they have nothing in common.
+ unified diff list
+ Code Meaning
+ '- ' line unique to sequence 1
+ '+ ' line unique to sequence 2
+ ' ' line common to both sequences
+ '? ' line not present in either input sequence
+ """
+ fromlines = fromText.splitlines(1)
+ tolines = toText.splitlines(1)
+ diff = list(difflib.unified_diff(fromlines, tolines, n=lines))
+ textComp = difflib.SequenceMatcher(None, fromText, toText)
+ diffRatio = round(textComp.quick_ratio()*100, 2)
+ return [diffRatio, diff]
+
+    def __load_inventory(self):
+        """ Get Inventory Data for All Devices (aka switches) from the Arista CloudVision
+        """
+        # the inventory is cached: it is fetched from CVP only the first time
+        if not self.cvp_inventory:
+            self.cvp_inventory = self.client.api.get_inventory()
+            self.allDeviceFacts = []
+            for device in self.cvp_inventory:
+                self.allDeviceFacts.append(device)
+
+    def __get_tags(self, name, value):
+        """ Loads (and caches in self.cvp_tags) the devices that have the given
+            tag name/value pair assigned in the Arista CloudVision analytics API
+        """
+        # tags are cached: the REST call is issued only the first time
+        if not self.cvp_tags:
+            self.cvp_tags = []
+            url = '/api/v1/rest/analytics/tags/labels/devices/{}/value/{}/elements'.format(name, value)
+            self.logger.debug('get_tags: URL {}'.format(url))
+            data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
+            for dev in data['notifications']:
+                for elem in dev['updates']:
+                    self.cvp_tags.append(elem)
+        self.logger.debug('Available devices with tag_name {} - value {}: {} '.format(name, value, self.cvp_tags))
+
+    def is_valid_destination(self, url):
+        """ Check that the provided WIM URL is correct: it must either match
+            the expected URL regular expression or be a valid IPv4/IPv6 address
+        """
+        if re.match(self.__regex, url):
+            return True
+        elif self.is_valid_ipv4_address(url):
+            return True
+        else:
+            return self.is_valid_ipv6_address(url)
+
+    def is_valid_ipv4_address(self, address):
+        """ Checks that the given IP is IPv4 valid
+        """
+        try:
+            socket.inet_pton(socket.AF_INET, address)
+        except AttributeError:  # no inet_pton here, sorry
+            # fall back to inet_aton on platforms lacking inet_pton
+            try:
+                socket.inet_aton(address)
+            except socket.error:
+                return False
+            # inet_aton accepts short forms like '127.1'; require a full dotted quad
+            return address.count('.') == 3
+        except socket.error:  # not a valid address
+            return False
+        return True
+
+    def is_valid_ipv6_address(self, address):
+        """ Checks that the given IP is IPv6 valid
+        """
+        try:
+            # inet_pton raises socket.error on any malformed IPv6 literal
+            socket.inet_pton(socket.AF_INET6, address)
+        except socket.error:  # not a valid address
+            return False
+        return True
+
+    def delete_keys_from_dict(self, dict_del, lst_keys):
+        """ Returns a copy of 'dict_del' with the keys in 'lst_keys' removed,
+            recursively at every nesting level; the input dict is not modified
+        """
+        # keep only the keys not scheduled for deletion
+        dict_copy = {k: v for k, v in dict_del.items() if k not in lst_keys}
+        for k, v in dict_copy.items():
+            if isinstance(v, dict):
+                # recurse into nested dictionaries
+                dict_copy[k] = self.delete_keys_from_dict(v, lst_keys)
+        return dict_copy
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
+jsonrpclib-pelix
+# NOTE: "uuid" is deliberately not listed: it is part of the Python3 standard
+# library, and the PyPI package of that name is an obsolete Python2 backport
+cvprac
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_arista"
+
+README = """
+===========
+osm-rosdn_arista
+===========
+
+osm-ro pluging for arista SDN
+"""
+
+setup(
+ name=_name,
+ description='OSM ro sdn plugin for arista',
+ long_description=README,
+ version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ # version=VERSION,
+ # python_requires='>3.5.0',
+ author='ETSI OSM',
+ # TODO py3 author_email='',
+ maintainer='oscarluis.peral@atos.net', # TODO py3
+ # TODO py3 maintainer_email='',
+ url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+ license='Apache 2.0',
+
+ packages=[_name],
+ include_package_data=True,
+ install_requires=["requests",
+ "uuid",
+ "jsonrpclib-pelix",
+ "cvprac",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"],
+ setup_requires=['setuptools-version-command'],
+ entry_points={
+ 'osm_rosdn.plugins': ['rosdn_arista = osm_rosdn_arista.wimconn_arista:AristaSdnConnector']
+ },
+)
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-osm-ro, python3-jsonrpclib-pelix
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rosdn_arista --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rosdn_arista.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
##
requests
-git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "osm-ro"],
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rosdn.plugins': ['rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector'],
super().__init__(wim, wim_account, config, logger)
of_params = {
"of_url": wim["wim_url"],
- "of_dpid": config.get("dpid"),
+ "of_dpid": config.get("dpid") or config.get("switch_id"),
"of_user": wim_account["user"],
"of_password": wim_account["password"],
}
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "osm-ro"],
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rosdn.plugins': ['rosdn_floodlightof = osm_rosdn_floodlightof.sdnconn_floodlightof:'
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+ rm -rf dist deb_dist osm_rosdn_odlof-*.tar.gz osm_rosdn_odlof.egg-info .eggs
+
+package:
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ cd deb_dist/osm-rosdn-odlof*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+Implement the plugging for OpenDayLight openflow controller
+It creates the class OF_conn to create dataplane connections
+with static rules based on packet destination MAC address
+"""
+
+import json
+import requests
+import base64
+import logging
+from osm_ro.wim.openflow_conn import OpenflowConn, OpenflowConnException, OpenflowConnConnectionException, \
+ OpenflowConnUnexpectedResponse, OpenflowConnAuthException, OpenflowConnNotFoundException, \
+ OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented
+
+__author__ = "Pablo Montes, Alfonso Tierno"
+__date__ = "$28-oct-2014 12:07:15$"
+
+
+class OfConnOdl(OpenflowConn):
+ """OpenDayLight connector. No MAC learning is used"""
+
+ def __init__(self, params):
+ """ Constructor.
+ Params: dictionary with the following keys:
+ of_dpid: DPID to use for this controller
+ of_url: must be [http://HOST:PORT/]
+ of_user: user credentials, can be missing or None
+ of_password: password credentials
+ of_debug: debug level for logging. Default to ERROR
+ other keys are ignored
+ Raise an exception if same parameter is missing or wrong
+ """
+
+ OpenflowConn.__init__(self, params)
+
+ # check params
+ url = params.get("of_url")
+ if not url:
+ raise ValueError("'url' must be provided")
+ if not url.startswith("http"):
+ url = "http://" + url
+ if not url.endswith("/"):
+ url = url + "/"
+ self.url = url + "onos/v1/"
+
+ # internal variables
+ self.name = "OpenDayLight"
+ self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+ self.auth = None
+ self.pp2ofi = {} # From Physical Port to OpenFlow Index
+ self.ofi2pp = {} # From OpenFlow Index to Physical Port
+
+ self.dpid = str(params["of_dpid"])
+ self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16))
+ if params and params.get("of_user"):
+ of_password=params.get("of_password", "")
+ self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8"))
+ self.auth = self.auth.decode()
+ self.headers['authorization'] = 'Basic ' + self.auth
+
+ self.logger = logging.getLogger('openmano.sdnconn.onosof')
+ # self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
+ self.logger.debug("odlof plugin initialized")
+
+ def get_of_switches(self):
+ """
+ Obtain a a list of switches or DPID detected by this controller
+ :return: list length, and a list where each element a tuple pair (DPID, IP address)
+ Raise an OpenflowconnConnectionException exception if fails with text_error
+ """
+ try:
+ of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes",
+ headers=self.headers)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("get_of_switches " + error_text)
+ raise OpenflowconnUnexpectedResponse("Error get_of_switches " + error_text)
+
+ self.logger.debug("get_of_switches " + error_text)
+ info = of_response.json()
+
+ if not isinstance(info, dict):
+ self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
+ raise OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+
+ nodes = info.get('nodes')
+ if type(nodes) is not dict:
+ self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s",
+ str(type(info)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes', not found or not a dict."
+ " Wrong version?")
+
+ node_list = nodes.get('node')
+ if type(node_list) is not list:
+ self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', "
+ "not found or not a list: %s", str(type(node_list)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found "
+ "or not a list. Wrong version?")
+
+ switch_list = []
+ for node in node_list:
+ node_id = node.get('id')
+ if node_id is None:
+ self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s",
+ str(node))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. "
+ "Wrong version?")
+
+ if node_id == 'controller-config':
+ continue
+
+ node_ip_address = node.get('flow-node-inventory:ip-address')
+ if node_ip_address is None:
+ self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:"
+ "ip-address', not found: %s", str(node))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found. Wrong version?")
+
+ node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
+ switch_list.append((':'.join(a+b for a,b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
+ return switch_list
+
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_switches " + error_text)
+ raise OpenflowconnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_switches " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+
+ def obtain_port_correspondence(self):
+ """
+ Obtain the correspondence between physical and openflow port names
+ :return: dictionary: with physical name as key, openflow name as value,
+ Raise a OpenflowconnConnectionException expection in case of failure
+ """
+ try:
+ of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes",
+ headers=self.headers)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("obtain_port_correspondence " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+ self.logger.debug("obtain_port_correspondence " + error_text)
+ info = of_response.json()
+
+ if not isinstance(info, dict):
+ self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info))
+ raise OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?")
+
+ nodes = info.get('nodes')
+ if not isinstance(nodes, dict):
+ self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', "
+ "not found or not a dict: %s", str(type(nodes)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. "
+ "Wrong version?")
+
+ node_list = nodes.get('node')
+ if not isinstance(node_list, list):
+ self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
+ "not found or not a list: %s", str(type(node_list)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found or not a list."
+ " Wrong version?")
+
+ for node in node_list:
+ node_id = node.get('id')
+ if node_id is None:
+ self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
+ "not found: %s", str(node))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. "
+ "Wrong version?")
+
+ if node_id == 'controller-config':
+ continue
+
+ # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value
+ # of the dpid
+ # In case this is not the desired switch, continue
+ if self.id != node_id:
+ continue
+
+ node_connector_list = node.get('node-connector')
+ if not isinstance(node_connector_list, list):
+ self.logger.error("obtain_port_correspondence. Unexpected response at "
+ "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'node-connector', "
+ "not found or not a list. Wrong version?")
+
+ for node_connector in node_connector_list:
+ self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id'])
+ self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name'])
+
+ node_ip_address = node.get('flow-node-inventory:ip-address')
+ if node_ip_address is None:
+ self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found: %s", str(node))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found. Wrong version?")
+
+ # If we found the appropriate dpid no need to continue in the for loop
+ break
+
+ # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
+ return self.pp2ofi
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("obtain_port_correspondence " + error_text)
+ raise OpenflowconnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("obtain_port_correspondence " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+
+ def get_of_rules(self, translate_of_ports=True):
+ """
+ Obtain the rules inserted at openflow controller
+ :param translate_of_ports:
+ :return: list where each item is a dictionary with the following content:
+ priority: rule priority
+ name: rule name (present also as the master dict key)
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, can be missing or None if not apply
+ vlan_id: match vlan tag of the rule, can be missing or None if not apply
+ actions: list of actions, composed by a pair tuples:
+ (vlan, None/int): for stripping/setting a vlan tag
+ (out, port): send to this port
+ switch: DPID, all
+ Raise a OpenflowconnConnectionException exception in case of failure
+
+ """
+
+ try:
+ # get rules
+ if len(self.ofi2pp) == 0:
+ self.obtain_port_correspondence()
+
+ of_response = requests.get(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+ "/table/0", headers=self.headers)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+
+ # The configured page does not exist if there are no rules installed. In that case we return an empty dict
+ if of_response.status_code == 404:
+ return []
+
+ elif of_response.status_code != 200:
+ self.logger.warning("get_of_rules " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+
+ self.logger.debug("get_of_rules " + error_text)
+
+ info = of_response.json()
+
+ if not isinstance(info, dict):
+ self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info))
+ raise OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?")
+
+ table = info.get('flow-node-inventory:table')
+ if not isinstance(table, list):
+ self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', "
+ "not a list: %s", str(type(table)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table', not a list. "
+ "Wrong version?")
+
+ flow_list = table[0].get('flow')
+ if flow_list is None:
+ return []
+
+ if not isinstance(flow_list, list):
+ self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a "
+ "list: %s", str(type(flow_list)))
+ raise OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table'[0]:'flow', "
+ "not a list. Wrong version?")
+
+ # TODO translate ports according to translate_of_ports parameter
+
+ rules = [] # Response list
+ for flow in flow_list:
+ if not ('id' in flow and 'match' in flow and 'instructions' in flow and
+ 'instruction' in flow['instructions'] and
+ 'apply-actions' in flow['instructions']['instruction'][0] and
+ 'action' in flow['instructions']['instruction'][0]['apply-actions']):
+ raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elements are "
+ "missing. Wrong version?")
+
+ flow['instructions']['instruction'][0]['apply-actions']['action']
+
+ rule = dict()
+ rule['switch'] = self.dpid
+ rule['priority'] = flow.get('priority')
+ # rule['name'] = flow['id']
+ # rule['cookie'] = flow['cookie']
+ if 'in-port' in flow['match']:
+ in_port = flow['match']['in-port']
+ if in_port not in self.ofi2pp:
+ raise OpenflowconnUnexpectedResponse("Error: Ingress port {} is not in switch port list".
+ format(in_port))
+
+ if translate_of_ports:
+ in_port = self.ofi2pp[in_port]
+
+ rule['ingress_port'] = in_port
+
+ if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \
+ 'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \
+ 'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \
+ flow['match']['vlan-match']['vlan-id']['vlan-id-present'] == True:
+ rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']
+
+ if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] \
+ and 'address' in flow['match']['ethernet-match']['ethernet-destination']:
+ rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']
+
+ instructions = flow['instructions']['instruction'][0]['apply-actions']['action']
+
+ max_index = 0
+ for instruction in instructions:
+ if instruction['order'] > max_index:
+ max_index = instruction['order']
+
+ actions = [None]*(max_index+1)
+ for instruction in instructions:
+ if 'output-action' in instruction:
+ if 'output-node-connector' not in instruction['output-action']:
+ raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elementa "
+ "are missing. Wrong version?")
+
+ out_port = instruction['output-action']['output-node-connector']
+ if out_port not in self.ofi2pp:
+ raise OpenflowconnUnexpectedResponse("Error: Output port {} is not in switch port list".
+ format(out_port))
+
+ if translate_of_ports:
+ out_port = self.ofi2pp[out_port]
+
+ actions[instruction['order']] = ('out', out_port)
+
+ elif 'strip-vlan-action' in instruction:
+ actions[instruction['order']] = ('vlan', None)
+
+ elif 'set-field' in instruction:
+ if not ('vlan-match' in instruction['set-field'] and
+ 'vlan-id' in instruction['set-field']['vlan-match'] and
+ 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):
+ raise OpenflowconnUnexpectedResponse("unexpected openflow response, one or more elements "
+ "are missing. Wrong version?")
+
+ actions[instruction['order']] = ('vlan',
+ instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
+
+ actions = [x for x in actions if x is not None]
+
+ rule['actions'] = list(actions)
+ rules.append(rule)
+
+ return rules
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_rules " + error_text)
+ raise OpenflowconnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_rules " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+
+ def del_flow(self, flow_name):
+ """
+ Delete an existing rule
+ :param flow_name: flow_name, this is the rule name
+ :return: Raise a OpenflowconnConnectionException expection in case of failure
+ """
+
+ try:
+ of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+ "/table/0/flow/" + flow_name, headers=self.headers)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("del_flow " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+ self.logger.debug("del_flow OK " + error_text)
+ return None
+ except requests.exceptions.RequestException as e:
+ # raise an exception in case of contection error
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("del_flow " + error_text)
+ raise OpenflowconnConnectionException(error_text)
+
+ def new_flow(self, data):
+ """
+ Insert a new static rule
+ :param data: dictionary with the following content:
+ priority: rule priority
+ name: rule name
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, missing or None if not apply
+ vlan_id: match vlan tag of the rule, missing or None if not apply
+ actions: list of actions, composed by a pair tuples with these posibilities:
+ ('vlan', None/int): for stripping/setting a vlan tag
+ ('out', port): send to this port
+ :return: Raise a OpenflowconnConnectionException exception in case of failure
+ """
+
+ try:
+ self.logger.debug("new_flow data: {}".format(data))
+ if len(self.pp2ofi) == 0:
+ self.obtain_port_correspondence()
+
+ # We have to build the data for the opendaylight call from the generic data
+ flow = {
+ 'id': data['name'],
+ 'flow-name': data['name'],
+ 'idle-timeout': 0,
+ 'hard-timeout': 0,
+ 'table_id': 0,
+ 'priority': data.get('priority'),
+ 'match': {}
+ }
+ sdata = {'flow-node-inventory:flow': [flow]}
+ if not data['ingress_port'] in self.pp2ofi:
+ error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+ self.logger.warning("new_flow " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+ flow['match']['in-port'] = self.pp2ofi[data['ingress_port']]
+ if data.get('dst_mac'):
+ flow['match']['ethernet-match'] = {
+ 'ethernet-destination': {'address': data['dst_mac']}
+ }
+ if data.get('vlan_id'):
+ flow['match']['vlan-match'] = {
+ 'vlan-id': {
+ 'vlan-id-present': True,
+ 'vlan-id': int(data['vlan_id'])
+ }
+ }
+ actions = []
+ flow['instructions'] = {
+ 'instruction': [{
+ 'order': 1,
+ 'apply-actions': {'action': actions}
+ }]
+ }
+
+ order = 0
+ for action in data['actions']:
+ new_action = {'order': order}
+ if action[0] == "vlan":
+ if action[1] is None:
+ # strip vlan
+ new_action['strip-vlan-action'] = {}
+ else:
+ new_action['set-field'] = {
+ 'vlan-match': {
+ 'vlan-id': {
+ 'vlan-id-present': True,
+ 'vlan-id': int(action[1])
+ }
+ }
+ }
+ elif action[0] == 'out':
+ new_action['output-action'] = {}
+ if not action[1] in self.pp2ofi:
+ error_msg = 'Port ' + action[1] + ' is not present in the switch'
+ raise OpenflowconnUnexpectedResponse(error_msg)
+
+ new_action['output-action']['output-node-connector'] = self.pp2ofi[action[1]]
+ else:
+ error_msg = "Unknown item '%s' in action list".format(action[0])
+ self.logger.error("new_flow " + error_msg)
+ raise OpenflowconnUnexpectedResponse(error_msg)
+
+ actions.append(new_action)
+ order += 1
+
+ # print json.dumps(sdata)
+ of_response = requests.put(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+ "/table/0/flow/" + data['name'], headers=self.headers, data=json.dumps(sdata))
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("new_flow " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+ self.logger.debug("new_flow OK " + error_text)
+ return None
+
+ except requests.exceptions.RequestException as e:
+ # raise an exception in case of contection error
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("new_flow " + error_text)
+ raise OpenflowconnConnectionException(error_text)
+
+ def clear_all_flows(self):
+ """
+ Delete all existing rules
+ :return: Raise a OpenflowconnConnectionException expection in case of failure
+ """
+ try:
+ of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+ "/table/0", headers=self.headers)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ if of_response.status_code != 200 and of_response.status_code != 404: # HTTP_Not_Found
+ self.logger.warning("clear_all_flows " + error_text)
+ raise OpenflowconnUnexpectedResponse(error_text)
+ self.logger.debug("clear_all_flows OK " + error_text)
+ return None
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("clear_all_flows " + error_text)
+ raise OpenflowconnConnectionException(error_text)
--- /dev/null
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+##
+"""The SdnConnectorOdlOf connector is responsible for creating services using pro-active openflow rules.
+"""
+
+import logging
+from osm_ro.wim.openflow_conn import SdnConnectorOpenFlow
+from .odl_of import OfConnOdl
+
+
+class SdnConnectorOdlOf(SdnConnectorOpenFlow):
+
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """Creates a connectivity based on pro-active openflow rules
+ """
+ self.logger = logging.getLogger('openmano.sdnconn.odlof')
+ super().__init__(wim, wim_account, config, logger)
+ of_params = {
+ "of_url": wim["wim_url"],
+ "of_dpid": config.get("dpid") or config.get("switch_id"),
+ "of_user": wim_account["user"],
+ "of_password": wim_account["password"],
+ }
+ self.openflow_conn = OfConnOdl(of_params)
+ super().__init__(wim, wim_account, config, logger, self.openflow_conn)
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_odlof"
+
+README = """
+===========
+osm-rosdn_odlof
+===========
+
+osm-ro plugin for OpenDayLight SDN using pre-computed openflow rules
+"""
+
+setup(
+ name=_name,
+ description='OSM RO plugin for SDN with odl openflow rules',
+ long_description=README,
+ version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ # version=VERSION,
+ # python_requires='>3.5.0',
+ author='ETSI OSM',
+ author_email='alfonso.tiernosepulveda@telefonica.com',
+ maintainer='Alfonso Tierno',
+ maintainer_email='alfonso.tiernosepulveda@telefonica.com',
+ url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+ license='Apache 2.0',
+
+ packages=[_name],
+ include_package_data=True,
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
+ setup_requires=['setuptools-version-command'],
+ entry_points={
+ 'osm_rosdn.plugins': ['rosdn_odlof = osm_rosdn_odlof.sdnconn_odlof:SdnConnectorOdlOf'],
+ },
+)
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-osm-ro
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = flake8
+toxworkdir={toxinidir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+ -r{toxinidir}/requirements.txt
+install_command = python3 -m pip install -U {opts} {packages}
+commands = flake8 osm_rosdn_odlof --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rosdn_odlof.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
""" Constructor.
:param params: dictionary with the following keys:
of_dpid: DPID to use for this controller ?? Does a controller have a dpid?
- url: must be [http://HOST:PORT/]
+ of_url: must be [http://HOST:PORT/]
of_user: user credentials, can be missing or None
of_password: password credentials
of_debug: debug level for logging. Default to ERROR
self.auth = self.auth.decode()
self.headers['authorization'] = 'Basic ' + self.auth
- self.logger = logging.getLogger('openmano.sdn.onosof')
+ self.logger = logging.getLogger('openmano.sdnconn.onosof')
#self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
self.logger.debug("onosof plugin initialized")
self.ip_address = None
try:
self.headers['content-type'] = 'text/plain'
of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers)
- error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
if of_response.status_code != 200:
self.logger.warning("obtain_port_correspondence " + error_text)
raise OpenflowConnUnexpectedResponse(error_text)
(vlan, None/int): for stripping/setting a vlan tag
(out, port): send to this port
switch: DPID, all
- Raise a openflowconnUnexpectedResponse expection in case of failure
+ Raise a openflowconnUnexpectedResponse exception in case of failure
"""
try:
# The configured page does not exist if there are no rules installed. In that case we return an empty dict
if of_response.status_code == 404:
- return {}
+ return []
elif of_response.status_code != 200:
self.logger.warning("get_of_rules " + error_text)
flow_list = info.get('flows')
if flow_list is None:
- return {}
-
+ return []
if type(flow_list) is not list:
self.logger.error(
"get_of_rules. Unexpected response at 'flows', not a list: %s",
raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. "
"Wrong version?")
- rules = [] # Response list
+ rules = [] # Response list
for flow in flow_list:
if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \
'instructions' in flow['treatment'] and 'criteria' in \
actions: list of actions, composed by a pair tuples with these posibilities:
('vlan', None/int): for stripping/setting a vlan tag
('out', port): send to this port
- :return: Raise a openflowconnUnexpectedResponse expection in case of failure
+ :return: Raise a openflowconnUnexpectedResponse exception in case of failure
"""
try:
self.logger.debug("new_flow data: {}".format(data))
def __init__(self, wim, wim_account, config=None, logger=None):
"""Creates a connectivity based on pro-active openflow rules
"""
- self.logger = logging.getLogger('openmano.sdn.onosof')
+ self.logger = logging.getLogger('openmano.sdnconn.onosof')
super().__init__(wim, wim_account, config, logger)
of_params = {
"of_url": wim["wim_url"],
- "of_dpid": config.get("dpid"),
+ "of_dpid": config.get("dpid") or config.get("switch_id"),
"of_user": wim_account["user"],
"of_password": wim_account["password"],
}
self.openflow_conn = OfConnOnos(of_params)
super().__init__(wim, wim_account, config, logger, self.openflow_conn)
+ self.logger.debug("Init sdn plugin '{}' dpid={} user={}".format(of_params["of_url"], of_params["of_dpid"],
+ of_params["of_user"]))
##
requests
-git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "osm-ro"],
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rosdn.plugins': ['rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf'],
##
[tox]
-envlist = py3
-toxworkdir={homedir}/.tox
+envlist = flake8
+toxworkdir={toxinidir}/.tox
[testenv]
basepython = python3
-install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+install_command = python3 -m pip install -U {opts} {packages}
# deps = -r{toxinidir}/test-requirements.txt
commands=python3 -m unittest discover -v
[testenv:flake8]
basepython = python3
deps = flake8
+ -r{toxinidir}/requirements.txt
+install_command = python3 -m pip install -U {opts} {packages}
commands = flake8 osm_rosdn_onosof --max-line-length 120 \
--exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
"""
https://wiki.onosproject.org/display/ONOS/VPLS+User+Guide
"""
- _WIM_LOGGER = "sdn.assist.onos.vpls"
+ _WIM_LOGGER = "openmano.sdnconn.onosvpls"
def __init__(self, wim, wim_account, config=None, logger=None):
self.logger = logger or logging.getLogger(self._WIM_LOGGER)
##
requests
-git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "osm-ro"],
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rosdn.plugins': ['rosdn_onos_vpls = osm_rosdn_onos_vpls.sdn_assist_onos_vpls:OnosVpls'],
##
requests
-git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
packages=[_name],
include_package_data=True,
dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "osm-ro"],
+ install_requires=[
+ "requests",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rosdn.plugins': ['rosdn_tapi = osm_rosdn_tapi.wimconn_ietfl2vpn:WimconnectorIETFL2VPN'],
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
- "requests", "netaddr", "PyYAML", "osm-ro", "boto"
+ "requests", "netaddr", "PyYAML", "boto",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
],
setup_requires=['setuptools-version-command'],
entry_points={
echo "POST INSTALL OSM-ROVIM-AZURE"
#Pip packages required for azure connector
-python3 -m pip install azure
+python3 -m pip install azure==4.0.0
PyYAML
requests
netaddr
-azure
+azure==4.0.0
git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "netaddr", "PyYAML", "azure", "osm-ro"],
+ install_requires=[
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "azure==4.0.0",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rovim.plugins': ['rovim_azure = osm_rovim_azure.vimconn_azure'],
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
- "requests", "netaddr", "PyYAML", "osm-ro", "fog05rest>=0.0.4"
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "fog05rest>=0.0.4",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
],
setup_requires=['setuptools-version-command'],
entry_points={
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
- install_requires=["requests", "netaddr", "PyYAML", "osm-ro",],
+ install_requires=[
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ ],
setup_requires=['setuptools-version-command'],
entry_points={
'osm_rovim.plugins': ['rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula'],
# method before the implemented VIM connectors are called.
def _format_exception(self, exception):
- '''Transform a keystone, nova, neutron exception into a vimconn exception'''
+ """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
- message_error = exception.message
+ message_error = str(exception)
if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
gl1Exceptions.HTTPNotFound)):
raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
- ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
+ ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
- elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+ elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
neExceptions.NeutronException)):
'dhcp_start_address': ip_schema, first IP to grant
'dhcp_count': number of IPs to grant.
'shared': if this network can be seen/use by other tenants/organization
- 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
+ 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
+ physical-network: physnet-label}
Returns a tuple with the network identifier and created_items, or raises an exception on error
created_items can be None or a dictionary where this method can include key-values that will be passed to
the method delete_network. Can be used to store created segments, created l2gw connections, etc.
if not self.config.get('multisegment_support'):
network_dict["provider:physical_network"] = provider_physical_network
- network_dict["provider:network_type"] = "vlan"
+ if provider_network_profile and "network-type" in provider_network_profile:
+ network_dict["provider:network_type"] = provider_network_profile["network-type"]
+ else:
+ network_dict["provider:network_type"] = self.config.get('dataplane_network_type','vlan')
if vlan:
network_dict["provider:segmentation_id"] = vlan
else:
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
"python-openstackclient", "python-neutronclient",
"requests", "netaddr", "PyYAML",
- "osm-ro", # TODO py3 "networking-l2gw"
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
+ # TODO py3 "networking-l2gw"
# "python-novaclient", "python-keystoneclient", "python-glanceclient", "python-cinderclient",
],
setup_requires=['setuptools-version-command'],
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
"requests", "netaddr", "PyYAML",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
],
setup_requires=['setuptools-version-command'],
entry_points={
packages=[_name],
include_package_data=True,
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
"pyvcloud==19.1.1", "progressbar", "prettytable", "pyvmomi",
"requests", "netaddr", "PyYAML",
- "osm-ro",
+ "osm-ro @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO"
],
setup_requires=['setuptools-version-command'],
entry_points={
# gettting uuid
values = ",".join(map(self.__tuple2db_format_set, UPDATE.items() ))
if modified_time:
- values += ",modified_at={:f}".format(modified_time)
- cmd= "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
+ values += "{}modified_at={:f}".format("," if values else "", modified_time)
+ cmd = "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
self.logger.debug(cmd)
self.cur.execute(cmd)
return self.cur.rowcount
# import imp
import json
+import string
import yaml
from random import choice as random_choice
from osm_ro import utils
# WIM
from .wim import sdnconn
-from .wim.wimconn_fake import FakeConnector
+from .wim.wimconn_dummy import DummyConnector
from .wim.failing_connector import FailingConnector
from .http_tools import errors as httperrors
from .wim.engine import WimEngine
try:
worker_id = get_process_id()
- if "rosdn_fake" not in plugins:
- plugins["rosdn_fake"] = FakeConnector
+ if "rosdn_dummy" not in plugins:
+ plugins["rosdn_dummy"] = DummyConnector
# starts ovim library
ovim = Sdn(db, plugins)
# table nets (internal-vld)
net_id2uuid = {} # for mapping interface with network
+        net_id2index = {}  # for mapping vld id to its index in db_nets
for vld in vnfd.get("internal-vld").values():
net_uuid = str(uuid4())
uuid_list.append(net_uuid)
"type": "bridge", # TODO adjust depending on connection point type
}
net_id2uuid[vld.get("id")] = net_uuid
+ net_id2index[vld.get("id")] = len(db_nets)
db_nets.append(db_net)
# ip-profile, link db_ip_profile with db_sce_net
if vld.get("ip-profile-ref"):
raise KeyError()
if vdu_id in vdu_id2cp_name:
- vdu_id2cp_name[vdu_id] = None # more than two connecdtion point for this VDU
+ vdu_id2cp_name[vdu_id] = None # more than two connection point for this VDU
else:
vdu_id2cp_name[vdu_id] = db_interface["external_name"]
if not icp:
raise KeyError("is not referenced by any 'internal-vld'")
+ # set network type as data
+ if iface.get("virtual-interface") and iface["virtual-interface"].get("type") in \
+ ("SR-IOV", "PCI-PASSTHROUGH"):
+ db_nets[net_id2index[icp_vld.get("id")]]["type"] = "data"
db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
if str(icp_descriptor.get("port-security-enabled")).lower() == "false":
db_interface["port_security"] = 0
else:
update(scenario_net['ip_profile'], ipprofile_db)
- if 'provider-network' in net_instance_desc:
- provider_network_db = net_instance_desc['provider-network']
- if 'provider-network' not in scenario_net:
- scenario_net['provider-network'] = provider_network_db
- else:
- update(scenario_net['provider-network'], provider_network_db)
+ if net_instance_desc.get('provider-network'):
+ provider_network_db = net_instance_desc['provider-network']
+ if 'provider_network' not in scenario_net:
+ scenario_net['provider_network'] = provider_network_db
+ else:
+ update(scenario_net['provider_network'], provider_network_db)
for vdu_id, vdu_instance_desc in vnf_instance_desc.get("vdus", {}).items():
for scenario_vm in scenario_vnf['vms']:
# TODO: use this information during network creation
wim_account_id = wim_account_name = None
if len(involved_datacenters) > 1 and 'uuid' in sce_net:
- if target_wim_account is None or target_wim_account is True: # automatic selection of WIM
+ urls = [myvims[v].url for v in involved_datacenters]
+ if len(set(urls)) < 2:
+ wim_usage[sce_net['uuid']] = False
+ elif target_wim_account is None or target_wim_account is True: # automatic selection of WIM
# OBS: sce_net without uuid are used internally to VNFs
# and the assumption is that VNFs will not be split among
# different datacenters
"created": create_network, # TODO py3
"sdn": True,
})
+
task_wim_extra = {"params": [net_type, wim_account_name]}
+ # add sdn interfaces
+ if sce_net.get('provider_network') and sce_net['provider_network'].get("sdn-ports"):
+ task_wim_extra["sdn-ports"] = sce_net['provider_network'].get("sdn-ports")
db_vim_action = {
"instance_action_id": instance_action_id,
"status": "SCHEDULED",
sce_net2wim_instance = params_out["sce_net2wim_instance"]
vnf_net2instance = {}
+ vnf_net2wim_instance = {}
# 2. Creating new nets (vnf internal nets) in the VIM"
# For each vnf net, we create it and we add it to instanceNetlist.
"created": True, # TODO py3
"sdn": True,
})
+ vnf_net2wim_instance[net_uuid] = sdn_net_id
db_net = {
"uuid": net_uuid,
else:
netDict['net_id'] = "TASK-{}".format(net2task_id[sce_vnf['uuid']][iface['net_id']])
instance_net_id = vnf_net2instance[sce_vnf['uuid']][iface['net_id']]
- instance_wim_net_id = None
+ instance_wim_net_id = vnf_net2wim_instance.get(instance_net_id)
task_depends_on.append(net2task_id[sce_vnf['uuid']][iface['net_id']])
# skip bridge ifaces not connected to any net
if 'net_id' not in netDict or netDict['net_id'] == None:
db_vm_iface_instance.update(db_vm_iface)
if db_vm_iface_instance.get("ip_address"): # increment ip_address
ip = db_vm_iface_instance.get("ip_address")
- i = ip.rfind(".")
- if i > 0:
- try:
+ try:
+ i = ip.rfind(".")
+ if i > 0:
i += 1
ip = ip[i:] + str(int(ip[:i]) + 1)
db_vm_iface_instance["ip_address"] = ip
- except:
- db_vm_iface_instance["ip_address"] = None
+ except:
+ db_vm_iface_instance["ip_address"] = None
db_instance_interfaces.append(db_vm_iface_instance)
myVMDict['networks'][iface_index]["uuid"] = iface_uuid
iface_index += 1
"extra": yaml.safe_dump({"params": vm_interfaces},
default_flow_style=True, width=256)
}
+ # get affected instance_interfaces (deleted on cascade) to check if a wim_network must be updated
+ deleted_interfaces = mydb.get_rows(
+ SELECT=("instance_wim_net_id", ),
+ FROM="instance_interfaces",
+ WHERE={"instance_vm_id": vdu_id, "instance_wim_net_id<>": None},
+ )
+ for deleted_interface in deleted_interfaces:
+ db_vim_actions.append({"TO-UPDATE": {}, "WHERE": {
+ "item": "instance_wim_nets", "item_id": deleted_interface["instance_wim_net_id"]}})
+
task_index += 1
db_vim_actions.append(db_vim_action)
vm_result["deleted"].append(vdu_id)
"uuid": iface_uuid,
'instance_vm_id': vm_uuid,
"instance_net_id": vm_iface["instance_net_id"],
+ "instance_wim_net_id": vm_iface["instance_wim_net_id"],
'interface_id': vm_iface['interface_id'],
'type': vm_iface['type'],
+ 'model': vm_iface['model'],
'floating_ip': vm_iface['floating_ip'],
'port_security': vm_iface['port_security']
}
db_instance_interfaces.append(db_vm_iface)
+ if db_vm_iface["instance_wim_net_id"]:
+ db_vim_actions.append({"TO-UPDATE": {}, "WHERE": {
+ "item": "instance_wim_nets", "item_id": db_vm_iface["instance_wim_net_id"]}})
task_params_copy = deepcopy(task_params)
for iface in task_params_copy[5]:
iface["uuid"] = iface2iface[iface["uuid"]]
# increment ip_address
- if "ip_address" in iface:
- ip = iface.get("ip_address")
- i = ip.rfind(".")
- if i > 0:
- try:
+ if iface.get("ip_address"):
+ try:
+ ip = iface["ip_address"]
+ i = ip.rfind(".")
+ if i > 0:
i += 1
ip = ip[i:] + str(int(ip[:i]) + 1)
iface["ip_address"] = ip
- except:
- iface["ip_address"] = None
+ except:
+ iface["ip_address"] = None
if vm_name:
task_params_copy[0] = vm_name
db_vim_action = {
datacenter_type = datacenter_descriptor.get("type", "openvim");
# module_info = None
+ for url_field in ('vim_url', 'vim_url_admin'):
+ # It is common that users copy and paste the URL from the VIM website
+ # (example OpenStack), therefore a common mistake is to include blank
+ # characters at the end of the URL. Let's remove it and just in case,
+ # lets remove trailing slash as well.
+ url = datacenter_descriptor.get(url_field)
+ if url:
+ datacenter_descriptor[url_field] = url.strip(string.whitespace + '/')
+
# load plugin
plugin_name = "rovim_" + datacenter_type
if plugin_name not in plugins:
if config:
original_config_dict = yaml.load(original_config, Loader=yaml.Loader)
original_config_dict.update(config)
- update["config"] = yaml.safe_dump(original_config_dict, default_flow_style=True, width=256)
+ update_["config"] = yaml.safe_dump(original_config_dict, default_flow_style=True, width=256)
if name:
update_['name'] = name
if vim_tenant:
pci = port.get("pci")
element["switch_port"] = port.get("switch_port")
element["switch_mac"] = port.get("switch_mac")
+ element["switch_dpid"] = port.get("switch_dpid")
+ element["switch_id"] = port.get("switch_id")
if not element["switch_port"] and not element["switch_mac"]:
raise NfvoException ("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
for pci_expanded in utils.expand_brackets(pci):
if "TO-DELETE" in row:
self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
continue
+ if "TO-UPDATE" in row:
+ self._update_rows(table_name, UPDATE=row["TO-UPDATE"], WHERE=row["WHERE"],
+ modified_time=created_time)
+ continue
if table_name in self.tables_with_created_field:
if "created_at" in row:
created_time_param = created_time + (index + row.pop("created_at"))*0.00001
"log_level_console": log_level_schema,
"log_level_ovim": log_level_schema,
"log_level_sdn": log_level_schema,
+ "log_level_sdnconn": log_level_schema,
"log_file_db": path_schema,
"log_file_vim": path_schema,
"log_file_wim": path_schema,
"log_file_console": path_schema,
"log_file_ovim": path_schema,
"log_file_sdn": path_schema,
+ "log_file_sdnconn": path_schema,
"log_socket_host": nameshort_schema,
"log_socket_port": port_schema,
"log_file": path_schema,
"additionalProperties": False
}
-sdn_port_mapping_schema = {
+sdn_port_mapping_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title":"sdn port mapping information schema",
"type": "object",
"properties": {
"pci": {"OneOf": [null_schema, pci_extended_schema]}, # pci_schema,
"switch_port": nameshort_schema,
+ "switch_id": nameshort_schema,
+ "switch_dpid": nameshort_schema,
"switch_mac": mac_schema
},
"required": ["pci"]
#log_file_wim: /opt/openmano/logs/openmano_wimconn.log
#log_level_nfvo: DEBUG #Main engine log levels
#log_file_nfvo: /opt/openmano/logs/openmano_nfvo.log
-log_level_http: DEBUG #Main engine log levels
+#log_level_http: DEBUG #Main engine log levels
#log_file_http: /opt/openmano/logs/openmano_http.log
#log_level_console: DEBUG #proxy console log levels
#log_file_console: /opt/openmano/logs/openmano_console.log
#log_file_ovim: /opt/openmano/logs/openmano_ovim.log
#log_level_sdn: DEBUG
#log_file_sdn: /opt/openmano/logs/openmano_sdn.log
+#log_level_sdnconn: DEBUG
+#log_file_sdnconn: /opt/openmano/logs/openmano_sdnconn.log
#Uncomment to send logs via IP to an external host
#log_socket_host: localhost
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
-__version__ = "7.0.0.post13"
-version_date = "Jan 2019"
+__version__ = "7.1.0.post1"
+version_date = "May 2020"
database_version = 40 # expected database schema version
global global_config
logger.critical("Starting openmano server version: '%s %s' command: '%s'",
ro_version, version_date, " ".join(sys.argv))
- for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim","sdn"):
+ for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim", "sdn", "sdnconn"):
log_level_module = "log_level_" + log_module
log_file_module = "log_file_" + log_module
logger_module = logging.getLogger('openmano.' + log_module)
# get database wim_accounts
wim_account = self._get_of_controller(of_id)
- db_wim_update = {x: ofc_data[x] for x in ("name", "description", "type", "wim_url")}
- db_wim_account_update = {x: ofc_data[x] for x in ("name", "user", "password")}
+ db_wim_update = {x: ofc_data[x] for x in ("name", "description", "type", "wim_url") if x in ofc_data}
+ db_wim_account_update = {x: ofc_data[x] for x in ("name", "user", "password") if x in ofc_data}
db_wim_account_config = ofc_data.get("config", {})
if ofc_data.get("ip") or ofc_data.get("port"):
db_wim_account_config["version"] = ofc_data["version"]
if db_wim_account_config:
- db_wim_account_update["config"] = yaml.load(wim_account["config"]) or {}
+ db_wim_account_update["config"] = yaml.load(wim_account["config"], Loader=yaml.Loader) or {}
db_wim_account_update["config"].update(db_wim_account_config)
+ db_wim_account_update["config"] = yaml.safe_dump(db_wim_account_update["config"], default_flow_style=True,
+ width=256)
if db_wim_account_update:
self.db.update_rows('wim_accounts', db_wim_account_update, WHERE={'uuid': of_id})
if db_wim_update:
- self.db.update_rows('wims', db_wim_account_update, WHERE={'uuid': wim_account["wim_id"]})
+ self.db.update_rows('wims', db_wim_update, WHERE={'uuid': wim_account["wim_id"]})
def _get_of_controller(self, of_id):
wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE={"uuid": of_id, "sdn": "true"})
of_data = {x: wim_account[x] for x in ("uuid", "name", "user")}
if isinstance(wim_account["config"], str):
config = yaml.load(wim_account["config"], Loader=yaml.Loader)
- of_data["dpid"] = config.get("dpid")
+ of_data["dpid"] = config.get("switch_id") or config.get("dpid")
of_data["version"] = config.get("version")
if wim:
of_data["url"] = wim["wim_url"]
wim_id = wim_account["wim_id"]
db_wim_port_mappings = []
for map in maps:
+ _switch_dpid = map.get("switch_id") or map.get("switch_dpid") or switch_dpid
new_map = {
'wim_id': wim_id,
- 'switch_dpid': switch_dpid,
+ 'switch_dpid': _switch_dpid,
"switch_port": map.get("switch_port"),
'datacenter_id': vim_id,
"device_id": map.get("compute_node"),
- "service_endpoint_id": switch_dpid + "-" + str(uuid4())
+ "service_endpoint_id": _switch_dpid + "-" + str(uuid4())
}
if map.get("pci"):
new_map["device_interface_id"] = map["pci"].lower()
else:
map["service_mapping_info"] = {}
return maps
+
+ def get_ports(self, instance_wim_net_id):
+ # get wim_id
+ instance_wim_net = self.db.get_rows(FROM='instance_wim_nets', WHERE={"uuid": instance_wim_net_id})
+ wim_id = instance_wim_net[0]["wim_id"]
+ switch_ports = []
+ ports = self.db.get_rows(FROM='instance_interfaces', WHERE={"instance_wim_net_id": instance_wim_net_id})
+ maps = self.get_of_port_mappings(db_filter={"wim_id": wim_id})
+ for port in ports:
+ map_ = next((x for x in maps if x.get("device_id") == port["compute_node"] and
+ x.get("device_interface_id") == port["pci"]), None)
+ if map_:
+ switch_port = {'switch_dpid': map_.get('switch_dpid') or map_.get('switch_id'),
+ 'switch_port': map_.get('switch_port')}
+ if switch_port not in switch_ports:
+ switch_ports.append(switch_port)
+ return switch_ports
+
copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
if copy_from.get("interfaces"):
copy_to["interfaces"] = copy_from["interfaces"]
+ if copy_from.get("sdn-ports"):
+ copy_to["sdn-ports"] = copy_from["sdn-ports"]
if copy_from.get("created_items"):
if not copy_to.get("created_items"):
copy_to["created_items"] = {}
# look for ports
sdn_ports = []
pending_ports = 0
+ vlan_used = None
ports = self.db.get_rows(FROM='instance_interfaces', WHERE={'instance_wim_net_id': task["item_id"]})
sdn_need_update = False
for port in ports:
+ vlan_used = port.get("vlan") or vlan_used
# TODO. Do not connect if already done
if port.get("compute_node") and port.get("pci"):
- for map in self.port_mappings:
- if map.get("device_id") == port["compute_node"] and \
- map.get("device_interface_id") == port["pci"]:
+ for pmap in self.port_mappings:
+ if pmap.get("device_id") == port["compute_node"] and \
+ pmap.get("device_interface_id") == port["pci"]:
break
else:
if self.sdnconn_config.get("mapping_not_needed"):
- map = {
+ pmap = {
"service_endpoint_id": "{}:{}".format(port["compute_node"], port["pci"]),
"service_endpoint_encapsulation_info": {
"vlan": port["vlan"],
}
}
else:
- map = None
+ pmap = None
error_list.append("Port mapping not found for compute_node={} pci={}".format(
port["compute_node"], port["pci"]))
- if map:
- if port["uuid"] not in connected_ports or port["modified_at"] > last_update:
+ if pmap:
+ if port["modified_at"] > last_update:
sdn_need_update = True
new_connected_ports.append(port["uuid"])
sdn_ports.append({
- "service_endpoint_id": map["service_endpoint_id"],
+ "service_endpoint_id": pmap["service_endpoint_id"],
"service_endpoint_encapsulation_type": "dot1q" if port["model"] == "SR-IOV" else None,
"service_endpoint_encapsulation_info": {
"vlan": port["vlan"],
"mac": port["mac_address"],
- "device_id": map.get("device_id"),
- "device_interface_id": map.get("device_interface_id"),
- "switch_dpid": map.get("switch_dpid"),
- "switch_port": map.get("switch_port"),
- "service_mapping_info": map.get("service_mapping_info"),
+ "device_id": pmap.get("device_id"),
+ "device_interface_id": pmap.get("device_interface_id"),
+ "switch_dpid": pmap.get("switch_dpid"),
+ "switch_port": pmap.get("switch_port"),
+ "service_mapping_info": pmap.get("service_mapping_info"),
}
})
if pending_ports:
error_list.append("Waiting for getting interfaces location from VIM. Obtained '{}' of {}"
.format(len(ports)-pending_ports, len(ports)))
+
+ # connect external ports
+ for index, external_port in enumerate(task["extra"].get("sdn-ports") or ()):
+ external_port_id = external_port.get("service_endpoint_id") or str(index)
+ sdn_ports.append({
+ "service_endpoint_id": external_port_id,
+ "service_endpoint_encapsulation_type": external_port.get("service_endpoint_encapsulation_type",
+ "dot1q"),
+ "service_endpoint_encapsulation_info": {
+ "vlan": external_port.get("vlan") or vlan_used,
+ "mac": external_port.get("mac_address"),
+ "device_id": external_port.get("device_id"),
+ "device_interface_id": external_port.get("device_interface_id"),
+ "switch_dpid": external_port.get("switch_dpid") or external_port.get("switch_id"),
+ "switch_port": external_port.get("switch_port"),
+ "service_mapping_info": external_port.get("service_mapping_info"),
+ }})
+ new_connected_ports.append(external_port_id)
+
# if there are more ports to connect or they have been modified, call create/update
- if sdn_need_update and len(sdn_ports) >= 2:
+ if (set(connected_ports) != set(new_connected_ports) or sdn_need_update) and len(sdn_ports) >= 2:
+ last_update = time.time()
if not wimconn_net_id:
if params[0] == "data":
net_type = "ELAN"
else:
created_items = self.sdnconnector.edit_connectivity_service(wimconn_net_id, conn_info=created_items,
connection_points=sdn_ports)
- last_update = time.time()
connected_ports = new_connected_ports
elif wimconn_net_id:
try:
"""
import json
import logging
+import string
from contextlib import contextmanager
from hashlib import sha1
from itertools import groupby
if "config" in wim_descriptor:
wim_descriptor["config"] = _serialize(wim_descriptor["config"])
+ url = wim_descriptor["wim_url"]
+ wim_descriptor["wim_url"] = url.strip(string.whitespace + "/")
+ # ^ This avoids the common problem caused by trailing spaces/slashes in
+ # the URL (due to CTRL+C/CTRL+V)
+
return self.db.new_row(
"wims", wim_descriptor, add_uuid=True, confidential_data=True)
)
# WIM -------------------------------------------------------------------------
-wim_types = ["tapi", "onos", "onos_vpls", "odl", "dynpac", "fake"]
+wim_types = ["tapi", "onos", "onos_vpls", "odl", "dynpac", "dummy"]
dpid_type = {
"type": "string",
"description": description_schema,
"type": {
"type": "string",
- # "enum": ["tapi", "onos", "odl", "dynpac", "fake"]
+ # "enum": ["tapi", "onos", "odl", "dynpac", "dummy"]
},
"wim_url": description_schema,
"config": {
wim_id: (internal, do not use)
:param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used.
"""
- self.logger = logger or logging.getLogger('openmano.sdn.sdnconn')
+ self.logger = logger or logging.getLogger('openmano.sdnconn')
self.wim = wim
self.wim_account = wim_account
return merge_dicts(wan_port_mapping, service_mapping_info=mapping)
def _get_port_sdn(self, ovim, instance_net):
- criteria = {'net_id': instance_net['sdn_net_id']}
try:
- local_port_mapping = ovim.get_ports(filter=criteria)
+ local_port_mapping = ovim.get_ports(instance_net['sdn_net_id'])
if local_port_mapping:
return (local_port_mapping[0]['switch_dpid'],
except: # noqa
self.logger.exception('Problems when calling OpenVIM')
- self.logger.debug('No ports found using criteria:\n%r\n.', criteria)
+ self.logger.debug("No ports found for sdn_net_id='%s'", instance_net['sdn_net_id'])
return None
def _evaluate_rules(self, rules, vim_info):
)
from .failing_connector import FailingConnector
from .sdnconn import SdnConnectorError
-from .wimconn_fake import FakeConnector
+from .wimconn_dummy import DummyConnector
ACTIONS = {
'instance_wim_nets': wan_link_actions.ACTIONS
CONNECTORS = {
# "odl": wimconn_odl.OdlConnector,
- "fake": FakeConnector,
+ "dummy": DummyConnector,
# Add extra connectors here not managed via plugins
}
wim_account['name'], wim_account['uuid'])
super(WimThread, self).__init__(name=name)
self.plugins = plugins
- if "rosdn_fake" not in self.plugins:
- self.plugins["rosdn_fake"] = FakeConnector
+ if "rosdn_dummy" not in self.plugins:
+ self.plugins["rosdn_dummy"] = DummyConnector
self.name = name
self.connector = None
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This WIM does nothing and allows using it for testing and when no WIM is needed
+"""
+
+import logging
+from uuid import uuid4
+from .sdnconn import SdnConnectorBase, SdnConnectorError
+from http import HTTPStatus
+__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
+
+
+class DummyConnector(SdnConnectorBase):
+ """Dummy SDN/WIM connector that keeps connectivity services in memory, for testing
+
+ Arguments:
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ config (dict): optional persistent information related to an specific
+ connector. Inside this dict, a special key,
+ ``service_endpoint_mapping`` provides the internal endpoint
+ mapping.
+ logger (logging.Logger): optional logger object. If none is passed
+ ``openmano.sdnconn.dummy`` is used.
+
+ The arguments of the constructor are converted to object attributes.
+ An extra property, ``service_endpoint_mapping`` is created from ``config``.
+ """
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ self.logger = logger or logging.getLogger('openmano.sdnconn.dummy')
+ super(DummyConnector, self).__init__(wim, wim_account, config, self.logger)
+ self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
+ self.connections = {}
+ self.counter = 0
+
+ def check_credentials(self):
+ """Check if the connector itself can access the WIM.
+
+ Raises:
+ SdnConnectorError: Issues regarding authorization, access to
+ external URLs, etc are detected.
+ """
+ self.logger.debug("check_credentials")
+ return None
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service established
+
+ Arguments:
+ service_uuid (str): UUID of the connectivity service
+ conn_info (dict or None): Information returned by the connector
+ during the service creation/edition and subsequently stored in
+ the database.
+
+ Returns:
+ dict: JSON/YAML-serializable dict that contains a mandatory key
+ ``sdn_status`` (this dummy connector always reports ``ACTIVE``).
+
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
+ keys can be used to provide additional status explanation or
+ new information available for the connectivity service.
+ """
+ self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
+ conn_info))
+ return {'sdn_status': 'ACTIVE', 'sdn_info': self.connections.get(service_uuid)}
+
+ def create_connectivity_service(self, service_type, connection_points,
+ **kwargs):
+ """
+ Establish WAN connectivity between the endpoints
+
+ """
+ self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
+ format(service_type, connection_points, kwargs))
+ _id = str(uuid4())
+ self.connections[_id] = connection_points.copy()
+ self.counter += 1
+ return _id, None
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """Disconnect multi-site endpoints previously connected
+
+ """
+ self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
+ conn_info))
+ if service_uuid not in self.connections:
+ raise SdnConnectorError("connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ self.connections.pop(service_uuid, None)
+ return None
+
+ def edit_connectivity_service(self, service_uuid, conn_info=None,
+ connection_points=None, **kwargs):
+ """Change an existing connectivity service.
+
+ This method's arguments and return value follow the same convention as
+ :meth:`~.create_connectivity_service`.
+ """
+ self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
+ "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
+ if service_uuid not in self.connections:
+ raise SdnConnectorError("connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ self.connections[service_uuid] = connection_points.copy()
+ return None
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links in a WIM.
+
+ This method is intended for debugging only, and should delete all the
+ connections controlled by the WIM, not only the WIM connections that
+ a specific RO is aware of.
+
+ """
+ self.logger.debug("clear_all_connectivity_services")
+ self.connections.clear()
+ return None
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM.
+
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ self.logger.debug("get_all_active_connectivity_services")
+ return self.connections
+++ /dev/null
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 Telefonica
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This WIM does nothing and allows using it for testing and when no WIM is needed
-"""
-
-import logging
-from uuid import uuid4
-from .sdnconn import SdnConnectorBase, SdnConnectorError
-from http import HTTPStatus
-__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
-
-
-class FakeConnector(SdnConnectorBase):
- """Abstract base class for all the WIM connectors
-
- Arguments:
- wim (dict): WIM record, as stored in the database
- wim_account (dict): WIM account record, as stored in the database
- config (dict): optional persistent information related to an specific
- connector. Inside this dict, a special key,
- ``service_endpoint_mapping`` provides the internal endpoint
- mapping.
- logger (logging.Logger): optional logger object. If none is passed
- ``openmano.wim.wimconn`` is used.
-
- The arguments of the constructor are converted to object attributes.
- An extra property, ``service_endpoint_mapping`` is created from ``config``.
- """
- def __init__(self, wim, wim_account, config=None, logger=None):
- self.logger = logger or logging.getLogger('openmano.sdnconn.fake')
- super(FakeConnector, self).__init__(wim, wim_account, config, self.logger)
- self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
- self.connections = {}
- self.counter = 0
-
- def check_credentials(self):
- """Check if the connector itself can access the WIM.
-
- Raises:
- SdnConnectorError: Issues regarding authorization, access to
- external URLs, etc are detected.
- """
- self.logger.debug("check_credentials")
- return None
-
- def get_connectivity_service_status(self, service_uuid, conn_info=None):
- """Monitor the status of the connectivity service established
-
- Arguments:
- service_uuid (str): UUID of the connectivity service
- conn_info (dict or None): Information returned by the connector
- during the service creation/edition and subsequently stored in
- the database.
-
- Returns:
- dict: JSON/YAML-serializable dict that contains a mandatory key
- ``sdn_status`` associated with one of the following values::
-
- Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
- keys can be used to provide additional status explanation or
- new information available for the connectivity service.
- """
- self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
- conn_info))
- return {'sdn_status': 'ACTIVE', 'sdn_info': self.connectivity.get(service_uuid)}
-
- def create_connectivity_service(self, service_type, connection_points,
- **kwargs):
- """
- Stablish WAN connectivity between the endpoints
-
- """
- self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
- format(service_type, connection_points, kwargs))
- _id = str(uuid4())
- self.connections[_id] = connection_points.copy()
- self.counter += 1
- return _id, None
-
- def delete_connectivity_service(self, service_uuid, conn_info=None):
- """Disconnect multi-site endpoints previously connected
-
- """
- self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
- conn_info))
- if service_uuid not in self.connections:
- raise SdnConnectorError("connectivity {} not found".format(service_uuid),
- http_code=HTTPStatus.NOT_FOUND.value)
- self.connections.pop(service_uuid, None)
- return None
-
- def edit_connectivity_service(self, service_uuid, conn_info=None,
- connection_points=None, **kwargs):
- """Change an existing connectivity service.
-
- This method's arguments and return value follow the same convention as
- :meth:`~.create_connectivity_service`.
- """
- self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
- "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
- if service_uuid not in self.connections:
- raise SdnConnectorError("connectivity {} not found".format(service_uuid),
- http_code=HTTPStatus.NOT_FOUND.value)
- self.connections[service_uuid] = connection_points.copy()
- return None
-
- def clear_all_connectivity_services(self):
- """Delete all WAN Links in a WIM.
-
- This method is intended for debugging only, and should delete all the
- connections controlled by the WIM, not only the WIM connections that
- a specific RO is aware of.
-
- """
- self.logger.debug("clear_all_connectivity_services")
- self.connections.clear()
- return None
-
- def get_all_active_connectivity_services(self):
- """Provide information about all active connections provisioned by a
- WIM.
-
- Raises:
- SdnConnectorException: In case of error.
- """
- self.logger.debug("get_all_active_connectivity_services")
- return self.connections
_url = 'https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary'
_requirements = [
# TODO py3 revise
- "osm-im",
+ "osm-im @ git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im",
"PyYAML",
"bottle",
"logutils",
scripts=['osm_ro/scripts/RO-start.sh'
#'openmanod', 'openmano', 'osm_ro/scripts/service-openmano', 'osm_ro/scripts/openmano-report',
],
- dependency_links=["git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im"],
+ # dependency_links=["git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm_im"],
install_requires=_requirements,
include_package_data=True,
setup_requires=['setuptools-version-command'],
MAINTAINER Alfonso Tierno <alfonso.tiernosepulveda@telefoncia.com>
RUN apt-get update && apt-get -y install curl software-properties-common git tox python3-pip \
- && python3 -m pip install --upgrade pip && python3 -m pip install pyangbind
+ && python3 -m pip install --upgrade pip && python3 -m pip install pyangbind networking-l2gw
ARG REPOSITORY_BASE=http://osm-download.etsi.org/repository/osm/debian
ARG RELEASE=ReleaseSIX-daily