FROM ubuntu:16.04
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make python-all python3 python3-pip debhelper wget && \
- DEBIAN_FRONTEND=noninteractive apt-get --yes install python3-all libssl-dev && \
+ DEBIAN_FRONTEND=noninteractive apt-get --yes install python3-all libssl-dev flake8 && \
DEBIAN_FRONTEND=noninteractive pip3 install -U setuptools setuptools-version-command stdeb
# FROM ubuntu:16.04
# This is not needed, because package dependencies will install them anyway.
# But it is done here in order to speed up image generation using the cache
-
RUN DEBIAN_FRONTEND=noninteractive apt-get -y install python3-neutronclient python3-openstackclient \
python3-requests python3-netaddr python3-argcomplete
python3 -m pip install -e /root/RO/RO-VIM-openvim && \
python3 -m pip install -e /root/RO/RO-VIM-aws && \
python3 -m pip install -e /root/RO/RO-VIM-fos && \
+ python3 -m pip install -e /root/RO/RO-SDN-dynpac && \
+ python3 -m pip install -e /root/RO/RO-SDN-tapi && \
rm -rf /root/.cache && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+ rm -rf dist deb_dist osm_rosdn_dynpac-*.tar.gz osm_rosdn_dynpac.egg-info .eggs
+
+package:
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ cd deb_dist/osm-rosdn-dynpac*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2018 David García, University of the Basque Country
+# Copyright 2018 University of the Basque Country
+# This file is part of openmano
+# All Rights Reserved.
+# Contact information at http://i2t.ehu.eus
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import json
+import logging
+from enum import Enum
+
+from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError
+
+
+class SdnError(Enum):
+ UNREACHABLE = 'Unable to reach the WIM.'
+ SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.'
+ CONNECTION_POINTS_SIZE = \
+ 'Unexpected number of connection points: 2 expected.'
+ ENCAPSULATION_TYPE = \
+ 'Unexpected service_endpoint_encapsulation_type. \
+ Only "dot1q" is accepted.'
+ BANDWIDTH = 'Unable to get the bandwidth.'
+ STATUS = 'Unable to get the status for the service.'
+ DELETE = 'Unable to delete service.'
+ CLEAR_ALL = 'Unable to clear all the services'
+ UNKNOWN_ACTION = 'Unknown action invoked.'
+ BACKUP = 'Unable to get the backup parameter.'
+ UNSUPPORTED_FEATURE = "Unsupported feature"
+ UNAUTHORIZED = "Failed while authenticating"
+
+
+class SdnAPIActions(Enum):
+ CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY"
+ CREATE_SERVICE = "CREATE_SERVICE"
+ DELETE_SERVICE = "DELETE_SERVICE"
+ CLEAR_ALL = "CLEAR_ALL"
+ SERVICE_STATUS = "SERVICE_STATUS"
+
+
+class DynpacConnector(SdnConnectorBase):
+ __supported_service_types = ["ELINE (L2)", "ELINE"]
+ __supported_encapsulation_types = ["dot1q"]
+ __WIM_LOGGER = 'openmano.sdnconn.dynpac'
+ __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
+ __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
+ __BACKUP_PARAM = "backup"
+ __BANDWIDTH_PARAM = "bandwidth"
+ __SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
+ __WAN_SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
+ __WAN_MAPPING_INFO_PARAM = "service_mapping_info"
+ __SW_ID_PARAM = "switch_dpid"
+ __SW_PORT_PARAM = "switch_port"
+ __VLAN_PARAM = "vlan"
+
+ # Public functions exposed to the Resource Orchestrator
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ self.logger = logger or logging.getLogger(self.__WIM_LOGGER)
+ super().__init__(wim, wim_account, config, self.logger)
+ self.__wim = wim
+ self.__wim_account = wim_account
+ self.__config = config
+ self.__wim_url = self.__wim.get("wim_url")
+ self.__user = wim_account.get("user")
+ self.__passwd = wim_account.get("password")
+ self.logger.info("Initialized.")
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ self.__check_service(service_type, connection_points, kwargs)
+
+ body = self.__get_body(service_type, connection_points, kwargs)
+
+ headers = {'Content-type': 'application/x-www-form-urlencoded'}
+ endpoint = "{}/service/create".format(self.__wim_url)
+
+ try:
+ response = requests.post(endpoint, data=body, headers=headers)
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+
+ if response.status_code != 200:
+ error = json.loads(response.content)
+ reason = "Reason: {}. ".format(error.get("code"))
+ description = "Description: {}.".format(error.get("description"))
+ exception = reason + description
+ self.__exception(exception, http_code=response.status_code)
+ uuid = response.content
+ self.logger.info("Service with uuid {} created.".format(uuid))
+ return (uuid, None)
+
+ def edit_connectivity_service(self, service_uuid,
+ conn_info, connection_points,
+ **kwargs):
+ self.__exception(SdnError.UNSUPPORTED_FEATURE, http_code=501)
+
+ def get_connectivity_service_status(self, service_uuid):
+ endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid)
+ try:
+ response = requests.get(endpoint)
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+
+ if response.status_code != 200:
+ self.__exception(SdnError.STATUS, http_code=response.status_code)
+ self.logger.info("Status for service with uuid {}: {}"
+ .format(service_uuid, response.content))
+ return response.content
+
+ def delete_connectivity_service(self, service_uuid, conn_info):
+ endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid)
+ try:
+ response = requests.delete(endpoint)
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+ if response.status_code != 200:
+ self.__exception(SdnError.DELETE, http_code=response.status_code)
+
+ self.logger.info("Service with uuid: {} deleted".format(service_uuid))
+
+ def clear_all_connectivity_services(self):
+ endpoint = "{}/service/clearAll".format(self.__wim_url)
+ try:
+ response = requests.delete(endpoint)
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+ if http_code != 200:
+ self.__exception(SdnError.CLEAR_ALL, http_code=http_code)
+
+ self.logger.info("{} services deleted".format(response.content))
+ return "{} services deleted".format(response.content)
+
+ def check_connectivity(self):
+ endpoint = "{}/checkConnectivity".format(self.__wim_url)
+
+ try:
+ response = requests.get(endpoint)
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+
+ if http_code != 200:
+ self.__exception(SdnError.UNREACHABLE, http_code=http_code)
+ self.logger.info("Connectivity checked")
+
+ def check_credentials(self):
+ endpoint = "{}/checkCredentials".format(self.__wim_url)
+ auth = (self.__user, self.__passwd)
+
+ try:
+ response = requests.get(endpoint, auth=auth)
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ self.__exception(str(e), http_code=503)
+
+ if http_code != 200:
+ self.__exception(SdnError.UNAUTHORIZED, http_code=http_code)
+ self.logger.info("Credentials checked")
+
+ # Private functions
+ def __exception(self, x, **kwargs):
+ http_code = kwargs.get("http_code")
+ if hasattr(x, "value"):
+ error = x.value
+ else:
+ error = x
+ self.logger.error(error)
+ raise SdnConnectorError(error, http_code=http_code)
+
+ def __check_service(self, service_type, connection_points, kwargs):
+ if service_type not in self.__supported_service_types:
+ self.__exception(SdnError.SERVICE_TYPE_ERROR, http_code=400)
+
+ if len(connection_points) != 2:
+ self.__exception(SdnError.CONNECTION_POINTS_SIZE, http_code=400)
+
+ for connection_point in connection_points:
+ enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM)
+ if enc_type not in self.__supported_encapsulation_types:
+ self.__exception(SdnError.ENCAPSULATION_TYPE, http_code=400)
+
+ # Commented out until the parameter is implemented
+ # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
+ # if not isinstance(bandwidth, int):
+ # self.__exception(SdnError.BANDWIDTH, http_code=400)
+
+ # Commented out until the parameter is implemented
+ # backup = kwargs.get(self.__BACKUP_PARAM)
+ # if not isinstance(backup, bool):
+ # self.__exception(SdnError.BACKUP, http_code=400)
+
+ def __get_body(self, service_type, connection_points, kwargs):
+ port_mapping = self.__config.get("service_endpoint_mapping")
+ selected_ports = []
+ for connection_point in connection_points:
+ endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
+ port = next(p for p in port_mapping if p.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id)
+ port_info = port.get(self.__WAN_MAPPING_INFO_PARAM)
+ selected_ports.append(port_info)
+ if service_type == "ELINE (L2)" or service_type == "ELINE":
+ service_type = "L2"
+ body = {
+ "connection_points": [{
+ "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
+ "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
+ "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
+ }, {
+ "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
+ "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
+ "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
+ }],
+ "bandwidth": 100, # Hardcoded for as long as parameter isn't implemented
+ "service_type": service_type,
+ "backup": False # Hardcoded for as long as parameter isn't implemented
+ }
+ return "body={}".format(json.dumps(body))
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_dynpac"
+
+README = """
+================
+osm-rosdn_dynpac
+================
+
+osm-ro plugin for dynpac SDN
+"""
+
+setup(
+ name=_name,
+ description='OSM ro sdn plugin for dynpac',
+ long_description=README,
+ version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ # version=VERSION,
+ # python_requires='>3.5.0',
+ author='ETSI OSM',
+ # TODO py3 author_email='',
+ maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3
+ # TODO py3 maintainer_email='',
+ url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+ license='Apache 2.0',
+
+ packages=[_name],
+ include_package_data=True,
+ dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+ install_requires=["requests", "osm-ro"],
+ setup_requires=['setuptools-version-command'],
+ entry_points={
+ 'osm_rosdn.plugins': ['rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector'],
+ },
+)
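+
+# Illustrative sketch (not executed): how a plugin registered under the
+# 'osm_rosdn.plugins' entry-point group above can be discovered with pkg_resources.
+# The RO engine's real loader may differ; this only shows the standard mechanism.
+# import pkg_resources
+# for entry_point in pkg_resources.iter_entry_points('osm_rosdn.plugins'):
+#     plugin = entry_point.load()  # e.g. the DynpacConnector class for 'rosdn_dynpac'
+#     print(entry_point.name, plugin)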
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-osm-ro
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rosdn_dynpac --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rosdn_dynpac.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
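+# Illustrative usage (assuming tox is installed):
+#   tox -e flake8    # lint osm_rosdn_dynpac
+#   tox -e build     # build the Debian package via stdeb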
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+ rm -rf dist deb_dist osm_rosdn_onosof-*.tar.gz osm_rosdn_onosof.egg-info .eggs
+
+package:
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ cd deb_dist/osm-rosdn-onosof*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016, I2T Research Group (UPV/EHU)
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: alaitz.mendiola@ehu.eus or alaitz.mendiola@gmail.com
+##
+
+'''
+Implements the plugin for the Open Network Operating System (ONOS) openflow
+controller. It creates the class OfConnOnos to create dataplane connections
+with static rules based on packet destination MAC address.
+'''
+
+__author__="Alaitz Mendiola"
+__date__ ="$22-nov-2016$"
+
+
+import json
+import requests
+import base64
+import logging
+from osm_ro.wim.openflow_conn import OpenflowConn, OpenflowConnException, OpenflowConnConnectionException, \
+ OpenflowConnUnexpectedResponse, OpenflowConnAuthException, OpenflowConnNotFoundException, \
+ OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented
+
+
+class OfConnOnos(OpenflowConn):
+ """
+ ONOS connector. No MAC learning is used
+ """
+ def __init__(self, params):
+ """ Constructor.
+ Params: dictionary with the following keys:
+ of_dpid: DPID of the switch this controller manages
+ of_url: must be http://HOST:PORT/
+ of_user: user credentials, can be missing or None
+ of_password: password credentials
+ of_debug: debug level for logging. Defaults to ERROR
+ other keys are ignored
+ Raise an exception if some parameter is missing or wrong
+ """
+
+ OpenflowConn.__init__(self, params)
+
+ # check params
+ url = params.get("of_url")
+ if not url:
+ raise ValueError("'url' must be provided")
+ if not url.startswith("http"):
+ url = "http://" + url
+ if not url.endswith("/"):
+ url = url + "/"
+ self.url = url + "onos/v1/"
+
+ # internal variables
+ self.name = "onosof"
+ self.headers = {'content-type': 'application/json', 'accept': 'application/json'}
+
+ self.auth = "None"
+ self.pp2ofi = {}  # From Physical Port to OpenFlow Index
+ self.ofi2pp = {}  # From OpenFlow Index to Physical Port
+
+ self.dpid = str(params["of_dpid"])
+ self.id = 'of:'+str(self.dpid.replace(':', ''))
+
+ # TODO This may not be straightforward
+ if params.get("of_user"):
+ of_password=params.get("of_password", "")
+ self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8"))
+ self.auth = self.auth.decode()
+ self.headers['authorization'] = 'Basic ' + self.auth
+
+ self.logger = logging.getLogger('vim.OF.onos')
+ self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
+ self.ip_address = None
+
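+ # Illustrative sketch (not executed): hypothetical params dict matching the
+ # constructor docstring above; only the key names come from this class, the
+ # values are made up.
+ # params = {"of_url": "http://127.0.0.1:8181",
+ #           "of_dpid": "00:00:00:00:00:00:00:01",
+ #           "of_user": "onos", "of_password": "rocks",
+ #           "of_debug": "DEBUG"}
+ # connector = OfConnOnos(params)
+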
+ def get_of_switches(self):
+ """
+ Obtain a list of switches or DPIDs detected by this controller
+ :return: list where each element is a tuple pair (DPID, IP address)
+ Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+ try:
+ self.headers['content-type'] = 'text/plain'
+ of_response = requests.get(self.url + "devices", headers=self.headers)
+ error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("get_of_switches " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ self.logger.debug("get_of_switches " + error_text)
+ info = of_response.json()
+
+ if type(info) != dict:
+ self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
+ raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+
+ node_list = info.get('devices')
+
+ if type(node_list) is not list:
+ self.logger.error(
+ "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s",
+ str(type(node_list)))
+ raise OpenflowConnUnexpectedResponse("Unexpected response, at 'devices', not found "
+ "or not a list. Wrong version?")
+
+ switch_list = []
+ for node in node_list:
+ node_id = node.get('id')
+ if node_id is None:
+ self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s",
+ str(node))
+ raise OpenflowConnUnexpectedResponse("Unexpected response at 'device':'id', "
+ "not found . Wrong version?")
+
+ node_ip_address = node.get('annotations').get('managementAddress')
+ if node_ip_address is None:
+ self.logger.error(
+ "get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s",
+ str(node))
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'device':'managementAddress', not found. Wrong version?")
+
+ node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
+
+ switch_list.append(
+ (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
+ return switch_list
+
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_switches " + error_text)
+ raise OpenflowConnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_switches " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ def obtain_port_correspondence(self):
+ """
+ Obtain the correspondence between physical and openflow port names
+ :return: dictionary with physical name as key, openflow name as value
+ Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+ try:
+ self.headers['content-type'] = 'text/plain'
+ of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers)
+ error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ if of_response.status_code != 200:
+ self.logger.warning("obtain_port_correspondence " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ self.logger.debug("obtain_port_correspondence " + error_text)
+ info = of_response.json()
+
+ node_connector_list = info.get('ports')
+ if type(node_connector_list) is not list:
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s",
+ str(node_connector_list))
+ raise OpenflowConnUnexpectedResponse("Unexpected response at 'ports', not found or not "
+ "a list. Wrong version?")
+
+ for node_connector in node_connector_list:
+ if node_connector['port'] != "local":
+ self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port'])
+ self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName'])
+
+ node_ip_address = info['annotations']['managementAddress']
+ if node_ip_address is None:
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s",
+ str(self.id))
+ raise OpenflowConnUnexpectedResponse("Unexpected response at 'managementAddress', "
+ "not found. Wrong version?")
+ self.ip_address = node_ip_address
+
+ # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
+ return self.pp2ofi
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("obtain_port_correspondence " + error_text)
+ raise OpenflowConnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("obtain_port_correspondence " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ def get_of_rules(self, translate_of_ports=True):
+ """
+ Obtain the rules inserted at openflow controller
+ :param translate_of_ports: if True it translates ports from openflow index to physical switch name
+ :return: list where each item is a dictionary with the following content:
+ priority: rule priority
+ name: rule name
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, can be missing or None if it does not apply
+ vlan_id: match vlan tag of the rule, can be missing or None if it does not apply
+ actions: list of actions, composed of pair tuples:
+ (vlan, None/int): for stripping/setting a vlan tag
+ (out, port): send to this port
+ switch: DPID, all
+ Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+
+ try:
+
+ if len(self.ofi2pp) == 0:
+ self.obtain_port_correspondence()
+
+ # get rules
+ self.headers['content-type'] = 'text/plain'
+ of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers)
+ error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+
+ # The configured page does not exist if there are no rules installed. In that case we return an empty list
+ if of_response.status_code == 404:
+ return []
+
+ elif of_response.status_code != 200:
+ self.logger.warning("get_of_rules " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+ self.logger.debug("get_of_rules " + error_text)
+
+ info = of_response.json()
+
+ if type(info) != dict:
+ self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info))
+ raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. "
+ "Wrong version?")
+
+ flow_list = info.get('flows')
+
+ if flow_list is None:
+ return []
+
+ if type(flow_list) is not list:
+ self.logger.error(
+ "get_of_rules. Unexpected response at 'flows', not a list: %s",
+ str(type(flow_list)))
+ raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. "
+ "Wrong version?")
+
+ rules = [] # Response list
+ for flow in flow_list:
+ if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \
+ 'instructions' in flow['treatment'] and 'criteria' in \
+ flow['selector']):
+ raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more "
+ "elements are missing. Wrong version?")
+
+ rule = dict()
+ rule['switch'] = self.dpid
+ rule['priority'] = flow.get('priority')
+ rule['name'] = flow['id']
+
+ for criteria in flow['selector']['criteria']:
+ if criteria['type'] == 'IN_PORT':
+ in_port = str(criteria['port'])
+ if in_port != "CONTROLLER":
+ if in_port not in self.ofi2pp:
+ raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not "
+ "in switch port list".format(in_port))
+ if translate_of_ports:
+ in_port = self.ofi2pp[in_port]
+ rule['ingress_port'] = in_port
+
+ elif criteria['type'] == 'VLAN_VID':
+ rule['vlan_id'] = criteria['vlanId']
+
+ elif criteria['type'] == 'ETH_DST':
+ rule['dst_mac'] = str(criteria['mac']).lower()
+
+ actions = []
+ for instruction in flow['treatment']['instructions']:
+ if instruction['type'] == "OUTPUT":
+ out_port = str(instruction['port'])
+ if out_port != "CONTROLLER":
+ if out_port not in self.ofi2pp:
+ raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in "
+ "switch port list".format(out_port))
+
+ if translate_of_ports:
+ out_port = self.ofi2pp[out_port]
+
+ actions.append( ('out', out_port) )
+
+ if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP":
+ actions.append( ('vlan', 'None') )
+ if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID":
+ actions.append( ('vlan', instruction['vlanId']) )
+
+ rule['actions'] = actions
+ rules.append(rule)
+ return rules
+
+ except requests.exceptions.RequestException as e:
+ # RequestException in case the connection to the controller fails
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_rules " + error_text)
+ raise OpenflowConnConnectionException(error_text)
+ except ValueError as e:
+ # ValueError in the case that JSON can not be decoded
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("get_of_rules " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ def del_flow(self, flow_name):
+ """
+ Delete an existing rule
+ :param flow_name: ONOS flow identifier (the rule name)
+ :return: None. Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+
+ try:
+ self.headers['content-type'] = None
+ of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers)
+ error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+
+ if of_response.status_code != 204:
+ self.logger.warning("del_flow " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ self.logger.debug("del_flow OK " + error_text)
+ return None
+
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("del_flow " + error_text)
+ raise OpenflowConnConnectionException(error_text)
+
+ def new_flow(self, data):
+ """
+ Insert a new static rule
+ :param data: dictionary with the following content:
+ priority: rule priority
+ name: rule name
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, missing or None if it does not apply
+ vlan_id: match vlan tag of the rule, missing or None if it does not apply
+ actions: list of actions, composed of pair tuples with these possibilities:
+ ('vlan', None/int): for stripping/setting a vlan tag
+ ('out', port): send to this port
+ :return: None. Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+ try:
+
+ if len(self.pp2ofi) == 0:
+ self.obtain_port_correspondence()
+
+ # Build the dictionary with the flow rule information for ONOS
+ flow = dict()
+ #flow['id'] = data['name']
+ flow['tableId'] = 0
+ flow['priority'] = data.get('priority')
+ flow['timeout'] = 0
+ flow['isPermanent'] = "true"
+ flow['appId'] = 10 # FIXME We should create an appId for OSM
+ flow['selector'] = dict()
+ flow['selector']['criteria'] = list()
+
+ # Flow rule matching criteria
+ if data['ingress_port'] not in self.pp2ofi:
+ error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+ self.logger.warning("new_flow " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ ingress_port_criteria = dict()
+ ingress_port_criteria['type'] = "IN_PORT"
+ ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']]
+ flow['selector']['criteria'].append(ingress_port_criteria)
+
+ if 'dst_mac' in data:
+ dst_mac_criteria = dict()
+ dst_mac_criteria["type"] = "ETH_DST"
+ dst_mac_criteria["mac"] = data['dst_mac']
+ flow['selector']['criteria'].append(dst_mac_criteria)
+
+ if data.get('vlan_id'):
+ vlan_criteria = dict()
+ vlan_criteria["type"] = "VLAN_VID"
+ vlan_criteria["vlanId"] = int(data['vlan_id'])
+ flow['selector']['criteria'].append(vlan_criteria)
+
+ # Flow rule treatment
+ flow['treatment'] = dict()
+ flow['treatment']['instructions'] = list()
+ flow['treatment']['deferred'] = list()
+
+ for action in data['actions']:
+ new_action = dict()
+ if action[0] == "vlan":
+ new_action['type'] = "L2MODIFICATION"
+ if action[1] is None:
+ new_action['subtype'] = "VLAN_POP"
+ else:
+ new_action['subtype'] = "VLAN_ID"
+ new_action['vlanId'] = int(action[1])
+ elif action[0] == 'out':
+ new_action['type'] = "OUTPUT"
+ if action[1] not in self.pp2ofi:
+ error_msj = 'Port ' + action[1] + ' is not present in the switch'
+ raise OpenflowConnUnexpectedResponse(error_msj)
+ new_action['port'] = self.pp2ofi[action[1]]
+ else:
+ error_msj = "Unknown item '%s' in action list" % action[0]
+ self.logger.error("new_flow " + error_msj)
+ raise OpenflowConnUnexpectedResponse(error_msj)
+
+ flow['treatment']['instructions'].append(new_action)
+
+ self.headers['content-type'] = 'application/json'
+ path = self.url + "flows/" + self.id
+ of_response = requests.post(path, headers=self.headers, data=json.dumps(flow) )
+
+ error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ if of_response.status_code != 201:
+ self.logger.warning("new_flow " + error_text)
+ raise OpenflowConnUnexpectedResponse(error_text)
+
+ flowId = of_response.headers['location'][len(path) + 1:]
+
+ data['name'] = flowId
+
+ self.logger.debug("new_flow OK " + error_text)
+ return None
+
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("new_flow " + error_text)
+ raise OpenflowConnConnectionException(error_text)
+
+ def clear_all_flows(self):
+ """
+ Delete all existing rules
+ :return: None. Raise an OpenflowConnUnexpectedResponse exception in case of failure
+ """
+ try:
+ rules = self.get_of_rules(True)
+
+ for rule in rules:
+ self.del_flow(str(rule['name']))
+
+ self.logger.debug("clear_all_flows OK ")
+ return None
+
+ except requests.exceptions.RequestException as e:
+ error_text = type(e).__name__ + ": " + str(e)
+ self.logger.error("clear_all_flows " + error_text)
+ raise OpenflowConnConnectionException(error_text)
--- /dev/null
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+##
+"""The SdnConnectorOnosOf connector is responsible for creating services using pro active operflow rules.
+"""
+
+import logging
+from osm_ro.wim.openflow_conn import SdnConnectorOpenFlow
+from .onos_of import OfConnOnos
+
+
+class SdnConnectorOnosOf(SdnConnectorOpenFlow):
+
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """Creates a connectivity based on pro-active openflow rules
+ """
+ self.logger = logging.getLogger('openmano.sdnconn.onosof')
+ super().__init__(wim, wim_account, config, logger)
+ of_params = {
+ "of_url": wim["wim_url"],
+ "of_dpid": config.get("dpid"),
+ "of_user": wim_account["user"],
+ "of_password": wim_account["password"],
+ }
+ self.openflow_conn = OfConnOnos(of_params)
+ super().__init__(wim, wim_account, config, logger, self.openflow_conn)
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_onosof"
+
+README = """
+================
+osm-rosdn_onosof
+================
+
+osm-ro plugin for onosof SDN
+"""
+
+setup(
+ name=_name,
+ description='OSM ro sdn plugin for onosof',
+ long_description=README,
+ version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ # version=VERSION,
+ # python_requires='>3.5.0',
+ author='ETSI OSM',
+ author_email='alfonso.tiernosepulveda@telefonica.com',
+ maintainer='Alfonso Tierno',
+ maintainer_email='alfonso.tiernosepulveda@telefonica.com',
+ url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+ license='Apache 2.0',
+
+ packages=[_name],
+ include_package_data=True,
+ dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+ install_requires=["requests", "osm-ro"],
+ setup_requires=['setuptools-version-command'],
+ entry_points={
+ 'osm_rosdn.plugins': ['rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf'],
+ },
+)
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-osm-ro
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rosdn_onosof --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rosdn_onosof.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+ rm -rf dist deb_dist osm_rosdn_tapi-*.tar.gz osm_rosdn_tapi.egg-info .eggs
+
+package:
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ cd deb_dist/osm-rosdn-tapi*/ && dpkg-buildpackage -rfakeroot -uc -us
+
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This work has been performed in the context of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 program.
+##
+"""The SDN/WIM connector is responsible for establishing wide area network
+connectivity.
+
+This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data
+ Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
+
+It receives the endpoints and the necessary details to request
+the Layer 2 service.
+"""
+import requests
+import uuid
+import logging
+from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError
+"""CHeck layer where we move it"""
+
+
+class WimconnectorIETFL2VPN(SdnConnectorBase):
+
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """IETF L2VPM WIM connector
+
+ Arguments: (To be completed)
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ """
+ self.logger = logging.getLogger('openmano.sdnconn.ietfl2vpn')
+ super().__init__(wim, wim_account, config, logger)
+ self.headers = {'Content-Type': 'application/json'}
+ self.mappings = {m['service_endpoint_id']: m
+ for m in self.service_endpoint_mapping}
+ self.user = wim_account.get("user")
+ self.passwd = wim_account.get("password")
+ if self.user and self.passwd is not None:
+ self.auth = (self.user, self.passwd)
+ else:
+ self.auth = None
+ self.logger.info("IETFL2VPN Connector Initialized.")
+
+ def check_credentials(self):
+ endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ try:
+ response = requests.get(endpoint, auth=self.auth)
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ raise SdnConnectorError(str(e), http_code=503)
+
+ if http_code != 200:
+ raise SdnConnectorError("Failed while authenticating", http_code=http_code)
+ self.logger.info("Credentials checked")
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service stablished
+
+ Arguments:
+ service_uuid: Connectivity service unique identifier
+
+ Returns:
+ Examples::
+ {'sdn_status': 'ACTIVE'}
+ {'sdn_status': 'INACTIVE'}
+ {'sdn_status': 'DOWN'}
+ {'sdn_status': 'ERROR'}
+ """
+ try:
+ self.logger.info("Sending get connectivity service stuatus")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid)
+ response = requests.get(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.ok:
+ raise SdnConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code)
+ service_status = {'sdn_status': 'ACTIVE'}
+ return service_status
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def search_mapp(self, connection_point):
+ id = connection_point['service_endpoint_id']
+ if id not in self.mappings:
+ raise SdnConnectorError("Endpoint {} not located".format(str(id)))
+ else:
+ return self.mappings[id]
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """Stablish WAN connectivity between the endpoints
+
+ Arguments:
+ service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+ ``L3``.
+ connection_points (list): each point corresponds to
+ an entry point from the DC to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Represented by a dict as follows::
+
+ {
+ "service_endpoint_id": ..., (str[uuid])
+ "service_endpoint_encapsulation_type": ...,
+ (enum: none, dot1q, ...)
+ "service_endpoint_encapsulation_info": {
+ ... (dict)
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)]
+ (present if encapsulation is vxlan)
+ }
+ }
+
+ The service endpoint ID should be previously informed to the WIM
+ engine in the RO when the WIM port mapping is registered.
+
+ Keyword Arguments:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+
+ Other QoS might be passed as keyword arguments.
+
+ Returns:
+ tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established connectivity
+ service
+ - *conn_info* (dict or None): Information to be stored at the
+ database (or ``None``). This information will be provided to
+ the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+
+ Raises:
+ SdnConnectorException: In case of error.
+ """
+ if service_type == "ELINE":
+ if len(connection_points) > 2:
+ raise SdnConnectorError('Connections between more than 2 endpoints are not supported')
+ if len(connection_points) < 2:
+ raise SdnConnectorError('Connections must have at least 2 endpoints')
+ """ First step, create the vpn service """
+ uuid_l2vpn = str(uuid.uuid4())
+ vpn_service = {}
+ vpn_service["vpn-id"] = uuid_l2vpn
+ vpn_service["vpn-scv-type"] = "vpws"
+ vpn_service["svc-topo"] = "any-to-any"
+ vpn_service["customer-name"] = "osm"
+ vpn_service_list = []
+ vpn_service_list.append(vpn_service)
+ vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
+ response_service_creation = None
+ conn_info = []
+ self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+ try:
+ endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"])
+ response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
+ json=vpn_service_l, auth=self.auth)
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request to create service Timeout", http_code=408)
+ if response_service_creation.status_code == 409:
+ raise SdnConnectorError("Service already exists", http_code=response_service_creation.status_code)
+ elif response_service_creation.status_code != requests.codes.created:
+ raise SdnConnectorError("Request to create service not accepted",
+ http_code=response_service_creation.status_code)
+ """ Second step, create the connections and vpn attachments """
+ for connection_point in connection_points:
+ connection_point_wan_info = self.search_mapp(connection_point)
+ site_network_access = {}
+ connection = {}
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+ """ The connection is a VLAN """
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ if service_endpoint_encapsulation_info["vlan"] is None:
+ raise SdnConnectorError("VLAN must be provided")
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
+ else:
+ raise NotImplementedError("Encapsulation type not implemented")
+ site_network_access["connection"] = connection
+ self.logger.info("Sending connection:{}".format(connection))
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = uuid_l2vpn
+ vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ site_network_access["vpn-attachment"] = vpn_attach
+ self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
+ uuid_sna = str(uuid.uuid4())
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"]
+ site_network_accesses = {}
+ site_network_access_list = []
+ site_network_access_list.append(site_network_access)
+ site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+ conn_info_d = {}
+ conn_info_d["site"] = connection_point_wan_info["service_mapping_info"]["site-id"]
+ conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
+ conn_info_d["mapping"] = None
+ conn_info.append(conn_info_d)
+ try:
+ endpoint_site_network_access_creation = \
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"])
+ response_endpoint_site_network_access_creation = requests.post(
+ endpoint_site_network_access_creation,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth)
+
+ if response_endpoint_site_network_access_creation.status_code == 409:
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+ raise SdnConnectorError("Site_Network_Access with ID '{}' already exists".format(
+ site_network_access["network-access-id"]),
+ http_code=response_endpoint_site_network_access_creation.status_code)
+
+ elif response_endpoint_site_network_access_creation.status_code == 400:
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+ raise SdnConnectorError("Site {} does not exist".format(
+ connection_point_wan_info["service_mapping_info"]["site-id"]),
+ http_code=response_endpoint_site_network_access_creation.status_code)
+
+ elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
+ response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+ raise SdnConnectorError("Request no accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code)
+
+ except requests.exceptions.ConnectionError:
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+ raise SdnConnectorError("Request Timeout", http_code=408)
+ return uuid_l2vpn, conn_info
+
+ else:
+ raise NotImplementedError
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """Disconnect multi-site endpoints previously connected
+
+ This method should receive as the first argument the UUID generated by
+ the ``create_connectivity_service``
+ """
+ try:
+ self.logger.info("Sending delete")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid)
+ response = requests.delete(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.no_content:
+ raise SdnConnectorError("Error in the request", http_code=response.status_code)
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def edit_connectivity_service(self, service_uuid, conn_info=None,
+ connection_points=None, **kwargs):
+ """Change an existing connectivity service, see
+ ``create_connectivity_service``"""
+
+ # sites = {"sites": {}}
+ # site_list = []
+ vpn_service = {}
+ vpn_service["svc-topo"] = "any-to-any"
+ counter = 0
+ for connection_point in connection_points:
+ site_network_access = {}
+ connection_point_wan_info = self.search_mapp(connection_point)
+ params_site = {}
+ params_site["site-id"] = connection_point_wan_info["service_mapping_info"]["site-id"]
+ params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+ device_site = {}
+ device_site["device-id"] = connection_point_wan_info["device-id"]
+ params_site["devices"] = device_site
+ # network_access = {}
+ connection = {}
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+ """ The connection is a VLAN """
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ if service_endpoint_encapsulation_info["vlan"] is None:
+ raise SdnConnectorError("VLAN must be provided")
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
+ else:
+ raise NotImplementedError("Encapsulation type not implemented")
+ site_network_access["connection"] = connection
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = service_uuid
+ vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ site_network_access["vpn-attachment"] = vpn_attach
+ uuid_sna = conn_info[counter]["site-network-access-id"]
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"]
+ site_network_accesses = {}
+ site_network_access_list = []
+ site_network_access_list.append(site_network_access)
+ site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+ try:
+ endpoint_site_network_access_edit = \
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"])
+ response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth)
+ if response_endpoint_site_network_access_creation.status_code == 400:
+ raise SdnConnectorError("Service does not exist",
+ http_code=response_endpoint_site_network_access_creation.status_code)
+ elif response_endpoint_site_network_access_creation.status_code != 201 and \
+ response_endpoint_site_network_access_creation.status_code != 204:
+ raise SdnConnectorError("Request no accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code)
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+ counter += 1
+ return None
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links corresponding to a WIM"""
+ try:
+ self.logger.info("Sending clear all connectivity services")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ response = requests.delete(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.no_content:
+ raise SdnConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM
+ """
+ try:
+ self.logger.info("Sending get all connectivity services")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ response = requests.get(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.ok:
+ raise SdnConnectorError("Unable to get all connectivity services", http_code=response.status_code)
+ return response
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_tapi"
+
+README = """
+==============
+osm-rosdn_tapi
+==============
+
+osm-ro plugin for tapi (ietfl2vpn) SDN
+"""
+
+setup(
+ name=_name,
+ description='OSM ro sdn plugin for tapi (ietfl2vpn)',
+ long_description=README,
+ version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ # version=VERSION,
+ # python_requires='>3.5.0',
+ author='ETSI OSM',
+ # TODO py3 author_email='',
+ maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3
+ # TODO py3 maintainer_email='',
+ url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+ license='Apache 2.0',
+
+ packages=[_name],
+ include_package_data=True,
+ dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+ install_requires=["requests", "osm-ro"],
+ setup_requires=['setuptools-version-command'],
+ entry_points={
+ 'osm_rosdn.plugins': ['rosdn_tapi = osm_rosdn_tapi.wimconn_ietfl2vpn:WimconnectorIETFL2VPN'],
+ },
+)
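+
+# Illustrative note (not executed by setuptools): RO is expected to discover
+# this plugin through the 'osm_rosdn.plugins' entry point declared above,
+# roughly as follows:
+#
+#   from pkg_resources import iter_entry_points
+#   for ep in iter_entry_points('osm_rosdn.plugins', 'rosdn_tapi'):
+#       connector_class = ep.load()   # -> WimconnectorIETFL2VPN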
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-osm-ro
+
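+# Informational: stdeb reads this [DEFAULT] section when the Debian package is
+# built, e.g. via "python3 setup.py --command-packages=stdeb.command bdist_deb"
+# (the command used by the tox "build" environment of this plugin).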
--- /dev/null
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rosdn_tapi --max-line-length 120 \
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rosdn_tapi.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
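+# Usage note: plain "tox" runs the default py3 unit-test environment; individual
+# environments can be selected with e.g. "tox -e flake8", "tox -e unittest" or
+# "tox -e build".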
-===========
+ Copyright 2018 Telefonica S.A.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+============
osm-roclient
-===========
+============
osm-roclient is a client for interact with osm-ro server
+ Copyright 2018 Telefonica S.A.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
===========
osm-ro
===========
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
#!/usr/bin/env bash
+##
+# Copyright Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
DB_NAME='mano_db'
DB_ADMIN_USER="root"
DB_USER="mano"
BACKUP_DIR=""
BACKUP_FILE=""
#TODO update it with the last database version
-LAST_DB_VERSION=39
+LAST_DB_VERSION=40
# Detect paths
MYSQL=$(which mysql)
#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37 #0.6.09 => 37
#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38 #0.6.11 => 38
#[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39 #0.6.20 => 39
+#[ $OPENMANO_VER_NUM -ge 6000004 ] && DB_VERSION=40 #6.0.4 => 40
#TODO ... put next versions here
function upgrade_to_1(){
sql "DELETE FROM schema_version WHERE version_int='39';"
}
+function upgrade_to_40(){
+ echo " Adding instance_wim_net_id, created_at, modified_at at 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces ADD COLUMN instance_wim_net_id VARCHAR(36) NULL AFTER instance_net_id, "\
+ "ADD COLUMN model VARCHAR(12) NULL DEFAULT NULL AFTER type, "\"
+ "ADD COLUMN created_at DOUBLE NULL DEFAULT NULL AFTER vlan, " \
+ "ADD COLUMN modified_at DOUBLE NULL DEFAULT NULL AFTER created_at;"
+ echo " Adding sdn to 'instance_wim_nets'"
+ sql "ALTER TABLE instance_wim_nets ADD COLUMN sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER created;"
+ echo " Change from created to sdn at 'wim_accounts'"
+ sql "ALTER TABLE wim_accounts CHANGE COLUMN created sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+ echo " Remove unique_datacenter_port_mapping at 'wim_port_mappings'"
+ sql "ALTER TABLE wim_port_mappings DROP INDEX unique_datacenter_port_mapping;"
+ echo " change 'wim_port_mappings' pop_x to device_x, adding switch_dpid, switch_port"
+ sql "ALTER TABLE wim_port_mappings ALTER pop_switch_dpid DROP DEFAULT, ALTER pop_switch_port DROP DEFAULT;"
+ sql "ALTER TABLE wim_port_mappings CHANGE COLUMN pop_switch_dpid device_id VARCHAR(64) NULL AFTER datacenter_id," \
+ " CHANGE COLUMN pop_switch_port device_interface_id VARCHAR(64) NULL AFTER device_id, " \
+ " CHANGE COLUMN wan_service_endpoint_id service_endpoint_id VARCHAR(256) NOT NULL AFTER device_interface_id, " \
+ " CHANGE COLUMN wan_service_mapping_info service_mapping_info TEXT NULL AFTER service_endpoint_id, " \
+ " ADD COLUMN switch_dpid VARCHAR(64) NULL AFTER wan_service_endpoint_id," \
+ " ADD COLUMN switch_port VARCHAR(64) NULL AFTER switch_dpid;"
+ echo " remove unique name to 'datacenters'"
+ sql "ALTER TABLE datacenters DROP INDEX name;"
+
+ sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+ "VALUES (40, '0.40', '6.0.4', 'Chagnes to SDN ', '2019-10-23');"
+}
+function downgrade_from_40(){
+ echo " Removing instance_wim_net_id, created_at, modified_at from 'instance_interfaces'"
+ sql "ALTER TABLE instance_interfaces DROP COLUMN instance_wim_net_id, DROP COLUMN created_at, " \
+ "DROP COLUMN modified_at, DROP COLUMN model;"
+ echo " Removing sdn from 'instance_wim_nets'"
+ sql "ALTER TABLE instance_wim_nets DROP COLUMN sdn;"
+ echo " Change back from sdn to created at 'wim_accounts'"
+ sql "ALTER TABLE wim_accounts CHANGE COLUMN sdn created ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+ echo " Restore back unique_datacenter_port_mapping at 'wim_port_mappings'"
+ echo " change 'wim_port_mappings' device_x to pop_x, remove switch_dpid, switch_port"
+ sql "ALTER TABLE wim_port_mappings ALTER device_id DROP DEFAULT, ALTER device_interface_id DROP DEFAULT;"
+ sql "ALTER TABLE wim_port_mappings CHANGE COLUMN device_id pop_switch_dpid VARCHAR(64) NOT NULL AFTER " \
+ "datacenter_id, CHANGE COLUMN device_interface_id pop_switch_port VARCHAR(64) NOT NULL AFTER pop_switch_dpid," \
+ " CHANGE COLUMN service_endpoint_id wan_service_endpoint_id VARCHAR(256) NOT NULL AFTER pop_switch_port, " \
+ " CHANGE COLUMN service_mapping_info wan_service_mapping_info TEXT NULL AFTER wan_service_endpoint_id, " \
+ " DROP COLUMN switch_dpid, DROP COLUMN switch_port;"
+ sql "ALTER TABLE wim_port_mappings ADD UNIQUE INDEX unique_datacenter_port_mapping(datacenter_id, pop_switch_dpid,
+ pop_switch_port);"
+ echo " add unique name to 'datacenters'"
+ sql "ALTER TABLE datacenters ADD UNIQUE INDEX name (name);"
+ sql "DELETE FROM schema_version WHERE version_int='40';"
+}
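+# Illustrative sketch (assumption: the dispatch loop lives outside this excerpt).
+# The surrounding script is expected to call these functions by schema version,
+# e.g.:
+#   [ $DB_VERSION -lt 40 ] && upgrade_to_40      # moving forward to version 40
+#   [ $DB_VERSION -ge 40 ] && downgrade_from_40  # when rolling back below 40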
+
+
#TODO ... put functions here
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
--
-- Tear down database structure required for integrating OSM with
-- Wide Area Network Infrastructure Managers
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
--
-- Removing ingress and egress ports for SFC purposes.
-- Inserting only one port for ingress and egress.
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
--
-- Setup database structure required for integrating OSM with
-- Wide Area Network Infrastructure Managers
+/**
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+**/
--
-- Adding different ingress and egress ports for SFC.
--
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
import logging
from functools import wraps
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
#
# Util functions previously in `httpserver`
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
import unittest
import bottle
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
import unittest
from mock import MagicMock, patch
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# This tox file allows the devs to run unit tests only for this subpackage.
# In order to do so, cd into the directory and run `tox`
from threading import Lock
import time as t
# TODO py3 BEGIN
-# from lib_osm_openvim import ovim as ovim_module
+from osm_ro.sdn import Sdn, SdnException as ovimException
# from lib_osm_openvim.ovim import ovimException
-from unittest.mock import MagicMock
-ovim_module = MagicMock()
-class ovimException(Exception):
- pass
-ovim_module.ovimException = ovimException
+# from unittest.mock import MagicMock
+# class ovimException(Exception):
+# pass
# TODO py3 END
from Crypto.PublicKey import RSA
# WIM
-import osm_ro.wim.wimconn as wimconn
-import osm_ro.wim.wim_thread as wim_thread
-from osm_ro.http_tools import errors as httperrors
-from osm_ro.wim.engine import WimEngine
-from osm_ro.wim.persistence import WimPersistence
+from .wim import sdnconn
+from .wim.wimconn_fake import FakeConnector
+from .wim.failing_connector import FailingConnector
+from .http_tools import errors as httperrors
+from .wim.engine import WimEngine
+from .wim.persistence import WimPersistence
from copy import deepcopy
from pprint import pformat
#
# WIM
global wim_engine
wim_engine = None
-global wimconn_imported
+global sdnconn_imported
#
global logger
global default_volume_size
vim_threads = {"running":{}, "deleting": {}, "names": []} # threads running for attached-VIMs
vim_persistent_info = {}
# WIM
-wimconn_imported = {} # dictionary with WIM type as key, loaded module as value
+sdnconn_imported = {} # dictionary with WIM type as key, loaded module as value
wim_threads = {"running":{}, "deleting": {}, "names": []} # threads running for attached-WIMs
wim_persistent_info = {}
#
class NfvoException(httperrors.HttpMappedError):
"""Common Class for NFVO errors"""
-def _load_vim_plugin(name):
+def _load_plugin(name, type="vim"):
+ # type can be vim or sdn
global plugins
- for v in iter_entry_points('osm_rovim.plugins', name):
- plugins[name] = v.load()
+ try:
+ for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+ plugins[name] = v.load()
+ except Exception as e:
+ logger.critical("Cannot load osm_{}: {}".format(name, e))
+ if name:
+ plugins[name] = FailingConnector("Cannot load osm_{}: {}".format(name, e))
if name and name not in plugins:
- raise NfvoException("Unknown vim type '{}'. This plugin has not been registered".format(name),
- httperrors.Bad_Request)
+ error_text = "Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been" \
+ " registered".format(t=type, n=name)
+ logger.critical(error_text)
+ plugins[name] = FailingConnector(error_text)
+ # raise NfvoException("Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been registered".
+ # format(t=type, n=name), httperrors.Bad_Request)
def get_task_id():
global last_task_id
def start_service(mydb, persistence=None, wim=None):
- global db, global_config, plugins
+ global db, global_config, plugins, ovim
db = nfvo_db.nfvo_db(lock=db_lock)
mydb.lock = db_lock
db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
- global ovim
persistence = persistence or WimPersistence(db)
- # Initialize openvim for SDN control
- # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
- # TODO: review ovim.py to delete not needed configuration
- ovim_configuration = {
- 'logger_name': 'openmano.ovim',
- 'network_vlan_range_start': 1000,
- 'network_vlan_range_end': 4096,
- 'db_name': global_config["db_ovim_name"],
- 'db_host': global_config["db_ovim_host"],
- 'db_user': global_config["db_ovim_user"],
- 'db_passwd': global_config["db_ovim_passwd"],
- 'bridge_ifaces': {},
- 'mode': 'normal',
- 'network_type': 'bridge',
- #TODO: log_level_of should not be needed. To be modified in ovim
- 'log_level_of': 'DEBUG'
- }
try:
+ if "rosdn_fake" not in plugins:
+ plugins["rosdn_fake"] = FakeConnector
# starts ovim library
- ovim = ovim_module.ovim(ovim_configuration)
+ ovim = Sdn(db, plugins)
global wim_engine
- wim_engine = wim or WimEngine(persistence)
+ wim_engine = wim or WimEngine(persistence, plugins)
wim_engine.ovim = ovim
ovim.start_service()
extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
plugin_name = "rovim_" + vim["type"]
if plugin_name not in plugins:
- _load_vim_plugin(plugin_name)
+ _load_plugin(plugin_name, type="vim")
thread_id = vim['datacenter_tenant_id']
vim_persistent_info[thread_id] = {}
logger.error("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
vim['datacenter_id'], e))
except Exception as e:
- raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, e),
- httperrors.Internal_Server_Error)
+ logger.critical("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
+ vim['datacenter_id'], e))
+ # raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, e),
+ # httperrors.Internal_Server_Error)
thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
vim['vim_tenant_id'])
- new_thread = vim_thread(task_lock, plugins, thread_name, vim['datacenter_name'],
- vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
+ new_thread = vim_thread(task_lock, plugins, thread_name, None,
+ vim['datacenter_tenant_id'], db=db)
new_thread.start()
vim_threads["running"][thread_id] = new_thread
+ wims = mydb.get_rows(FROM="wim_accounts join wims on wim_accounts.wim_id=wims.uuid",
+ WHERE={"sdn": "true"},
+ SELECT=("wim_accounts.uuid as uuid", "type", "wim_accounts.name as name"))
+ for wim in wims:
+ plugin_name = "rosdn_" + wim["type"]
+ if plugin_name not in plugins:
+ _load_plugin(plugin_name, type="sdn")
+ thread_id = wim['uuid']
+ thread_name = get_non_used_vim_name(wim['name'], wim['uuid'], wim['uuid'], None)
+ new_thread = vim_thread(task_lock, plugins, thread_name, wim['uuid'], None, db=db)
+ new_thread.start()
+ vim_threads["running"][thread_id] = new_thread
wim_engine.start_threads()
except db_base_Exception as e:
raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
- except ovim_module.ovimException as e:
+ except ovimException as e:
message = str(e)
if message[:22] == "DATABASE wrong version":
message = "DATABASE wrong version of lib_osm_openvim {msg} -d{dbname} -u{dbuser} -p{dbpass} {ver}' "\
plugin_name = "rovim_" + vim["type"]
if plugin_name not in plugins:
try:
- _load_vim_plugin(plugin_name)
+ _load_plugin(plugin_name, type="vim")
except NfvoException as e:
if ignore_errors:
logger.error("{}".format(e))
return d
+def _get_wim(db, wim_account_id):
+ # get wim from wim_account
+ wim_accounts = db.get_rows(FROM='wim_accounts', WHERE={"uuid": wim_account_id})
+ if not wim_accounts:
+ raise NfvoException("Not found sdn id={}".format(wim_account_id), http_code=httperrors.Not_Found)
+ return wim_accounts[0]["wim_id"]
+
+
def create_instance(mydb, tenant_id, instance_dict):
# print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
# logger.debug("Creating instance...")
}
# Auxiliary dictionaries from x to y
+ sce_net2wim_instance = {}
sce_net2instance = {}
net2task_id = {'scenario': {}}
# Mapping between local networks and WIMs
# 1. Creating new nets (sce_nets) in the VIM"
number_mgmt_networks = 0
db_instance_nets = []
+ db_instance_wim_nets = []
for sce_net in scenarioDict['nets']:
sce_net_uuid = sce_net.get('uuid', sce_net["name"])
# get involved datacenters where this network need to be created
if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
involved_datacenters.append(site["datacenter"])
sce_net2instance[sce_net_uuid] = {}
+ sce_net2wim_instance[sce_net_uuid] = {}
net2task_id['scenario'][sce_net_uuid] = {}
use_network = None
sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
if not related_network: # all db_instance_nets will have same related
related_network = use_network or net_uuid
+ sdn_net_id = None
+ sdn_controller = vim.config.get('sdn-controller')
+ sce_net2wim_instance[sce_net_uuid][datacenter_id] = None
+ if sdn_controller and net_type in ("data", "ptp"):
+ wim_id = _get_wim(mydb, sdn_controller)
+ sdn_net_id = str(uuid4())
+ sce_net2wim_instance[sce_net_uuid][datacenter_id] = sdn_net_id
+ task_extra["sdn_net_id"] = sdn_net_id
+ db_instance_wim_nets.append({
+ "uuid": sdn_net_id,
+ "instance_scenario_id": instance_uuid,
+ "sce_net_id": sce_net.get("uuid"),
+ "wim_id": wim_id,
+ "wim_account_id": sdn_controller,
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ "related": related_network,
+ 'multipoint': True if net_type=="data" else False,
+ "created": create_network, # TODO py3
+ "sdn": True,
+ })
+ task_wim_extra = {"params": [net_type, wim_account_name]}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "status": "SCHEDULED",
+ "task_index": task_index,
+ # "datacenter_vim_id": myvim_thread_id,
+ "wim_account_id": sdn_controller,
+ "action": task_action,
+ "item": "instance_wim_nets",
+ "item_id": sdn_net_id,
+ "related": related_network,
+ "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
db_net = {
"uuid": net_uuid,
"osm_id": sce_net.get("osm_id") or sce_net["name"],
"created": create_network,
'datacenter_id': datacenter_id,
'datacenter_tenant_id': myvim_thread_id,
- 'status': 'BUILD' # if create_network else "ACTIVE"
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ 'sdn_net_id': sdn_net_id,
}
db_instance_nets.append(db_net)
db_vim_action = {
"task_index": task_index,
"uuid_list": uuid_list,
"db_instance_nets": db_instance_nets,
+ "db_instance_wim_nets": db_instance_wim_nets,
"db_vim_actions": db_vim_actions,
"db_ip_profiles": db_ip_profiles,
"db_instance_vnfs": db_instance_vnfs,
"db_instance_interfaces": db_instance_interfaces,
"net2task_id": net2task_id,
"sce_net2instance": sce_net2instance,
+ "sce_net2wim_instance": sce_net2wim_instance,
}
# sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
for sce_vnf in scenarioDict.get('vnfs', ()): # sce_vnf_list:
{"instance_sfs": db_instance_sfs},
{"instance_classifications": db_instance_classifications},
{"instance_sfps": db_instance_sfps},
- {"instance_wim_nets": wan_links},
+ {"instance_wim_nets": db_instance_wim_nets + wan_links},
{"vim_wim_actions": db_vim_actions + wim_actions}
]
returned_instance = mydb.get_instance_scenario(instance_uuid)
returned_instance["action_id"] = instance_action_id
return returned_instance
- except (NfvoException, vimconn.vimconnException, wimconn.WimConnectorError, db_base_Exception) as e:
+ except (NfvoException, vimconn.vimconnException, sdnconn.SdnConnectorError, db_base_Exception) as e:
message = rollback(mydb, myvims, rollbackList)
if isinstance(e, db_base_Exception):
error_text = "database Exception"
elif isinstance(e, vimconn.vimconnException):
error_text = "VIM Exception"
- elif isinstance(e, wimconn.WimConnectorError):
+ elif isinstance(e, sdnconn.SdnConnectorError):
error_text = "WIM Exception"
else:
error_text = "Exception"
task_index = params_out["task_index"]
uuid_list = params_out["uuid_list"]
db_instance_nets = params_out["db_instance_nets"]
+ db_instance_wim_nets = params_out["db_instance_wim_nets"]
db_vim_actions = params_out["db_vim_actions"]
db_ip_profiles = params_out["db_ip_profiles"]
db_instance_vnfs = params_out["db_instance_vnfs"]
db_instance_interfaces = params_out["db_instance_interfaces"]
net2task_id = params_out["net2task_id"]
sce_net2instance = params_out["sce_net2instance"]
+ sce_net2wim_instance = params_out["sce_net2wim_instance"]
vnf_net2instance = {}
# 2. Creating new nets (vnf internal nets) in the VIM"
# For each vnf net, we create it and we add it to instanceNetlist.
if sce_vnf.get("datacenter"):
+ vim = myvims[sce_vnf["datacenter"]]
datacenter_id = sce_vnf["datacenter"]
myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
else:
+ vim = myvims[default_datacenter_id]
datacenter_id = default_datacenter_id
myvim_thread_id = myvim_threads_id[default_datacenter_id]
for net in sce_vnf['nets']:
vnf_net2instance[sce_vnf['uuid']] = {}
if sce_vnf['uuid'] not in net2task_id:
net2task_id[sce_vnf['uuid']] = {}
- net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
# fill database content
net_uuid = str(uuid4())
uuid_list.append(net_uuid)
vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
+
+ sdn_controller = vim.config.get('sdn-controller')
+ sdn_net_id = None
+ if sdn_controller and net_type in ("data", "ptp"):
+ wim_id = _get_wim(mydb, sdn_controller)
+ sdn_net_id = str(uuid4())
+ db_instance_wim_nets.append({
+ "uuid": sdn_net_id,
+ "instance_scenario_id": instance_uuid,
+ "wim_id": wim_id,
+ "wim_account_id": sdn_controller,
+ 'status': 'BUILD', # if create_network else "ACTIVE"
+ "related": net_uuid,
+ 'multipoint': True if net_type == "data" else False,
+ "created": True, # TODO py3
+ "sdn": True,
+ })
+
db_net = {
"uuid": net_uuid,
"related": net_uuid,
"created": True,
'datacenter_id': datacenter_id,
'datacenter_tenant_id': myvim_thread_id,
+ 'sdn_net_id': sdn_net_id,
}
db_instance_nets.append(db_net)
else:
task_action = "CREATE"
task_extra = {"params": (net_name, net_type, net.get('ip_profile', None))}
+ if sdn_net_id:
+ task_extra["sdn_net_id"] = sdn_net_id
+ if sdn_net_id:
+ task_wim_extra = {"params": [net_type, None]}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "status": "SCHEDULED",
+ "task_index": task_index,
+ # "datacenter_vim_id": myvim_thread_id,
+ "wim_account_id": sdn_controller,
+ "action": task_action,
+ "item": "instance_wim_nets",
+ "item_id": sdn_net_id,
+ "related": net_uuid,
+ "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
db_vim_action = {
"instance_action_id": instance_action_id,
"task_index": task_index,
"related": net_uuid,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
+ net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
task_index += 1
db_vim_actions.append(db_vim_action)
netDict['net_id'] = "TASK-{}".format(
net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
instance_net_id = sce_net2instance[vnf_iface['sce_net_id']][datacenter_id]
+ instance_wim_net_id = sce_net2wim_instance[vnf_iface['sce_net_id']][datacenter_id]
task_depends_on.append(net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
break
else:
# "uuid"
# 'instance_vm_id': instance_vm_uuid,
"instance_net_id": instance_net_id,
+ "instance_wim_net_id": instance_wim_net_id,
'interface_id': iface['uuid'],
# 'vim_interface_id': ,
'type': 'external' if iface['external_name'] is not None else 'internal',
+ 'model': iface['model'],
'ip_address': iface.get('ip_address'),
'mac_address': iface.get('mac'),
'floating_ip': int(iface.get('floating-ip', False)),
}
task_index += 1
db_vim_actions.append(db_vim_action)
+ for sdn_net in instanceDict['sdn_nets']:
+ if not sdn_net["sdn"]:
+ continue
+ extra = {}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "wim_account_id": sdn_net["wim_account_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_wim_nets",
+ "item_id": sdn_net["uuid"],
+ "related": sdn_net["related"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
db_instance_action["number_tasks"] = task_index
#obtain data
instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
- for net in instance_dict["nets"]:
- if net.get("sdn_net_id"):
- net_sdn = ovim.show_network(net["sdn_net_id"])
- net["sdn_info"] = {
- "admin_state_up": net_sdn.get("admin_state_up"),
- "flows": net_sdn.get("flows"),
- "last_error": net_sdn.get("last_error"),
- "ports": net_sdn.get("ports"),
- "type": net_sdn.get("type"),
- "status": net_sdn.get("status"),
- "vlan": net_sdn.get("vlan"),
- }
+ # TODO py3
+ # for net in instance_dict["nets"]:
+ # if net.get("sdn_net_id"):
+ # net_sdn = ovim.show_network(net["sdn_net_id"])
+ # net["sdn_info"] = {
+ # "admin_state_up": net_sdn.get("admin_state_up"),
+ # "flows": net_sdn.get("flows"),
+ # "last_error": net_sdn.get("last_error"),
+ # "ports": net_sdn.get("ports"),
+ # "type": net_sdn.get("type"),
+ # "status": net_sdn.get("status"),
+ # "vlan": net_sdn.get("vlan"),
+ # }
return instance_dict
@deprecated("Instance is automatically refreshed by vim_threads")
# load plugin
plugin_name = "rovim_" + datacenter_type
if plugin_name not in plugins:
- _load_vim_plugin(plugin_name)
+ _load_plugin(plugin_name, type="vim")
datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
if sdn_port_mapping:
# create thread
thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
- new_thread = vim_thread(task_lock, plugins, thread_name, datacenter_name, datacenter_tenant_id,
- db=db, db_lock=db_lock, ovim=ovim)
+ new_thread = vim_thread(task_lock, plugins, thread_name, None, datacenter_tenant_id, db=db)
new_thread.start()
thread_id = datacenter_tenants_dict["uuid"]
vim_threads["running"][thread_id] = new_thread
return vim_action_get(mydb, tenant_id, datacenter, item, content)
def sdn_controller_create(mydb, tenant_id, sdn_controller):
- data = ovim.new_of_controller(sdn_controller)
- logger.debug('New SDN controller created with uuid {}'.format(data))
- return data
+ wim_id = ovim.new_of_controller(sdn_controller)
+
+ thread_name = get_non_used_vim_name(sdn_controller['name'], wim_id, wim_id, None)
+ new_thread = vim_thread(task_lock, plugins, thread_name, wim_id, None, db=db)
+ new_thread.start()
+ thread_id = wim_id
+ vim_threads["running"][thread_id] = new_thread
+ logger.debug('New SDN controller created with uuid {}'.format(wim_id))
+ return wim_id
def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
data = ovim.edit_of_controller(controller_id, sdn_controller)
msg = 'SDN controller {} updated'.format(data)
+ vim_threads["running"][controller_id].insert_task("reload")
logger.debug(msg)
return msg
#element = {"ofc_id": sdn_controller_id, "region": datacenter_id, "switch_dpid": switch_dpid}
element = dict()
element["compute_node"] = compute_node["compute_node"]
- for port in compute_node["ports"]:
- pci = port.get("pci")
- element["switch_port"] = port.get("switch_port")
- element["switch_mac"] = port.get("switch_mac")
- if not element["switch_port"] and not element["switch_mac"]:
- raise NfvoException ("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
- for pci_expanded in utils.expand_brackets(pci):
- element["pci"] = pci_expanded
- maps.append(dict(element))
-
- return ovim.set_of_port_mapping(maps, ofc_id=sdn_controller_id, switch_dpid=switch_dpid, region=datacenter_id)
+ if compute_node["ports"]:
+ for port in compute_node["ports"]:
+ pci = port.get("pci")
+ element["switch_port"] = port.get("switch_port")
+ element["switch_mac"] = port.get("switch_mac")
+ if not element["switch_port"] and not element["switch_mac"]:
+ raise NfvoException ("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
+ for pci_expanded in utils.expand_brackets(pci):
+ element["pci"] = pci_expanded
+ maps.append(dict(element))
+
+ out = ovim.set_of_port_mapping(maps, sdn_id=sdn_controller_id, switch_dpid=switch_dpid, vim_id=datacenter_id)
+ vim_threads["running"][sdn_controller_id].insert_task("reload")
+ return out
def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
- maps = ovim.get_of_port_mappings(db_filter={"region": datacenter_id})
+ maps = ovim.get_of_port_mappings(db_filter={"datacenter_id": datacenter_id})
result = {
"sdn-controller": None,
ports_correspondence_dict = dict()
for link in maps:
- if result["sdn-controller"] != link["ofc_id"]:
+ if result["sdn-controller"] != link["wim_id"]:
raise NfvoException("The sdn-controller specified for different port mappings differ", httperrors.Internal_Server_Error)
if result["dpid"] != link["switch_dpid"]:
raise NfvoException("The dpid specified for different port mappings differ", httperrors.Internal_Server_Error)
+ link_config = link["service_mapping_info"]
element = dict()
- element["pci"] = link["pci"]
+ element["pci"] = link.get("device_interface_id")
if link["switch_port"]:
element["switch_port"] = link["switch_port"]
- if link["switch_mac"]:
- element["switch_mac"] = link["switch_mac"]
+ if link_config["switch_mac"]:
+ element["switch_mac"] = link_config.get("switch_mac")
- if not link["compute_node"] in ports_correspondence_dict:
+ if not link.get("interface_id") in ports_correspondence_dict:
content = dict()
- content["compute_node"] = link["compute_node"]
+ content["compute_node"] = link.get("interface_id")
content["ports"] = list()
- ports_correspondence_dict[link["compute_node"]] = content
+ ports_correspondence_dict[link.get("interface_id")] = content
- ports_correspondence_dict[link["compute_node"]]["ports"].append(element)
+ ports_correspondence_dict[link["interface_id"]]["ports"].append(element)
for key in sorted(ports_correspondence_dict):
result["ports_mapping"].append(ports_correspondence_dict[key])
return result
def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
- return ovim.clear_of_port_mapping(db_filter={"region":datacenter_id})
+ return ovim.clear_of_port_mapping(db_filter={"datacenter_id":datacenter_id})
def create_RO_keypair(tenant_id):
"""
"instance_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
"sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
"instance_classifications", "instance_sfps", "wims", "wim_accounts", "wim_nfvo_tenants",
- "wim_port_mappings", "vim_wim_actions",
+ "wim_port_mappings", "vim_wim_actions", "instance_interfaces",
"instance_wim_nets"]
cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
" ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
" FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
- " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
+ " WHERE instance_vm_id='{}' ORDER BY i.created_at".format(vm['uuid'])
self.logger.debug(cmd)
self.cur.execute(cmd )
vm['interfaces'] = self.cur.fetchall()
self.cur.execute(cmd)
instance_dict['nets'] = self.cur.fetchall()
+ # instance sdn_nets:
+ cmd = "SELECT * FROM instance_wim_nets WHERE instance_scenario_id='{}' ORDER BY created_at;".format(
+ instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sdn_nets'] = self.cur.fetchall()
+
#instance_sfps
cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
"datacenter_tenant_id,status,error_msg,vim_info, related"\
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
-__version__ = "6.0.3.post5"
+__version__ = "6.0.4.post6"
version_date = "Oct 2019"
-database_version = 39 # expected database schema version
+database_version = 40 # expected database schema version
global global_config
global logger
# WIM module
wim_persistence = WimPersistence(mydb)
- wim_engine = WimEngine(wim_persistence)
+ wim_engine = WimEngine(wim_persistence, nfvo.plugins)
# ---
nfvo.start_service(mydb, wim_persistence, wim_engine)
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
[Unit]
Description=openmano server (OSM RO)
After=mysql.service
#!/bin/bash
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
# This script is intended for launching RO from a docker container.
# It waits for mysql server ready, normally running on a separate container, ...
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+"""
+SDN/WIM helper module for RO: it wraps the database operations used to manage
+SDN (openflow) controllers as WIM accounts, together with their port mappings.
+"""
+import yaml
+from uuid import uuid4
+from http import HTTPStatus
+
+__author__ = "Alfonso Tierno"
+__date__ = "2019-10-22"
+__version__ = "0.1"
+version_date = "Oct 2019"
+
+
+class SdnException(Exception):
+ def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST.value):
+ self.http_code = http_code
+ Exception.__init__(self, message)
+
+
+class Sdn():
+ running_info = {} # TODO OVIM move the info of running threads from config_dic to this static variable
+ of_module = {}
+
+ def __init__(self, db, plugins):
+ self.db = db
+ self.plugins = plugins
+
+ def start_service(self):
+ pass # TODO py3 needed to load wims and plugins
+
+ def stop_service(self):
+ pass # nothing needed
+
+ def show_network(self, uuid):
+ pass
+
+ def delete_network(self, uuid):
+ pass
+
+ def new_network(self, network):
+ pass
+
+ def get_openflow_rules(self, network_id=None):
+ """
+ Get openflow rules from DB
+ :param network_id: Network id, if none all networks will be retrieved
+ :return: Return a list with Openflow rules per net
+ """
+ # ignore input data
+ if not network_id:
+ where_ = {}
+ else:
+ where_ = {"net_id": network_id}
+ result, content = self.db.get_table(
+ SELECT=("name", "net_id", "ofc_id", "priority", "vlan_id", "ingress_port", "src_mac", "dst_mac", "actions"),
+ WHERE=where_, FROM='of_flows')
+
+ if result < 0:
+ raise SdnException(str(content), -result)
+ return content
+
+ def edit_openflow_rules(self, network_id=None):
+ """
+ To make actions over the net. The action is to reinstall the openflow rules
+ network_id can be 'all'
+ :param network_id: Network id, if none all networks will be retrieved
+ :return : Number of nets updated
+ """
+
+ # ignore input data
+ if not network_id:
+ where_ = {}
+ else:
+ where_ = {"uuid": network_id}
+ result, content = self.db.get_table(SELECT=("uuid", "type"), WHERE=where_, FROM='nets')
+
+ if result < 0:
+ raise SdnException(str(content), -result)
+
+ for net in content:
+ if net["type"] != "ptp" and net["type"] != "data":
+ result -= 1
+ continue
+
+ try:
+ self.net_update_ofc_thread(net['uuid'])
+ except SdnException as e:
+ raise SdnException("Error updating network'{}' {}".format(net['uuid'], e),
+ HTTPStatus.INTERNAL_SERVER_ERROR.value)
+ except Exception as e:
+ raise SdnException("Error updating network '{}' {}".format(net['uuid'], e),
+ HTTPStatus.INTERNAL_SERVER_ERROR.value)
+
+ return result
+
+ def delete_openflow_rules(self, ofc_id=None):
+ """
+ To make actions over the net. The action is to delete ALL openflow rules
+ :return: return operation result
+ """
+
+ if not ofc_id:
+ if 'Default' in self.config['ofcs_thread']:
+ r, c = self.config['ofcs_thread']['Default'].insert_task("clear-all")
+ else:
+ raise SdnException("Default Openflow controller not not running", HTTPStatus.NOT_FOUND.value)
+
+ elif ofc_id in self.config['ofcs_thread']:
+ r, c = self.config['ofcs_thread'][ofc_id].insert_task("clear-all")
+
+ # ignore input data
+ if r < 0:
+ raise SdnException(str(c), -r)
+ else:
+ raise SdnException("Openflow controller not found with ofc_id={}".format(ofc_id),
+ HTTPStatus.NOT_FOUND.value)
+ return r
+
+ def get_openflow_ports(self, ofc_id=None):
+ """
+ Obtain switch port names of the openflow controller
+ :return: Return flow ports in DB
+ """
+ if not ofc_id:
+ if 'Default' in self.config['ofcs_thread']:
+ conn = self.config['ofcs_thread']['Default'].OF_connector
+ else:
+ raise SdnException("Default Openflow controller not not running", HTTPStatus.NOT_FOUND.value)
+
+ elif ofc_id in self.config['ofcs_thread']:
+ conn = self.config['ofcs_thread'][ofc_id].OF_connector
+ else:
+ raise SdnException("Openflow controller not found with ofc_id={}".format(ofc_id),
+ HTTPStatus.NOT_FOUND.value)
+ return conn.pp2ofi
+
+ def new_of_controller(self, ofc_data):
+ """
+ Create a new openflow controller into DB
+ :param ofc_data: Dict openflow controller data
+ :return: uuid of the created openflow controller (stored as a wim_account)
+ """
+ db_wim = {
+ "uuid": str(uuid4()),
+ "name": ofc_data["name"],
+ "description": "",
+ "type": ofc_data["type"],
+ "wim_url": "{}:{}".format(ofc_data["ip"], ofc_data["port"]),
+ }
+ db_wim_account = {
+ "uuid": str(uuid4()),
+ "name": ofc_data["name"],
+ "wim_id": db_wim["uuid"],
+ "sdn": "true",
+ "user": ofc_data.get("user"),
+ "password": ofc_data.get("password"),
+ "config": yaml.safe_dump({"dpid": ofc_data["dpid"], "version": ofc_data.get("version")},
+ default_flow_style=True, width=256)
+ }
+ db_tables = [
+ {"wims": db_wim},
+ {"wim_accounts": db_wim_account},
+ ]
+ uuid_list = [db_wim["uuid"], db_wim_account["uuid"]]
+ self.db.new_rows(db_tables, uuid_list)
+ return db_wim_account["uuid"]
+
+ def edit_of_controller(self, of_id, ofc_data):
+ """
+ Edit an openflow controller entry from DB
+ :return:
+ """
+ if not ofc_data:
+ raise SdnException("No data received during uptade OF contorller",
+ http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value)
+
+ old_of_controller = self.show_of_controller(of_id)
+
+ if old_of_controller:
+ result, content = self.db.update_rows('ofcs', ofc_data, WHERE={'uuid': of_id}, log=False)
+ if result >= 0:
+ return ofc_data
+ else:
+ raise SdnException("Error uptating OF contorller with uuid {}".format(of_id),
+ http_code=-result)
+ else:
+ raise SdnException("Error uptating OF contorller with uuid {}".format(of_id),
+ http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value)
+
+ def delete_of_controller(self, of_id):
+ """
+ Delete an openflow controller from DB.
+ :param of_id: openflow controller uuid
+ :return:
+ """
+ wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE={"uuid": of_id, "sdn": "true"})
+ if not wim_accounts:
+ raise SdnException("Cannot find sdn controller with id='{}'".format(of_id),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ elif len(wim_accounts) > 1:
+ raise SdnException("Found more than one sdn controller with id='{}'".format(of_id),
+ http_code=HTTPStatus.CONFLICT.value)
+ self.db.delete_row(FROM='wim_accounts', WHERE={"uuid": of_id})
+ self.db.delete_row(FROM='wims', WHERE={"uuid": wim_accounts[0]["wim_id"]})
+ return of_id
+
+ def _format_of_controller(self, wim_account, wim=None):
+ of_data = {x: wim_account[x] for x in ("uuid", "name", "user")}
+ if isinstance(wim_account["config"], str):
+ config = yaml.load(wim_account["config"], Loader=yaml.Loader)
+ of_data["dpid"] = config.get("dpid")
+ of_data["version"] = config.get("version")
+ if wim:
+ ip, port = wim["wim_url"].split(":")
+ of_data["ip"] = ip
+ of_data["port"] = port
+ of_data["type"] = wim["type"]
+ return of_data
+
+ def show_of_controller(self, of_id):
+ """
+ Show an openflow controller, identified by its uuid, from DB.
+ :param of_id: openflow controller uuid
+ :return:
+ """
+ wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE={"uuid": of_id, "sdn": "true"})
+ if not wim_accounts:
+ raise SdnException("Cannot find sdn controller with id='{}'".format(of_id),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ elif len(wim_accounts) > 1:
+ raise SdnException("Found more than one sdn controller with id='{}'".format(of_id),
+ http_code=HTTPStatus.CONFLICT.value)
+ wims = self.db.get_rows(FROM='wims', WHERE={"uuid": wim_accounts[0]["wim_id"]})
+ return self._format_of_controller(wim_accounts[0], wims[0])
+
+ def get_of_controllers(self, filter=None):
+ """
+ Show the openflow controllers stored in DB.
+ :return:
+ """
+ filter = filter or {}
+ filter["sdn"] = "true"
+ wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE=filter)
+ return [self._format_of_controller(w) for w in wim_accounts]
+
+ def set_of_port_mapping(self, maps, sdn_id, switch_dpid, vim_id):
+ """
+ Create new port mapping entry
+ :param maps: list with port mapping information
+ # maps = [{"ofc_id": <ofc_id>, "region": datacenter region, "compute_node": compute uuid,
+ # "pci": pci address, "switch_dpid": switch dpid, "switch_port": port name, "switch_mac": mac}]
+ :param sdn_id: sdn controller (wim_account) uuid
+ :param switch_dpid: switch dpid
+ :param vim_id: datacenter uuid
+ :return:
+ """
+ # get wim from wim_account
+ wim_accounts = self.db.get_rows(FROM='wim_accounts', WHERE={"uuid": sdn_id})
+ if not wim_accounts:
+ raise SdnException("Not found sdn id={}".format(sdn_id), http_code=HTTPStatus.NOT_FOUND.value)
+ wim_id = wim_accounts[0]["wim_id"]
+ db_wim_port_mappings = []
+ for map in maps:
+ new_map = {
+ 'wim_id': wim_id,
+ 'switch_dpid': switch_dpid,
+ "switch_port": map.get("switch_port"),
+ 'datacenter_id': vim_id,
+ "device_id": map.get("compute_node"),
+ "service_endpoint_id": switch_dpid + "-" + str(uuid4())
+ }
+ if map.get("pci"):
+ new_map["device_interface_id"] = map["pci"].lower()
+ config = {}
+ if map.get("switch_mac"):
+ config["switch_mac"] = map["switch_mac"]
+ if config:
+ new_map["service_mapping_info"] = yaml.safe_dump(config, default_flow_style=True, width=256)
+ db_wim_port_mappings.append(new_map)
+
+ db_tables = [
+ {"wim_port_mappings": db_wim_port_mappings},
+ ]
+ self.db.new_rows(db_tables, [])
+ return db_wim_port_mappings
+
+ def clear_of_port_mapping(self, db_filter=None):
+ """
+ Clear port mapping filtering using db_filter dict
+ :param db_filter: Parameter to filter during remove process
+ :return:
+ """
+ return self.db.delete_row(FROM='wim_port_mappings', WHERE=db_filter)
+
+ def get_of_port_mappings(self, db_filter=None):
+ """
+ Retrieve port mappings from DB
+ :param db_filter:
+ :return:
+ """
+ maps = self.db.get_rows(WHERE=db_filter, FROM='wim_port_mappings')
+ for map in maps:
+ if map.get("service_mapping_info"):
+ map["service_mapping_info"] = yaml.load(map["service_mapping_info"], Loader=yaml.Loader)
+ else:
+ map["service_mapping_info"] = {}
+ return maps
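+
+# Minimal usage sketch (illustrative only; "db" stands for the db instance that
+# nfvo.start_service() passes in, and the controller values are made up for
+# this example):
+#
+#   sdn = Sdn(db, plugins={})
+#   of_id = sdn.new_of_controller({"name": "sw1", "type": "floodlight",
+#                                  "ip": "10.0.0.1", "port": 8080,
+#                                  "dpid": "00:01:02:03:04:05:06:07",
+#                                  "user": None, "password": None})
+#   print(sdn.show_of_controller(of_id))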
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# pylint: disable=E1101
import unittest
# -*- coding: utf-8 -*-
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# pylint: disable=E1101
import unittest
import queue
import logging
from osm_ro import vimconn
+from osm_ro.wim.sdnconn import SdnConnectorError
import yaml
from osm_ro.db_base import db_base_Exception
-# TODO py3 BEGIN
-class ovimException(Exception):
- pass
-# TODO py3 END
+from http import HTTPStatus
from copy import deepcopy
__author__ = "Alfonso Tierno, Pablo Montes"
REFRESH_ERROR = 600
REFRESH_DELETE = 3600 * 10
- def __init__(self, task_lock, plugins, name=None, datacenter_name=None, datacenter_tenant_id=None,
- db=None, db_lock=None, ovim=None):
+ def __init__(self, task_lock, plugins, name=None, wim_account_id=None, datacenter_tenant_id=None, db=None):
"""Init a thread.
Arguments:
'id' number of thead
"""
threading.Thread.__init__(self)
self.plugins = plugins
+ self.plugin_name = "unknown"
self.vim = None
+ self.sdnconnector = None
+ self.sdnconn_config = None
self.error_status = None
- self.datacenter_name = datacenter_name
+ self.wim_account_id = wim_account_id
self.datacenter_tenant_id = datacenter_tenant_id
- self.ovim = ovim
+ self.port_mapping = None
+ if self.wim_account_id:
+ self.target_k = "wim_account_id"
+ self.target_v = self.wim_account_id
+ else:
+ self.target_k = "datacenter_vim_id"
+ self.target_v = self.datacenter_tenant_id
if not name:
- self.name = vimconn["id"] + "." + vimconn["config"]["datacenter_tenant_id"]
+ self.name = wim_account_id or str(datacenter_tenant_id)
else:
self.name = name
self.vim_persistent_info = {}
self.my_id = self.name[:64]
- self.logger = logging.getLogger('openmano.vim.' + self.name)
+ self.logger = logging.getLogger('openmano.{}.{}'.format("vim" if self.datacenter_tenant_id else "sdn",
+ self.name))
self.db = db
- self.db_lock = db_lock
self.task_lock = task_lock
self.task_queue = queue.Queue(2000)
- def get_vimconnector(self):
- try:
- from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
- select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
- 'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
- 'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
- 'user', 'passwd', 'dt.config as dt_config')
- where_ = {"dt.uuid": self.datacenter_tenant_id}
- vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
- vim = vims[0]
- vim_config = {}
- if vim["config"]:
- vim_config.update(yaml.load(vim["config"], Loader=yaml.Loader))
- if vim["dt_config"]:
- vim_config.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
- vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
- vim_config['datacenter_id'] = vim.get('datacenter_id')
-
- # get port_mapping
- with self.db_lock:
- vim_config["wim_external_ports"] = self.ovim.get_of_port_mappings(
- db_filter={"region": vim_config['datacenter_id'], "pci": None})
-
- self.vim = self.plugins["rovim_" + vim["type"]].vimconnector(
- uuid=vim['datacenter_id'], name=vim['datacenter_name'],
- tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
- url=vim['vim_url'], url_admin=vim['vim_url_admin'],
- user=vim['user'], passwd=vim['passwd'],
- config=vim_config, persistent_info=self.vim_persistent_info
- )
- self.error_status = None
- except Exception as e:
- self.logger.error("Cannot load vimconnector for vim_account {}: {}".format(self.datacenter_tenant_id, e))
- self.vim = None
- self.error_status = "Error loading vimconnector: {}".format(e)
+ def _proccess_sdn_exception(self, exc):
+ if isinstance(exc, SdnConnectorError):
+ raise
+ else:
+ self.logger.error("plugin={} throws a non SdnConnectorError exception {}".format(self.plugin_name, exc),
+ exc_info=True)
+ raise SdnConnectorError(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+
+ def _proccess_vim_exception(self, exc):
+ if isinstance(exc, vimconn.vimconnException):
+ raise
+ else:
+ self.logger.error("plugin={} throws a non vimconnException exception {}".format(self.plugin_name, exc),
+ exc_info=True)
+ raise vimconn.vimconnException(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+
+ def get_vim_sdn_connector(self):
+ if self.datacenter_tenant_id:
+ try:
+ from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
+ select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
+ 'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
+ 'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
+ 'user', 'passwd', 'dt.config as dt_config')
+ where_ = {"dt.uuid": self.datacenter_tenant_id}
+ vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
+ vim = vims[0]
+ vim_config = {}
+ if vim["config"]:
+ vim_config.update(yaml.load(vim["config"], Loader=yaml.Loader))
+ if vim["dt_config"]:
+ vim_config.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
+ vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
+ vim_config['datacenter_id'] = vim.get('datacenter_id')
+
+ # get port_mapping
+ # vim_port_mappings = self.ovim.get_of_port_mappings(
+ # db_filter={"datacenter_id": vim_config['datacenter_id']})
+ # vim_config["wim_external_ports"] = [x for x in vim_port_mappings
+ # if x["service_mapping_info"].get("wim")]
+ self.plugin_name = "rovim_" + vim["type"]
+ self.vim = self.plugins[self.plugin_name].vimconnector(
+ uuid=vim['datacenter_id'], name=vim['datacenter_name'],
+ tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
+ url=vim['vim_url'], url_admin=vim['vim_url_admin'],
+ user=vim['user'], passwd=vim['passwd'],
+ config=vim_config, persistent_info=self.vim_persistent_info
+ )
+ self.error_status = None
+ self.logger.info("Vim Connector loaded for vim_account={}, plugin={}".format(
+ self.datacenter_tenant_id, self.plugin_name))
+ except Exception as e:
+ self.logger.error("Cannot load vimconnector for vim_account={} plugin={}: {}".format(
+ self.datacenter_tenant_id, self.plugin_name, e))
+ self.vim = None
+ self.error_status = "Error loading vimconnector: {}".format(e)
+ else:
+ try:
+ wim_account = self.db.get_rows(FROM="wim_accounts", WHERE={"uuid": self.wim_account_id})[0]
+ wim = self.db.get_rows(FROM="wims", WHERE={"uuid": wim_account["wim_id"]})[0]
+ if wim["config"]:
+ self.sdnconn_config = yaml.load(wim["config"], Loader=yaml.Loader)
+ else:
+ self.sdnconn_config = {}
+ if wim_account["config"]:
+ self.sdnconn_config.update(yaml.load(wim_account["config"], Loader=yaml.Loader))
+ self.port_mappings = self.db.get_rows(FROM="wim_port_mappings", WHERE={"wim_id": wim_account["wim_id"]})
+ if self.port_mappings:
+ self.sdnconn_config["service_endpoint_mapping"] = self.port_mappings
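+ # Illustrative sketch (not part of this change): after merging the wim and wim_account
+ # config and the port mappings, sdnconn_config typically looks similar to the following
+ # (all values here are hypothetical):
+ #   {"mapping_not_needed": False,
+ #    "service_endpoint_mapping": [
+ #        {"device_id": "compute-1", "device_interface_id": "0000:5d:00.1",
+ #         "service_endpoint_id": "port-1/1",
+ #         "switch_dpid": "00:01:02:03:04:05:06:07", "switch_port": "1/1",
+ #         "service_mapping_info": None}]}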
+ self.plugin_name = "rosdn_" + wim["type"]
+ self.sdnconnector = self.plugins[self.plugin_name](
+ wim, wim_account, config=self.sdnconn_config)
+ self.error_status = None
+ self.logger.info("Sdn Connector loaded for wim_account={}, plugin={}".format(
+ self.wim_account_id, self.plugin_name))
+ except Exception as e:
+ self.logger.error("Cannot load sdn connector for wim_account={}, plugin={}: {}".format(
+ self.wim_account_id, self.plugin_name, e))
+ self.sdnconnector = None
+ self.error_status = "Error loading sdn connector: {}".format(e)
def _get_db_task(self):
"""
while True:
# get 20 (database_limit) entries each time
vim_actions = self.db.get_rows(FROM="vim_wim_actions",
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"status": ['SCHEDULED', 'BUILD', 'DONE'],
"worker": [None, self.my_id], "modified_at<=": now
},
task_related = task["related"]
# lock ...
self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
"worker": [None, self.my_id],
"related": task_related,
})
# ... and read all related and check if locked
related_tasks = self.db.get_rows(FROM="vim_wim_actions",
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
"related": task_related,
"item": task["item"],
if some_tasks_not_locked:
if some_tasks_locked: # unlock
self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"worker": self.my_id,
"related": task_related,
"item": task["item"],
try:
# get all related tasks
related_tasks = self.db.get_rows(FROM="vim_wim_actions",
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
"action": ["FIND", "CREATE"],
"related": task["related"],
# mark task_create as FINISHED
self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"instance_action_id": task_create["instance_action_id"],
"task_index": task_create["task_index"]
})
UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
width=256),
"vim_id": task_create.get("vim_id")},
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"instance_action_id": dependency_task["instance_action_id"],
"task_index": dependency_task["task_index"]
})
task_vim_interface = task_interface.get("vim_info")
if task_vim_interface != interface:
# delete old port
- if task_interface.get("sdn_port_id"):
- try:
- with self.db_lock:
- self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
- task_interface["sdn_port_id"] = None
- except ovimException as e:
- error_text = "ovimException deleting external_port={}: {}".format(
- task_interface["sdn_port_id"], e)
- self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
- task_warning_msg += error_text
- # TODO Set error_msg at instance_nets instead of instance VMs
+ # if task_interface.get("sdn_port_id"):
+ # try:
+ # self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
+ # task_interface["sdn_port_id"] = None
+ # except ovimException as e:
+ # error_text = "ovimException deleting external_port={}: {}".format(
+ # task_interface["sdn_port_id"], e)
+ # self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+ # task_warning_msg += error_text
+ # # TODO Set error_msg at instance_nets instead of instance VMs
# Create SDN port
- sdn_net_id = task_interface.get("sdn_net_id")
- if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
- sdn_port_name = sdn_net_id + "." + task["vim_id"]
- sdn_port_name = sdn_port_name[:63]
- try:
- with self.db_lock:
- sdn_port_id = self.ovim.new_external_port(
- {"compute_node": interface["compute_node"],
- "pci": interface["pci"],
- "vlan": interface.get("vlan"),
- "net_id": sdn_net_id,
- "region": self.vim["config"]["datacenter_id"],
- "name": sdn_port_name,
- "mac": interface.get("mac_address")})
- task_interface["sdn_port_id"] = sdn_port_id
- except (ovimException, Exception) as e:
- error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
- format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
- self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
- task_warning_msg += error_text
- # TODO Set error_msg at instance_nets instead of instance VMs
+ # sdn_net_id = task_interface.get("sdn_net_id")
+ # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+ # sdn_port_name = sdn_net_id + "." + task["vim_id"]
+ # sdn_port_name = sdn_port_name[:63]
+ # try:
+ # sdn_port_id = self.ovim.new_external_port(
+ # {"compute_node": interface["compute_node"],
+ # "pci": interface["pci"],
+ # "vlan": interface.get("vlan"),
+ # "net_id": sdn_net_id,
+ # "region": self.vim["config"]["datacenter_id"],
+ # "name": sdn_port_name,
+ # "mac": interface.get("mac_address")})
+ # task_interface["sdn_port_id"] = sdn_port_id
+ # except (ovimException, Exception) as e:
+ # error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
+ # format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
+ # self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+ # task_warning_msg += error_text
+ # # TODO Set error_msg at instance_nets instead of instance VMs
self.db.update_rows('instance_interfaces',
UPDATE={"mac_address": interface.get("mac_address"),
"vlan": interface.get("vlan")},
WHERE={'uuid': task_interface["iface_id"]})
task_interface["vim_info"] = interface
+ # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+ # # TODO Send message to task SDN to update
# check and update task and instance_vms database
vim_info_error_msg = None
task_vim_info = task["extra"].get("vim_info")
task_vim_status = task["extra"].get("vim_status")
task_error_msg = task.get("error_msg")
- task_sdn_net_id = task["extra"].get("sdn_net_id")
+ # task_sdn_net_id = task["extra"].get("sdn_net_id")
vim_info_status = vim_info["status"]
vim_info_error_msg = vim_info.get("error_msg")
# get ovim status
- if task_sdn_net_id:
- try:
- with self.db_lock:
- sdn_net = self.ovim.show_network(task_sdn_net_id)
- except (ovimException, Exception) as e:
- text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
- self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
- sdn_net = {"status": "ERROR", "last_error": text_error}
- if sdn_net["status"] == "ERROR":
- if not vim_info_error_msg:
- vim_info_error_msg = str(sdn_net.get("last_error"))
- else:
- vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
- self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
- self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
- vim_info_status = "ERROR"
- elif sdn_net["status"] == "BUILD":
- if vim_info_status == "ACTIVE":
- vim_info_status = "BUILD"
+ # if task_sdn_net_id:
+ # try:
+ # sdn_net = self.ovim.show_network(task_sdn_net_id)
+ # except (ovimException, Exception) as e:
+ # text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
+ # self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
+ # sdn_net = {"status": "ERROR", "last_error": text_error}
+ # if sdn_net["status"] == "ERROR":
+ # if not vim_info_error_msg:
+ # vim_info_error_msg = str(sdn_net.get("last_error"))
+ # else:
+ # vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
+ # self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
+ # self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
+ # vim_info_status = "ERROR"
+ # elif sdn_net["status"] == "BUILD":
+ # if vim_info_status == "ACTIVE":
+ # vim_info_status = "BUILD"
# update database
if vim_info_error_msg:
# Move this task to the time dependency is going to be modified plus 10 seconds.
self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
UPDATE={"worker": None},
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id, "worker": self.my_id,
+ WHERE={self.target_k: self.target_v, "worker": self.my_id,
"related": task["related"],
})
# task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
if task["status"] == "SUPERSEDED":
# not needed to do anything but update database with the new status
database_update = None
- elif not self.vim:
+ elif not self.vim and not self.sdnconnector:
task["status"] = "FAILED"
task["error_msg"] = self.error_status
database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
database_update = self.get_net(task)
else:
raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_wim_nets':
+ if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+ database_update = self.new_or_update_sdn_net(task)
+ create_or_find = True
+ elif task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_or_update_sdn_net(task)
+ elif task["action"] == "DELETE":
+ self.del_sdn_net(task)
+ elif task["action"] == "FIND":
+ database_update = self.get_sdn_net(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
elif task["item"] == 'instance_sfis':
if task["action"] == "CREATE":
create_or_find = True
"error_msg": task["error_msg"],
},
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"worker": self.my_id,
"action": ["FIND", "CREATE"],
"related": task["related"],
self.db.update_rows(
table="vim_wim_actions", modified_time=0,
UPDATE={"worker": None},
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ WHERE={self.target_k: self.target_v,
"worker": self.my_id,
"related": task["related"],
})
def run(self):
self.logger.debug("Starting")
while True:
- self.get_vimconnector()
+ self.get_vim_sdn_connector()
self.logger.debug("Vimconnector loaded")
reload_thread = False
return instance_element_update
def del_vm(self, task):
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ # task_id = task["instance_action_id"] + "." + str(task["task_index"])
vm_vim_id = task["vim_id"]
- interfaces = task["extra"].get("interfaces", ())
+ # interfaces = task["extra"].get("interfaces", ())
try:
- for iface in interfaces.values():
- if iface.get("sdn_port_id"):
- try:
- with self.db_lock:
- self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
- except ovimException as e:
- self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
- task_id, iface["sdn_port_id"], e), exc_info=True)
- # TODO Set error_msg at instance_nets
+ # for iface in interfaces.values():
+ # if iface.get("sdn_port_id"):
+ # try:
+ # self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
+ # except ovimException as e:
+ # self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
+ # task_id, iface["sdn_port_id"], e), exc_info=True)
+ # # TODO Set error_msg at instance_nets
self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
def new_net(self, task):
vim_net_id = None
- sdn_net_id = None
task_id = task["instance_action_id"] + "." + str(task["task_index"])
action_text = ""
try:
action_text = "creating VIM"
vim_net_id, created_items = self.vim.new_network(*params[0:3])
- net_name = params[0]
- net_type = params[1]
- wim_account_name = None
- if len(params) >= 4:
- wim_account_name = params[3]
-
- sdn_controller = self.vim.config.get('sdn-controller')
- if sdn_controller and (net_type == "data" or net_type == "ptp"):
- network = {"name": net_name, "type": net_type, "region": self.vim["config"]["datacenter_id"]}
-
- vim_net = self.vim.get_network(vim_net_id)
- if vim_net.get('encapsulation') != 'vlan':
- raise vimconn.vimconnException(
- "net '{}' defined as type '{}' has not vlan encapsulation '{}'".format(
- net_name, net_type, vim_net['encapsulation']))
- network["vlan"] = vim_net.get('segmentation_id')
- action_text = "creating SDN"
- with self.db_lock:
- sdn_net_id = self.ovim.new_network(network)
-
- if wim_account_name and self.vim.config["wim_external_ports"]:
- # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
- action_text = "attaching external port to ovim network"
- sdn_port_name = "external_port"
- sdn_port_data = {
- "compute_node": "__WIM:" + wim_account_name[0:58],
- "pci": None,
- "vlan": network["vlan"],
- "net_id": sdn_net_id,
- "region": self.vim["config"]["datacenter_id"],
- "name": sdn_port_name,
- }
- try:
- with self.db_lock:
- sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
- except ovimException:
- sdn_port_data["compute_node"] = "__WIM"
- with self.db_lock:
- sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
- self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
- sdn_net_id))
+ # net_name = params[0]
+ # net_type = params[1]
+ # wim_account_name = None
+ # if len(params) >= 4:
+ # wim_account_name = params[3]
+
+ # TODO fix at nfvo adding external port
+ # if wim_account_name and self.vim.config["wim_external_ports"]:
+ # # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
+ # action_text = "attaching external port to ovim network"
+ # sdn_port_name = "external_port"
+ # sdn_port_data = {
+ # "compute_node": "__WIM:" + wim_account_name[0:58],
+ # "pci": None,
+ # "vlan": network["vlan"],
+ # "net_id": sdn_net_id,
+ # "region": self.vim["config"]["datacenter_id"],
+ # "name": sdn_port_name,
+ # }
+ # try:
+ # sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+ # except ovimException:
+ # sdn_port_data["compute_node"] = "__WIM"
+ # sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+ # self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
+ # sdn_net_id))
task["status"] = "DONE"
task["extra"]["vim_info"] = {}
- task["extra"]["sdn_net_id"] = sdn_net_id
+ # task["extra"]["sdn_net_id"] = sdn_net_id
task["extra"]["vim_status"] = "BUILD"
task["extra"]["created"] = True
task["extra"]["created_items"] = created_items
task["error_msg"] = None
task["vim_id"] = vim_net_id
- instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "BUILD",
+ instance_element_update = {"vim_net_id": vim_net_id, "status": "BUILD",
"created": True, "error_msg": None}
return instance_element_update
- except (vimconn.vimconnException, ovimException) as e:
+ except vimconn.vimconnException as e:
self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
task["status"] = "FAILED"
task["vim_id"] = vim_net_id
task["error_msg"] = self._format_vim_error_msg(str(e))
- task["extra"]["sdn_net_id"] = sdn_net_id
- instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "VIM_ERROR",
+ # task["extra"]["sdn_net_id"] = sdn_net_id
+ instance_element_update = {"vim_net_id": vim_net_id, "status": "VIM_ERROR",
"error_msg": task["error_msg"]}
return instance_element_update
def del_net(self, task):
net_vim_id = task["vim_id"]
- sdn_net_id = task["extra"].get("sdn_net_id")
+ # sdn_net_id = task["extra"].get("sdn_net_id")
try:
if net_vim_id:
self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
- if sdn_net_id:
- # Delete any attached port to this sdn network. There can be ports associated to this network in case
- # it was manually done using 'openmano vim-net-sdn-attach'
- with self.db_lock:
- port_list = self.ovim.get_ports(columns={'uuid'},
- filter={'name': 'external_port', 'net_id': sdn_net_id})
- for port in port_list:
- self.ovim.delete_port(port['uuid'], idempotent=True)
- self.ovim.delete_network(sdn_net_id, idempotent=True)
+ # if sdn_net_id:
+ # # Delete any attached port to this sdn network. There can be ports associated to this network in case
+ # # it was manually done using 'openmano vim-net-sdn-attach'
+ # port_list = self.ovim.get_ports(columns={'uuid'},
+ # filter={'name': 'external_port', 'net_id': sdn_net_id})
+ # for port in port_list:
+ # self.ovim.delete_port(port['uuid'], idempotent=True)
+ # self.ovim.delete_network(sdn_net_id, idempotent=True)
task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
return None
- except ovimException as e:
- task["error_msg"] = self._format_vim_error_msg("ovimException obtaining and deleting external "
- "ports for net {}: {}".format(sdn_net_id, str(e)))
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
task["status"] = "FAILED"
return None
+ def new_or_update_sdn_net(self, task):
+ wimconn_net_id = task["vim_id"]
+ created_items = task["extra"].get("created_items")
+ connected_ports = task["extra"].get("connected_ports", [])
+ new_connected_ports = []
+ last_update = task["extra"].get("last_update", 0)
+ sdn_status = "BUILD"
+ sdn_info = None
+
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ error_list = []
+ try:
+ # FIND
+ if task["extra"].get("find"):
+ wimconn_id = task["extra"]["find"][0]
+ try:
+ self.sdnconnector.get_connectivity_service_status(wimconn_id)  # check that the service exists
+ wimconn_net_id = wimconn_id
+ instance_element_update = {"wim_internal_id": wimconn_net_id, "created": False, "status": "BUILD",
+ "error_msg": None, }
+ return instance_element_update
+ except Exception as e:
+ if isinstance(e, SdnConnectorError) and e.http_code == HTTPStatus.NOT_FOUND.value:
+ pass
+ else:
+ self._proccess_sdn_exception(e)
+
+ params = task["params"]
+ # CREATE
+ # look for ports
+ sdn_ports = []
+ pending_ports = 0
+
+ ports = self.db.get_rows(FROM='instance_interfaces', WHERE={'instance_wim_net_id': task["item_id"]})
+ sdn_need_update = False
+ for port in ports:
+ # TODO. Do not connect if already done
+ if port.get("compute_node") and port.get("pci"):
+ for map in self.port_mappings:
+ if map.get("device_id") == port["compute_node"] and \
+ map.get("device_interface_id") == port["pci"]:
+ break
+ else:
+ if self.sdnconn_config.get("mapping_not_needed"):
+ map = {
+ "service_endpoint_id": "{}:{}".format(port["compute_node"], port["pci"]),
+ "service_endpoint_encapsulation_info": {
+ "vlan": port["vlan"],
+ "mac": port["mac_address"],
+ "device_id": port["compute_node"],
+ "device_interface_id": port["pci"]
+ }
+ }
+ else:
+ map = None
+ error_list.append("Port mapping not found for compute_node={} pci={}".format(
+ port["compute_node"], port["pci"]))
+
+ if map:
+ if port["uuid"] not in connected_ports or port["modified_at"] > last_update:
+ sdn_need_update = True
+ new_connected_ports.append(port["uuid"])
+ sdn_ports.append({
+ "service_endpoint_id": map["service_endpoint_id"],
+ "service_endpoint_encapsulation_type": "dot1q" if port["model"] == "SR-IOV" else None,
+ "service_endpoint_encapsulation_info": {
+ "vlan": port["vlan"],
+ "mac": port["mac_address"],
+ "device_id": map.get("device_id"),
+ "device_interface_id": map.get("device_interface_id"),
+ "switch_dpid": map.get("switch_dpid"),
+ "switch_port": map.get("switch_port"),
+ "service_mapping_info": map.get("service_mapping_info"),
+ }
+ })
+
+ else:
+ pending_ports += 1
+ if pending_ports:
+ error_list.append("Waiting for getting interfaces location from VIM. Obtained '{}' of {}"
+ .format(len(ports)-pending_ports, len(ports)))
+ # if there are more ports to connect or they have been modified, call create/update
+ if sdn_need_update and len(sdn_ports) >= 2:
+ if not wimconn_net_id:
+ if params[0] == "data":
+ net_type = "ELAN"
+ elif params[0] == "ptp":
+ net_type = "ELINE"
+ else:
+ net_type = "L3"
+
+ wimconn_net_id, created_items = self.sdnconnector.create_connectivity_service(net_type, sdn_ports)
+ else:
+ created_items = self.sdnconnector.edit_connectivity_service(wimconn_net_id, conn_info=created_items,
+ connection_points=sdn_ports)
+ last_update = time.time()
+ connected_ports = new_connected_ports
+ elif wimconn_net_id:
+ try:
+ wim_status_dict = self.sdnconnector.get_connectivity_service_status(wimconn_net_id,
+ conn_info=created_items)
+ sdn_status = wim_status_dict["sdn_status"]
+ if wim_status_dict.get("error_msg"):
+ error_list.append(wim_status_dict.get("error_msg"))
+ if wim_status_dict.get("sdn_info"):
+ sdn_info = str(wim_status_dict.get("sdn_info"))
+ except Exception as e:
+ self._proccess_sdn_exception(e)
+
+ task["status"] = "DONE"
+ task["extra"]["vim_info"] = {}
+ # task["extra"]["sdn_net_id"] = sdn_net_id
+ task["extra"]["vim_status"] = "BUILD"
+ task["extra"]["created"] = True
+ task["extra"]["created_items"] = created_items
+ task["extra"]["connected_ports"] = connected_ports
+ task["extra"]["last_update"] = last_update
+ task["error_msg"] = self._format_vim_error_msg(" ; ".join(error_list))
+ task["vim_id"] = wimconn_net_id
+ instance_element_update = {"wim_internal_id": wimconn_net_id, "status": sdn_status,
+ "created": True, "error_msg": task["error_msg"] or None}
+ except (vimconn.vimconnException, SdnConnectorError) as e:
+ self.logger.error("task={} new-sdn-net: Error: {}".format(task_id, e))
+ task["status"] = "FAILED"
+ task["vim_id"] = wimconn_net_id
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ # task["extra"]["sdn_net_id"] = sdn_net_id
+ instance_element_update = {"wim_internal_id": wimconn_net_id, "status": "WIM_ERROR",
+ "error_msg": task["error_msg"]}
+ if sdn_info:
+ instance_element_update["wim_info"] = sdn_info
+ return instance_element_update
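+ # Illustrative sketch (not part of this change): each entry appended to sdn_ports above
+ # follows the sdnconn connection-point format; for an SR-IOV interface it would look
+ # roughly like this (values are hypothetical; the "compute:pci" form of
+ # service_endpoint_id corresponds to the mapping_not_needed case):
+ #   {"service_endpoint_id": "compute-3:0000:81:00.1",
+ #    "service_endpoint_encapsulation_type": "dot1q",
+ #    "service_endpoint_encapsulation_info": {
+ #        "vlan": 2017, "mac": "52:54:00:aa:bb:cc",
+ #        "device_id": "compute-3", "device_interface_id": "0000:81:00.1",
+ #        "switch_dpid": None, "switch_port": None, "service_mapping_info": None}}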
+
+ def del_sdn_net(self, task):
+ wimconn_net_id = task["vim_id"]
+ try:
+ try:
+ if wimconn_net_id:
+ self.sdnconnector.delete_connectivity_service(wimconn_net_id, task["extra"].get("created_items"))
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+ except Exception as e:
+ self._proccess_sdn_exception(e)
+ except SdnConnectorError as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if e.http_code == HTTPStatus.NOT_FOUND.value:
+ # If not found mark as Done and fill error_msg
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+ return None
+ task["status"] = "FAILED"
+ return None
+
# Service Function Instances
def new_sfi(self, task):
vim_sfi_id = None
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
from contextlib import contextmanager
from itertools import groupby
from operator import itemgetter
-from sys import exc_info
+# from sys import exc_info
from uuid import uuid4
from ..utils import remove_none_items
DbBaseException,
NoWimConnectedToDatacenters,
UnexpectedDatabaseError,
- WimAccountNotActive
+ WimAccountNotActive,
+ UndefinedWimConnector
)
from .wim_thread import WimThread
+# from ..http_tools.errors import Bad_Request
+from pkg_resources import iter_entry_points
class WimEngine(object):
"""Logic supporting the establishment of WAN links when NS spans across
different datacenters.
"""
- def __init__(self, persistence, logger=None, ovim=None):
+ def __init__(self, persistence, plugins, logger=None, ovim=None):
self.persist = persistence
+ self.plugins = plugins if plugins is not None else {}
self.logger = logger or logging.getLogger('openmano.wim.engine')
self.threads = {}
self.connectors = {}
self.ovim = ovim
+ def _load_plugin(self, name, type="sdn"):
+ # type can be vim or sdn
+ for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+ self.plugins[name] = v.load()
+ if name and name not in self.plugins:
+ raise UndefinedWimConnector(type, name)
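+ # Illustrative sketch (not part of this change): plugins are resolved through setuptools
+ # entry points, so an SDN connector package is expected to register itself under the
+ # 'osm_rosdn.plugins' group. A hypothetical plugin setup.py could declare:
+ #   setup(
+ #       name="osm_rosdn_myctl",  # hypothetical package, module and class names
+ #       entry_points={"osm_rosdn.plugins":
+ #                     ["rosdn_myctl = osm_rosdn_myctl.sdnconn_myctl:MySdnConnector"]},
+ #       ...
+ #   )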
+
def create_wim(self, properties):
"""Create a new wim record according to the properties
"""
port_mapping = ((properties.get('config', {}) or {})
.pop('wim_port_mapping', {}))
+ plugin_name = "rosdn_" + properties["type"]
+ if plugin_name not in self.plugins:
+ self._load_plugin(plugin_name, type="sdn")
+
uuid = self.persist.create_wim(properties)
if port_mapping:
past"""
if instance_scenario_id:
wan_links = self.persist.get_wan_links(
- instance_scenario_id=instance_scenario_id)
+ instance_scenario_id=instance_scenario_id, sdn='false')
return [self.delete_action(l) for l in wan_links]
def incorporate_actions(self, wim_actions, instance_action):
"""
thread = None
try:
- thread = WimThread(self.persist, wim_account, ovim=self.ovim)
+ thread = WimThread(self.persist, self.plugins, wim_account, ovim=self.ovim)
self.threads[wim_account['uuid']] = thread
thread.start()
except: # noqa
def start_threads(self):
"""Start the threads responsible for processing WIM Actions"""
accounts = self.persist.get_wim_accounts(error_if_none=False)
- self.threads = remove_none_items(
- {a['uuid']: self._spawn_thread(a) for a in accounts})
+ thread_dict = {}
+ for account in accounts:
+ try:
+ plugin_name = "rosdn_" + account["wim"]["type"]
+ if plugin_name not in self.plugins:
+ self._load_plugin(plugin_name, type="sdn")
+ thread_dict[account["uuid"]] = self._spawn_thread(account)
+ except UndefinedWimConnector as e:
+ self.logger.error(e)
+ self.threads = remove_none_items(thread_dict)
def stop_threads(self):
"""Stop the threads responsible for processing WIM Actions"""
class UndefinedWimConnector(DbBaseException):
"""The connector class for the specified wim type is not implemented"""
- def __init__(self, wim_type, module_name, location_reference):
- super(UndefinedWimConnector, self).__init__(
- ('{}: `{}`. Could not find module `{}` '
- '(check if it is necessary to install a plugin)'
- .format(self.__class__.__doc__, wim_type, module_name)),
- http_code=Bad_Request)
+ def __init__(self, wim_type, module_name):
+ super(UndefinedWimConnector, self).__init__("Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has"
+ " not been registered".format(t=wim_type, n=module_name),
+ http_code=Bad_Request)
class WimAccountOverwrite(DbBaseException):
execute any action
"""
import json
-from .wimconn import WimConnectorError
+from .sdnconn import SdnConnectorError
class FailingConnector(object):
def __init__(self, error_msg):
self.error_msg = error_msg
+ def __call__(self, wim, wim_account, config=None, logger=None):
+ return self
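+ # Returning self lets this object stand in for a plugin class: "instantiating" the
+ # failing connector yields the same instance, so the stored error_msg surfaces as soon
+ # as any connector method is invoked.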
+
+ def vimconnector(self, *args, **kwargs):
+ raise Exception(self.error_msg)
+
def check_credentials(self):
- raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
+ raise SdnConnectorError('Impossible to use WIM:\n' + self.error_msg)
def get_connectivity_service_status(self, service_uuid, _conn_info=None):
- raise WimConnectorError('Impossible to retrieve status for {}\n\n{}'
+ raise SdnConnectorError('Impossible to retrieve status for {}\n\n{}'
.format(service_uuid, self.error_msg))
def create_connectivity_service(self, service_uuid, *args, **kwargs):
- raise WimConnectorError('Impossible to connect {}.\n{}\n{}\n{}'
+ raise SdnConnectorError('Impossible to connect {}.\n{}\n{}\n{}'
.format(service_uuid, self.error_msg,
json.dumps(args, indent=4),
json.dumps(kwargs, indent=4)))
def delete_connectivity_service(self, service_uuid, _conn_info=None):
- raise WimConnectorError('Impossible to disconnect {}\n\n{}'
+ raise SdnConnectorError('Impossible to disconnect {}\n\n{}'
.format(service_uuid, self.error_msg))
def edit_connectivity_service(self, service_uuid, *args, **kwargs):
- raise WimConnectorError('Impossible to change connection {}.\n{}\n'
+ raise SdnConnectorError('Impossible to change connection {}.\n{}\n'
'{}\n{}'
.format(service_uuid, self.error_msg,
json.dumps(args, indent=4),
json.dumps(kwargs, indent=4)))
def clear_all_connectivity_services(self):
- raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
+ raise SdnConnectorError('Impossible to use WIM:\n' + self.error_msg)
def get_all_active_connectivity_services(self):
- raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
+ raise SdnConnectorError('Impossible to use WIM:\n' + self.error_msg)
--- /dev/null
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+import logging
+from http import HTTPStatus
+from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError
+from uuid import uuid4
+
+"""
+Implement an abstract class 'OpenflowConn' and an engine 'SdnConnectorOpenFlow', used as a base for SDN plugins
+that implement pro-active openflow rules.
+"""
+
+__author__ = "Alfonso Tierno"
+__date__ = "2019-11-11"
+
+
+class OpenflowConnException(Exception):
+ """Common and base class Exception for all vimconnector exceptions"""
+ def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST.value):
+ Exception.__init__(self, message)
+ self.http_code = http_code
+
+
+class OpenflowConnConnectionException(OpenflowConnException):
+ """Connectivity error with the VIM"""
+ def __init__(self, message, http_code=HTTPStatus.SERVICE_UNAVAILABLE.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnUnexpectedResponse(OpenflowConnException):
+ """Get an wrong response from VIM"""
+ def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnAuthException(OpenflowConnException):
+ """Invalid credentials or authorization to perform this action over the VIM"""
+ def __init__(self, message, http_code=HTTPStatus.UNAUTHORIZED.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnNotFoundException(OpenflowConnException):
+ """The item is not found at VIM"""
+ def __init__(self, message, http_code=HTTPStatus.NOT_FOUND.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnConflictException(OpenflowConnException):
+ """There is a conflict, e.g. more item found than one"""
+ def __init__(self, message, http_code=HTTPStatus.CONFLICT.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnNotSupportedException(OpenflowConnException):
+ """The request is not supported by connector"""
+ def __init__(self, message, http_code=HTTPStatus.SERVICE_UNAVAILABLE.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConnNotImplemented(OpenflowConnException):
+ """The method is not implemented by the connected"""
+ def __init__(self, message, http_code=HTTPStatus.NOT_IMPLEMENTED.value):
+ OpenflowConnException.__init__(self, message, http_code)
+
+
+class OpenflowConn:
+ """
+ Openflow controller connector abstract implementation.
+ """
+ def __init__(self, params):
+ self.name = "openflow_conector"
+ self.pp2ofi = {} # From Physical Port to OpenFlow Index
+ self.ofi2pp = {} # From OpenFlow Index to Physical Port
+ self.logger = logging.getLogger('openflow_conn')
+ self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
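+ # e.g. passing params = {"of_debug": "DEBUG"} raises the log verbosity while
+ # troubleshooting a controller connection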
+
+ def get_of_switches(self):
+ """"
+ Obtain a a list of switches or DPID detected by this controller
+ :return: list length, and a list where each element a tuple pair (DPID, IP address), text_error: if fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+ def obtain_port_correspondence(self):
+ """
+ Obtain the correspondence between physical and openflow port names
+ :return: dictionary with physical name as key and openflow name as value; error_text if it fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+ def get_of_rules(self, translate_of_ports=True):
+ """
+ Obtain the rules inserted at openflow controller
+ :param translate_of_ports: if True it translates ports from openflow index to physical switch name
+ :return: list where each item is a dictionary with the following content:
+ priority: rule priority
+ name: rule name
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, can be missing or None if it does not apply
+ vlan_id: match vlan tag of the rule, can be missing or None if it does not apply
+ actions: list of actions, composed of pair tuples:
+ (vlan, None/int): for stripping/setting a vlan tag
+ (out, port): send to this port
+ switch: DPID, all
+ text_error if fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+ def del_flow(self, flow_name):
+ """
+ Delete a flow rule identified by its name
+ :param flow_name: name of the rule to delete
+ :return: None if ok, text_error if fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+ def new_flow(self, data):
+ """
+ Insert a new static rule
+ :param data: dictionary with the following content:
+ priority: rule priority
+ name: rule name
+ ingress_port: match input port of the rule
+ dst_mac: match destination mac address of the rule, missing or None if it does not apply
+ vlan_id: match vlan tag of the rule, missing or None if it does not apply
+ actions: list of actions, composed of pair tuples with these possibilities:
+ ('vlan', None/int): for stripping/setting a vlan tag
+ ('out', port): send to this port
+ :return: None if ok, text_error if fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+ def clear_all_flows(self):
+ """"
+ Delete all existing rules
+ :return: None if ok, text_error if fails
+ """
+ raise OpenflowConnNotImplemented("Should have implemented this")
+
+
+class SdnConnectorOpenFlow(SdnConnectorBase):
+ """
+ This class is the base engine of SDN plugins based on openflow rules
+ """
+ flow_fields = ('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id')
+
+ def __init__(self, wim, wim_account, config=None, logger=None, of_connector=None):
+ self.logger = logger or logging.getLogger('openmano.sdnconn.openflow')
+ self.of_connector = of_connector
+ self.of_controller_nets_with_same_vlan = (config or {}).get("of_controller_nets_with_same_vlan", False)
+
+ def check_credentials(self):
+ try:
+ self.of_connector.obtain_port_correspondence()
+ except OpenflowConnException as e:
+ raise SdnConnectorError(e, http_code=e.http_code)
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ conn_info = conn_info or {}
+ return {
+ "sdn_status": conn_info.get("status", "ERROR"),
+ "error_msg": conn_info.get("error_msg", "Variable conn_info not provided"),
+ }
+ # TODO check rules connecting to of_connector
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ net_id = str(uuid4())
+ ports = []
+ for cp in connection_points:
+ port = {
+ "uuid": cp["service_endpoint_id"],
+ "vlan": cp.get("service_endpoint_encapsulation_info", {}).get("vlan"),
+ "mac": cp.get("service_endpoint_encapsulation_info", {}).get("mac"),
+ "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get("switch_port"),
+ }
+ ports.append(port)
+ try:
+ created_items = self._set_openflow_rules(service_type, net_id, ports, created_items=None)
+ return net_id, created_items
+ except (SdnConnectorError, OpenflowConnException) as e:
+ raise SdnConnectorError(e, http_code=e.http_code)
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ try:
+ service_type = "ELAN"
+ ports = []
+ self._set_openflow_rules(service_type, service_uuid, ports, created_items=conn_info)
+ return None
+ except (SdnConnectorError, OpenflowConnException) as e:
+ raise SdnConnectorError(e, http_code=e.http_code)
+
+ def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
+ ports = []
+ for cp in connection_points:
+ port = {
+ "uuid": cp["service_endpoint_id"],
+ "vlan": cp.get("service_endpoint_encapsulation_info", {}).get("vlan"),
+ "mac": cp.get("service_endpoint_encapsulation_info", {}).get("mac"),
+ "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get("switch_port"),
+ }
+ ports.append(port)
+ service_type = "ELAN" # TODO. Store at conn_info for later use
+ try:
+ created_items = self._set_openflow_rules(service_type, service_uuid, ports, created_items=conn_info)
+ return created_items
+ except (SdnConnectorError, OpenflowConnException) as e:
+ raise SdnConnectorError(e, http_code=e.http_code)
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links corresponding to a WIM"""
+ pass
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM
+ """
+ pass
+
+ def _set_openflow_rules(self, net_type, net_id, ports, created_items=None):
+ ifaces_nb = len(ports)
+ if not created_items:
+ created_items = {"status": None, "error_msg": None, "installed_rules_ids": []}
+ rules_to_delete = created_items.get("installed_rules_ids") or []
+ new_installed_rules_ids = []
+ error_list = []
+
+ try:
+ step = "Checking ports and network type compatibility"
+ if ifaces_nb < 2:
+ pass
+ elif net_type == 'ELINE':
+ if ifaces_nb > 2:
+ raise SdnConnectorError("'ELINE' type network cannot connect {} interfaces, only 2".format(
+ ifaces_nb))
+ elif net_type == 'ELAN':
+ if ifaces_nb > 2 and self.of_controller_nets_with_same_vlan:
+ # check all ports are VLAN (tagged) or none
+ vlan_tags = []
+ for port in ports:
+ if port["vlan"] not in vlan_tags:
+ vlan_tags.append(port["vlan"])
+ if len(vlan_tags) > 1:
+ raise SdnConnectorError("This pluging cannot connect ports with diferent VLAN tags when flag "
+ "'of_controller_nets_with_same_vlan' is active")
+ else:
+ raise SdnConnectorError('Only ELINE or ELAN network types are supported for openflow')
+
+ # Get the existing flows at openflow controller
+ step = "Getting installed openflow rules"
+ existing_flows = self.of_connector.get_of_rules()
+ existing_flows_ids = [flow["name"] for flow in existing_flows]
+
+ # calculate new flows to be inserted
+ step = "Compute needed openflow rules"
+ new_flows = self._compute_net_flows(net_id, ports)
+
+ name_index = 0
+ for flow in new_flows:
+ # 1 check if an equal flow is already present
+ index = self._check_flow_already_present(flow, existing_flows)
+ if index >= 0:
+ flow_id = existing_flows[index]["name"]
+ self.logger.debug("Skipping already present flow %s", str(flow))
+ else:
+ # 2 look for a non used name
+ flow_name = flow["net_id"] + "." + str(name_index)
+ while flow_name in existing_flows_ids:
+ name_index += 1
+ flow_name = flow["net_id"] + "." + str(name_index)
+ flow['name'] = flow_name
+ # 3 insert at openflow
+ try:
+ self.of_connector.new_flow(flow)
+ flow_id = flow["name"]
+ existing_flows_ids.append(flow_id)
+ except OpenflowConnException as e:
+ flow_id = None
+ error_list.append("Cannot create rule for ingress_port={}, dst_mac={}: {}"
+ .format(flow["ingress_port"], flow["dst_mac"], e))
+
+ # 4 insert at database
+ if flow_id:
+ new_installed_rules_ids.append(flow_id)
+ if flow_id in rules_to_delete:
+ rules_to_delete.remove(flow_id)
+
+ # delete not needed old flows from openflow
+ for flow_id in rules_to_delete:
+ # Delete flow
+ try:
+ self.of_connector.del_flow(flow_id)
+ except OpenflowConnNotFoundException:
+ pass
+ except OpenflowConnException as e:
+ error_text = "Cannot remove rule '{}': {}".format(flow['name'], e)
+ error_list.append(error_text)
+ self.logger.error(error_text)
+ created_items["installed_rules_ids"] = new_installed_rules_ids
+ if error_list:
+ created_items["error_msg"] = ";".join(error_list)[:1000]
+ created_items["error_msg"] = "ERROR"
+ else:
+ created_items["error_msg"] = None
+ created_items["status"] = "ACTIVE"
+ return created_items
+ except (SdnConnectorError, OpenflowConnException) as e:
+ raise SdnConnectorError("Error while {}: {}".format(step, e)) from e
+ except Exception as e:
+ self.logger.critical("Error while {}: {}".format(step, e), exc_info=True)
+ raise SdnConnectorError("Error while {}: {}".format(step, e))
+
+ def _compute_net_flows(self, net_id, ports):
+ new_flows = []
+ new_broadcast_flows = {}
+ nb_ports = len(ports)
+
+ # Check switch_port information is right
+ for port in ports:
+ if str(port['switch_port']) not in self.of_connector.pp2ofi:
+ raise SdnConnectorError("switch port name '{}' is not valid for the openflow controller".
+ format(port['switch_port']))
+ priority = 1000 # 1100
+
+ for src_port in ports:
+ # if src_port.get("groups")
+ vlan_in = src_port['vlan']
+
+ # BROADCAST:
+ broadcast_key = src_port['uuid'] + "." + str(vlan_in)
+ if broadcast_key in new_broadcast_flows:
+ flow_broadcast = new_broadcast_flows[broadcast_key]
+ else:
+ flow_broadcast = {'priority': priority,
+ 'net_id': net_id,
+ 'dst_mac': 'ff:ff:ff:ff:ff:ff',
+ "ingress_port": str(src_port['switch_port']),
+ 'vlan_id': vlan_in,
+ 'actions': []
+ }
+ new_broadcast_flows[broadcast_key] = flow_broadcast
+ if vlan_in is not None:
+ flow_broadcast['vlan_id'] = str(vlan_in)
+
+ for dst_port in ports:
+ vlan_out = dst_port['vlan']
+ if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out:
+ continue
+ flow = {
+ "priority": priority,
+ 'net_id': net_id,
+ "ingress_port": str(src_port['switch_port']),
+ 'vlan_id': vlan_in,
+ 'actions': []
+ }
+ # allow that one port have no mac
+ if dst_port['mac'] is None or nb_ports == 2: # point to point or nets with 2 elements
+ flow['priority'] = priority - 5 # less priority
+ else:
+ flow['dst_mac'] = str(dst_port['mac'])
+
+ if vlan_out is None:
+ if vlan_in:
+ flow['actions'].append(('vlan', None))
+ else:
+ flow['actions'].append(('vlan', vlan_out))
+ flow['actions'].append(('out', str(dst_port['switch_port'])))
+
+ if self._check_flow_already_present(flow, new_flows) >= 0:
+ self.logger.debug("Skipping repeated flow '%s'", str(flow))
+ continue
+
+ new_flows.append(flow)
+
+ # BROADCAST:
+ if nb_ports <= 2: # broadcast rules are only needed for point to multipoint, i.e. nets with more than 2 ports
+ continue
+ out = (vlan_out, str(dst_port['switch_port']))
+ if out not in flow_broadcast['actions']:
+ flow_broadcast['actions'].append(out)
+
+ # BROADCAST
+ for flow_broadcast in new_broadcast_flows.values():
+ if len(flow_broadcast['actions']) == 0:
+ continue # nothing to do, skip
+ flow_broadcast['actions'].sort()
+ if 'vlan_id' in flow_broadcast:
+ previous_vlan = 0 # indicates that the packet contains a vlan tag
+ else:
+ previous_vlan = None
+ final_actions = []
+ action_number = 0
+ for action in flow_broadcast['actions']:
+ if action[0] != previous_vlan:
+ final_actions.append(('vlan', action[0]))
+ previous_vlan = action[0]
+ if self.of_controller_nets_with_same_vlan and action_number:
+ raise SdnConnectorError("Cannot interconnect different vlan tags in a network when flag "
+ "'of_controller_nets_with_same_vlan' is True.")
+ action_number += 1
+ final_actions.append(('out', action[1]))
+ flow_broadcast['actions'] = final_actions
+
+ if self._check_flow_already_present(flow_broadcast, new_flows) >= 0:
+ self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast))
+ continue
+
+ new_flows.append(flow_broadcast)
+
+ # UNIFY openflow rules with the same input port and vlan and the same output actions
+ # These flows differ at the dst_mac; and they are unified by not filtering by dst_mac
+ # this can happen if there are only two ports. It is converted to a point to point connection
+ flow_dict = {} # use as key vlan_id+ingress_port and as value the list of flows matching these values
+ for flow in new_flows:
+ key = str(flow.get("vlan_id")) + ":" + flow["ingress_port"]
+ if key in flow_dict:
+ flow_dict[key].append(flow)
+ else:
+ flow_dict[key] = [flow]
+ new_flows2 = []
+ for flow_list in flow_dict.values():
+ convert2ptp = False
+ if len(flow_list) >= 2:
+ convert2ptp = True
+ for f in flow_list:
+ if f['actions'] != flow_list[0]['actions']:
+ convert2ptp = False
+ break
+ if convert2ptp: # add only one unified rule without dst_mac
+ self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list))
+ flow_list[0].pop('dst_mac')
+ flow_list[0]["priority"] -= 5
+ new_flows2.append(flow_list[0])
+ else: # add all the rules
+ new_flows2 += flow_list
+ return new_flows2
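+ # Illustrative sketch (not part of this change): a single rule returned by this method
+ # has the following shape (values are hypothetical; the 'name' field is filled in later
+ # by _set_openflow_rules before calling of_connector.new_flow):
+ #   {"priority": 1000, "net_id": "5c9f37...", "ingress_port": "eth1/1", "vlan_id": 100,
+ #    "dst_mac": "52:54:00:aa:bb:cc", "actions": [("vlan", 200), ("out", "eth1/2")]}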
+
+ def _check_flow_already_present(self, new_flow, flow_list):
+ """Check if the same flow is already present in the flow list.
+ The flow is repeated if all the fields, apart from name, are equal.
+ Return the index of the matching flow, or -1 if there is no match"""
+ for index, flow in enumerate(flow_list):
+ for f in self.flow_fields:
+ if flow.get(f) != new_flow.get(f):
+ break
+ else:
+ return index
+ return -1
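+# Illustrative sketch (not part of this change): a minimal, hypothetical OpenflowConn
+# implementation only needs to fill pp2ofi/ofi2pp and provide the abstract methods; it can
+# then be plugged into SdnConnectorOpenFlow through the of_connector argument (wim,
+# wim_account and connection_points as described in sdnconn.SdnConnectorBase):
+#
+#   class DummyOpenflowConn(OpenflowConn):
+#       def __init__(self, params):
+#           super().__init__(params)
+#           self.pp2ofi = {"eth1/1": "1", "eth1/2": "2"}  # hypothetical port names
+#           self.ofi2pp = {ofi: pp for pp, ofi in self.pp2ofi.items()}
+#           self.rules = {}
+#
+#       def obtain_port_correspondence(self):
+#           return dict(self.pp2ofi)
+#
+#       def get_of_rules(self, translate_of_ports=True):
+#           return list(self.rules.values())
+#
+#       def new_flow(self, data):
+#           self.rules[data["name"]] = data
+#
+#       def del_flow(self, flow_name):
+#           self.rules.pop(flow_name, None)
+#
+#       def clear_all_flows(self):
+#           self.rules.clear()
+#
+#   conn = SdnConnectorOpenFlow(wim, wim_account, config={}, of_connector=DummyOpenflowConn({}))
+#   net_id, conn_info = conn.create_connectivity_service("ELAN", connection_points)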
from hashlib import sha1
from itertools import groupby
from operator import itemgetter
-from sys import exc_info
+# from sys import exc_info
# from time import time
from uuid import uuid1 as generate_uuid
def get_wim_accounts(self, **kwargs):
"""Retrieve all the accounts from the database"""
kwargs.setdefault('postprocess', _postprocess_wim_account)
+ kwargs.setdefault('WHERE', {"sdn": "false"})
return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
def get_wim_account(self, uuid_or_name, **kwargs):
self.logger.exception(old_exception)
ex = InvalidParameters(
"The mapping must contain the "
- "'pop_switch_dpid', 'pop_switch_port', and "
+ "'device_id', 'device_interface_id', and "
"wan_service_mapping_info: "
"('wan_switch_dpid' and 'wan_switch_port') or "
"'wan_service_endpoint_id}'")
kwargs.setdefault('error_if_none', False)
criteria_fields = ('uuid', 'instance_scenario_id', 'sce_net_id',
- 'wim_id', 'wim_account_id')
+ 'wim_id', 'wim_account_id', 'sdn')
criteria = remove_none_items(filter_dict_keys(kwargs, criteria_fields))
kwargs = filter_out_dict_keys(kwargs, criteria_fields)
"items": {
"type": "object",
"properties": {
- "pop_switch_dpid": dpid_type,
- "pop_switch_port": port_type,
- "wan_service_endpoint_id": name_schema,
- "wan_service_mapping_info": {
+ "device_id": nameshort_schema,
+ "device_interface_id": nameshort_schema,
+ "service_endpoint_id": name_schema,
+ "switch_dpid": dpid_type,
+ "switch_port": port_type,
+ "service_mapping_info": {
"type": "object",
"properties": {
"mapping_type": name_schema,
- "wan_switch_dpid": dpid_type,
- "wan_switch_port": port_type
},
"additionalProperties": True,
"required": ["mapping_type"]
}
},
- "anyOf": [
- {
- "required": [
- "pop_switch_dpid",
- "pop_switch_port",
- "wan_service_endpoint_id"
- ]
- },
- {
- "required": [
- "pop_switch_dpid",
- "pop_switch_port",
- "wan_service_mapping_info"
- ]
- }
- ]
+ "required": ["service_endpoint_id"]
}
}
},
"description": description_schema,
"type": {
"type": "string",
- "enum": ["tapi", "onos", "odl", "dynpac", "fake"]
+ # "enum": ["tapi", "onos", "odl", "dynpac", "fake"]
},
"wim_url": description_schema,
"config": {
--- /dev/null
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+"""The SDN connector is responsible for establishing both wide area network connectivity (WIM)
+and intranet SDN connectivity.
+
+It receives information about the ports to be connected.
+"""
+import logging
+
+from ..http_tools.errors import HttpMappedError
+
+
+class SdnConnectorError(HttpMappedError):
+ """Base Exception for all connector related errors
+ provide the parameter 'http_code' (int) with the error code:
+ Bad_Request = 400
+ Unauthorized = 401 (e.g. credentials are not valid)
+ Not_Found = 404 (e.g. try to edit or delete a non existing connectivity service)
+ Forbidden = 403
+ Method_Not_Allowed = 405
+ Not_Acceptable = 406
+ Request_Timeout = 408 (e.g. timeout reaching the server, or the server cannot be reached)
+ Conflict = 409
+ Service_Unavailable = 503
+ Internal_Server_Error = 500
+ """
+
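+# Illustrative sketch (not part of this change): connector implementations are expected to
+# map their internal failures to SdnConnectorError with a meaningful http_code, e.g.:
+#   from http import HTTPStatus
+#   raise SdnConnectorError("credentials rejected by the controller",
+#                           http_code=HTTPStatus.UNAUTHORIZED.value)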
+
+class SdnConnectorBase(object):
+ """Abstract base class for all the SDN connectors
+
+ Arguments:
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ config (dict or None): plugin-specific configuration (see __init__ for the common keys)
+ The arguments of the constructor are converted to object attributes.
+ An extra property, ``service_endpoint_mapping``, is created from ``config``.
+ """
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """
+
+ :param wim: (dict). Contains among others 'wim_url'
+ :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name',
+ 'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'.
+ :param config: (dict or None): plugin-specific information. The following keys, if present, have a common meaning:
+ 'mapping_not_needed': (bool) False by default or if missing. When True, it indicates that a port mapping is not needed.
+ 'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is:
+ KEY meaning for WIM meaning for SDN assist
+ -------- -------- --------
+ device_id pop_switch_dpid compute_id
+ device_interface_id pop_switch_port compute_pci_address
+ service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id
+ service_mapping_info wan_service_mapping_info SDN_service_mapping_info
+ contains extra information if needed. Text in Yaml format
+ switch_dpid wan_switch_dpid SDN_switch_dpid
+ switch_port wan_switch_port SDN_switch_port
+ datacenter_id vim_account vim_account
+ id: (internal, do not use)
+ wim_id: (internal, do not use)
+ :param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used.
+ """
+ self.logger = logger or logging.getLogger('openmano.sdn.sdnconn')
+
+ self.wim = wim
+ self.wim_account = wim_account
+ self.config = config or {}
+ self.service_endpoint_mapping = (
+ self.config.get('service_endpoint_mapping', []))
+
+ def check_credentials(self):
+ """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url),
+ user (wim_account.user), and password (wim_account.password)
+
+ Raises:
+ SdnConnectorError: Issues regarding authorization, access to
+ external URLs, etc are detected.
+ """
+ raise NotImplementedError
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service established
+
+ Arguments:
+ service_uuid (str): UUID of the connectivity service
+ conn_info (dict or None): Information returned by the connector
+ during the service creation/edition and subsequently stored in
+ the database.
+
+ Returns:
+ dict: JSON/YAML-serializable dict that contains a mandatory key
+ ``sdn_status`` associated with one of the following values::
+
+ {'sdn_status': 'ACTIVE'}
+ # The service is up and running.
+
+ {'sdn_status': 'INACTIVE'}
+ # The service was created, but the connector
+ # cannot determine yet if connectivity exists
+ # (ideally, the caller needs to wait and check again).
+
+ {'sdn_status': 'DOWN'}
+ # Connection was previously established,
+ # but an error/failure was detected.
+
+ {'sdn_status': 'ERROR'}
+ # An error occurred when trying to create the service/
+ # establish the connectivity.
+
+ {'sdn_status': 'BUILD'}
+ # Still trying to create the service, the caller
+ # needs to wait and check again.
+
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
+ keys can be used to provide additional status explanation or
+ new information available for the connectivity service.
+ """
+ raise NotImplementedError
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """
+ Establish SDN/WAN connectivity between the endpoints
+ :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
+ :param connection_points: (list): each point corresponds to
+ an entry point to be connected. For WIM: from the DC to the transport network.
+ For SDN: Compute/PCI to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Each item of the list is a dict with:
+ "service_endpoint_id": (str)(uuid) Same meaning that for 'service_endpoint_mapping' (see __init__)
+ In case the config attribute mapping_not_needed is True, this value is not relevant. In this case
+ it will contain the string "device_id:device_interface_id"
+ "service_endpoint_encapsulation_type": None, "dot1q", ...
+ "service_endpoint_encapsulation_info": (dict) with:
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan)
+ "mac": ...
+ "device_id": ..., same meaning that for 'service_endpoint_mapping' (see __init__)
+ "device_interface_id": same meaning that for 'service_endpoint_mapping' (see __init__)
+ "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id
+ "swith_port": ... present if mapping has been found for this device_id,device_interface_id
+ "service_mapping_info": present if mapping has been found for this device_id,device_interface_id
+ :param kwargs: For future versions:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+ Other QoS might be passed as keyword arguments.
+ :return: tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established connectivity service
+ - *conn_info* (dict or None): Information to be stored at the database (or ``None``).
+ This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ :raises: SdnConnectorError: In case of error. Nothing should be created in this case.
+ Provide the http_code parameter.
+ """
+ raise NotImplementedError
+
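+ # Illustrative only: a hedged sketch of the ``connection_points`` argument
+ # described above, for a two-endpoint ELINE with dot1q encapsulation and
+ # hypothetical endpoint ids:
+ #
+ #     connection_points = [
+ #         {"service_endpoint_id": "endpoint-1",
+ #          "service_endpoint_encapsulation_type": "dot1q",
+ #          "service_endpoint_encapsulation_info": {"vlan": 100}},
+ #         {"service_endpoint_id": "endpoint-2",
+ #          "service_endpoint_encapsulation_type": "dot1q",
+ #          "service_endpoint_encapsulation_info": {"vlan": 100}},
+ #     ]
+ #     service_uuid, conn_info = connector.create_connectivity_service("ELINE", connection_points)
+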
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """
+ Disconnect multi-site endpoints previously connected
+
+ :param service_uuid: The one returned by create_connectivity_service
+ :param conn_info: The value returned by the last call to 'create_connectivity_service' or
+ 'edit_connectivity_service', if it did not return None
+ :return: None
+ :raises: SdnConnectorError: In case of error. The http_code parameter must be filled
+ """
+ raise NotImplementedError
+
+ def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
+ """ Change an existing connectivity service.
+
+ This method's arguments and return value follow the same convention as
+ :meth:`~.create_connectivity_service`.
+
+ :param service_uuid: UUID of the connectivity service.
+ :param conn_info: (dict or None): Information previously returned by the last call to create_connectivity_service
+ or edit_connectivity_service
+ :param connection_points: (list): If provided, the old list of connection points will be replaced.
+ :param kwargs: Same meaning as for create_connectivity_service
+ :return: dict or None: Information to be updated and stored at the database.
+ When ``None`` is returned, no information should be changed.
+ When an empty dict is returned, the database record will be deleted.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ :raises: SdnConnectorError: In case of error.
+ """
+ raise NotImplementedError
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links in a WIM.
+
+ This method is intended for debugging only, and should delete all the
+ connections controlled by the WIM/SDN, not only the connections that
+ a specific RO is aware of.
+
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ raise NotImplementedError
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM.
+
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ raise NotImplementedError
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
'encapsulation_type': 'vlan'},
'vim_external_port':
dict(zip(('switch', 'port'),
- _datacenter_to_switch_port(identifier)))}
+ _datacenter_to_switch_port(identifier)))}
]})
return {'uuid': uuid('dc%d' % identifier),
return {'wim_id': uuid('wim%d' % wim),
'datacenter_id': uuid('dc%d' % datacenter),
- 'pop_switch_dpid': pop_dpid,
- 'pop_switch_port': (str(pop_port) if pop_port else
- str(int(datacenter) + int(wim) + 1)),
+ 'device_id': pop_dpid,
+ 'device_interface_id': (str(pop_port) if pop_port else
+ str(int(datacenter) + int(wim) + 1)),
# ^ Datacenter router has one port managed by each WIM
- 'wan_service_endpoint_id': id_,
+ 'service_endpoint_id': id_,
# ^ WIM-managed router has one port connected to each DC
- 'wan_service_mapping_info': json.dumps(mapping_info)}
+ 'service_mapping_info': json.dumps(mapping_info)}
def processed_port_mapping(wim, datacenter,
'wim_id': uuid('wim%d' % wim),
'datacenter_id': uuid('dc%d' % datacenter),
'pop_wan_mappings': [
- {'pop_switch_dpid': pop_dpid,
- 'pop_switch_port': wim + 1 + i,
- 'wan_service_endpoint_id':
+ {'device_id': pop_dpid,
+ 'device_interface_id': wim + 1 + i,
+ 'service_endpoint_id':
sha1('dpid-port|%s|%d' % (wan_dpid, datacenter + 1 + i)),
- 'wan_service_mapping_info': {
+ 'service_mapping_info': {
'mapping_type': 'dpid-port',
'wan_switch_dpid': wan_dpid,
'wan_switch_port': datacenter + 1 + i}}
##
# pylint: disable=E1101
-#from __future__ import unicode_literals, print_function
+# from __future__ import unicode_literals, print_function
import json
import unittest
)
from ..persistence import WimPersistence, preprocess_record
from ..wan_link_actions import WanLinkCreate, WanLinkDelete
-from ..wimconn import WimConnectorError
+from ..sdnconn import SdnConnectorError
class TestActionsWithDb(TestCaseWithDatabasePerTest):
# If the connector raises an error
with patch.object(self.connector, 'create_connectivity_service',
- MagicMock(side_effect=WimConnectorError('foobar'))):
+ MagicMock(side_effect=SdnConnectorError('foobar'))):
# When we try to process a CREATE action that refers to the same
# instance_scenario_id and sce_net_id
action.process(self.connector, self.persist, self.ovim)
port_mappings = next(r['wim_port_mappings']
for r in db_state if 'wim_port_mappings' in r)
for mapping in port_mappings:
- mapping['pop_switch_dpid'] = switch
- mapping['pop_switch_port'] = port
+ mapping['device_id'] = switch
+ mapping['device_interface_id'] = port
instance_action = eg.instance_action(action_id='ACTION-000')
instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
connector_patch = patch.object(
self.connector, 'create_connectivity_service',
- MagicMock(side_effect=WimConnectorError('foobar')))
+ MagicMock(side_effect=SdnConnectorError('foobar')))
# If the connector throws an error
with connector_patch, ovim_patch:
config={'wim_port_mapping': [{
'datacenter_name': 'dc0',
'pop_wan_mappings': [{
- 'pop_switch_dpid': '00:AA:11:BB:22:CC:33:DD',
- 'pop_switch_port': 1,
- 'wan_service_mapping_info': {
+ 'device_id': '00:AA:11:BB:22:CC:33:DD',
+ 'device_interface_id': 1,
+ 'service_mapping_info': {
'mapping_type': 'dpid-port',
'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:0A',
'wan_switch_port': 1
mappings = response.json['wim']['config']['wim_port_mapping']
self.assertEqual(len(mappings), 1)
self.assertEqual(
- mappings[0]['pop_wan_mappings'][0]['pop_switch_dpid'],
+ mappings[0]['pop_wan_mappings'][0]['device_id'],
'00:AA:11:BB:22:CC:33:DD')
def test_delete_wim(self):
config={'wim_port_mapping': [{
'datacenter_name': 'dc0',
'pop_wan_mappings': [{
- 'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:01',
- 'pop_switch_port': 1,
- 'wan_service_mapping_info': {
+ 'device_id': 'AA:AA:AA:AA:AA:AA:AA:01',
+ 'device_interface_id': 1,
+ 'service_mapping_info': {
'mapping_type': 'dpid-port',
'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:01',
'wan_switch_port': 1
{'wim_port_mapping': [{
'datacenter_name': 'dc888',
'pop_wan_mappings': [
- {'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
- 'pop_switch_port': 1,
- 'wan_service_mapping_info': {
+ {'device_id': 'AA:AA:AA:AA:AA:AA:AA:AA',
+ 'device_interface_id': 1,
+ 'service_mapping_info': {
'mapping_type': 'dpid-port',
'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:BB',
'wan_switch_port': 1
wim = eg.wim(0)
account = eg.wim_account(0, 0)
account['wim'] = wim
- self.thread = WimThread(self.persist, account)
+ self.thread = WimThread(self.persist, {}, account)
self.thread.connector = MagicMock()
def assertTasksEqual(self, left, right):
account = eg.wim_account(0, 0)
account['wim'] = wim
self.persist = MagicMock()
- self.thread = WimThread(self.persist, account)
+ self.thread = WimThread(self.persist, {}, account)
self.thread.connector = MagicMock()
super(TestWimThread, self).setUp()
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
# This tox file allows the devs to run unit tests only for this subpackage.
# In order to do so, cd into the directory and run `tox`
# pylint: disable=E1101,E0203,W0201
import json
from pprint import pformat
-from sys import exc_info
+# from sys import exc_info
from time import time
from ..utils import filter_dict_keys as filter_keys
NoRecordFound,
NoExternalPortFound
)
-from .wimconn import WimConnectorError
+from .sdnconn import SdnConnectorError
INSTANCE_NET_STATUS_ERROR = ('DOWN', 'ERROR', 'VIM_ERROR',
'DELETED', 'SCHEDULED_DELETION')
Infrastructure Manager system
persistence: abstraction layer for the database
"""
- fields = ('wim_status', 'wim_info', 'error_msg')
+ fields = ('sdn_status', 'sdn_info', 'error_msg')
result = dict.fromkeys(fields)
try:
result.update(
connector
.get_connectivity_service_status(self.wim_internal_id))
- except WimConnectorError as ex:
+ except SdnConnectorError as ex:
self.logger.exception(ex)
- result.update(wim_status='WIM_ERROR', error_msg=truncate(ex))
+ result.update(sdn_status='WIM_ERROR', error_msg=truncate(ex))
result = filter_keys(result, fields)
action_changes = remove_none_items({
'extra': merge_dicts(self.extra, result),
- 'status': 'BUILD' if result['wim_status'] == 'BUILD' else None,
+ 'status': 'BUILD' if result['sdn_status'] == 'BUILD' else None,
'error_msg': result['error_msg'],
'modified_at': time()})
- link_changes = merge_dicts(result, status=result.pop('wim_status'))
- # ^ Rename field: wim_status => status
+ link_changes = merge_dicts(result, status=result.pop('sdn_status'))
+ # ^ Rename field: sdn_status => status
persistence.update_wan_link(self.item_id,
remove_none_items(link_changes))
Returns:
dict: Record representing the wan_port_mapping associated to the
given instance_net. The expected fields are:
- **wim_id**, **datacenter_id**, **pop_switch_dpid** (the local
- network is expected to be connected at this switch),
- **pop_switch_port**, **wan_service_endpoint_id**,
- **wan_service_mapping_info**.
+ **wim_id**, **datacenter_id**, **device_id** (the local
+ network is expected to be connected at this switch dpid),
+ **device_interface_id**, **service_endpoint_id**,
+ **service_mapping_info**.
"""
# First, we need to find a route from the datacenter to the outside
# world. For that, we can use the rules given in the datacenter
criteria = {
'wim_id': wim_account['wim_id'],
- 'pop_switch_dpid': external_port[0],
- 'pop_switch_port': external_port[1],
+ 'device_id': external_port[0],
+ 'device_interface_id': external_port[1],
'datacenter_id': datacenter_id}
wan_port_mapping = persistence.query_one(
# It is important to return encapsulation information if present
mapping = merge_dicts(
- wan_port_mapping.get('wan_service_mapping_info'),
+ wan_port_mapping.get('service_mapping_info'),
filter_keys(vim_info, ('encapsulation_type', 'encapsulation_id'))
)
- return merge_dicts(wan_port_mapping, wan_service_mapping_info=mapping)
+ return merge_dicts(wan_port_mapping, service_mapping_info=mapping)
def _get_port_sdn(self, ovim, instance_net):
criteria = {'net_id': instance_net['sdn_net_id']}
@staticmethod
def _derive_connection_point(wan_info):
- point = {'service_endpoint_id': wan_info['wan_service_endpoint_id']}
+ point = {'service_endpoint_id': wan_info['service_endpoint_id']}
# TODO: Cover other scenarios, e.g. VXLAN.
- details = wan_info.get('wan_service_mapping_info', {})
+ details = wan_info.get('service_mapping_info', {})
if details.get('encapsulation_type') == 'vlan':
point['service_endpoint_encapsulation_type'] = 'dot1q'
point['service_endpoint_encapsulation_info'] = {
"""Store plugin/connector specific information in the database"""
persistence.update_wan_link(self.item_id, {
'wim_internal_id': service_uuid,
- 'wim_info': {'conn_info': conn_info},
+ 'sdn_info': {'conn_info': conn_info},
'status': 'BUILD'})
def execute(self, connector, persistence, ovim, instance_nets):
connection_points
# TODO: other properties, e.g. bandwidth
)
- except (WimConnectorError, InconsistentState,
+ except (SdnConnectorError, InconsistentState,
NoExternalPortFound) as ex:
self.logger.exception(ex)
return self.fail(
try:
id = self.wim_internal_id
- conn_info = safe_get(wan_link, 'wim_info.conn_info')
+ conn_info = safe_get(wan_link, 'sdn_info.conn_info')
self.logger.debug('Connection Service %s (wan_link: %s):\n%s\n',
id, wan_link['uuid'],
json.dumps(conn_info, indent=4))
result = connector.delete_connectivity_service(id, conn_info)
- except (WimConnectorError, InconsistentState) as ex:
+ except (SdnConnectorError, InconsistentState) as ex:
self.logger.exception(ex)
return self.fail(
persistence,
from functools import partial
from itertools import islice, chain, takewhile
from operator import itemgetter, attrgetter
-from sys import exc_info
+# from sys import exc_info
from time import time, sleep
import queue
UndefinedAction,
)
from .failing_connector import FailingConnector
-from .wimconn import WimConnectorError
-from .wimconn_dynpac import DynpacConnector
+from .sdnconn import SdnConnectorError
from .wimconn_fake import FakeConnector
-from .wimconn_ietfl2vpn import WimconnectorIETFL2VPN
ACTIONS = {
'instance_wim_nets': wan_link_actions.ACTIONS
CONNECTORS = {
# "odl": wimconn_odl.OdlConnector,
- "dynpac": DynpacConnector,
"fake": FakeConnector,
- "tapi": WimconnectorIETFL2VPN,
- # Add extra connectors here
+ # Add extra connectors here that are not managed via plugins
}
MAX_RECOVERY_TIME = 180
WAITING_TIME = 1 # Wait 1s for tasks to arrive, when there are none
- def __init__(self, persistence, wim_account, logger=None, ovim=None):
+ def __init__(self, persistence, plugins, wim_account, logger=None, ovim=None):
"""Init a thread.
Arguments:
persistence: Database abstraction layer
+ plugins: Dictionary with the VIM/SDN plugins
wim_account: Record containing wim_account, tenant and wim
information.
"""
name = '{}.{}.{}'.format(wim_account['wim']['name'],
wim_account['name'], wim_account['uuid'])
super(WimThread, self).__init__(name=name)
+ self.plugins = plugins
+ if "rosdn_fake" not in self.plugins:
+ self.plugins["rosdn_fake"] = FakeConnector
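+ # Plugin connectors are looked up below with the key "rosdn_" + wim["type"].
+ # Illustrative only (hypothetical DummyConnector class):
+ #
+ #     WimThread(persistence, {"rosdn_dummy": DummyConnector}, wim_account)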
self.name = name
self.connector = None
mapping = self.persist.query('wim_port_mappings',
WHERE={'wim_id': wim['uuid']},
error_if_none=False)
- return CONNECTORS[wim['type']](wim, account, {
- 'service_endpoint_mapping': mapping or []
- })
+ if wim["type"] in CONNECTORS:
+ return CONNECTORS[wim['type']](wim, account, {'service_endpoint_mapping': mapping or []})
+ else: # load a plugin
+ return self.plugins["rosdn_" + wim["type"]](
+ wim, account, {'service_endpoint_mapping': mapping or []})
except DbBaseException as ex:
error_msg = ('Error when retrieving WIM account ({})\n'
.format(account_id)) + str(ex)
except KeyError as ex:
error_msg = ('Unable to find the WIM connector for WIM ({})\n'
.format(wim['type'])) + str(ex)
- self.logger.error(error_msg, exc_info=True)
- except (WimConnectorError, Exception) as ex:
+ self.logger.error(error_msg)
+ except (SdnConnectorError, Exception) as ex:
# TODO: Remove the Exception class here when the connector class is
# ready
error_msg = ('Error when loading WIM connector for WIM ({})\n'
+++ /dev/null
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-"""The WIM connector is responsible for establishing wide area network
-connectivity.
-
-It receives information from the WimThread/WAN Actions about the endpoints of
-a link that spans across multiple datacenters and stablish a path between them.
-"""
-import logging
-
-from ..http_tools.errors import HttpMappedError
-
-
-class WimConnectorError(HttpMappedError):
- """Base Exception for all connector related errors"""
-
-
-class WimConnector(object):
- """Abstract base class for all the WIM connectors
-
- Arguments:
- wim (dict): WIM record, as stored in the database
- wim_account (dict): WIM account record, as stored in the database
- config (dict): optional persistent information related to an specific
- connector. Inside this dict, a special key,
- ``service_endpoint_mapping`` provides the internal endpoint
- mapping.
- logger (logging.Logger): optional logger object. If none is passed
- ``openmano.wim.wimconn`` is used.
-
- The arguments of the constructor are converted to object attributes.
- An extra property, ``service_endpoint_mapping`` is created from ``config``.
- """
- def __init__(self, wim, wim_account, config=None, logger=None):
- self.logger = logger or logging.getLogger('openmano.wim.wimconn')
-
- self.wim = wim
- self.wim_account = wim_account
- self.config = config or {}
- self.service_endpoint_mapping = (
- config.get('service_endpoint_mapping', []))
-
- def check_credentials(self):
- """Check if the connector itself can access the WIM.
-
- Raises:
- WimConnectorError: Issues regarding authorization, access to
- external URLs, etc are detected.
- """
- raise NotImplementedError
-
- def get_connectivity_service_status(self, service_uuid, conn_info=None):
- """Monitor the status of the connectivity service established
-
- Arguments:
- service_uuid (str): UUID of the connectivity service
- conn_info (dict or None): Information returned by the connector
- during the service creation/edition and subsequently stored in
- the database.
-
- Returns:
- dict: JSON/YAML-serializable dict that contains a mandatory key
- ``wim_status`` associated with one of the following values::
-
- {'wim_status': 'ACTIVE'}
- # The service is up and running.
-
- {'wim_status': 'INACTIVE'}
- # The service was created, but the connector
- # cannot determine yet if connectivity exists
- # (ideally, the caller needs to wait and check again).
-
- {'wim_status': 'DOWN'}
- # Connection was previously established,
- # but an error/failure was detected.
-
- {'wim_status': 'ERROR'}
- # An error occurred when trying to create the service/
- # establish the connectivity.
-
- {'wim_status': 'BUILD'}
- # Still trying to create the service, the caller
- # needs to wait and check again.
-
- Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
- keys can be used to provide additional status explanation or
- new information available for the connectivity service.
- """
- raise NotImplementedError
-
- def create_connectivity_service(self, service_type, connection_points,
- **kwargs):
- """Stablish WAN connectivity between the endpoints
-
- Arguments:
- service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
- ``L3``.
- connection_points (list): each point corresponds to
- an entry point from the DC to the transport network. One
- connection point serves to identify the specific access and
- some other service parameters, such as encapsulation type.
- Represented by a dict as follows::
-
- {
- "service_endpoint_id": ..., (str[uuid])
- "service_endpoint_encapsulation_type": ...,
- (enum: none, dot1q, ...)
- "service_endpoint_encapsulation_info": {
- ... (dict)
- "vlan": ..., (int, present if encapsulation is dot1q)
- "vni": ... (int, present if encapsulation is vxlan),
- "peers": [(ipv4_1), (ipv4_2)]
- (present if encapsulation is vxlan)
- }
- }
-
- The service endpoint ID should be previously informed to the WIM
- engine in the RO when the WIM port mapping is registered.
-
- Keyword Arguments:
- bandwidth (int): value in kilobytes
- latency (int): value in milliseconds
-
- Other QoS might be passed as keyword arguments.
-
- Returns:
- tuple: ``(service_id, conn_info)`` containing:
- - *service_uuid* (str): UUID of the established connectivity
- service
- - *conn_info* (dict or None): Information to be stored at the
- database (or ``None``). This information will be provided to
- the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
- **MUST** be JSON/YAML-serializable (plain data structures).
-
- Raises:
- WimConnectorException: In case of error.
- """
- raise NotImplementedError
-
- def delete_connectivity_service(self, service_uuid, conn_info=None):
- """Disconnect multi-site endpoints previously connected
-
- This method should receive as arguments both the UUID and the
- connection info dict (respectively), as returned by
- :meth:`~.create_connectivity_service` and
- :meth:`~.edit_connectivity_service`.
-
- Arguments:
- service_uuid (str): UUID of the connectivity service
- conn_info (dict or None): Information returned by the connector
- during the service creation and subsequently stored in the
- database.
-
- Raises:
- WimConnectorException: In case of error.
- """
- raise NotImplementedError
-
- def edit_connectivity_service(self, service_uuid, conn_info=None,
- connection_points=None, **kwargs):
- """Change an existing connectivity service.
-
- This method's arguments and return value follow the same convention as
- :meth:`~.create_connectivity_service`.
-
- Arguments:
- service_uuid (str): UUID of the connectivity service.
- conn_info (dict or None): Information previously stored in the
- database.
- connection_points (list): If provided, the old list of connection
- points will be replaced.
-
- Returns:
- dict or None: Information to be updated and stored at the
- database.
- When ``None`` is returned, no information should be changed.
- When an empty dict is returned, the database record will be
- deleted.
- **MUST** be JSON/YAML-serializable (plain data structures).
-
- Raises:
- WimConnectorException: In case of error.
- """
- raise NotImplementedError
-
- def clear_all_connectivity_services(self):
- """Delete all WAN Links in a WIM.
-
- This method is intended for debugging only, and should delete all the
- connections controlled by the WIM, not only the WIM connections that
- a specific RO is aware of.
-
- Raises:
- WimConnectorException: In case of error.
- """
- raise NotImplementedError
-
- def get_all_active_connectivity_services(self):
- """Provide information about all active connections provisioned by a
- WIM.
-
- Raises:
- WimConnectorException: In case of error.
- """
- raise NotImplementedError
+++ /dev/null
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2018 David GarcÃa, University of the Basque Country
-# Copyright 2018 University of the Basque Country
-# This file is part of openmano
-# All Rights Reserved.
-# Contact information at http://i2t.ehu.eus
-#
-# # Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import requests
-import json
-import logging
-from enum import Enum
-
-from .wimconn import WimConnector, WimConnectorError
-
-
-class WimError(Enum):
- UNREACHABLE = 'Unable to reach the WIM.',
- SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.',
- CONNECTION_POINTS_SIZE = \
- 'Unexpected number of connection points: 2 expected.',
- ENCAPSULATION_TYPE = \
- 'Unexpected service_endpoint_encapsulation_type. \
- Only "dotq1" is accepted.',
- BANDWIDTH = 'Unable to get the bandwidth.',
- STATUS = 'Unable to get the status for the service.',
- DELETE = 'Unable to delete service.',
- CLEAR_ALL = 'Unable to clear all the services',
- UNKNOWN_ACTION = 'Unknown action invoked.',
- BACKUP = 'Unable to get the backup parameter.',
- UNSUPPORTED_FEATURE = "Unsupported feature",
- UNAUTHORIZED = "Failed while authenticating"
-
-
-class WimAPIActions(Enum):
- CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY",
- CREATE_SERVICE = "CREATE_SERVICE",
- DELETE_SERVICE = "DELETE_SERVICE",
- CLEAR_ALL = "CLEAR_ALL",
- SERVICE_STATUS = "SERVICE_STATUS",
-
-
-class DynpacConnector(WimConnector):
- __supported_service_types = ["ELINE (L2)", "ELINE"]
- __supported_encapsulation_types = ["dot1q"]
- __WIM_LOGGER = 'openmano.wimconn.dynpac'
- __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
- __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
- __BACKUP_PARAM = "backup"
- __BANDWIDTH_PARAM = "bandwidth"
- __SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
- __WAN_SERVICE_ENDPOINT_PARAM = "wan_service_endpoint_id"
- __WAN_MAPPING_INFO_PARAM = "wan_service_mapping_info"
- __SW_ID_PARAM = "wan_switch_dpid"
- __SW_PORT_PARAM = "wan_switch_port"
- __VLAN_PARAM = "vlan"
-
- # Public functions exposed to the Resource Orchestrator
- def __init__(self, wim, wim_account, config):
- self.logger = logging.getLogger(self.__WIM_LOGGER)
- self.__wim = wim
- self.__wim_account = wim_account
- self.__config = config
- self.__wim_url = self.__wim.get("wim_url")
- self.__user = wim_account.get("user")
- self.__passwd = wim_account.get("passwd")
- self.logger.info("Initialized.")
-
- def create_connectivity_service(self,
- service_type,
- connection_points,
- **kwargs):
- self.__check_service(service_type, connection_points, kwargs)
-
- body = self.__get_body(service_type, connection_points, kwargs)
-
- headers = {'Content-type': 'application/x-www-form-urlencoded'}
- endpoint = "{}/service/create".format(self.__wim_url)
-
- try:
- response = requests.post(endpoint, data=body, headers=headers)
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
-
- if response.status_code != 200:
- error = json.loads(response.content)
- reason = "Reason: {}. ".format(error.get("code"))
- description = "Description: {}.".format(error.get("description"))
- exception = reason + description
- self.__exception(exception, http_code=response.status_code)
- uuid = response.content
- self.logger.info("Service with uuid {} created.".format(uuid))
- return (uuid, None)
-
- def edit_connectivity_service(self, service_uuid,
- conn_info, connection_points,
- **kwargs):
- self.__exception(WimError.UNSUPPORTED_FEATURE, http_code=501)
-
- def get_connectivity_service_status(self, service_uuid):
- endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid)
- try:
- response = requests.get(endpoint)
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
-
- if response.status_code != 200:
- self.__exception(WimError.STATUS, http_code=response.status_code)
- self.logger.info("Status for service with uuid {}: {}"
- .format(service_uuid, response.content))
- return response.content
-
- def delete_connectivity_service(self, service_uuid, conn_info):
- endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid)
- try:
- response = requests.delete(endpoint)
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
- if response.status_code != 200:
- self.__exception(WimError.DELETE, http_code=response.status_code)
-
- self.logger.info("Service with uuid: {} deleted".format(service_uuid))
-
- def clear_all_connectivity_services(self):
- endpoint = "{}/service/clearAll".format(self.__wim_url)
- try:
- response = requests.delete(endpoint)
- http_code = response.status_code
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
- if http_code != 200:
- self.__exception(WimError.CLEAR_ALL, http_code=http_code)
-
- self.logger.info("{} services deleted".format(response.content))
- return "{} services deleted".format(response.content)
-
- def check_connectivity(self):
- endpoint = "{}/checkConnectivity".format(self.__wim_url)
-
- try:
- response = requests.get(endpoint)
- http_code = response.status_code
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
-
- if http_code != 200:
- self.__exception(WimError.UNREACHABLE, http_code=http_code)
- self.logger.info("Connectivity checked")
-
- def check_credentials(self):
- endpoint = "{}/checkCredentials".format(self.__wim_url)
- auth = (self.__user, self.__passwd)
-
- try:
- response = requests.get(endpoint, auth=auth)
- http_code = response.status_code
- except requests.exceptions.RequestException as e:
- self.__exception(e.message, http_code=503)
-
- if http_code != 200:
- self.__exception(WimError.UNAUTHORIZED, http_code=http_code)
- self.logger.info("Credentials checked")
-
- # Private functions
- def __exception(self, x, **kwargs):
- http_code = kwargs.get("http_code")
- if hasattr(x, "value"):
- error = x.value
- else:
- error = x
- self.logger.error(error)
- raise WimConnectorError(error, http_code=http_code)
-
- def __check_service(self, service_type, connection_points, kwargs):
- if service_type not in self.__supported_service_types:
- self.__exception(WimError.SERVICE_TYPE_ERROR, http_code=400)
-
- if len(connection_points) != 2:
- self.__exception(WimError.CONNECTION_POINTS_SIZE, http_code=400)
-
- for connection_point in connection_points:
- enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM)
- if enc_type not in self.__supported_encapsulation_types:
- self.__exception(WimError.ENCAPSULATION_TYPE, http_code=400)
-
- # Commented out for as long as parameter isn't implemented
- # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
- # if not isinstance(bandwidth, int):
- # self.__exception(WimError.BANDWIDTH, http_code=400)
-
- # Commented out for as long as parameter isn't implemented
- # backup = kwargs.get(self.__BACKUP_PARAM)
- # if not isinstance(backup, bool):
- # self.__exception(WimError.BACKUP, http_code=400)
-
- def __get_body(self, service_type, connection_points, kwargs):
- port_mapping = self.__config.get("service_endpoint_mapping")
- selected_ports = []
- for connection_point in connection_points:
- endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
- port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0]
- port_info = port.get(self.__WAN_MAPPING_INFO_PARAM)
- selected_ports.append(port_info)
- if service_type == "ELINE (L2)" or service_type == "ELINE":
- service_type = "L2"
- body = {
- "connection_points": [{
- "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
- "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
- "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
- }, {
- "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
- "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
- "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
- }],
- "bandwidth": 100, # Hardcoded for as long as parameter isn't implemented
- "service_type": service_type,
- "backup": False # Hardcoded for as long as parameter isn't implemented
- }
- return "body={}".format(json.dumps(body))
import logging
from uuid import uuid4
-from .wimconn import WimConnector
-
+from .sdnconn import SdnConnectorBase, SdnConnectorError
+from http import HTTPStatus
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
-class FakeConnector(WimConnector):
+class FakeConnector(SdnConnectorBase):
"""Abstract base class for all the WIM connectors
Arguments:
An extra property, ``service_endpoint_mapping`` is created from ``config``.
"""
def __init__(self, wim, wim_account, config=None, logger=None):
- self.logger = logging.getLogger('openmano.wimconn.fake')
- super(FakeConnector, self).__init__(wim, wim_account, config, logger)
+ self.logger = logger or logging.getLogger('openmano.sdnconn.fake')
+ super(FakeConnector, self).__init__(wim, wim_account, config, self.logger)
self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
self.connections = {}
self.counter = 0
"""Check if the connector itself can access the WIM.
Raises:
- WimConnectorError: Issues regarding authorization, access to
+ SdnConnectorError: Issues regarding authorization, access to
external URLs, etc are detected.
"""
self.logger.debug("check_credentials")
Returns:
dict: JSON/YAML-serializable dict that contains a mandatory key
- ``wim_status`` associated with one of the following values::
+ ``sdn_status`` associated with one of the following values::
- Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
keys can be used to provide additional status explanation or
new information available for the connectivity service.
"""
self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
conn_info))
- return {'wim_status': 'ACTIVE', 'wim_info': self.connectivity.get(service_uuid)}
+ return {'sdn_status': 'ACTIVE', 'sdn_info': self.connections.get(service_uuid)}
def create_connectivity_service(self, service_type, connection_points,
**kwargs):
self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
format(service_type, connection_points, kwargs))
_id = str(uuid4())
- self.connectivity[_id] = {"nb": self.counter}
+ self.connections[_id] = connection_points.copy()
self.counter += 1
- return _id, self.connectivity[_id]
+ return _id, None
def delete_connectivity_service(self, service_uuid, conn_info=None):
"""Disconnect multi-site endpoints previously connected
"""
self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
conn_info))
- self.connectivity.pop(service_uuid, None)
+ if service_uuid not in self.connections:
+ raise SdnConnectorError("connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ self.connections.pop(service_uuid, None)
return None
def edit_connectivity_service(self, service_uuid, conn_info=None,
"""
self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
"kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
+ if service_uuid not in self.connections:
+ raise SdnConnectorError("connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value)
+ self.connections[service_uuid] = connection_points.copy()
return None
def clear_all_connectivity_services(self):
"""
self.logger.debug("clear_all_connectivity_services")
- self.connectivity.clear()
+ self.connections.clear()
return None
def get_all_active_connectivity_services(self):
WIM.
Raises:
- WimConnectorException: In case of error.
+ SdnConnectorError: In case of error.
"""
self.logger.debug("get_all_active_connectivity_services")
- return self.connectivity
+ return self.connections
+++ /dev/null
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 Telefonica
-# All Rights Reserved.
-#
-# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This work has been performed in the context of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 program.
-##
-"""The WIM connector is responsible for establishing wide area network
-connectivity.
-
-This WIM connector implements the standard IETF RFC 8466 "A YANG Data
- Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
-
-It receives the endpoints and the necessary details to request
-the Layer 2 service.
-"""
-import requests
-import uuid
-import logging
-from .wimconn import WimConnector, WimConnectorError
-"""CHeck layer where we move it"""
-
-
-class WimconnectorIETFL2VPN(WimConnector):
-
- def __init__(self, wim, wim_account, config=None, logger=None):
- """IETF L2VPM WIM connector
-
- Arguments: (To be completed)
- wim (dict): WIM record, as stored in the database
- wim_account (dict): WIM account record, as stored in the database
- """
- self.logger = logging.getLogger('openmano.wimconn.ietfl2vpn')
- super(WimconnectorIETFL2VPN, self).__init__(wim, wim_account, config, logger)
- self.headers = {'Content-Type': 'application/json'}
- self.mappings = {m['wan_service_endpoint_id']: m
- for m in self.service_endpoint_mapping}
- self.user = wim_account.get("user")
- self.passwd = wim_account.get("passwd")
- if self.user and self.passwd is not None:
- self.auth = (self.user, self.passwd)
- else:
- self.auth = None
- self.logger.info("IETFL2VPN Connector Initialized.")
-
- def check_credentials(self):
- endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- try:
- response = requests.get(endpoint, auth=self.auth)
- http_code = response.status_code
- except requests.exceptions.RequestException as e:
- raise WimConnectorError(e.message, http_code=503)
-
- if http_code != 200:
- raise WimConnectorError("Failed while authenticating", http_code=http_code)
- self.logger.info("Credentials checked")
-
- def get_connectivity_service_status(self, service_uuid, conn_info=None):
- """Monitor the status of the connectivity service stablished
-
- Arguments:
- service_uuid: Connectivity service unique identifier
-
- Returns:
- Examples::
- {'wim_status': 'ACTIVE'}
- {'wim_status': 'INACTIVE'}
- {'wim_status': 'DOWN'}
- {'wim_status': 'ERROR'}
- """
- try:
- self.logger.info("Sending get connectivity service stuatus")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
- self.wim["wim_url"], service_uuid)
- response = requests.get(servicepoint, auth=self.auth)
- if response.status_code != requests.codes.ok:
- raise WimConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code)
- service_status = {'wim_status': 'ACTIVE'}
- return service_status
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout", http_code=408)
-
- def search_mapp(self, connection_point):
- id = connection_point['service_endpoint_id']
- if id not in self.mappings:
- raise WimConnectorError("Endpoint {} not located".format(str(id)))
- else:
- return self.mappings[id]
-
- def create_connectivity_service(self, service_type, connection_points, **kwargs):
- """Stablish WAN connectivity between the endpoints
-
- Arguments:
- service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
- ``L3``.
- connection_points (list): each point corresponds to
- an entry point from the DC to the transport network. One
- connection point serves to identify the specific access and
- some other service parameters, such as encapsulation type.
- Represented by a dict as follows::
-
- {
- "service_endpoint_id": ..., (str[uuid])
- "service_endpoint_encapsulation_type": ...,
- (enum: none, dot1q, ...)
- "service_endpoint_encapsulation_info": {
- ... (dict)
- "vlan": ..., (int, present if encapsulation is dot1q)
- "vni": ... (int, present if encapsulation is vxlan),
- "peers": [(ipv4_1), (ipv4_2)]
- (present if encapsulation is vxlan)
- }
- }
-
- The service endpoint ID should be previously informed to the WIM
- engine in the RO when the WIM port mapping is registered.
-
- Keyword Arguments:
- bandwidth (int): value in kilobytes
- latency (int): value in milliseconds
-
- Other QoS might be passed as keyword arguments.
-
- Returns:
- tuple: ``(service_id, conn_info)`` containing:
- - *service_uuid* (str): UUID of the established connectivity
- service
- - *conn_info* (dict or None): Information to be stored at the
- database (or ``None``). This information will be provided to
- the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
- **MUST** be JSON/YAML-serializable (plain data structures).
-
- Raises:
- WimConnectorException: In case of error.
- """
- if service_type == "ELINE":
- if len(connection_points) > 2:
- raise WimConnectorError('Connections between more than 2 endpoints are not supported')
- if len(connection_points) < 2:
- raise WimConnectorError('Connections must be of at least 2 endpoints')
- """ First step, create the vpn service """
- uuid_l2vpn = str(uuid.uuid4())
- vpn_service = {}
- vpn_service["vpn-id"] = uuid_l2vpn
- vpn_service["vpn-scv-type"] = "vpws"
- vpn_service["svc-topo"] = "any-to-any"
- vpn_service["customer-name"] = "osm"
- vpn_service_list = []
- vpn_service_list.append(vpn_service)
- vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
- response_service_creation = None
- conn_info = []
- self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
- try:
- endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
- self.wim["wim_url"])
- response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
- json=vpn_service_l, auth=self.auth)
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request to create service Timeout", http_code=408)
- if response_service_creation.status_code == 409:
- raise WimConnectorError("Service already exists", http_code=response_service_creation.status_code)
- elif response_service_creation.status_code != requests.codes.created:
- raise WimConnectorError("Request to create service not accepted",
- http_code=response_service_creation.status_code)
- """ Second step, create the connections and vpn attachments """
- for connection_point in connection_points:
- connection_point_wan_info = self.search_mapp(connection_point)
- site_network_access = {}
- connection = {}
- if connection_point["service_endpoint_encapsulation_type"] != "none":
- if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
- """ The connection is a VLAN """
- connection["encapsulation-type"] = "dot1q-vlan-tagged"
- tagged = {}
- tagged_interf = {}
- service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
- if service_endpoint_encapsulation_info["vlan"] is None:
- raise WimConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
- tagged["dot1q-vlan-tagged"] = tagged_interf
- connection["tagged-interface"] = tagged
- else:
- raise NotImplementedError("Encapsulation type not implemented")
- site_network_access["connection"] = connection
- self.logger.info("Sending connection:{}".format(connection))
- vpn_attach = {}
- vpn_attach["vpn-id"] = uuid_l2vpn
- vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
- site_network_access["vpn-attachment"] = vpn_attach
- self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
- uuid_sna = str(uuid.uuid4())
- site_network_access["network-access-id"] = uuid_sna
- site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
- site_network_accesses = {}
- site_network_access_list = []
- site_network_access_list.append(site_network_access)
- site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
- conn_info_d = {}
- conn_info_d["site"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
- conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
- conn_info_d["mapping"] = None
- conn_info.append(conn_info_d)
- try:
- endpoint_site_network_access_creation = \
- "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
- self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
- response_endpoint_site_network_access_creation = requests.post(
- endpoint_site_network_access_creation,
- headers=self.headers,
- json=site_network_accesses,
- auth=self.auth)
-
- if response_endpoint_site_network_access_creation.status_code == 409:
- self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(
- site_network_access["network-access-id"]),
- http_code=response_endpoint_site_network_access_creation.status_code)
-
- elif response_endpoint_site_network_access_creation.status_code == 400:
- self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Site {} does not exist".format(
- connection_point_wan_info["wan_service_mapping_info"]["site-id"]),
- http_code=response_endpoint_site_network_access_creation.status_code)
-
- elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
- response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
- self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Request no accepted",
- http_code=response_endpoint_site_network_access_creation.status_code)
-
- except requests.exceptions.ConnectionError:
- self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Request Timeout", http_code=408)
- return uuid_l2vpn, conn_info
-
- else:
- raise NotImplementedError
-
- def delete_connectivity_service(self, service_uuid, conn_info=None):
- """Disconnect multi-site endpoints previously connected
-
- This method should receive as the first argument the UUID generated by
- the ``create_connectivity_service``
- """
- try:
- self.logger.info("Sending delete")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
- self.wim["wim_url"], service_uuid)
- response = requests.delete(servicepoint, auth=self.auth)
- if response.status_code != requests.codes.no_content:
- raise WimConnectorError("Error in the request", http_code=response.status_code)
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout", http_code=408)
-
- def edit_connectivity_service(self, service_uuid, conn_info=None,
- connection_points=None, **kwargs):
- """Change an existing connectivity service, see
- ``create_connectivity_service``"""
-
- # sites = {"sites": {}}
- # site_list = []
- vpn_service = {}
- vpn_service["svc-topo"] = "any-to-any"
- counter = 0
- for connection_point in connection_points:
- site_network_access = {}
- connection_point_wan_info = self.search_mapp(connection_point)
- params_site = {}
- params_site["site-id"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
- params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
- device_site = {}
- device_site["device-id"] = connection_point_wan_info["device-id"]
- params_site["devices"] = device_site
- # network_access = {}
- connection = {}
- if connection_point["service_endpoint_encapsulation_type"] != "none":
- if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
- """ The connection is a VLAN """
- connection["encapsulation-type"] = "dot1q-vlan-tagged"
- tagged = {}
- tagged_interf = {}
- service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
- if service_endpoint_encapsulation_info["vlan"] is None:
- raise WimConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
- tagged["dot1q-vlan-tagged"] = tagged_interf
- connection["tagged-interface"] = tagged
- else:
- raise NotImplementedError("Encapsulation type not implemented")
- site_network_access["connection"] = connection
- vpn_attach = {}
- vpn_attach["vpn-id"] = service_uuid
- vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
- site_network_access["vpn-attachment"] = vpn_attach
- uuid_sna = conn_info[counter]["site-network-access-id"]
- site_network_access["network-access-id"] = uuid_sna
- site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
- site_network_accesses = {}
- site_network_access_list = []
- site_network_access_list.append(site_network_access)
- site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
- try:
- endpoint_site_network_access_edit = \
- "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
- self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
- response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
- headers=self.headers,
- json=site_network_accesses,
- auth=self.auth)
- if response_endpoint_site_network_access_creation.status_code == 400:
- raise WimConnectorError("Service does not exist",
- http_code=response_endpoint_site_network_access_creation.status_code)
- elif response_endpoint_site_network_access_creation.status_code != 201 and \
- response_endpoint_site_network_access_creation.status_code != 204:
- raise WimConnectorError("Request no accepted",
- http_code=response_endpoint_site_network_access_creation.status_code)
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout", http_code=408)
- counter += 1
- return None
-
- def clear_all_connectivity_services(self):
- """Delete all WAN Links corresponding to a WIM"""
- try:
- self.logger.info("Sending clear all connectivity services")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- response = requests.delete(servicepoint, auth=self.auth)
- if response.status_code != requests.codes.no_content:
- raise WimConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout", http_code=408)
-
- def get_all_active_connectivity_services(self):
- """Provide information about all active connections provisioned by a
- WIM
- """
- try:
- self.logger.info("Sending get all connectivity services")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- response = requests.get(servicepoint, auth=self.auth)
- if response.status_code != requests.codes.ok:
- raise WimConnectorError("Unable to get all connectivity services", http_code=response.status_code)
- return response
- except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout", http_code=408)
# funded by the European Commission under Grant number 761727 through the
# Horizon 2020 and 5G-PPP programmes.
##
-from .wimconn import WimConnector
+from .sdnconn import SdnConnectorBase
# TODO: Basically create this file
-class OdlConnector(WimConnector):
+class OdlConnector(SdnConnectorBase):
def get_connectivity_service_status(self, link_uuid):
raise NotImplementedError
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
PyYAML
bottle
MySQL-python
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
nsd:nsd-catalog:
nsd:
- id: test_2vdu_nsd
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
vnfd-catalog:
vnfd:
- connection-point:
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
vnfd-catalog:
vnfd:
- connection-point:
# VIM Opennebula plugin
make -C RO-VIM-opennebula clean package
cp RO-VIM-opennebula/deb_dist/python3-osm-rovim-opennebula_*.deb deb_dist/
+
+# SDN Dynpac plugin
+make -C RO-SDN-dynpac clean package
+cp RO-SDN-dynpac/deb_dist/python3-osm-rosdn-dynpac_*.deb deb_dist/
+
+# SDN Tapi plugin
+make -C RO-SDN-tapi clean package
+cp RO-SDN-tapi/deb_dist/python3-osm-rosdn-tapi_*.deb deb_dist/
+
+# SDN ONOS openflow plugin
+make -C RO-SDN-onos_openflow clean package
+cp RO-SDN-onos_openflow/deb_dist/python3-osm-rosdn-onosof_*.deb deb_dist/
+
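+# The built packages land in deb_dist/ and can be installed afterwards, for
+# example (illustrative only):
+#   dpkg -i deb_dist/python3-osm-rosdn-dynpac_*.deb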