From: tierno Date: Sat, 23 Nov 2019 15:11:15 +0000 (+0000) Subject: Merge branch 'py3' features 8029 8030 X-Git-Tag: v7.0.0rc1~13 X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FRO.git;a=commitdiff_plain;h=refs%2Fchanges%2F22%2F8222%2F4;hp=a85c54de5c5d1f951b27082a21e5654e15712529 Merge branch 'py3' features 8029 8030 Change-Id: Ia670d01fc45d63f4051209ef73ca272054895873 Signed-off-by: tierno --- diff --git a/Dockerfile b/Dockerfile index a0f45ba9..8eec0077 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,30 +18,36 @@ # Use docker/Dockerfile-local for running osm/RO in a docker container from source FROM ubuntu:16.04 - RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils flake8 python-nose python-mock && \ - DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \ - DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \ - DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \ - DEBIAN_FRONTEND=noninteractive pip install -U pyang pyangbind && \ - DEBIAN_FRONTEND=noninteractive pip3 install -U pyang pyangbind && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml python-netaddr python-boto && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \ - DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \ - DEBIAN_FRONTEND=noninteractive apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \ - DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \ - DEBIAN_FRONTEND=noninteractive pip install -U fog05rest && \ - DEBIAN_FRONTEND=noninteractive pip install -U azure && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \ - DEBIAN_FRONTEND=noninteractive pip install untangle && \ - DEBIAN_FRONTEND=noninteractive pip install pyone && \ - DEBIAN_FRONTEND=noninteractive pip install -e git+https://github.com/python-oca/python-oca#egg=oca + DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make python-all python3 python3-pip debhelper wget && \ + DEBIAN_FRONTEND=noninteractive apt-get --yes install python3-all libssl-dev flake8 && \ + DEBIAN_FRONTEND=noninteractive pip3 install -U setuptools setuptools-version-command stdeb + +# FROM ubuntu:16.04 +# RUN apt-get update && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils flake8 python-nose python-mock && \ +# DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \ +# DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \ +# DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \ +# DEBIAN_FRONTEND=noninteractive pip install -U pyang pyangbind && \ +# DEBIAN_FRONTEND=noninteractive pip3 install -U pyang pyangbind && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml 
python-netaddr python-boto && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \ +# DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \ +# DEBIAN_FRONTEND=noninteractive apt-get update && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \ +# DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \ +# DEBIAN_FRONTEND=noninteractive pip install -U fog05rest && \ +# DEBIAN_FRONTEND=noninteractive pip install -U azure && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \ +# DEBIAN_FRONTEND=noninteractive pip install untangle && \ +# DEBIAN_FRONTEND=noninteractive pip install pyone && \ +# DEBIAN_FRONTEND=noninteractive pip install -e git+https://github.com/python-oca/python-oca#egg=oca +# TODO py3 comment # Uncomment this block to generate automatically a debian package and show info # # Set the working directory to /app # WORKDIR /app diff --git a/Dockerfile-local b/Dockerfile-local new file mode 100644 index 00000000..8430fa90 --- /dev/null +++ b/Dockerfile-local @@ -0,0 +1,100 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +FROM ubuntu:18.04 + +LABEL authors="Alfonso Tierno" + +RUN apt-get update && apt-get install -y git python3 python3-pip \ + && python3 -m pip install --upgrade pip \ + && DEBIAN_FRONTEND=noninteractive apt-get -y install libmysqlclient-dev mysql-client \ + && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U networking-l2gw \ + && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U progressbar pyvmomi pyvcloud==19.1.1 \ + && DEBIAN_FRONTEND=noninteractive apt-get -y install genisoimage + +# This is not needed, because package dependency will install anyway. 
+# But done here in order to harry up image generation using cache +RUN DEBIAN_FRONTEND=noninteractive apt-get -y install python3-neutronclient python3-openstackclient \ + python3-requests python3-netaddr python3-argcomplete + +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \ +# TODO py3 DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:rocky && apt-get update && apt-get install -y python3-networking-l2gw \ + +# DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libssl-dev libffi-dev python-mysqldb && \ +# DEBIAN_FRONTEND=noninteractive pip2 install -U azure && \ +# DEBIAN_FRONTEND=noninteractive pip2 install -U fog05rest && \ +# && DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \ +# DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \ +# DEBIAN_FRONTEND=noninteractive pip2 install untangle && \ +# DEBIAN_FRONTEND=noninteractive pip2 install pyone && \ +# DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \ + +COPY . /root/RO + +RUN /root/RO/RO/osm_ro/scripts/install-osm-im.sh --develop && \ + /root/RO/RO/osm_ro/scripts/install-lib-osm-openvim.sh --develop && \ + mkdir -p /var/log/osm && \ + python3 -m pip install -e /root/RO/RO && \ + python3 -m pip install -e /root/RO/RO-client && \ + python3 -m pip install -e /root/RO/RO-VIM-vmware && \ + python3 -m pip install -e /root/RO/RO-VIM-openstack && \ + python3 -m pip install -e /root/RO/RO-VIM-openvim && \ + python3 -m pip install -e /root/RO/RO-VIM-aws && \ + python3 -m pip install -e /root/RO/RO-VIM-fos && \ + python3 -m pip install -e /root/RO/RO-SDN-dynpac && \ + python3 -m pip install -e /root/RO/RO-SDN-tapi && \ + rm -rf /root/.cache && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +VOLUME /var/log/osm + +EXPOSE 9090 + +# Two mysql databases are needed (DB and DB_OVIM). Can be hosted on same or separated containers +# These ENV must be provided +# RO_DB_HOST: host of the main +# RO_DB_OVIM_HOST: ... if empty RO_DB_HOST is assumed +# RO_DB_ROOT_PASSWORD: this has to be provided first time for creating database. It will create and init only if empty! +# RO_DB_OVIM_ROOT_PASSWORD: ... 
if empty RO_DB_ROOT_PASSWORD is assumed +# RO_DB_USER: default value 'mano' +# RO_DB_OVIM_USER: default value 'mano' +# RO_DB_PASSWORD: default value 'manopw' +# RO_DB_OVIM_PASSWORD: default value 'manopw' +# RO_DB_PORT: default value '3306' +# RO_DB_OVIM_PORT: default value '3306' +# RO_DB_NAME: default value 'mano_db' +# RO_DB_OVIM_NAME: default value 'mano_vim_db' +# RO_LOG_FILE: default log to stderr if not defined + +ENV RO_DB_HOST="" \ + RO_DB_OVIM_HOST="" \ + RO_DB_ROOT_PASSWORD="" \ + RO_DB_OVIM_ROOT_PASSWORD="" \ + RO_DB_USER=mano \ + RO_DB_OVIM_USER=mano \ + RO_DB_PASSWORD=manopw \ + RO_DB_OVIM_PASSWORD=manopw \ + RO_DB_PORT=3306 \ + RO_DB_OVIM_PORT=3306 \ + RO_DB_NAME=mano_db \ + RO_DB_OVIM_NAME=mano_vim_db \ + OPENMANO_TENANT=osm \ + RO_LOG_LEVEL=DEBUG + +CMD RO-start.sh + +# HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=12 \ +# CMD curl --silent --fail localhost:9090/openmano/tenants || exit 1 diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 483b709e..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -#include MANIFEST.in -#include requirements.txt -include README.rst -include openmano -include openmanod -recursive-include osm_ro * - diff --git a/Makefile b/Makefile index 33deb4e9..90ee12c3 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,3 @@ -# Copyright 2018 Telefonica S.A. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -13,110 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: all test clean - -SHELL := /bin/bash - -BRANCH ?= master - -all: # lib-openvim # osm-im - $(MAKE) clean_build build - $(MAKE) clean_build package - -clean: clean_build - rm -rf .build openvim IM - -clean_build: - rm -rf build - find osm_ro -name '*.pyc' -delete - find osm_ro -name '*.pyo' -delete - -prepare: -# ip install --user --upgrade setuptools - mkdir -p build/ -# VER1=$(shell git describe | sed -e 's/^v//' |cut -d- -f1); \ -# VER2=$(shell git describe | cut -d- -f2); \ -# VER3=$(shell git describe | cut -d- -f3); \ -# echo "$$VER1.dev$$VER2+$$VER3" > build/RO_VERSION - cp tox.ini build/ - cp MANIFEST.in build/ - cp requirements.txt build/ - cp README.rst build/ - cp setup.py build/ - cp stdeb.cfg build/ - cp -r osm_ro build/ - cp openmano build/ - cp openmanod build/ - cp -r vnfs build/osm_ro - cp -r scenarios build/osm_ro - cp -r instance-scenarios build/osm_ro - cp -r scripts build/osm_ro - cp -r database_utils build/osm_ro - cp LICENSE build/osm_ro - -connectors: prepare - # python-novaclient is required for that - rm -f build/osm_ro/openmanolinkervimconn.py - cd build/osm_ro; for i in `ls vimconn_*.py |sed "s/\.py//"` ; do echo "import $$i" >> openmanolinkervimconn.py; done - python build/osm_ro/openmanolinkervimconn.py 2>&1 - rm -f build/osm_ro/openmanolinkervimconn.py - -build: connectors prepare - python -m py_compile build/osm_ro/*.py -# cd build && tox -e flake8 - -lib-openvim: - $(shell git clone https://osm.etsi.org/gerrit/osm/openvim) - LIB_BRANCH=$(shell git -C openvim branch -a|grep -oP 'remotes/origin/\K$(BRANCH)'); \ - [ -z "$$LIB_BRANCH" ] && LIB_BRANCH='master'; \ - echo "BRANCH: $(BRANCH)"; \ - echo "LIB_OPENVIM_BRANCH: $$LIB_BRANCH"; \ - git -C openvim checkout $$LIB_BRANCH - make -C openvim clean lite - -osm-im: - $(shell git clone https://osm.etsi.org/gerrit/osm/IM) - make -C IM clean all - -package: prepare -# apt-get 
install -y python-stdeb - cd build && python setup.py --command-packages=stdeb.command sdist_dsc --with-python2=True - cd build && cp osm_ro/scripts/python-osm-ro.postinst deb_dist/osm-ro*/debian/ - cd build/deb_dist/osm-ro* && dpkg-buildpackage -rfakeroot -uc -us - mkdir -p .build - cp build/deb_dist/python-*.deb .build/ - -snap: - echo "Nothing to be done yet" - -install: lib-openvim osm-im - dpkg -i IM/deb_dist/python-osm-im*.deb - dpkg -i openvim/.build/python-lib-osm-openvim*.deb - dpkg -i .build/python-osm-ro*.deb - cd .. && \ - OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` || FATAL "lib-osm-openvim was not properly installed" && \ - OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'` || FATAL "osm-ro was not properly installed" && \ - USER=root DEBIAN_FRONTEND=noninteractive $$OSMRO_PATH/database_utils/install-db-server.sh --updatedb || FATAL "osm-ro db installation failed" && \ - USER=root DEBIAN_FRONTEND=noninteractive $$OSMLIBOVIM_PATH/database_utils/install-db-server.sh -u mano -p manopw -d mano_vim_db --updatedb || FATAL "lib-osm-openvim db installation failed" - service osm-ro restart - -develop: prepare -# pip install -r requirements.txt - cd build && ./setup.py develop - -test: - . ./test/basictest.sh -f --insert-bashrc --install-openvim --init-openvim - . ./test/basictest.sh -f reset add-openvim - ./test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast - ./test/test_RO.py vim -t osm -d local-openvim --timeout=30 --failfast - -build-docker-from-source: - docker build -t osm/openmano -f docker/Dockerfile-local . +SUBDIRS := $(wildcard */Makefile) -run-docker: - docker-compose -f docker/openmano-compose.yml up +all: clean package +clean: $(SUBDIRS) +package: $(SUBDIRS) -stop-docker: - docker-compose -f docker/openmano-compose.yml down +$(SUBDIRS): + $(MAKE) -C $(@:Makefile=) $(MAKECMDGOALS) +.PHONY: all $(SUBDIRS) diff --git a/README.rst b/README.rst deleted file mode 100644 index 3a2be888..00000000 --- a/README.rst +++ /dev/null @@ -1,8 +0,0 @@ -=========== -osm-ro -=========== - -osm-ro is the Resource Orchestrator for OSM, dealing with resource operations -against different VIMs such as Openstack, VMware's vCloud Director, openvim -and AWS. - diff --git a/RO-SDN-dynpac/Makefile b/RO-SDN-dynpac/Makefile new file mode 100644 index 00000000..9fb4408b --- /dev/null +++ b/RO-SDN-dynpac/Makefile @@ -0,0 +1,24 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rosdn_dynpac-*.tar.gz osm_rosdn_dynpac.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cd deb_dist/osm-rosdn-dynpac*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py b/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py new file mode 100644 index 00000000..b32856b4 --- /dev/null +++ b/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Copyright 2018 David García, University of the Basque Country +# Copyright 2018 University of the Basque Country +# This file is part of openmano +# All Rights Reserved. +# Contact information at http://i2t.ehu.eus +# +# # Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import requests +import json +import logging +from enum import Enum + +from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError + + +class SdnError(Enum): + UNREACHABLE = 'Unable to reach the WIM.', + SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.', + CONNECTION_POINTS_SIZE = \ + 'Unexpected number of connection points: 2 expected.', + ENCAPSULATION_TYPE = \ + 'Unexpected service_endpoint_encapsulation_type. 
\ + Only "dotq1" is accepted.', + BANDWIDTH = 'Unable to get the bandwidth.', + STATUS = 'Unable to get the status for the service.', + DELETE = 'Unable to delete service.', + CLEAR_ALL = 'Unable to clear all the services', + UNKNOWN_ACTION = 'Unknown action invoked.', + BACKUP = 'Unable to get the backup parameter.', + UNSUPPORTED_FEATURE = "Unsupported feature", + UNAUTHORIZED = "Failed while authenticating" + + +class SdnAPIActions(Enum): + CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY", + CREATE_SERVICE = "CREATE_SERVICE", + DELETE_SERVICE = "DELETE_SERVICE", + CLEAR_ALL = "CLEAR_ALL", + SERVICE_STATUS = "SERVICE_STATUS", + + +class DynpacConnector(SdnConnectorBase): + __supported_service_types = ["ELINE (L2)", "ELINE"] + __supported_encapsulation_types = ["dot1q"] + __WIM_LOGGER = 'openmano.sdnconn.dynpac' + __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type" + __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info" + __BACKUP_PARAM = "backup" + __BANDWIDTH_PARAM = "bandwidth" + __SERVICE_ENDPOINT_PARAM = "service_endpoint_id" + __WAN_SERVICE_ENDPOINT_PARAM = "service_endpoint_id" + __WAN_MAPPING_INFO_PARAM = "service_mapping_info" + __SW_ID_PARAM = "switch_dpid" + __SW_PORT_PARAM = "switch_port" + __VLAN_PARAM = "vlan" + + # Public functions exposed to the Resource Orchestrator + def __init__(self, wim, wim_account, config=None, logger=None): + self.logger = logger or logging.getLogger(self.__WIM_LOGGER) + super().__init__(wim, wim_account, config, self.logger) + self.__wim = wim + self.__wim_account = wim_account + self.__config = config + self.__wim_url = self.__wim.get("wim_url") + self.__user = wim_account.get("user") + self.__passwd = wim_account.get("password") + self.logger.info("Initialized.") + + def create_connectivity_service(self, service_type, connection_points, **kwargs): + self.__check_service(service_type, connection_points, kwargs) + + body = self.__get_body(service_type, connection_points, kwargs) + + headers = {'Content-type': 'application/x-www-form-urlencoded'} + endpoint = "{}/service/create".format(self.__wim_url) + + try: + response = requests.post(endpoint, data=body, headers=headers) + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + + if response.status_code != 200: + error = json.loads(response.content) + reason = "Reason: {}. 
".format(error.get("code")) + description = "Description: {}.".format(error.get("description")) + exception = reason + description + self.__exception(exception, http_code=response.status_code) + uuid = response.content + self.logger.info("Service with uuid {} created.".format(uuid)) + return (uuid, None) + + def edit_connectivity_service(self, service_uuid, + conn_info, connection_points, + **kwargs): + self.__exception(SdnError.UNSUPPORTED_FEATURE, http_code=501) + + def get_connectivity_service_status(self, service_uuid): + endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid) + try: + response = requests.get(endpoint) + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + + if response.status_code != 200: + self.__exception(SdnError.STATUS, http_code=response.status_code) + self.logger.info("Status for service with uuid {}: {}" + .format(service_uuid, response.content)) + return response.content + + def delete_connectivity_service(self, service_uuid, conn_info): + endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid) + try: + response = requests.delete(endpoint) + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + if response.status_code != 200: + self.__exception(SdnError.DELETE, http_code=response.status_code) + + self.logger.info("Service with uuid: {} deleted".format(service_uuid)) + + def clear_all_connectivity_services(self): + endpoint = "{}/service/clearAll".format(self.__wim_url) + try: + response = requests.delete(endpoint) + http_code = response.status_code + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + if http_code != 200: + self.__exception(SdnError.CLEAR_ALL, http_code=http_code) + + self.logger.info("{} services deleted".format(response.content)) + return "{} services deleted".format(response.content) + + def check_connectivity(self): + endpoint = "{}/checkConnectivity".format(self.__wim_url) + + try: + response = requests.get(endpoint) + http_code = response.status_code + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + + if http_code != 200: + self.__exception(SdnError.UNREACHABLE, http_code=http_code) + self.logger.info("Connectivity checked") + + def check_credentials(self): + endpoint = "{}/checkCredentials".format(self.__wim_url) + auth = (self.__user, self.__passwd) + + try: + response = requests.get(endpoint, auth=auth) + http_code = response.status_code + except requests.exceptions.RequestException as e: + self.__exception(e.message, http_code=503) + + if http_code != 200: + self.__exception(SdnError.UNAUTHORIZED, http_code=http_code) + self.logger.info("Credentials checked") + + # Private functions + def __exception(self, x, **kwargs): + http_code = kwargs.get("http_code") + if hasattr(x, "value"): + error = x.value + else: + error = x + self.logger.error(error) + raise SdnConnectorError(error, http_code=http_code) + + def __check_service(self, service_type, connection_points, kwargs): + if service_type not in self.__supported_service_types: + self.__exception(SdnError.SERVICE_TYPE_ERROR, http_code=400) + + if len(connection_points) != 2: + self.__exception(SdnError.CONNECTION_POINTS_SIZE, http_code=400) + + for connection_point in connection_points: + enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM) + if enc_type not in self.__supported_encapsulation_types: + self.__exception(SdnError.ENCAPSULATION_TYPE, http_code=400) + + # 
Commented out for as long as parameter isn't implemented + # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM) + # if not isinstance(bandwidth, int): + # self.__exception(SdnError.BANDWIDTH, http_code=400) + + # Commented out for as long as parameter isn't implemented + # backup = kwargs.get(self.__BACKUP_PARAM) + # if not isinstance(backup, bool): + # self.__exception(SdnError.BACKUP, http_code=400) + + def __get_body(self, service_type, connection_points, kwargs): + port_mapping = self.__config.get("service_endpoint_mapping") + selected_ports = [] + for connection_point in connection_points: + endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM) + port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0] + port_info = port.get(self.__WAN_MAPPING_INFO_PARAM) + selected_ports.append(port_info) + if service_type == "ELINE (L2)" or service_type == "ELINE": + service_type = "L2" + body = { + "connection_points": [{ + "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM), + "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM), + "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM) + }, { + "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM), + "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM), + "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM) + }], + "bandwidth": 100, # Hardcoded for as long as parameter isn't implemented + "service_type": service_type, + "backup": False # Hardcoded for as long as parameter isn't implemented + } + return "body={}".format(json.dumps(body)) diff --git a/RO-SDN-dynpac/requirements.txt b/RO-SDN-dynpac/requirements.txt new file mode 100644 index 00000000..44c797f2 --- /dev/null +++ b/RO-SDN-dynpac/requirements.txt @@ -0,0 +1,18 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +requests +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro + diff --git a/RO-SDN-dynpac/setup.py b/RO-SDN-dynpac/setup.py new file mode 100644 index 00000000..46d25e16 --- /dev/null +++ b/RO-SDN-dynpac/setup.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +from setuptools import setup + +_name = "osm_rosdn_dynpac" + +README = """ +=========== +osm-rosdn_dynpac +=========== + +osm-ro pluging for dynpac SDN +""" + +setup( + name=_name, + description='OSM ro sdn plugin for dynpac', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + # TODO py3 author_email='', + maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3 + # TODO py3 maintainer_email='', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=["requests", "osm-ro"], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rosdn.plugins': ['rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac'], + }, +) diff --git a/RO-SDN-dynpac/stdeb.cfg b/RO-SDN-dynpac/stdeb.cfg new file mode 100644 index 00000000..0c718e4f --- /dev/null +++ b/RO-SDN-dynpac/stdeb.cfg @@ -0,0 +1,19 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-requests, python3-osm-ro + diff --git a/RO-SDN-dynpac/tox.ini b/RO-SDN-dynpac/tox.ini new file mode 100644 index 00000000..a1e866ab --- /dev/null +++ b/RO-SDN-dynpac/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rosdn_dynpac --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rosdn_dynpac.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-SDN-onos_openflow/Makefile b/RO-SDN-onos_openflow/Makefile new file mode 100644 index 00000000..5e96ce0b --- /dev/null +++ b/RO-SDN-onos_openflow/Makefile @@ -0,0 +1,24 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rosdn_onosof-*.tar.gz osm_rosdn_onosof.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cd deb_dist/osm-rosdn-onosof*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py new file mode 100644 index 00000000..060d1d37 --- /dev/null +++ b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py @@ -0,0 +1,469 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +## +# Copyright 2016, I2T Research Group (UPV/EHU) +# This file is part of openvim +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: alaitz.mendiola@ehu.eus or alaitz.mendiola@gmail.com +## + +''' +ImplementS the pluging for the Open Network Operating System (ONOS) openflow +controller. It creates the class OF_conn to create dataplane connections +with static rules based on packet destination MAC address +''' + +__author__="Alaitz Mendiola" +__date__ ="$22-nov-2016$" + + +import json +import requests +import base64 +import logging +from osm_ro.wim.openflow_conn import OpenflowConn, OpenflowConnException, OpenflowConnConnectionException, \ + OpenflowConnUnexpectedResponse, OpenflowConnAuthException, OpenflowConnNotFoundException, \ + OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented + + +class OfConnOnos(OpenflowConn): + """ + ONOS connector. No MAC learning is used + """ + def __init__(self, params): + """ Constructor. + Params: dictionary with the following keys: + of_dpid: DPID to use for this controller ?? Does a controller have a dpid? + url: must be [http://HOST:PORT/ + of_user: user credentials, can be missing or None + of_password: password credentials + of_debug: debug level for logging. 
Default to ERROR + other keys are ignored + Raise an exception if same parameter is missing or wrong + """ + + OpenflowConn.__init__(self, params) + + # check params + url = params.get("of_url") + if not url: + raise ValueError("'url' must be provided") + if not url.startswith("http"): + url = "http://" + url + if not url.endswith("/"): + url = url + "/" + self.url = url + "onos/v1/" + + #internal variables + self.name = "onosof" + self.headers = {'content-type':'application/json','accept':'application/json',} + + self.auth="None" + self.pp2ofi={} # From Physical Port to OpenFlow Index + self.ofi2pp={} # From OpenFlow Index to Physical Port + + self.dpid = str(params["of_dpid"]) + self.id = 'of:'+str(self.dpid.replace(':', '')) + + # TODO This may not be straightforward + if params.get("of_user"): + of_password=params.get("of_password", "") + self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8")) + self.auth = self.auth.decode() + self.headers['authorization'] = 'Basic ' + self.auth + + self.logger = logging.getLogger('vim.OF.onos') + self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) ) + self.ip_address = None + + def get_of_switches(self): + """ + Obtain a a list of switches or DPID detected by this controller + :return: list where each element a tuple pair (DPID, IP address) + Raise a openflowconnUnexpectedResponse expection in case of failure + """ + try: + self.headers['content-type'] = 'text/plain' + of_response = requests.get(self.url + "devices", headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + self.logger.debug("get_of_switches " + error_text) + info = of_response.json() + + if type(info) != dict: + self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info)) + raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") + + node_list = info.get('devices') + + if type(node_list) is not list: + self.logger.error( + "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s", + str(type(node_list))) + raise OpenflowConnUnexpectedResponse("Unexpected response, at 'devices', not found " + "or not a list. Wrong version?") + + switch_list = [] + for node in node_list: + node_id = node.get('id') + if node_id is None: + self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s", + str(node)) + raise OpenflowConnUnexpectedResponse("Unexpected response at 'device':'id', " + "not found . Wrong version?") + + node_ip_address = node.get('annotations').get('managementAddress') + if node_ip_address is None: + self.logger.error( + "get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s", + str(node)) + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'device':'managementAddress', not found. 
Wrong version?") + + node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16) + + switch_list.append( + (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address)) + return switch_list + + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_switches " + error_text) + raise OpenflowConnConnectionException(error_text) + except ValueError as e: + # ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + def obtain_port_correspondence(self): + """ + Obtain the correspondence between physical and openflow port names + :return: dictionary with physical name as key, openflow name as value + Raise a openflowconnUnexpectedResponse expection in case of failure + """ + try: + self.headers['content-type'] = 'text/plain' + of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + self.logger.debug("obtain_port_correspondence " + error_text) + info = of_response.json() + + node_connector_list = info.get('ports') + if type(node_connector_list) is not list: + self.logger.error( + "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s", + str(node_connector_list)) + raise OpenflowConnUnexpectedResponse("Unexpected response at 'ports', not found or not " + "a list. Wrong version?") + + for node_connector in node_connector_list: + if node_connector['port'] != "local": + self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port']) + self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName']) + + node_ip_address = info['annotations']['managementAddress'] + if node_ip_address is None: + self.logger.error( + "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s", + str(self.id)) + raise OpenflowConnUnexpectedResponse("Unexpected response at 'managementAddress', " + "not found. 
Wrong version?") + self.ip_address = node_ip_address + + # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi + return self.pp2ofi + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnConnectionException(error_text) + except ValueError as e: + # ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + def get_of_rules(self, translate_of_ports=True): + """ + Obtain the rules inserted at openflow controller + :param translate_of_ports: if True it translates ports from openflow index to physical switch name + :return: list where each item is a dictionary with the following content: + priority: rule priority + name: rule name (present also as the master dict key) + ingress_port: match input port of the rule + dst_mac: match destination mac address of the rule, can be missing or None if not apply + vlan_id: match vlan tag of the rule, can be missing or None if not apply + actions: list of actions, composed by a pair tuples: + (vlan, None/int): for stripping/setting a vlan tag + (out, port): send to this port + switch: DPID, all + Raise a openflowconnUnexpectedResponse expection in case of failure + """ + + try: + + if len(self.ofi2pp) == 0: + self.obtain_port_correspondence() + + # get rules + self.headers['content-type'] = 'text/plain' + of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + + # The configured page does not exist if there are no rules installed. In that case we return an empty dict + if of_response.status_code == 404: + return {} + + elif of_response.status_code != 200: + self.logger.warning("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("get_of_rules " + error_text) + + info = of_response.json() + + if type(info) != dict: + self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info)) + raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. " + "Wrong version?") + + flow_list = info.get('flows') + + if flow_list is None: + return {} + + if type(flow_list) is not list: + self.logger.error( + "get_of_rules. Unexpected response at 'flows', not a list: %s", + str(type(flow_list))) + raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. " + "Wrong version?") + + rules = [] # Response list + for flow in flow_list: + if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \ + 'instructions' in flow['treatment'] and 'criteria' in \ + flow['selector']): + raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more " + "elements are missing. 
Wrong version?") + + rule = dict() + rule['switch'] = self.dpid + rule['priority'] = flow.get('priority') + rule['name'] = flow['id'] + + for criteria in flow['selector']['criteria']: + if criteria['type'] == 'IN_PORT': + in_port = str(criteria['port']) + if in_port != "CONTROLLER": + if not in_port in self.ofi2pp: + raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not " + "in switch port list".format(in_port)) + if translate_of_ports: + in_port = self.ofi2pp[in_port] + rule['ingress_port'] = in_port + + elif criteria['type'] == 'VLAN_VID': + rule['vlan_id'] = criteria['vlanId'] + + elif criteria['type'] == 'ETH_DST': + rule['dst_mac'] = str(criteria['mac']).lower() + + actions = [] + for instruction in flow['treatment']['instructions']: + if instruction['type'] == "OUTPUT": + out_port = str(instruction['port']) + if out_port != "CONTROLLER": + if not out_port in self.ofi2pp: + raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in " + "switch port list".format(out_port)) + + if translate_of_ports: + out_port = self.ofi2pp[out_port] + + actions.append( ('out', out_port) ) + + if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP": + actions.append( ('vlan', 'None') ) + if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID": + actions.append( ('vlan', instruction['vlanId']) ) + + rule['actions'] = actions + rules.append(rule) + return rules + + except requests.exceptions.RequestException as e: + # ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_rules " + error_text) + raise OpenflowConnConnectionException(error_text) + except ValueError as e: + # ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + def del_flow(self, flow_name): + """ + Delete an existing rule + :param flow_name: + :return: Raise a openflowconnUnexpectedResponse expection in case of failure + """ + + try: + self.headers['content-type'] = None + of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + + if of_response.status_code != 204: + self.logger.warning("del_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + self.logger.debug("del_flow OK " + error_text) + return None + + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("del_flow " + error_text) + raise OpenflowConnConnectionException(error_text) + + def new_flow(self, data): + """ + Insert a new static rule + :param data: dictionary with the following content: + priority: rule priority + name: rule name + ingress_port: match input port of the rule + dst_mac: match destination mac address of the rule, missing or None if not apply + vlan_id: match vlan tag of the rule, missing or None if not apply + actions: list of actions, composed by a pair tuples with these posibilities: + ('vlan', None/int): for stripping/setting a vlan tag + ('out', port): send to this port + :return: Raise a openflowconnUnexpectedResponse expection in case of failure + """ + try: + + if len(self.pp2ofi) == 0: + self.obtain_port_correspondence() + + # Build the dictionary with the flow rule information for ONOS + flow = dict() + #flow['id'] = data['name'] + 
flow['tableId'] = 0 + flow['priority'] = data.get('priority') + flow['timeout'] = 0 + flow['isPermanent'] = "true" + flow['appId'] = 10 # FIXME We should create an appId for OSM + flow['selector'] = dict() + flow['selector']['criteria'] = list() + + # Flow rule matching criteria + if not data['ingress_port'] in self.pp2ofi: + error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch' + self.logger.warning("new_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + ingress_port_criteria = dict() + ingress_port_criteria['type'] = "IN_PORT" + ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']] + flow['selector']['criteria'].append(ingress_port_criteria) + + if 'dst_mac' in data: + dst_mac_criteria = dict() + dst_mac_criteria["type"] = "ETH_DST" + dst_mac_criteria["mac"] = data['dst_mac'] + flow['selector']['criteria'].append(dst_mac_criteria) + + if data.get('vlan_id'): + vlan_criteria = dict() + vlan_criteria["type"] = "VLAN_VID" + vlan_criteria["vlanId"] = int(data['vlan_id']) + flow['selector']['criteria'].append(vlan_criteria) + + # Flow rule treatment + flow['treatment'] = dict() + flow['treatment']['instructions'] = list() + flow['treatment']['deferred'] = list() + + for action in data['actions']: + new_action = dict() + if action[0] == "vlan": + new_action['type'] = "L2MODIFICATION" + if action[1] == None: + new_action['subtype'] = "VLAN_POP" + else: + new_action['subtype'] = "VLAN_ID" + new_action['vlanId'] = int(action[1]) + elif action[0] == 'out': + new_action['type'] = "OUTPUT" + if not action[1] in self.pp2ofi: + error_msj = 'Port '+ action[1] + ' is not present in the switch' + raise OpenflowConnUnexpectedResponse(error_msj) + new_action['port'] = self.pp2ofi[action[1]] + else: + error_msj = "Unknown item '%s' in action list" % action[0] + self.logger.error("new_flow " + error_msj) + raise OpenflowConnUnexpectedResponse(error_msj) + + flow['treatment']['instructions'].append(new_action) + + self.headers['content-type'] = 'application/json' + path = self.url + "flows/" + self.id + of_response = requests.post(path, headers=self.headers, data=json.dumps(flow) ) + + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 201: + self.logger.warning("new_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + + flowId = of_response.headers['location'][path.__len__() + 1:] + + data['name'] = flowId + + self.logger.debug("new_flow OK " + error_text) + return None + + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("new_flow " + error_text) + raise OpenflowConnConnectionException(error_text) + + def clear_all_flows(self): + """ + Delete all existing rules + :return: Raise a openflowconnUnexpectedResponse expection in case of failure + """ + try: + rules = self.get_of_rules(True) + + for rule in rules: + self.del_flow(rule) + + self.logger.debug("clear_all_flows OK ") + return None + + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("clear_all_flows " + error_text) + raise OpenflowConnConnectionException(error_text) diff --git a/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py b/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py new file mode 100644 index 00000000..79c14412 --- /dev/null +++ b/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py @@ -0,0 +1,41 @@ +## +# Copyright 2019 Telefonica 
Investigacion y Desarrollo, S.A.U. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +## +"""The SdnConnectorOnosOf connector is responsible for creating services using pro active operflow rules. +""" + +import logging +from osm_ro.wim.openflow_conn import SdnConnectorOpenFlow +from .onos_of import OfConnOnos + + +class SdnConnectorOnosOf(SdnConnectorOpenFlow): + + def __init__(self, wim, wim_account, config=None, logger=None): + """Creates a connectivity based on pro-active openflow rules + """ + self.logger = logging.getLogger('openmano.sdnconn.onosof') + super().__init__(wim, wim_account, config, logger) + of_params = { + "of_url": wim["wim_url"], + "of_dpid": config.get("dpid"), + "of_user": wim_account["user"], + "of_password": wim_account["password"], + } + self.openflow_conn = OfConnOnos(of_params) + super().__init__(wim, wim_account, config, logger, self.openflow_conn) diff --git a/RO-SDN-onos_openflow/requirements.txt b/RO-SDN-onos_openflow/requirements.txt new file mode 100644 index 00000000..44c797f2 --- /dev/null +++ b/RO-SDN-onos_openflow/requirements.txt @@ -0,0 +1,18 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +requests +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro + diff --git a/RO-SDN-onos_openflow/setup.py b/RO-SDN-onos_openflow/setup.py new file mode 100644 index 00000000..380adc7d --- /dev/null +++ b/RO-SDN-onos_openflow/setup.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +from setuptools import setup + +_name = "osm_rosdn_onosof" + +README = """ +=========== +osm-rosdn_onosof +=========== + +osm-ro pluging for onosof (ietfl2vpn) SDN +""" + +setup( + name=_name, + description='OSM ro sdn plugin for onosof (ietfl2vpn)', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + author_email='alfonso.tiernosepulveda@telefonica.com', + maintainer='Alfonso Tierno', + maintainer_email='alfonso.tiernosepulveda@telefonica.com', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=["requests", "osm-ro"], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rosdn.plugins': ['rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf'], + }, +) diff --git a/RO-SDN-onos_openflow/stdeb.cfg b/RO-SDN-onos_openflow/stdeb.cfg new file mode 100644 index 00000000..0c718e4f --- /dev/null +++ b/RO-SDN-onos_openflow/stdeb.cfg @@ -0,0 +1,19 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-requests, python3-osm-ro + diff --git a/RO-SDN-onos_openflow/tox.ini b/RO-SDN-onos_openflow/tox.ini new file mode 100644 index 00000000..00b45857 --- /dev/null +++ b/RO-SDN-onos_openflow/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rosdn_onosof --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rosdn_onosof.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-SDN-tapi/Makefile b/RO-SDN-tapi/Makefile new file mode 100644 index 00000000..2e052802 --- /dev/null +++ b/RO-SDN-tapi/Makefile @@ -0,0 +1,24 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rosdn_tapi-*.tar.gz osm_rosdn_tapi.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cd deb_dist/osm-rosdn-tapi*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-SDN-tapi/osm_rosdn_tapi/wimconn_ietfl2vpn.py b/RO-SDN-tapi/osm_rosdn_tapi/wimconn_ietfl2vpn.py new file mode 100644 index 00000000..26680b5a --- /dev/null +++ b/RO-SDN-tapi/osm_rosdn_tapi/wimconn_ietfl2vpn.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +## +# Copyright 2018 Telefonica +# All Rights Reserved. +# +# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This work has been performed in the context of the Metro-Haul project - +# funded by the European Commission under Grant number 761727 through the +# Horizon 2020 program. +## +"""The SDN/WIM connector is responsible for establishing wide area network +connectivity. + +This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data + Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery" + +It receives the endpoints and the necessary details to request +the Layer 2 service. 
+""" +import requests +import uuid +import logging +from osm_ro.wim.sdnconn import SdnConnectorBase, SdnConnectorError +"""CHeck layer where we move it""" + + +class WimconnectorIETFL2VPN(SdnConnectorBase): + + def __init__(self, wim, wim_account, config=None, logger=None): + """IETF L2VPM WIM connector + + Arguments: (To be completed) + wim (dict): WIM record, as stored in the database + wim_account (dict): WIM account record, as stored in the database + """ + self.logger = logging.getLogger('openmano.sdnconn.ietfl2vpn') + super().__init__(wim, wim_account, config, logger) + self.headers = {'Content-Type': 'application/json'} + self.mappings = {m['service_endpoint_id']: m + for m in self.service_endpoint_mapping} + self.user = wim_account.get("user") + self.passwd = wim_account.get("passwordd") + if self.user and self.passwd is not None: + self.auth = (self.user, self.passwd) + else: + self.auth = None + self.logger.info("IETFL2VPN Connector Initialized.") + + def check_credentials(self): + endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"]) + try: + response = requests.get(endpoint, auth=self.auth) + http_code = response.status_code + except requests.exceptions.RequestException as e: + raise SdnConnectorError(e.message, http_code=503) + + if http_code != 200: + raise SdnConnectorError("Failed while authenticating", http_code=http_code) + self.logger.info("Credentials checked") + + def get_connectivity_service_status(self, service_uuid, conn_info=None): + """Monitor the status of the connectivity service stablished + + Arguments: + service_uuid: Connectivity service unique identifier + + Returns: + Examples:: + {'sdn_status': 'ACTIVE'} + {'sdn_status': 'INACTIVE'} + {'sdn_status': 'DOWN'} + {'sdn_status': 'ERROR'} + """ + try: + self.logger.info("Sending get connectivity service stuatus") + servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( + self.wim["wim_url"], service_uuid) + response = requests.get(servicepoint, auth=self.auth) + if response.status_code != requests.codes.ok: + raise SdnConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code) + service_status = {'sdn_status': 'ACTIVE'} + return service_status + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def search_mapp(self, connection_point): + id = connection_point['service_endpoint_id'] + if id not in self.mappings: + raise SdnConnectorError("Endpoint {} not located".format(str(id))) + else: + return self.mappings[id] + + def create_connectivity_service(self, service_type, connection_points, **kwargs): + """Stablish WAN connectivity between the endpoints + + Arguments: + service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), + ``L3``. + connection_points (list): each point corresponds to + an entry point from the DC to the transport network. One + connection point serves to identify the specific access and + some other service parameters, such as encapsulation type. + Represented by a dict as follows:: + + { + "service_endpoint_id": ..., (str[uuid]) + "service_endpoint_encapsulation_type": ..., + (enum: none, dot1q, ...) + "service_endpoint_encapsulation_info": { + ... (dict) + "vlan": ..., (int, present if encapsulation is dot1q) + "vni": ... 
(int, present if encapsulation is vxlan), + "peers": [(ipv4_1), (ipv4_2)] + (present if encapsulation is vxlan) + } + } + + The service endpoint ID should be previously informed to the WIM + engine in the RO when the WIM port mapping is registered. + + Keyword Arguments: + bandwidth (int): value in kilobytes + latency (int): value in milliseconds + + Other QoS might be passed as keyword arguments. + + Returns: + tuple: ``(service_id, conn_info)`` containing: + - *service_uuid* (str): UUID of the established connectivity + service + - *conn_info* (dict or None): Information to be stored at the + database (or ``None``). This information will be provided to + the :meth:`~.edit_connectivity_service` and :obj:`~.delete`. + **MUST** be JSON/YAML-serializable (plain data structures). + + Raises: + SdnConnectorException: In case of error. + """ + if service_type == "ELINE": + if len(connection_points) > 2: + raise SdnConnectorError('Connections between more than 2 endpoints are not supported') + if len(connection_points) < 2: + raise SdnConnectorError('Connections must be of at least 2 endpoints') + """ First step, create the vpn service """ + uuid_l2vpn = str(uuid.uuid4()) + vpn_service = {} + vpn_service["vpn-id"] = uuid_l2vpn + vpn_service["vpn-scv-type"] = "vpws" + vpn_service["svc-topo"] = "any-to-any" + vpn_service["customer-name"] = "osm" + vpn_service_list = [] + vpn_service_list.append(vpn_service) + vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list} + response_service_creation = None + conn_info = [] + self.logger.info("Sending vpn-service :{}".format(vpn_service_l)) + try: + endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"]) + response_service_creation = requests.post(endpoint_service_creation, headers=self.headers, + json=vpn_service_l, auth=self.auth) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request to create service Timeout", http_code=408) + if response_service_creation.status_code == 409: + raise SdnConnectorError("Service already exists", http_code=response_service_creation.status_code) + elif response_service_creation.status_code != requests.codes.created: + raise SdnConnectorError("Request to create service not accepted", + http_code=response_service_creation.status_code) + """ Second step, create the connections and vpn attachments """ + for connection_point in connection_points: + connection_point_wan_info = self.search_mapp(connection_point) + site_network_access = {} + connection = {} + if connection_point["service_endpoint_encapsulation_type"] != "none": + if connection_point["service_endpoint_encapsulation_type"] == "dot1q": + """ The connection is a VLAN """ + connection["encapsulation-type"] = "dot1q-vlan-tagged" + tagged = {} + tagged_interf = {} + service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"] + if service_endpoint_encapsulation_info["vlan"] is None: + raise SdnConnectorError("VLAN must be provided") + tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"] + tagged["dot1q-vlan-tagged"] = tagged_interf + connection["tagged-interface"] = tagged + else: + raise NotImplementedError("Encapsulation type not implemented") + site_network_access["connection"] = connection + self.logger.info("Sending connection:{}".format(connection)) + vpn_attach = {} + vpn_attach["vpn-id"] = uuid_l2vpn + vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role" + site_network_access["vpn-attachment"] = vpn_attach + 
self.logger.info("Sending vpn-attachement :{}".format(vpn_attach)) + uuid_sna = str(uuid.uuid4()) + site_network_access["network-access-id"] = uuid_sna + site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"] + site_network_accesses = {} + site_network_access_list = [] + site_network_access_list.append(site_network_access) + site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list + conn_info_d = {} + conn_info_d["site"] = connection_point_wan_info["service_mapping_info"]["site-id"] + conn_info_d["site-network-access-id"] = site_network_access["network-access-id"] + conn_info_d["mapping"] = None + conn_info.append(conn_info_d) + try: + endpoint_site_network_access_creation = \ + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format( + self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"]) + response_endpoint_site_network_access_creation = requests.post( + endpoint_site_network_access_creation, + headers=self.headers, + json=site_network_accesses, + auth=self.auth) + + if response_endpoint_site_network_access_creation.status_code == 409: + self.delete_connectivity_service(vpn_service["vpn-id"]) + raise SdnConnectorError("Site_Network_Access with ID '{}' already exists".format( + site_network_access["network-access-id"]), + http_code=response_endpoint_site_network_access_creation.status_code) + + elif response_endpoint_site_network_access_creation.status_code == 400: + self.delete_connectivity_service(vpn_service["vpn-id"]) + raise SdnConnectorError("Site {} does not exist".format( + connection_point_wan_info["service_mapping_info"]["site-id"]), + http_code=response_endpoint_site_network_access_creation.status_code) + + elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \ + response_endpoint_site_network_access_creation.status_code != requests.codes.no_content: + self.delete_connectivity_service(vpn_service["vpn-id"]) + raise SdnConnectorError("Request no accepted", + http_code=response_endpoint_site_network_access_creation.status_code) + + except requests.exceptions.ConnectionError: + self.delete_connectivity_service(vpn_service["vpn-id"]) + raise SdnConnectorError("Request Timeout", http_code=408) + return uuid_l2vpn, conn_info + + else: + raise NotImplementedError + + def delete_connectivity_service(self, service_uuid, conn_info=None): + """Disconnect multi-site endpoints previously connected + + This method should receive as the first argument the UUID generated by + the ``create_connectivity_service`` + """ + try: + self.logger.info("Sending delete") + servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( + self.wim["wim_url"], service_uuid) + response = requests.delete(servicepoint, auth=self.auth) + if response.status_code != requests.codes.no_content: + raise SdnConnectorError("Error in the request", http_code=response.status_code) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def edit_connectivity_service(self, service_uuid, conn_info=None, + connection_points=None, **kwargs): + """Change an existing connectivity service, see + ``create_connectivity_service``""" + + # sites = {"sites": {}} + # site_list = [] + vpn_service = {} + vpn_service["svc-topo"] = "any-to-any" + counter = 0 + for connection_point in connection_points: + site_network_access = {} + connection_point_wan_info = 
self.search_mapp(connection_point) + params_site = {} + params_site["site-id"] = connection_point_wan_info["service_mapping_info"]["site-id"] + params_site["site-vpn-flavor"] = "site-vpn-flavor-single" + device_site = {} + device_site["device-id"] = connection_point_wan_info["device-id"] + params_site["devices"] = device_site + # network_access = {} + connection = {} + if connection_point["service_endpoint_encapsulation_type"] != "none": + if connection_point["service_endpoint_encapsulation_type"] == "dot1q": + """ The connection is a VLAN """ + connection["encapsulation-type"] = "dot1q-vlan-tagged" + tagged = {} + tagged_interf = {} + service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"] + if service_endpoint_encapsulation_info["vlan"] is None: + raise SdnConnectorError("VLAN must be provided") + tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"] + tagged["dot1q-vlan-tagged"] = tagged_interf + connection["tagged-interface"] = tagged + else: + raise NotImplementedError("Encapsulation type not implemented") + site_network_access["connection"] = connection + vpn_attach = {} + vpn_attach["vpn-id"] = service_uuid + vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role" + site_network_access["vpn-attachment"] = vpn_attach + uuid_sna = conn_info[counter]["site-network-access-id"] + site_network_access["network-access-id"] = uuid_sna + site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"] + site_network_accesses = {} + site_network_access_list = [] + site_network_access_list.append(site_network_access) + site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list + try: + endpoint_site_network_access_edit = \ + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format( + self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"]) + response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit, + headers=self.headers, + json=site_network_accesses, + auth=self.auth) + if response_endpoint_site_network_access_creation.status_code == 400: + raise SdnConnectorError("Service does not exist", + http_code=response_endpoint_site_network_access_creation.status_code) + elif response_endpoint_site_network_access_creation.status_code != 201 and \ + response_endpoint_site_network_access_creation.status_code != 204: + raise SdnConnectorError("Request no accepted", + http_code=response_endpoint_site_network_access_creation.status_code) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + counter += 1 + return None + + def clear_all_connectivity_services(self): + """Delete all WAN Links corresponding to a WIM""" + try: + self.logger.info("Sending clear all connectivity services") + servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"]) + response = requests.delete(servicepoint, auth=self.auth) + if response.status_code != requests.codes.no_content: + raise SdnConnectorError("Unable to clear all connectivity services", http_code=response.status_code) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def get_all_active_connectivity_services(self): + """Provide information about all active connections provisioned by a + WIM + """ + try: + self.logger.info("Sending get all connectivity services") + servicepoint = 
"{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"]) + response = requests.get(servicepoint, auth=self.auth) + if response.status_code != requests.codes.ok: + raise SdnConnectorError("Unable to get all connectivity services", http_code=response.status_code) + return response + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) diff --git a/RO-SDN-tapi/requirements.txt b/RO-SDN-tapi/requirements.txt new file mode 100644 index 00000000..44c797f2 --- /dev/null +++ b/RO-SDN-tapi/requirements.txt @@ -0,0 +1,18 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +requests +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro + diff --git a/RO-SDN-tapi/setup.py b/RO-SDN-tapi/setup.py new file mode 100644 index 00000000..931dd66c --- /dev/null +++ b/RO-SDN-tapi/setup.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +from setuptools import setup + +_name = "osm_rosdn_tapi" + +README = """ +=========== +osm-rosdn_tapi +=========== + +osm-ro pluging for tapi (ietfl2vpn) SDN +""" + +setup( + name=_name, + description='OSM ro sdn plugin for tapi (ietfl2vpn)', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + # TODO py3 author_email='', + maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3 + # TODO py3 maintainer_email='', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=["requests", "osm-ro"], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rosdn.plugins': ['rosdn_tapi = osm_rosdn_tapi.wimconn_ietfl2vpn:WimconnectorIETFL2VPN'], + }, +) diff --git a/RO-SDN-tapi/stdeb.cfg b/RO-SDN-tapi/stdeb.cfg new file mode 100644 index 00000000..0c718e4f --- /dev/null +++ b/RO-SDN-tapi/stdeb.cfg @@ -0,0 +1,19 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-requests, python3-osm-ro + diff --git a/RO-SDN-tapi/tox.ini b/RO-SDN-tapi/tox.ini new file mode 100644 index 00000000..7d643cd2 --- /dev/null +++ b/RO-SDN-tapi/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rosdn_tapi --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rosdn_tapi.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-VIM-aws/Makefile b/RO-VIM-aws/Makefile new file mode 100644 index 00000000..edf3eb7a --- /dev/null +++ b/RO-VIM-aws/Makefile @@ -0,0 +1,23 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rovim_aws-*.tar.gz osm_rovim_aws.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cd deb_dist/osm-rovim-aws*/ && dpkg-buildpackage -rfakeroot -uc -us diff --git a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py new file mode 100644 index 00000000..28dc4e99 --- /dev/null +++ b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py @@ -0,0 +1,803 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2017 xFlow Research Pvt. Ltd +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: saboor.ahmad@xflowresearch.com +## + +''' +AWS-connector implements all the methods to interact with AWS using the BOTO client +''' + +__author__ = "Saboor Ahmad" +__date__ = "10-Apr-2017" + +from osm_ro import vimconn +import yaml +import logging +import netaddr +import time + +import boto +import boto.ec2 +import boto.vpc + + +class vimconnector(vimconn.vimconnector): + def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, + config={}, persistent_info={}): + """ Params: uuid - id asigned to this VIM + name - name assigned to this VIM, can be used for logging + tenant_id - ID to be used for tenant + tenant_name - name of tenant to be used VIM tenant to be used + url_admin - optional, url used for administrative tasks + user - credentials of the VIM user + passwd - credentials of the VIM user + log_level - if must use a different log_level than the general one + config - dictionary with misc VIM information + region_name - name of region to deploy the instances + vpc_cidr_block - default CIDR block for VPC + security_groups - default security group to specify this instance + persistent_info - dict where the class can store information that will be available among class + destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an + empty dict. 
Useful to store login/tokens information for speed up communication + """ + + vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, + config, persistent_info) + + self.persistent_info = persistent_info + self.a_creds = {} + if user: + self.a_creds['aws_access_key_id'] = user + else: + raise vimconn.vimconnAuthException("Username is not specified") + if passwd: + self.a_creds['aws_secret_access_key'] = passwd + else: + raise vimconn.vimconnAuthException("Password is not specified") + if 'region_name' in config: + self.region = config.get('region_name') + else: + raise vimconn.vimconnException("AWS region_name is not specified at config") + + self.vpc_data = {} + self.subnet_data = {} + self.conn = None + self.conn_vpc = None + self.account_id = None + + self.vpc_id = self.get_tenant_list()[0]['id'] + # we take VPC CIDR block if specified, otherwise we use the default CIDR + # block suggested by AWS while creating instance + self.vpc_cidr_block = '10.0.0.0/24' + + if tenant_id: + self.vpc_id = tenant_id + if 'vpc_cidr_block' in config: + self.vpc_cidr_block = config['vpc_cidr_block'] + + self.security_groups = None + if 'security_groups' in config: + self.security_groups = config['security_groups'] + + self.key_pair = None + if 'key_pair' in config: + self.key_pair = config['key_pair'] + + self.flavor_info = None + if 'flavor_info' in config: + flavor_data = config.get('flavor_info') + if isinstance(flavor_data, str): + try: + if flavor_data[0] == "@": # read from a file + with open(flavor_data[1:], 'r') as stream: + self.flavor_info = yaml.load(stream, Loader=yaml.Loader) + else: + self.flavor_info = yaml.load(flavor_data, Loader=yaml.Loader) + except yaml.YAMLError as e: + self.flavor_info = None + raise vimconn.vimconnException("Bad format at file '{}': {}".format(flavor_data[1:], e)) + except IOError as e: + raise vimconn.vimconnException("Error reading file '{}': {}".format(flavor_data[1:], e)) + elif isinstance(flavor_data, dict): + self.flavor_info = flavor_data + + self.logger = logging.getLogger('openmano.vim.aws') + if log_level: + self.logger.setLevel(getattr(logging, log_level)) + + def __setitem__(self, index, value): + """Params: index - name of value of set + value - value to set + """ + if index == 'user': + self.a_creds['aws_access_key_id'] = value + elif index == 'passwd': + self.a_creds['aws_secret_access_key'] = value + elif index == 'region': + self.region = value + else: + vimconn.vimconnector.__setitem__(self, index, value) + + def _reload_connection(self): + """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services + """ + + try: + self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'], + aws_secret_access_key=self.a_creds['aws_secret_access_key']) + self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'], + aws_secret_access_key=self.a_creds['aws_secret_access_key']) + # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key']) + # self.account_id = client.get_caller_identity()["Account"] + except Exception as e: + self.format_vimconn_exception(e) + + def format_vimconn_exception(self, e): + """Params: an Exception object + Returns: Raises the exception 'e' passed in mehtod parameters + """ + + self.conn = None + self.conn_vpc = None + raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)) 
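+
+    # Note (illustrative): the 'flavor_info' config parsed in __init__ is expected to be
+    # a dict keyed by AWS instance type, each entry carrying "cpus", "ram" (MB) and
+    # "disk" (GB), for example:
+    #     {"t2.micro": {"cpus": 1, "ram": 1024, "disk": 8}}
+    # The instance-type name above is only an example; get_flavor() and
+    # get_flavor_id_from_data() below look up entries of this shape.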
+ + def get_availability_zones_list(self): + """Obtain AvailabilityZones from AWS + """ + + try: + self._reload_connection() + az_list = [] + for az in self.conn.get_all_zones(): + az_list.append(az.name) + return az_list + except Exception as e: + self.format_vimconn_exception(e) + + def get_tenant_list(self, filter_dict={}): + """Obtain tenants of VIM + filter_dict dictionary that can contain the following keys: + name: filter by tenant name + id: filter by tenant uuid/id + + Returns the tenant list of dictionaries, and empty list if no tenant match all the filers: + [{'name':', 'id':', ...}, ...] + """ + + try: + self._reload_connection() + vpc_ids = [] + tfilters = {} + if filter_dict != {}: + if 'id' in filter_dict: + vpc_ids.append(filter_dict['id']) + tfilters['name'] = filter_dict['id'] + tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters) + tenant_list = [] + for tenant in tenants: + tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state), + 'cidr_block': str(tenant.cidr_block)}) + return tenant_list + except Exception as e: + self.format_vimconn_exception(e) + + def new_tenant(self, tenant_name, tenant_description): + """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided + "tenant_name": string max lenght 64 + "tenant_description": string max length 256 + returns the tenant identifier or raise exception + """ + + self.logger.debug("Adding a new VPC") + try: + self._reload_connection() + vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block) + self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_support=True) + self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True) + + gateway = self.conn_vpc.create_internet_gateway() + self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id) + route_table = self.conn_vpc.create_route_table(vpc.id) + self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id) + + self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id, + 'subnets': self.subnet_sizes(len(self.get_availability_zones_list()), + self.vpc_cidr_block)} + return vpc.id + except Exception as e: + self.format_vimconn_exception(e) + + def delete_tenant(self, tenant_id): + """Delete a tenant from VIM + tenant_id: returned VIM tenant_id on "new_tenant" + Returns None on success. Raises and exception of failure. 
If tenant is not found raises vimconnNotFoundException + """ + + self.logger.debug("Deleting specified VPC") + try: + self._reload_connection() + vpc = self.vpc_data.get(tenant_id) + if 'gateway' in vpc and 'route_table' in vpc: + gateway_id, route_table_id = vpc['gateway'], vpc['route_table'] + self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id) + self.conn_vpc.delete_vpc(tenant_id) + self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0') + else: + self.conn_vpc.delete_vpc(tenant_id) + except Exception as e: + self.format_vimconn_exception(e) + + def subnet_sizes(self, availability_zones, cidr): + """Calcualtes possible subnets given CIDR value of VPC + """ + + if availability_zones != 2 and availability_zones != 3: + self.logger.debug("Number of AZs should be 2 or 3") + raise vimconn.vimconnNotSupportedException("Number of AZs should be 2 or 3") + + netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128') + ip = netaddr.IPNetwork(cidr) + mask = ip.netmask + + if str(mask) not in netmasks: + self.logger.debug("Netmask " + str(mask) + " not found") + raise vimconn.vimconnNotFoundException("Netmask " + str(mask) + " not found") + + if availability_zones == 2: + for n, netmask in enumerate(netmasks): + if str(mask) == netmask: + subnets = list(ip.subnet(n + 24)) + else: + for n, netmask in enumerate(netmasks): + if str(mask) == netmask: + pub_net = list(ip.subnet(n + 24)) + pri_subs = pub_net[1:] + pub_mask = pub_net[0].netmask + pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27)) + pub_subs = pub_split[:3] + subnets = pub_subs + pri_subs + + return map(str, subnets) + + def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + """Adds a tenant network to VIM + Params: + 'net_name': name of the network + 'net_type': one of: + 'bridge': overlay isolated network + 'data': underlay E-LAN network for Passthrough and SRIOV interfaces + 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces. + 'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented) + 'ip-version': can be one of ["IPv4","IPv6"] + 'subnet-address': ip_prefix_schema, that is X.X.X.X/Y + 'gateway-address': (Optional) ip_schema, that is X.X.X.X + 'dns-address': (Optional) ip_schema, + 'dhcp': (Optional) dict containing + 'enabled': {"type": "boolean"}, + 'start-address': ip_schema, first IP to grant + 'count': number of IPs to grant. + 'shared': if this network can be seen/use by other tenants/organization + Returns a tuple with the network identifier and created_items, or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_network. Can be used to store created segments, created l2gw connections, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. 
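+
+            Note: this connector implements a network as a VPC subnet; the CIDR block is
+            taken from the list precomputed by subnet_sizes(), choosing one not yet used
+            by an existing subnet. Illustrative call on a vimconnector instance
+            (example values only)::
+
+                net_id, created_items = vim.new_network("mgmt-net", "bridge")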
+ """ + + self.logger.debug("Adding a subnet to VPC") + try: + created_items = {} + self._reload_connection() + subnet = None + vpc_id = self.vpc_id + if self.vpc_data.get(vpc_id, None): + cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0] + else: + vpc = self.get_tenant_list({'id': vpc_id})[0] + subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block']) + cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0] + subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block) + return subnet.id, created_items + except Exception as e: + self.format_vimconn_exception(e) + + def get_network_details(self, filters, detail): + """Get specified details related to a subnet + """ + detail_list = [] + subnet_list = self.get_network_list(filters) + for net in subnet_list: + detail_list.append(net[detail]) + return detail_list + + def get_network_list(self, filter_dict={}): + """Obtain tenant networks of VIM + Params: + 'filter_dict' (optional) contains entries to return only networks that matches ALL entries: + name: string => returns only networks with this name + id: string => returns networks with this VIM id, this imply returns one network at most + shared: boolean >= returns only networks that are (or are not) shared + tenant_id: sting => returns only networks that belong to this tenant/project + ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status + Returns the network list of dictionaries. each dictionary contains: + 'id': (mandatory) VIM network id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + List can be empty if no network map the filter_dict. 
Raise an exception only upon VIM connectivity, + authorization, or some other unspecific error + """ + + self.logger.debug("Getting all subnets from VIM") + try: + self._reload_connection() + tfilters = {} + if filter_dict != {}: + if 'tenant_id' in filter_dict: + tfilters['vpcId'] = filter_dict['tenant_id'] + subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters) + net_list = [] + for net in subnets: + net_list.append( + {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id), + 'cidr_block': str(net.cidr_block), 'type': 'bridge'}) + return net_list + except Exception as e: + self.format_vimconn_exception(e) + + def get_network(self, net_id): + """Obtain network details from the 'net_id' VIM network + Return a dict that contains: + 'id': (mandatory) VIM network id, that is, net_id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + Raises an exception upon error or when network is not found + """ + + self.logger.debug("Getting Subnet from VIM") + try: + self._reload_connection() + subnet = self.conn_vpc.get_all_subnets(net_id)[0] + return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state), + 'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)} + except Exception as e: + self.format_vimconn_exception(e) + + def delete_network(self, net_id, created_items=None): + """ + Removes a tenant network from VIM and its associated elements + :param net_id: VIM identifier of the network, provided by method new_network + :param created_items: dictionary with extra items to be deleted. provided by method new_network + Returns the network identifier or raises an exception upon error or when network is not found + """ + + self.logger.debug("Deleting subnet from VIM") + try: + self._reload_connection() + self.logger.debug("DELETING NET_ID: " + str(net_id)) + self.conn_vpc.delete_subnet(net_id) + return net_id + except Exception as e: + self.format_vimconn_exception(e) + + def refresh_nets_status(self, net_list): + """Get the status of the networks + Params: + 'net_list': a list with the VIM network id to be get the status + Returns a dictionary with: + 'net_id': #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + 'net_id2': ... 
+ """ + + self._reload_connection() + try: + dict_entry = {} + for net_id in net_list: + subnet_dict = {} + subnet = None + try: + subnet = self.conn_vpc.get_all_subnets(net_id)[0] + if subnet.state == "pending": + subnet_dict['status'] = "BUILD" + elif subnet.state == "available": + subnet_dict['status'] = 'ACTIVE' + else: + subnet_dict['status'] = 'ERROR' + subnet_dict['error_msg'] = '' + except Exception as e: + subnet_dict['status'] = 'DELETED' + subnet_dict['error_msg'] = 'Network not found' + finally: + try: + subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256) + except yaml.YAMLError as e: + subnet_dict['vim_info'] = str(subnet) + dict_entry[net_id] = subnet_dict + return dict_entry + except Exception as e: + self.format_vimconn_exception(e) + + def get_flavor(self, flavor_id): + """Obtain flavor details from the VIM + Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } + Raises an exception upon error or if not found + """ + + self.logger.debug("Getting instance type") + try: + if flavor_id in self.flavor_info: + return self.flavor_info[flavor_id] + else: + raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name") + except Exception as e: + self.format_vimconn_exception(e) + + def get_flavor_id_from_data(self, flavor_dict): + """Obtain flavor id that match the flavor description + Params: + 'flavor_dict': dictionary that contains: + 'disk': main hard disk in GB + 'ram': memory in MB + 'vcpus': number of virtual cpus + #todo: complete parameters for EPA + Returns the flavor_id or raises a vimconnNotFoundException + """ + + self.logger.debug("Getting flavor id from data") + try: + flavor = None + for key, values in self.flavor_info.items(): + if (values["ram"], values["cpus"], values["disk"]) == ( + flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + flavor = (key, values) + break + elif (values["ram"], values["cpus"], values["disk"]) >= ( + flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + if not flavor: + flavor = (key, values) + else: + if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= ( + values["ram"], values["cpus"], values["disk"]): + flavor = (key, values) + if flavor: + return flavor[0] + raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name") + except Exception as e: + self.format_vimconn_exception(e) + + def new_image(self, image_dict): + """ Adds a tenant image to VIM + Params: image_dict + name (string) - The name of the AMI. Valid only for EBS-based images. + description (string) - The description of the AMI. + image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI’s. + architecture (string) - The architecture of the AMI. Valid choices are: * i386 * x86_64 + kernel_id (string) - The ID of the kernel with which to launch the instances + root_device_name (string) - The root device name (e.g. /dev/sdh) + block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. + virtualization_type (string) - The virutalization_type of the image. Valid choices are: * paravirtual * hvm + sriov_net_support (string) - Advanced networking support. Valid choices are: * simple + snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. 
Mutually exclusive with block_device_map, requires root_device_name + delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving volumes behind after instance termination is not free + Returns: image_id - image ID of the newly created image + """ + + try: + self._reload_connection() + image_location = image_dict.get('image_location', None) + if image_location: + image_location = str(self.account_id) + str(image_location) + + image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None), + image_location, image_dict.get('architecture', None), + image_dict.get('kernel_id', None), + image_dict.get('root_device_name', None), + image_dict.get('block_device_map', None), + image_dict.get('virtualization_type', None), + image_dict.get('sriov_net_support', None), + image_dict.get('snapshot_id', None), + image_dict.get('delete_root_volume_on_termination', None)) + return image_id + except Exception as e: + self.format_vimconn_exception(e) + + def delete_image(self, image_id): + """Deletes a tenant image from VIM + Returns the image_id if image is deleted or raises an exception on error""" + + try: + self._reload_connection() + self.conn.deregister_image(image_id) + return image_id + except Exception as e: + self.format_vimconn_exception(e) + + def get_image_id_from_path(self, path): + ''' + Params: path - location of the image + Returns: image_id - ID of the matching image + ''' + self._reload_connection() + try: + filters = {} + if path: + tokens = path.split('/') + filters['owner_id'] = tokens[0] + filters['name'] = '/'.join(tokens[1:]) + image = self.conn.get_all_images(filters=filters)[0] + return image.id + except Exception as e: + self.format_vimconn_exception(e) + + def get_image_list(self, filter_dict={}): + """Obtain tenant images from VIM + Filter_dict can be: + name: image name + id: image uuid + checksum: image checksum + location: image path + Returns the image list of dictionaries: + [{}, ...] + List can be empty + """ + + self.logger.debug("Getting image list from VIM") + try: + self._reload_connection() + image_id = None + filters = {} + if 'id' in filter_dict: + image_id = filter_dict['id'] + if 'name' in filter_dict: + filters['name'] = filter_dict['name'] + if 'location' in filter_dict: + filters['location'] = filter_dict['location'] + # filters['image_type'] = 'machine' + # filter_dict['owner_id'] = self.account_id + images = self.conn.get_all_images(image_id, filters=filters) + image_list = [] + for image in images: + image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state), + 'owner': str(image.owner_id), 'location': str(image.location), + 'is_public': str(image.is_public), 'architecture': str(image.architecture), + 'platform': str(image.platform)}) + return image_list + except Exception as e: + self.format_vimconn_exception(e) + + def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, + disk_list=None, availability_zone_index=None, availability_zone_list=None): + """Create a new VM/instance in AWS + Params: name + decription + start: (boolean) indicates if VM must start or created in pause mode. + image_id - image ID in AWS + flavor_id - instance type ID in AWS + net_list + name + net_id - subnet_id from AWS + vpci - (optional) virtual vPCI address to assign at the VM. 
Can be ignored depending on VIM capabilities
+                model: (optional and only makes sense for type==virtual) interface model: virtio, e1000, ...
+                mac_address: (optional) mac address to assign to this interface
+                type: (mandatory) can be one of:
+                    virtual, in this case always connected to a network of type 'net_type=bridge'
+                    'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                        can be created unconnected
+                    'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                    VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing or True, it must apply the default VIM behaviour
+                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this interface. 'net_list' is modified
+                elastic_ip - True/False to define if an elastic_ip is required
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string with a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    owner: (optional) file owner, string with the format 'owner:group'
+                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
+                security-groups:
+                    subnet_id
+                    security_group_id
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            Returns a tuple with the instance identifier and created_items or raises an exception on error
+                created_items can be None or a dictionary where this method can include key-values that will be passed to
+                the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+                Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+                as not present.
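+
+            Illustrative net_list entry (example/placeholder values only)::
+
+                [{"name": "mgmt", "net_id": "<subnet-id>", "type": "virtual",
+                  "elastic_ip": True}]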
+ """ + + self.logger.debug("Creating a new VM instance") + try: + self._reload_connection() + instance = None + _, userdata = self._create_user_data(cloud_config) + + if not net_list: + reservation = self.conn.run_instances( + image_id, + key_name=self.key_pair, + instance_type=flavor_id, + security_groups=self.security_groups, + user_data=userdata + ) + else: + for index, subnet in enumerate(net_list): + net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'), + groups=None, + associate_public_ip_address=True) + + if subnet.get('elastic_ip'): + eip = self.conn.allocate_address() + self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id) + + if index == 0: + reservation = self.conn.run_instances( + image_id, + key_name=self.key_pair, + instance_type=flavor_id, + security_groups=self.security_groups, + network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr), + user_data=userdata + ) + else: + while True: + try: + self.conn.attach_network_interface( + network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr), + instance_id=instance.id, device_index=0) + break + except: + time.sleep(10) + net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id + + instance = reservation.instances[0] + return instance.id, None + except Exception as e: + self.format_vimconn_exception(e) + + def get_vminstance(self, vm_id): + """Returns the VM instance information from VIM""" + + try: + self._reload_connection() + reservation = self.conn.get_all_instances(vm_id) + return reservation[0].instances[0].__dict__ + except Exception as e: + self.format_vimconn_exception(e) + + def delete_vminstance(self, vm_id, created_items=None): + """Removes a VM instance from VIM + Returns the instance identifier""" + + try: + self._reload_connection() + self.logger.debug("DELETING VM_ID: " + str(vm_id)) + self.conn.terminate_instances(vm_id) + return vm_id + except Exception as e: + self.format_vimconn_exception(e) + + def refresh_vms_status(self, vm_list): + """ Get the status of the virtual machines and their interfaces/ports + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: #VIM id of this Virtual Machine + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address + # + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. Each item a dictionary with: + vim_interface_id - The ID of the ENI. + vim_net_id - The ID of the VPC subnet. + mac_address - The MAC address of the interface. + ip_address - The IP address of the interface within the subnet. 
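+
+            Illustrative return value (placeholder identifiers; the 'vim_info' field is
+            omitted here for brevity)::
+
+                {"<instance-id>": {"status": "ACTIVE", "error_msg": "",
+                                   "interfaces": [{"vim_interface_id": "<eni-id>",
+                                                   "vim_net_id": "<subnet-id>",
+                                                   "mac_address": "0a:1b:2c:3d:4e:5f",
+                                                   "ip_address": "10.0.0.4"}]}}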
+ """ + self.logger.debug("Getting VM instance information from VIM") + try: + self._reload_connection() + reservation = self.conn.get_all_instances(vm_list)[0] + instances = {} + instance_dict = {} + for instance in reservation.instances: + try: + if instance.state in ("pending"): + instance_dict['status'] = "BUILD" + elif instance.state in ("available", "running", "up"): + instance_dict['status'] = 'ACTIVE' + else: + instance_dict['status'] = 'ERROR' + instance_dict['error_msg'] = "" + instance_dict['interfaces'] = [] + interface_dict = {} + for interface in instance.interfaces: + interface_dict['vim_interface_id'] = interface.id + interface_dict['vim_net_id'] = interface.subnet_id + interface_dict['mac_address'] = interface.mac_address + if hasattr(interface, 'publicIp') and interface.publicIp != None: + interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address + else: + interface_dict['ip_address'] = interface.private_ip_address + instance_dict['interfaces'].append(interface_dict) + except Exception as e: + self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) + instance_dict['status'] = "DELETED" + instance_dict['error_msg'] = str(e) + finally: + try: + instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256) + except yaml.YAMLError as e: + # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) + instance_dict['vim_info'] = str(instance) + instances[instance.id] = instance_dict + return instances + except Exception as e: + self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) + self.format_vimconn_exception(e) + + def action_vminstance(self, vm_id, action_dict, created_items={}): + """Send and action over a VM instance from VIM + Returns the vm_id if the action was successfully sent to the VIM""" + + self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) + try: + self._reload_connection() + if "start" in action_dict: + self.conn.start_instances(vm_id) + elif "stop" in action_dict or "stop" in action_dict: + self.conn.stop_instances(vm_id) + elif "terminate" in action_dict: + self.conn.terminate_instances(vm_id) + elif "reboot" in action_dict: + self.conn.reboot_instances(vm_id) + return None + except Exception as e: + self.format_vimconn_exception(e) diff --git a/RO-VIM-aws/requirements.txt b/RO-VIM-aws/requirements.txt new file mode 100644 index 00000000..3cbc8514 --- /dev/null +++ b/RO-VIM-aws/requirements.txt @@ -0,0 +1,20 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +PyYAML +requests +netaddr +boto +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO diff --git a/RO-VIM-aws/setup.py b/RO-VIM-aws/setup.py new file mode 100644 index 00000000..30b90bd3 --- /dev/null +++ b/RO-VIM-aws/setup.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +from setuptools import setup + +_name = "osm_rovim_aws" + +README = """ +=========== +osm-rovim_aws +=========== + +osm-ro pluging for aws VIM +""" + +setup( + name=_name, + description='OSM ro vim plugin for aws', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + # TODO py3 author_email='', + maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3 + # TODO py3 maintainer_email='', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=[ + "requests", "netaddr", "PyYAML", "osm-ro", "boto" + ], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rovim.plugins': ['rovim_aws = osm_rovim_aws.vimconn_aws'], + }, +) diff --git a/RO-VIM-aws/stdeb.cfg b/RO-VIM-aws/stdeb.cfg new file mode 100644 index 00000000..2193709e --- /dev/null +++ b/RO-VIM-aws/stdeb.cfg @@ -0,0 +1,18 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-boto, python3-requests, python3-netaddr, python3-yaml, python3-osm-ro diff --git a/RO-VIM-aws/tox.ini b/RO-VIM-aws/tox.ini new file mode 100644 index 00000000..067b0d43 --- /dev/null +++ b/RO-VIM-aws/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rovim_aws --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rovim_aws.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-VIM-azure/Makefile b/RO-VIM-azure/Makefile new file mode 100644 index 00000000..d5b779a3 --- /dev/null +++ b/RO-VIM-azure/Makefile @@ -0,0 +1,25 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rovim_azure-*.tar.gz osm_rovim_azure.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cp debian/python3-osm-rovim-azure.postinst deb_dist/osm-rovim-azure*/debian/ + cd deb_dist/osm-rovim-azure*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-VIM-azure/debian/python3-osm-rovim-azure.postinst b/RO-VIM-azure/debian/python3-osm-rovim-azure.postinst new file mode 100755 index 00000000..ebb69b1e --- /dev/null +++ b/RO-VIM-azure/debian/python3-osm-rovim-azure.postinst @@ -0,0 +1,24 @@ +#!/bin/bash + +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: OSM_TECH@list.etsi.org +## + +echo "POST INSTALL OSM-ROVIM-AZURE" + +#Pip packages required for azure connector +python3 -m pip install azure + diff --git a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py new file mode 100755 index 00000000..7f2b2ea3 --- /dev/null +++ b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py @@ -0,0 +1,1304 @@ +# -*- coding: utf-8 -*- +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import base64 +import vimconn +import logging +import netaddr +import re + +from os import getenv +from azure.common.credentials import ServicePrincipalCredentials +from azure.mgmt.resource import ResourceManagementClient +from azure.mgmt.network import NetworkManagementClient +from azure.mgmt.compute import ComputeManagementClient +from azure.mgmt.compute.models import DiskCreateOption +from msrestazure.azure_exceptions import CloudError +from msrest.exceptions import AuthenticationError +from requests.exceptions import ConnectionError + +__author__ = 'Isabel Lloret, Sergio Gonzalez, Alfonso Tierno' +__date__ = '$18-apr-2019 23:59:59$' + + +if getenv('OSMRO_PDB_DEBUG'): + import sys + print(sys.path) + import pdb + pdb.set_trace() + + +class vimconnector(vimconn.vimconnector): + + # Translate azure provisioning state to OSM provision state + # The first three ones are the transitional status once a user initiated action has been requested + # Once the operation is complete, it will transition into the states Succeeded or Failed + # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/states-lifecycle + provision_state2osm = { + "Creating": "BUILD", + "Updating": "BUILD", + "Deleting": "INACTIVE", + "Succeeded": "ACTIVE", + "Failed": "ERROR" + } + + # Translate azure power state to OSM provision state + power_state2osm = { + "starting": "INACTIVE", + "running": "ACTIVE", + "stopping": "INACTIVE", + "stopped": "INACTIVE", + "unknown": "OTHER", + "deallocated": "BUILD", + "deallocating": "BUILD" + } + + def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, + config={}, persistent_info={}): + """ + Constructor of VIM. Raise an exception is some needed parameter is missing, but it must not do any connectivity + checking against the VIM + Using common constructor parameters. 
+ In this case: config must include the following parameters: + subscription_id: assigned azure subscription identifier + region_name: current region for azure network + resource_group: used for all azure created resources + vnet_name: base vnet for azure, created networks will be subnets from this base network + config may also include the following parameter: + flavors_pattern: pattern that will be used to select a range of vm sizes, for example + "^((?!Standard_B).)*$" will filter out Standard_B range that is cheap but is very overused + "^Standard_B" will select a serie B maybe for test environment + """ + + vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, + config, persistent_info) + + # Variable that indicates if client must be reloaded or initialized + self.reload_client = True + + self.vnet_address_space = None + # LOGGER + self.logger = logging.getLogger('openmano.vim.azure') + if log_level: + logging.basicConfig() + self.logger.setLevel(getattr(logging, log_level)) + + self.tenant = (tenant_id or tenant_name) + + # Store config to create azure subscription later + self._config = { + "user": user, + "passwd": passwd, + "tenant": tenant_id or tenant_name + } + + # SUBSCRIPTION + if 'subscription_id' in config: + self._config["subscription_id"] = config.get('subscription_id') + # self.logger.debug('Setting subscription to: %s', self.config["subscription_id"]) + else: + raise vimconn.vimconnException('Subscription not specified') + + # REGION + if 'region_name' in config: + self.region = config.get('region_name') + else: + raise vimconn.vimconnException('Azure region_name is not specified at config') + + # RESOURCE_GROUP + if 'resource_group' in config: + self.resource_group = config.get('resource_group') + else: + raise vimconn.vimconnException('Azure resource_group is not specified at config') + + # VNET_NAME + if 'vnet_name' in config: + self.vnet_name = config["vnet_name"] + + # public ssh key + self.pub_key = config.get('pub_key') + + # flavor pattern regex + if 'flavors_pattern' in config: + self._config['flavors_pattern'] = config['flavors_pattern'] + + def _reload_connection(self): + """ + Called before any operation, checks python azure clients + """ + if self.reload_client: + self.logger.debug('reloading azure client') + try: + self.credentials = ServicePrincipalCredentials( + client_id=self._config["user"], + secret=self._config["passwd"], + tenant=self._config["tenant"] + ) + self.conn = ResourceManagementClient(self.credentials, self._config["subscription_id"]) + self.conn_compute = ComputeManagementClient(self.credentials, self._config["subscription_id"]) + self.conn_vnet = NetworkManagementClient(self.credentials, self._config["subscription_id"]) + self._check_or_create_resource_group() + self._check_or_create_vnet() + + # Set to client created + self.reload_client = False + except Exception as e: + self._format_vimconn_exception(e) + + def _get_resource_name_from_resource_id(self, resource_id): + """ + Obtains resource_name from the azure complete identifier: resource_name will always be last item + """ + try: + resource = str(resource_id.split('/')[-1]) + return resource + except Exception as e: + raise vimconn.vimconnException("Unable to get resource name from resource_id '{}' Error: '{}'". 
+ format(resource_id, e)) + + def _get_location_from_resource_group(self, resource_group_name): + try: + location = self.conn.resource_groups.get(resource_group_name).location + return location + except Exception as e: + raise vimconn.vimconnNotFoundException("Location '{}' not found".format(resource_group_name)) + + def _get_resource_group_name_from_resource_id(self, resource_id): + + try: + rg = str(resource_id.split('/')[4]) + return rg + except Exception as e: + raise vimconn.vimconnException("Unable to get resource group from invalid resource_id format '{}'". + format(resource_id)) + + def _get_net_name_from_resource_id(self, resource_id): + + try: + net_name = str(resource_id.split('/')[8]) + return net_name + except Exception as e: + raise vimconn.vimconnException("Unable to get azure net_name from invalid resource_id format '{}'". + format(resource_id)) + + def _check_subnets_for_vm(self, net_list): + # All subnets must belong to the same resource group and vnet + rg_vnet = set(self._get_resource_group_name_from_resource_id(net['net_id']) + + self._get_net_name_from_resource_id(net['net_id']) for net in net_list) + + if len(rg_vnet) != 1: + raise self._format_vimconn_exception('Azure VMs can only attach to subnets in same VNET') + + def _format_vimconn_exception(self, e): + """ + Transforms a generic or azure exception to a vimcommException + """ + if isinstance(e, vimconn.vimconnException): + raise + elif isinstance(e, AuthenticationError): + raise vimconn.vimconnAuthException(type(e).__name__ + ': ' + str(e)) + elif isinstance(e, ConnectionError): + raise vimconn.vimconnConnectionException(type(e).__name__ + ': ' + str(e)) + else: + # In case of generic error recreate client + self.reload_client = True + raise vimconn.vimconnException(type(e).__name__ + ': ' + str(e)) + + def _check_or_create_resource_group(self): + """ + Creates the base resource group if it does not exist + """ + try: + rg_exists = self.conn.resource_groups.check_existence(self.resource_group) + if not rg_exists: + self.logger.debug("create base rgroup: %s", self.resource_group) + self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region}) + except Exception as e: + self._format_vimconn_exception(e) + + def _check_or_create_vnet(self): + """ + Try to get existent base vnet, in case it does not exist it creates it + """ + try: + vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + self.vnet_address_space = vnet.address_space.address_prefixes[0] + self.vnet_id = vnet.id + return + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + pass + # continue and create it + else: + self._format_vimconn_exception(e) + + # if it does not exist, create it + try: + vnet_params = { + 'location': self.region, + 'address_space': { + 'address_prefixes': ["10.0.0.0/8"] + }, + } + self.vnet_address_space = "10.0.0.0/8" + + self.logger.debug("create base vnet: %s", self.vnet_name) + self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params) + vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + self.vnet_id = vnet.id + except Exception as e: + self._format_vimconn_exception(e) + + def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + """ + Adds a tenant network to VIM + :param net_name: name of the network + :param net_type: not used for azure networks + :param ip_profile: is a dict containing the IP parameters of the 
network (Currently only IPv4 is implemented) + 'ip-version': can be one of ['IPv4','IPv6'] + 'subnet-address': ip_prefix_schema, that is X.X.X.X/Y + 'gateway-address': (Optional) ip_schema, that is X.X.X.X, not implemented for azure connector + 'dns-address': (Optional) ip_schema, not implemented for azure connector + 'dhcp': (Optional) dict containing, not implemented for azure connector + 'enabled': {'type': 'boolean'}, + 'start-address': ip_schema, first IP to grant + 'count': number of IPs to grant. + :param shared: Not allowed for Azure Connector + :param provider_network_profile: (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk} + :return: a tuple with the network identifier and created_items, or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_network. Can be used to store created segments, created l2gw connections, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. + """ + return self._new_subnet(net_name, ip_profile) + + def _new_subnet(self, net_name, ip_profile): + """ + Adds a tenant network to VIM. It creates a new subnet at existing base vnet + :param net_name: subnet name + :param ip_profile: + subnet-address: if it is not provided a subnet/24 in the default vnet is created, + otherwise it creates a subnet in the indicated address + :return: a tuple with the network identifier and created_items, or raises an exception on error + """ + self.logger.debug('create subnet name %s, ip_profile %s', net_name, ip_profile) + self._reload_connection() + + if ip_profile is None: + # get a non used vnet ip range /24 and allocate automatically inside the range self.vnet_address_space + used_subnets = self.get_network_list() + for ip_range in netaddr.IPNetwork(self.vnet_address_space).subnet(24): + for used_subnet in used_subnets: + subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"]) + if subnet_range in ip_range or ip_range in subnet_range: + # this range overlaps with an existing subnet ip range. Breaks and look for another + break + else: + ip_profile = {"subnet_address": str(ip_range)} + self.logger.debug('dinamically obtained ip_profile: %s', ip_range) + break + else: + raise vimconn.vimconnException("Cannot find a non-used subnet range in {}". 
+ format(self.vnet_address_space)) + else: + ip_profile = {"subnet_address": ip_profile['subnet_address']} + + try: + # subnet_name = "{}-{}".format(net_name[:24], uuid4()) + subnet_params = { + 'address_prefix': ip_profile['subnet_address'] + } + # Assign a not duplicated net name + subnet_name = self._get_unused_subnet_name(net_name) + + self.logger.debug('creating subnet_name: {}'.format(subnet_name)) + async_creation = self.conn_vnet.subnets.create_or_update(self.resource_group, self.vnet_name, + subnet_name, subnet_params) + async_creation.wait() + self.logger.debug('created subnet_name: {}'.format(subnet_name)) + + return "{}/subnets/{}".format(self.vnet_id, subnet_name), None + except Exception as e: + self._format_vimconn_exception(e) + + def _get_unused_subnet_name(self, subnet_name): + """ + Adds a prefix to the subnet_name with a number in case the indicated name is repeated + Checks subnets with the indicated name (without suffix) and adds a suffix with a number + """ + all_subnets = self.conn_vnet.subnets.list(self.resource_group, self.vnet_name) + # Filter to subnets starting with the indicated name + subnets = list(filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets)) + net_names = [str(subnet.name) for subnet in subnets] + + # get the name with the first not used suffix + name_suffix = 0 + # name = subnet_name + "-" + str(name_suffix) + name = subnet_name # first subnet created will have no prefix + while name in net_names: + name_suffix += 1 + name = subnet_name + "-" + str(name_suffix) + return name + + def _create_nic(self, net, nic_name, static_ip=None): + + self.logger.debug('create nic name %s, net_name %s', nic_name, net) + self._reload_connection() + + subnet_id = net['net_id'] + location = self._get_location_from_resource_group(self.resource_group) + try: + net_ifz = {'location': location} + net_ip_config = {'name': nic_name + '-ipconfiguration', 'subnet': {'id': subnet_id}} + if static_ip: + net_ip_config['privateIPAddress'] = static_ip + net_ip_config['privateIPAllocationMethod'] = 'Static' + net_ifz['ip_configurations'] = [net_ip_config] + mac_address = net.get('mac_address') + if mac_address: + net_ifz['mac_address'] = mac_address + + async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name, + net_ifz) + async_nic_creation.wait() + self.logger.debug('created nic name %s', nic_name) + + public_ip = net.get('floating_ip') + if public_ip: + public_ip_address_params = { + 'location': location, + 'public_ip_allocation_method': 'Dynamic' + } + public_ip_name = nic_name + '-public-ip' + public_ip = self.conn_vnet.public_ip_addresses.create_or_update( + self.resource_group, + public_ip_name, + public_ip_address_params + ) + self.logger.debug('created public IP: {}'.format(public_ip.result())) + + # Associate NIC to Public IP + nic_data = self.conn_vnet.network_interfaces.get( + self.resource_group, + nic_name) + + nic_data.ip_configurations[0].public_ip_address = public_ip.result() + + self.conn_vnet.network_interfaces.create_or_update( + self.resource_group, + nic_name, + nic_data) + + except Exception as e: + self._format_vimconn_exception(e) + + return async_nic_creation.result() + + def new_flavor(self, flavor_data): + """ + It is not allowed to create new flavors in Azure, must always use an existing one + """ + raise vimconn.vimconnAuthException("It is not possible to create new flavors in AZURE") + + def new_tenant(self, tenant_name, tenant_description): + """ + It is not allowed to create new 
tenants in azure + """ + raise vimconn.vimconnAuthException("It is not possible to create a TENANT in AZURE") + + def new_image(self, image_dict): + """ + It is not allowed to create new images in Azure, must always use an existing one + """ + raise vimconn.vimconnAuthException("It is not possible to create new images in AZURE") + + def get_image_id_from_path(self, path): + """Get the image id from image path in the VIM database. + Returns the image_id or raises a vimconnNotFoundException + """ + raise vimconn.vimconnAuthException("It is not possible to obtain image from path in AZURE") + + def get_image_list(self, filter_dict={}): + """Obtain tenant images from VIM + Filter_dict can be: + name: image name with the format: publisher:offer:sku:version + If some part of the name is provide ex: publisher:offer it will search all availables skus and version + for the provided publisher and offer + id: image uuid, currently not supported for azure + Returns the image list of dictionaries: + [{}, ...] + List can be empty + """ + + self.logger.debug("get_image_list filter {}".format(filter_dict)) + + self._reload_connection() + try: + image_list = [] + if filter_dict.get("name"): + # name will have the format 'publisher:offer:sku:version' + # publisher is required, offer sku and version will be searched if not provided + params = filter_dict["name"].split(":") + publisher = params[0] + if publisher: + # obtain offer list + offer_list = self._get_offer_list(params, publisher) + for offer in offer_list: + # obtain skus + sku_list = self._get_sku_list(params, publisher, offer) + for sku in sku_list: + # if version is defined get directly version, else list images + if len(params) == 4 and params[3]: + version = params[3] + image_list = self._get_version_image_list(publisher, offer, sku, version) + else: + image_list = self._get_sku_image_list(publisher, offer, sku) + else: + raise vimconn.vimconnAuthException( + "List images in Azure must include name param with at least publisher") + else: + raise vimconn.vimconnAuthException("List images in Azure must include name param with at" + " least publisher") + + return image_list + except Exception as e: + self._format_vimconn_exception(e) + + def _get_offer_list(self, params, publisher): + """ + Helper method to obtain offer list for defined publisher + """ + if len(params) >= 2 and params[1]: + return [params[1]] + else: + try: + # get list of offers from azure + result_offers = self.conn_compute.virtual_machine_images.list_offers(self.region, publisher) + return [offer.name for offer in result_offers] + except CloudError as e: + # azure raises CloudError when not found + self.logger.info("error listing offers for publisher {}, Error: {}".format(publisher, e)) + return [] + + def _get_sku_list(self, params, publisher, offer): + """ + Helper method to obtain sku list for defined publisher and offer + """ + if len(params) >= 3 and params[2]: + return [params[2]] + else: + try: + # get list of skus from azure + result_skus = self.conn_compute.virtual_machine_images.list_skus(self.region, publisher, offer) + return [sku.name for sku in result_skus] + except CloudError as e: + # azure raises CloudError when not found + self.logger.info("error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e)) + return [] + + def _get_sku_image_list(self, publisher, offer, sku): + """ + Helper method to obtain image list for publisher, offer and sku + """ + image_list = [] + try: + result_images = 
self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku) + for result_image in result_images: + image_list.append({ + 'id': str(result_image.id), + 'name': ":".join([publisher, offer, sku, result_image.name]) + }) + except CloudError as e: + self.logger.info( + "error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e)) + image_list = [] + return image_list + + def _get_version_image_list(self, publisher, offer, sku, version): + image_list = [] + try: + result_image = self.conn_compute.virtual_machine_images.get(self.region, publisher, offer, sku, version) + if result_image: + image_list.append({ + 'id': str(result_image.id), + 'name': ":".join([publisher, offer, sku, version]) + }) + except CloudError as e: + # azure gives CloudError when not found + self.logger.info("error listing images for publisher {}, offer {}, sku {}, version {} Error: {}". + format(publisher, offer, sku, version, e)) + image_list = [] + return image_list + + def get_network_list(self, filter_dict={}): + """Obtain tenant networks of VIM + Filter_dict can be: + name: network name + id: network id + shared: boolean, not implemented in Azure + tenant_id: tenant, not used in Azure, all networks same tenants + admin_state_up: boolean, not implemented in Azure + status: 'ACTIVE', not implemented in Azure # + Returns the network list of dictionaries + """ + # self.logger.debug('getting network list for vim, filter %s', filter_dict) + try: + self._reload_connection() + + vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + subnet_list = [] + + for subnet in vnet.subnets: + if filter_dict: + if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]: + continue + if filter_dict.get("name") and \ + str(subnet.name) != filter_dict["name"]: + continue + + name = self._get_resource_name_from_resource_id(subnet.id) + + subnet_list.append({ + 'id': str(subnet.id), + 'name': name, + 'status': self.provision_state2osm[subnet.provisioning_state], + 'cidr_block': str(subnet.address_prefix), + 'type': 'bridge', + 'shared': False + }) + + return subnet_list + except Exception as e: + self._format_vimconn_exception(e) + + def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, + disk_list=None, availability_zone_index=None, availability_zone_list=None): + + self.logger.debug("new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, " + "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s", + name, image_id, flavor_id, net_list, cloud_config, disk_list, + availability_zone_index, availability_zone_list) + + self._reload_connection() + + # Validate input data is valid + # The virtual machine name must have less or 64 characters and it can not have the following + # characters: (~ ! @ # $ % ^ & * ( ) = + _ [ ] { } \ | ; : ' " , < > / ?.) 
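A small usage sketch of the image filter format handled by get_image_list() above; it assumes a vimconnector instance built as in the __main__ test block at the end of this module (called `azure` here), and the publisher/offer/sku values are only illustrative:

    # filter name follows 'publisher:offer:sku:version'; only the publisher is mandatory,
    # missing parts are expanded through list_offers()/list_skus()/list()
    images = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS"})
    # each returned entry is a dict such as:
    # {'id': '/Subscriptions/.../Versions/18.04.201809110',
    #  'name': 'Canonical:UbuntuServer:18.04-LTS:18.04.201809110'}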
+ vm_name = self._check_vm_name(name) + # Obtain vm unused name + vm_name = self._get_unused_vm_name(vm_name) + + # At least one network must be provided + if not net_list: + raise vimconn.vimconnException("At least one net must be provided to create a new VM") + + # image_id are several fields of the image_id + image_reference = self._get_image_reference(image_id) + + self._check_subnets_for_vm(net_list) + vm_nics = [] + for idx, net in enumerate(net_list): + # Fault with subnet_id + # subnet_id=net['subnet_id'] + # subnet_id=net['net_id'] + nic_name = vm_name + '-nic-'+str(idx) + vm_nic = self._create_nic(net, nic_name, net.get('ip_address')) + vm_nics.append({'id': str(vm_nic.id)}) + net['vim_id'] = vm_nic.id + + try: + + # cloud-init configuration + # cloud config + if cloud_config: + config_drive, userdata = self._create_user_data(cloud_config) + custom_data = base64.b64encode(userdata.encode('utf-8')).decode('latin-1') + key_data = None + key_pairs = cloud_config.get("key-pairs") + if key_pairs: + key_data = key_pairs[0] + + if cloud_config.get("users"): + user_name = cloud_config.get("users")[0].get("name", "osm") + else: + user_name = "osm" # DEFAULT USER IS OSM + + os_profile = { + 'computer_name': vm_name, + 'admin_username': user_name, + 'linux_configuration': { + "disable_password_authentication": True, + "ssh": { + "public_keys": [{ + "path": "/home/{}/.ssh/authorized_keys".format(user_name), + "key_data": key_data + }] + } + }, + 'custom_data': custom_data + } + else: + os_profile = { + 'computer_name': vm_name, + 'admin_username': 'osm', + 'admin_password': 'Osm4u!', + } + + vm_parameters = { + 'location': self.region, + 'os_profile': os_profile, + 'hardware_profile': { + 'vm_size': flavor_id + }, + 'storage_profile': { + 'image_reference': image_reference + } + } + + # Add data disks if they are provided + if disk_list: + data_disks = [] + for lun_name, disk in enumerate(disk_list): + self.logger.debug("add disk size: %s, image: %s", disk.get("size"), disk.get("image_id")) + if not disk.get("image_id"): + data_disks.append({ + 'lun': lun_name, # You choose the value, depending of what is available for you + 'name': vm_name + "_data_disk-" + str(lun_name), + 'create_option': DiskCreateOption.empty, + 'disk_size_gb': disk.get("size") + }) + else: + # self.logger.debug("currently not able to create data disks from image for azure, ignoring") + data_disks.append({ + 'lun': lun_name, # You choose the value, depending of what is available for you + 'name': vm_name + "_data_disk-" + str(lun_name), + 'create_option': 'Attach', + 'disk_size_gb': disk.get("size"), + 'managed_disk': { + 'id': disk.get("image_id") + } + }) + + if data_disks: + vm_parameters["storage_profile"]["data_disks"] = data_disks + + # If the machine has several networks one must be marked as primary + # As it is not indicated in the interface the first interface will be marked as primary + if len(vm_nics) > 1: + for idx, vm_nic in enumerate(vm_nics): + if idx == 0: + vm_nics[0]['Primary'] = True + else: + vm_nics[idx]['Primary'] = False + + vm_parameters['network_profile'] = {'network_interfaces': vm_nics} + + self.logger.debug("create vm name: %s", vm_name) + creation_result = self.conn_compute.virtual_machines.create_or_update( + self.resource_group, + vm_name, + vm_parameters + ) + # creation_result.wait() + result = creation_result.result() + self.logger.debug("created vm name: %s", vm_name) + + if start: + self.conn_compute.virtual_machines.start( + self.resource_group, + vm_name) + # start_result.wait() + 
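A minimal sketch of the shape of one net_list entry consumed by the NIC creation loop of new_vminstance() above; only the keys are taken from the code, all values are made up:

    net = {
        "net_id": "/subscriptions/<sub-id>/resourceGroups/osmRG/providers/Microsoft.Network/"
                  "virtualNetworks/osm_vnet/subnets/mgmt",   # azure subnet identifier (mandatory)
        "ip_address": "10.0.1.10",    # optional, the NIC gets a Static private IP
        "floating_ip": True,          # optional, a Dynamic public IP is created and associated
        "mac_address": None,          # optional
    }
    # after _create_nic() the connector stores the created NIC id back in net["vim_id"]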
+ return result.id, None + + # run_command_parameters = { + # 'command_id': 'RunShellScript', # For linux, don't change it + # 'script': [ + # 'date > /tmp/test.txt' + # ] + # } + except Exception as e: + self.logger.debug('Exception creating new vminstance: %s', e, exc_info=True) + self._format_vimconn_exception(e) + + def _get_unused_vm_name(self, vm_name): + """ + Checks the vm name and in case it is used adds a suffix to the name to allow creation + :return: + """ + all_vms = self.conn_compute.virtual_machines.list(self.resource_group) + # Filter to vms starting with the indicated name + vms = list(filter(lambda vm: (vm.name.startswith(vm_name)), all_vms)) + vm_names = [str(vm.name) for vm in vms] + + # get the name with the first not used suffix + name_suffix = 0 + # name = subnet_name + "-" + str(name_suffix) + name = vm_name # first subnet created will have no prefix + while name in vm_names: + name_suffix += 1 + name = vm_name + "-" + str(name_suffix) + return name + + # It is necesary extract from image_id data to create the VM with this format + # 'image_reference': { + # 'publisher': vm_reference['publisher'], + # 'offer': vm_reference['offer'], + # 'sku': vm_reference['sku'], + # 'version': vm_reference['version'] + # }, + def _get_image_reference(self, image_id): + + try: + # The data input format example: + # /Subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/Providers/Microsoft.Compute/Locations/westeurope/ + # Publishers/Canonical/ArtifactTypes/VMImage/ + # Offers/UbuntuServer/ + # Skus/18.04-LTS/ + # Versions/18.04.201809110 + publisher = str(image_id.split('/')[8]) + offer = str(image_id.split('/')[12]) + sku = str(image_id.split('/')[14]) + version = str(image_id.split('/')[16]) + + return { + 'publisher': publisher, + 'offer': offer, + 'sku': sku, + 'version': version + } + except Exception as e: + raise vimconn.vimconnException( + "Unable to get image_reference from invalid image_id format: '{}'".format(image_id)) + + # Azure VM names can not have some special characters + def _check_vm_name(self, vm_name): + """ + Checks vm name, in case the vm has not allowed characters they are removed, not error raised + """ + + chars_not_allowed_list = "~!@#$%^&*()=+_[]{}|;:<>/?." 
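A worked example of the index-based split performed by _get_image_reference() above, using the sample identifier quoted in its comment:

    parts = ("/Subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/Providers/Microsoft.Compute"
             "/Locations/westeurope/Publishers/Canonical/ArtifactTypes/VMImage"
             "/Offers/UbuntuServer/Skus/18.04-LTS/Versions/18.04.201809110").split('/')
    # parts[8]  == 'Canonical'          (publisher)
    # parts[12] == 'UbuntuServer'       (offer)
    # parts[14] == '18.04-LTS'          (sku)
    # parts[16] == '18.04.201809110'    (version)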
+ + # First: the VM name max length is 64 characters + vm_name_aux = vm_name[:64] + + # Second: replace not allowed characters + for elem in chars_not_allowed_list: + # Check if string is in the main string + if elem in vm_name_aux: + # self.logger.debug('Dentro del IF') + # Replace the string + vm_name_aux = vm_name_aux.replace(elem, '-') + + return vm_name_aux + + def get_flavor_id_from_data(self, flavor_dict): + + self.logger.debug("getting flavor id from data, flavor_dict: %s", flavor_dict) + filter_dict = flavor_dict or {} + try: + self._reload_connection() + vm_sizes_list = [vm_size.serialize() for vm_size in + self.conn_compute.virtual_machine_sizes.list(self.region)] + + cpus = filter_dict.get('vcpus') or 0 + memMB = filter_dict.get('ram') or 0 + + # Filter + if self._config.get("flavors_pattern"): + filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and + size['memoryInMB'] >= memMB and + re.search(self._config.get("flavors_pattern"), size["name"])] + else: + filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and + size['memoryInMB'] >= memMB] + + # Sort + listedFilteredSizes = sorted(filtered_sizes, key=lambda k: (k['numberOfCores'], k['memoryInMB'], + k['resourceDiskSizeInMB'])) + + if listedFilteredSizes: + return listedFilteredSizes[0]['name'] + raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict))) + + except Exception as e: + self._format_vimconn_exception(e) + + def _get_flavor_id_from_flavor_name(self, flavor_name): + + # self.logger.debug("getting flavor id from flavor name {}".format(flavor_name)) + try: + self._reload_connection() + vm_sizes_list = [vm_size.serialize() for vm_size in + self.conn_compute.virtual_machine_sizes.list(self.region)] + + output_flavor = None + for size in vm_sizes_list: + if size['name'] == flavor_name: + output_flavor = size + + # None is returned if not found anything + return output_flavor + + except Exception as e: + self._format_vimconn_exception(e) + + def check_vim_connectivity(self): + try: + self._reload_connection() + return True + except Exception as e: + raise vimconn.vimconnException("Connectivity issue with Azure API: {}".format(e)) + + def get_network(self, net_id): + + # self.logger.debug('get network id: {}'.format(net_id)) + # res_name = self._get_resource_name_from_resource_id(net_id) + self._reload_connection() + + filter_dict = {'name': net_id} + network_list = self.get_network_list(filter_dict) + + if not network_list: + raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id)) + else: + return network_list[0] + + def delete_network(self, net_id, created_items=None): + + self.logger.debug('deleting network {} - {}'.format(self.resource_group, net_id)) + + self._reload_connection() + res_name = self._get_resource_name_from_resource_id(net_id) + filter_dict = {'name': res_name} + network_list = self.get_network_list(filter_dict) + if not network_list: + raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id)) + + try: + # Subnet API fails (CloudError: Azure Error: ResourceNotFound) + # Put the initial virtual_network API + async_delete = self.conn_vnet.subnets.delete(self.resource_group, self.vnet_name, res_name) + async_delete.wait() + return net_id + + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id)) + else: + self._format_vimconn_exception(e) + except 
Exception as e: + self._format_vimconn_exception(e) + + def delete_vminstance(self, vm_id, created_items=None): + """ Deletes a vm instance from the vim. + """ + self.logger.debug('deleting VM instance {} - {}'.format(self.resource_group, vm_id)) + self._reload_connection() + + try: + + res_name = self._get_resource_name_from_resource_id(vm_id) + vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name) + + # Shuts down the virtual machine and releases the compute resources + # vm_stop = self.conn_compute.virtual_machines.power_off(self.resource_group, resName) + # vm_stop.wait() + + vm_delete = self.conn_compute.virtual_machines.delete(self.resource_group, res_name) + vm_delete.wait() + self.logger.debug('deleted VM name: %s', res_name) + + # Delete OS Disk + os_disk_name = vm.storage_profile.os_disk.name + self.logger.debug('delete OS DISK: %s', os_disk_name) + self.conn_compute.disks.delete(self.resource_group, os_disk_name) + self.logger.debug('deleted OS DISK name: %s', os_disk_name) + + for data_disk in vm.storage_profile.data_disks: + self.logger.debug('delete data_disk: %s', data_disk.name) + self.conn_compute.disks.delete(self.resource_group, data_disk.name) + self.logger.debug('deleted OS DISK name: %s', data_disk.name) + + # After deleting VM, it is necessary to delete NIC, because if is not deleted delete_network + # does not work because Azure says that is in use the subnet + network_interfaces = vm.network_profile.network_interfaces + + for network_interface in network_interfaces: + + nic_name = self._get_resource_name_from_resource_id(network_interface.id) + nic_data = self.conn_vnet.network_interfaces.get( + self.resource_group, + nic_name) + + public_ip_name = None + exist_public_ip = nic_data.ip_configurations[0].public_ip_address + if exist_public_ip: + public_ip_id = nic_data.ip_configurations[0].public_ip_address.id + + # Delete public_ip + public_ip_name = self._get_resource_name_from_resource_id(public_ip_id) + + # Public ip must be deleted afterwards of nic that is attached + + self.logger.debug('delete NIC name: %s', nic_name) + nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, nic_name) + nic_delete.wait() + self.logger.debug('deleted NIC name: %s', nic_name) + + # Delete list of public ips + if public_ip_name: + self.logger.debug('delete PUBLIC IP - ' + public_ip_name) + self.conn_vnet.public_ip_addresses.delete(self.resource_group, public_ip_name) + + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + raise vimconn.vimconnNotFoundException("No vm instance found '{}'".format(vm_id)) + else: + self._format_vimconn_exception(e) + except Exception as e: + self._format_vimconn_exception(e) + + def action_vminstance(self, vm_id, action_dict, created_items={}): + """Send and action over a VM instance from VIM + Returns the vm_id if the action was successfully sent to the VIM + """ + + self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) + try: + self._reload_connection() + resName = self._get_resource_name_from_resource_id(vm_id) + if "start" in action_dict: + self.conn_compute.virtual_machines.start(self.resource_group, resName) + elif "stop" in action_dict or "shutdown" in action_dict or "shutoff" in action_dict: + self.conn_compute.virtual_machines.power_off(self.resource_group, resName) + elif "terminate" in action_dict: + self.conn_compute.virtual_machines.delete(self.resource_group, resName) + elif "reboot" in action_dict: + 
self.conn_compute.virtual_machines.restart(self.resource_group, resName) + return None + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + raise vimconn.vimconnNotFoundException("No vm found '{}'".format(vm_id)) + else: + self._format_vimconn_exception(e) + except Exception as e: + self._format_vimconn_exception(e) + + def delete_flavor(self, flavor_id): + raise vimconn.vimconnAuthException("It is not possible to delete a FLAVOR in AZURE") + + def delete_tenant(self, tenant_id,): + raise vimconn.vimconnAuthException("It is not possible to delete a TENANT in AZURE") + + def delete_image(self, image_id): + raise vimconn.vimconnAuthException("It is not possible to delete a IMAGE in AZURE") + + def get_vminstance(self, vm_id): + """ + Obtaing the vm instance data from v_id + """ + self.logger.debug("get vm instance: %s", vm_id) + self._reload_connection() + try: + resName = self._get_resource_name_from_resource_id(vm_id) + vm = self.conn_compute.virtual_machines.get(self.resource_group, resName) + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + raise vimconn.vimconnNotFoundException("No vminstance found '{}'".format(vm_id)) + else: + self._format_vimconn_exception(e) + except Exception as e: + self._format_vimconn_exception(e) + + return vm + + def get_flavor(self, flavor_id): + """ + Obtains the flavor_data from the flavor_id + """ + self._reload_connection() + self.logger.debug("get flavor from id: %s", flavor_id) + flavor_data = self._get_flavor_id_from_flavor_name(flavor_id) + if flavor_data: + flavor = { + 'id': flavor_id, + 'name': flavor_id, + 'ram': flavor_data['memoryInMB'], + 'vcpus': flavor_data['numberOfCores'], + 'disk': flavor_data['resourceDiskSizeInMB']/1024 + } + return flavor + else: + raise vimconn.vimconnNotFoundException("flavor '{}' not found".format(flavor_id)) + + def get_tenant_list(self, filter_dict={}): + """ Obtains the list of tenants + For the azure connector only the azure tenant will be returned if it is compatible + with filter_dict + """ + tenants_azure = [{'name': self.tenant, 'id': self.tenant}] + tenant_list = [] + + self.logger.debug("get tenant list: %s", filter_dict) + for tenant_azure in tenants_azure: + if filter_dict: + if filter_dict.get("id") and str(tenant_azure.get("id")) != filter_dict["id"]: + continue + if filter_dict.get("name") and str(tenant_azure.get("name")) != filter_dict["name"]: + continue + + tenant_list.append(tenant_azure) + + return tenant_list + + def refresh_nets_status(self, net_list): + """Get the status of the networks + Params: the list of network identifiers + Returns a dictionary with: + net_id: #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + # + error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + + """ + + out_nets = {} + self._reload_connection() + + self.logger.debug("reload nets status net_list: %s", net_list) + for net_id in net_list: + try: + netName = self._get_net_name_from_resource_id(net_id) + resName = self._get_resource_name_from_resource_id(net_id) + + net = self.conn_vnet.subnets.get(self.resource_group, netName, resName) + + out_nets[net_id] = { + "status": self.provision_state2osm[net.provisioning_state], + "vim_info": str(net) + } + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + self.logger.info("Not found subnet net_name: %s, subnet_name: %s", netName, resName) + out_nets[net_id] = { + "status": "DELETED", + "error_msg": str(e) + } + else: + self.logger.error("CloudError Exception %s when searching subnet", e) + out_nets[net_id] = { + "status": "VIM_ERROR", + "error_msg": str(e) + } + except vimconn.vimconnNotFoundException as e: + self.logger.error("VimConnNotFoundException %s when searching subnet", e) + out_nets[net_id] = { + "status": "DELETED", + "error_msg": str(e) + } + except Exception as e: + self.logger.error("Exception %s when searching subnet", e, exc_info=True) + out_nets[net_id] = { + "status": "VIM_ERROR", + "error_msg": str(e) + } + return out_nets + + def refresh_vms_status(self, vm_list): + """ Get the status of the virtual machines and their interfaces/ports + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: # VIM id of this Virtual Machine + status: # Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address + # (ACTIVE:NoMgmtIP is not returned for Azure) + # + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. Each item a dictionary with: + vim_interface_id - The ID of the interface + mac_address - The MAC address of the interface. + ip_address - The IP address of the interface within the subnet. 
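                For illustration, one possible entry of the returned dictionary (all values are hypothetical):
                    "<vm_id>": {
                        "status": "ACTIVE",
                        "vim_info": "<text dump of the azure virtual machine object>",
                        "interfaces": [{
                            "vim_interface_id": "/subscriptions/<sub-id>/.../networkInterfaces/myvm-nic-0",
                            "mac_address": "00-0D-3A-AA-BB-CC",
                            "ip_address": "51.105.4.10;10.0.1.4"   (public and private IPs joined with ';')
                        }]
                    }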
+ """ + + out_vms = {} + self._reload_connection() + + self.logger.debug("refresh vm status vm_list: %s", vm_list) + search_vm_list = vm_list or {} + + for vm_id in search_vm_list: + out_vm = {} + try: + res_name = self._get_resource_name_from_resource_id(vm_id) + + vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name) + out_vm['vim_info'] = str(vm) + out_vm['status'] = self.provision_state2osm.get(vm.provisioning_state, 'OTHER') + if vm.provisioning_state == 'Succeeded': + # check if machine is running or stopped + instance_view = self.conn_compute.virtual_machines.instance_view(self.resource_group, + res_name) + for status in instance_view.statuses: + splitted_status = status.code.split("/") + if len(splitted_status) == 2 and splitted_status[0] == 'PowerState': + out_vm['status'] = self.power_state2osm.get(splitted_status[1], 'OTHER') + + network_interfaces = vm.network_profile.network_interfaces + out_vm['interfaces'] = self._get_vm_interfaces_status(vm_id, network_interfaces) + + except CloudError as e: + if e.error.error and "notfound" in e.error.error.lower(): + self.logger.debug("Not found vm id: %s", vm_id) + out_vm['status'] = "DELETED" + out_vm['error_msg'] = str(e) + out_vm['vim_info'] = None + else: + # maybe connection error or another type of error, return vim error + self.logger.error("Exception %s refreshing vm_status", e) + out_vm['status'] = "VIM_ERROR" + out_vm['error_msg'] = str(e) + out_vm['vim_info'] = None + except Exception as e: + self.logger.error("Exception %s refreshing vm_status", e, exc_info=True) + out_vm['status'] = "VIM_ERROR" + out_vm['error_msg'] = str(e) + out_vm['vim_info'] = None + + out_vms[vm_id] = out_vm + + return out_vms + + def _get_vm_interfaces_status(self, vm_id, interfaces): + """ + Gets the interfaces detail for a vm + :param interfaces: List of interfaces. 
+ :return: Dictionary with list of interfaces including, vim_interface_id, mac_address and ip_address + """ + try: + interface_list = [] + for network_interface in interfaces: + interface_dict = {} + nic_name = self._get_resource_name_from_resource_id(network_interface.id) + interface_dict['vim_interface_id'] = network_interface.id + + nic_data = self.conn_vnet.network_interfaces.get( + self.resource_group, + nic_name) + + ips = [] + if nic_data.ip_configurations[0].public_ip_address: + self.logger.debug("Obtain public ip address") + public_ip_name = self._get_resource_name_from_resource_id( + nic_data.ip_configurations[0].public_ip_address.id) + public_ip = self.conn_vnet.public_ip_addresses.get(self.resource_group, public_ip_name) + self.logger.debug("Public ip address is: %s", public_ip.ip_address) + ips.append(public_ip.ip_address) + + private_ip = nic_data.ip_configurations[0].private_ip_address + ips.append(private_ip) + + interface_dict['mac_address'] = nic_data.mac_address + interface_dict['ip_address'] = ";".join(ips) + interface_list.append(interface_dict) + + return interface_list + except Exception as e: + self.logger.error("Exception %s obtaining interface data for vm: %s, error: %s", vm_id, e, exc_info=True) + self._format_vimconn_exception(e) + + +if __name__ == "__main__": + + # Making some basic test + vim_id = 'azure' + vim_name = 'azure' + needed_test_params = { + "client_id": "AZURE_CLIENT_ID", + "secret": "AZURE_SECRET", + "tenant": "AZURE_TENANT", + "resource_group": "AZURE_RESOURCE_GROUP", + "subscription_id": "AZURE_SUBSCRIPTION_ID", + "vnet_name": "AZURE_VNET_NAME", + } + test_params = {} + + for param, env_var in needed_test_params.items(): + value = getenv(env_var) + if not value: + raise Exception("Provide a valid value for env '{}'".format(env_var)) + test_params[param] = value + + config = { + 'region_name': getenv("AZURE_REGION_NAME", 'westeurope'), + 'resource_group': getenv("AZURE_RESOURCE_GROUP"), + 'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"), + 'pub_key': getenv("AZURE_PUB_KEY", None), + 'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'), + } + + virtualMachine = { + 'name': 'sergio', + 'description': 'new VM', + 'status': 'running', + 'image': { + 'publisher': 'Canonical', + 'offer': 'UbuntuServer', + 'sku': '16.04.0-LTS', + 'version': 'latest' + }, + 'hardware_profile': { + 'vm_size': 'Standard_DS1_v2' + }, + 'networks': [ + 'sergio' + ] + } + + vnet_config = { + 'subnet_address': '10.1.2.0/24', + # 'subnet_name': 'subnet-oam' + } + ########################### + + azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None, + user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config) + + # azure.get_flavor_id_from_data("here") + # subnets=azure.get_network_list() + # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'], + # virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets) + + azure.new_network("mynet", None) + net_id = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."\ + "Network/virtualNetworks/test" + net_id_not_found = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"\ + "Microsoft.Network/virtualNetworks/testALF" + azure.refresh_nets_status([net_id, net_id_not_found]) diff --git a/RO-VIM-azure/requirements.txt b/RO-VIM-azure/requirements.txt new file mode 100644 index 00000000..6cfff525 --- 
/dev/null +++ b/RO-VIM-azure/requirements.txt @@ -0,0 +1,20 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +PyYAML +requests +netaddr +azure +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO diff --git a/RO-VIM-azure/setup.py b/RO-VIM-azure/setup.py new file mode 100644 index 00000000..557fedaf --- /dev/null +++ b/RO-VIM-azure/setup.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +from setuptools import setup + +_name = "osm_rovim_azure" + +README = """ +=========== +osm-rovim_azure +=========== + +osm-ro pluging for azure VIM +""" + +setup( + name=_name, + description='OSM ro vim plugin for azure', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + author_email='alfonso.tiernosepulveda@telefonica.com', + maintainer='Alfonso Tierno', + maintainer_email='alfonso.tiernosepulveda@telefonica.com', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=["requests", "netaddr", "PyYAML", "azure", "osm-ro"], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rovim.plugins': ['rovim_azure = osm_rovim_azure.vimconn_azure'], + }, +) diff --git a/RO-VIM-azure/stdeb.cfg b/RO-VIM-azure/stdeb.cfg new file mode 100644 index 00000000..968c55e1 --- /dev/null +++ b/RO-VIM-azure/stdeb.cfg @@ -0,0 +1,19 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
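The 'osm_rovim.plugins' entry point declared in setup.py above can be discovered with the standard setuptools machinery; a generic sketch follows (this is not necessarily the exact loading code used by RO):

    import pkg_resources

    for entry_point in pkg_resources.iter_entry_points('osm_rovim.plugins'):
        # e.g. entry_point.name == 'rovim_azure'; load() imports osm_rovim_azure.vimconn_azure
        plugin_module = entry_point.load()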
+# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip + diff --git a/RO-VIM-azure/tox.ini b/RO-VIM-azure/tox.ini new file mode 100644 index 00000000..9bc1472c --- /dev/null +++ b/RO-VIM-azure/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rovim_azure --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rovim_azure.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-VIM-fos/Makefile b/RO-VIM-fos/Makefile new file mode 100644 index 00000000..26934532 --- /dev/null +++ b/RO-VIM-fos/Makefile @@ -0,0 +1,24 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rovim_fos-*.tar.gz osm_rovim_fos.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cp debian/python3-osm-rovim-fos.postinst deb_dist/osm-rovim-fos*/debian/ + cd deb_dist/osm-rovim-fos*/ && dpkg-buildpackage -rfakeroot -uc -us diff --git a/RO-VIM-fos/debian/python3-osm-rovim-fos.postinst b/RO-VIM-fos/debian/python3-osm-rovim-fos.postinst new file mode 100755 index 00000000..744b26fa --- /dev/null +++ b/RO-VIM-fos/debian/python3-osm-rovim-fos.postinst @@ -0,0 +1,24 @@ +#!/bin/bash + +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: OSM_TECH@list.etsi.org +## + +echo "POST INSTALL OSM-ROVIM-FOS" + +#Pip packages required for vmware connector +python3 -m pip install fog05rest>=0.0.4 + diff --git a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py new file mode 100644 index 00000000..c30c1f15 --- /dev/null +++ b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py @@ -0,0 +1,878 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2019 ADLINK Technology Inc.. +# This file is part of ETSI OSM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# + +""" +Eclipse fog05 connector, implements methods to interact with fog05 using REST Client + REST Proxy + +Manages LXD containers on x86_64 by default, currently missing EPA and VF/PF +Support config dict: + - arch : cpu architecture for the VIM + - hypervisor: virtualization technology supported by the VIM, can + can be one of: LXD, KVM, BARE, XEN, DOCKER, MCU + the selected VIM need to have at least a node with support + for the selected hypervisor + +""" +__author__="Gabriele Baldoni" +__date__ ="$13-may-2019 10:35:12$" + +import uuid +import socket +import struct +from . import vimconn +import random +import yaml +from functools import partial +from fog05rest import FIMAPI +from fog05rest import fimerrors + + +class vimconnector(vimconn.vimconnector): + def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, + config={}, persistent_info={}): + """Constructor of VIM + Params: + 'uuid': id asigned to this VIM + 'name': name assigned to this VIM, can be used for logging + 'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used + 'url_admin': (optional), url used for administrative tasks + 'user', 'passwd': credentials of the VIM user + 'log_level': provider if it should use a different log_level than the general one + 'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config + at creation and particular VIM config at teh attachment + 'persistent_info': dict where the class can store information that will be available among class + destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an + empty dict. 
Useful to store login/tokens information for speed up communication + + Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity + check against the VIM + """ + + vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, + config, persistent_info) + + self.logger.debug('vimconn_fos init with config: {}'.format(config)) + self.arch = config.get('arch', 'x86_64') + self.hv = config.get('hypervisor', 'LXD') + self.nodes = config.get('nodes', []) + self.fdu_node_map = {} + self.fos_api = FIMAPI(locator=self.url) + + + def __get_ip_range(self, first, count): + int_first = struct.unpack('!L', socket.inet_aton(first))[0] + int_last = int_first + count + last = socket.inet_ntoa(struct.pack('!L', int_last)) + return (first, last) + + def __name_filter(self, desc, filter_name=None): + if filter_name is None: + return True + return desc.get('name') == filter_name + + def __id_filter(self, desc, filter_id=None): + if filter_id is None: + return True + return desc.get('uuid') == filter_id + + def __checksum_filter(self, desc, filter_checksum=None): + if filter_checksum is None: + return True + return desc.get('checksum') == filter_checksum + + def check_vim_connectivity(self): + """Checks VIM can be reached and user credentials are ok. + Returns None if success or raised vimconnConnectionException, vimconnAuthException, ... + """ + try: + self.fos_api.check() + return None + except fimerrors.FIMAuthExcetpion as fae: + raise vimconn.vimconnAuthException("Unable to authenticate to the VIM. Error {}".format(fae)) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + + def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + """Adds a tenant network to VIM + Params: + 'net_name': name of the network + 'net_type': one of: + 'bridge': overlay isolated network + 'data': underlay E-LAN network for Passthrough and SRIOV interfaces + 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces. + 'ip_profile': is a dict containing the IP parameters of the network + 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented) + 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y + 'gateway_address': (Optional) ip_schema, that is X.X.X.X + 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X] + 'dhcp_enabled': True or False + 'dhcp_start_address': ip_schema, first IP to grant + 'dhcp_count': number of IPs to grant. 
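            Example (illustrative values): 'dhcp_start_address': '10.0.0.100' with 'dhcp_count': 50
                is converted by __get_ip_range() into ('10.0.0.100', '10.0.0.150'), i.e. the
                descriptor field dhcp_range becomes "10.0.0.100,10.0.0.150".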
+ 'shared': if this network can be seen/use by other tenants/organization + Returns the network identifier on success or raises and exception on failure + """ + self.logger.debug('new_network: {}'.format(locals())) + if net_type in ['data','ptp']: + raise vimconn.vimconnNotImplemented('{} type of network not supported'.format(net_type)) + + net_uuid = '{}'.format(uuid.uuid4()) + desc = { + 'uuid':net_uuid, + 'name':net_name, + 'net_type':'ELAN', + 'is_mgmt':False + } + + if ip_profile is not None: + ip = {} + if ip_profile.get('ip_version') == 'IPv4': + ip_info = {} + ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count')) + dhcp_range = '{},{}'.format(ip_range[0],ip_range[1]) + ip.update({'subnet':ip_profile.get('subnet_address')}) + ip.update({'dns':ip_profile.get('dns', None)}) + ip.update({'dhcp_enable':ip_profile.get('dhcp_enabled', False)}) + ip.update({'dhcp_range': dhcp_range}) + ip.update({'gateway':ip_profile.get('gateway_address', None)}) + desc.update({'ip_configuration':ip_info}) + else: + raise vimconn.vimconnNotImplemented('IPV6 network is not implemented at VIM') + desc.update({'ip_configuration':ip}) + self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc)) + try: + self.fos_api.network.add_network(desc) + except fimerrors.FIMAResouceExistingException as free: + raise vimconn.vimconnConflictException("Network already exists at VIM. Error {}".format(free)) + except Exception as e: + raise vimconn.vimconnException("Unable to create network {}. Error {}".format(net_name, e)) + # No way from the current rest service to get the actual error, most likely it will be an already existing error + return net_uuid,{} + + def get_network_list(self, filter_dict={}): + """Obtain tenant networks of VIM + Params: + 'filter_dict' (optional) contains entries to return only networks that matches ALL entries: + name: string => returns only networks with this name + id: string => returns networks with this VIM id, this imply returns one network at most + shared: boolean >= returns only networks that are (or are not) shared + tenant_id: sting => returns only networks that belong to this tenant/project + ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status + Returns the network list of dictionaries. each dictionary contains: + 'id': (mandatory) VIM network id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'network_type': (optional) can be 'vxlan', 'vlan' or 'flat' + 'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity, + authorization, or some other unspecific error + """ + self.logger.debug('get_network_list: {}'.format(filter_dict)) + res = [] + try: + nets = self.fos_api.network.list() + except Exception as e: + raise vimconn.vimconnConnectionException("Cannot get network list from VIM, connection error. 
Error {}".format(e)) + + filters = [ + partial(self.__name_filter, filter_name=filter_dict.get('name')), + partial(self.__id_filter,filter_id=filter_dict.get('id')) + ] + + r1 = [] + + for n in nets: + match = True + for f in filters: + match = match and f(n) + if match: + r1.append(n) + + for n in r1: + osm_net = { + 'id':n.get('uuid'), + 'name':n.get('name'), + 'status':'ACTIVE' + } + res.append(osm_net) + return res + + def get_network(self, net_id): + """Obtain network details from the 'net_id' VIM network + Return a dict that contains: + 'id': (mandatory) VIM network id, that is, net_id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + Raises an exception upon error or when network is not found + """ + self.logger.debug('get_network: {}'.format(net_id)) + res = self.get_network_list(filter_dict={'id':net_id}) + if len(res) == 0: + raise vimconn.vimconnNotFoundException("Network {} not found at VIM".format(net_id)) + return res[0] + + def delete_network(self, net_id, created_items=None): + """Deletes a tenant network from VIM + Returns the network identifier or raises an exception upon error or when network is not found + """ + self.logger.debug('delete_network: {}'.format(net_id)) + try: + self.fos_api.network.remove_network(net_id) + except fimerrors.FIMNotFoundException as fnfe: + raise vimconn.vimconnNotFoundException("Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe)) + except Exception as e: + raise vimconn.vimconnException("Cannot delete network {} from VIM. Error {}".format(net_id, e)) + return net_id + + def refresh_nets_status(self, net_list): + """Get the status of the networks + Params: + 'net_list': a list with the VIM network id to be get the status + Returns a dictionary with: + 'net_id': #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + 'net_id2': ... + """ + self.logger.debug('Refeshing network status with args: {}'.format(locals())) + r = {} + for n in net_list: + try: + osm_n = self.get_network(n) + r.update({ + osm_n.get('id'):{'status':osm_n.get('status')} + }) + except vimconn.vimconnNotFoundException: + r.update({ + n:{'status':'VIM_ERROR'} + }) + return r + + def get_flavor(self, flavor_id): + """Obtain flavor details from the VIM + Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } + Raises an exception upon error or if not found + """ + self.logger.debug('VIM get_flavor with args: {}'.format(locals())) + try: + r = self.fos_api.flavor.get(flavor_id) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. 
Error {}".format(e)) + if r is None: + raise vimconn.vimconnNotFoundException("Flavor not found at VIM") + return {'id':r.get('uuid'), 'name':r.get('name'), 'fos':r} + + def get_flavor_id_from_data(self, flavor_dict): + """Obtain flavor id that match the flavor description + Params: + 'flavor_dict': dictionary that contains: + 'disk': main hard disk in GB + 'ram': meomry in MB + 'vcpus': number of virtual cpus + #TODO: complete parameters for EPA + Returns the flavor_id or raises a vimconnNotFoundException + """ + self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals())) + + try: + flvs = self.fos_api.flavor.list() + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and x.get('ram_size_mb') == flavor_dict.get('ram') and x.get('storage_size_gb') == flavor_dict.get('disk'))] + if len(r) == 0: + raise vimconn.vimconnNotFoundException ( "No flavor found" ) + return r[0] + + def new_flavor(self, flavor_data): + """Adds a tenant flavor to VIM + flavor_data contains a dictionary with information, keys: + name: flavor name + ram: memory (cloud type) in MBytes + vpcus: cpus (cloud type) + extended: EPA parameters + - numas: #items requested in same NUMA + memory: number of 1G huge pages memory + paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads + interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa + - name: interface name + dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC + bandwidth: X Gbps; requested guarantee bandwidth + vpci: requested virtual PCI address + disk: disk size + is_public: + #TODO to concrete + Returns the flavor identifier""" + self.logger.debug('VIM new_flavor with args: {}'.format(locals())) + flv_id = '{}'.format(uuid.uuid4()) + desc = { + 'uuid':flv_id, + 'name':flavor_data.get('name'), + 'cpu_arch': self.arch, + 'cpu_min_count': flavor_data.get('vcpus'), + 'cpu_min_freq': 0.0, + 'ram_size_mb':float(flavor_data.get('ram')), + 'storage_size_gb':float(flavor_data.get('disk')) + } + try: + self.fos_api.flavor.add(desc) + except fimerrors.FIMAResouceExistingException as free: + raise vimconn.vimconnConflictException("Flavor {} already exist at VIM. Error {}".format(flv_id, free)) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + return flv_id + + + def delete_flavor(self, flavor_id): + """Deletes a tenant flavor from VIM identify by its id + Returns the used id or raise an exception""" + try: + self.fos_api.flavor.remove(flavor_id) + except fimerrors.FIMNotFoundException as fnfe: + raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe)) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + return flavor_id + + def new_image(self, image_dict): + """ Adds a tenant image to VIM. imge_dict is a dictionary with: + name: name + disk_format: qcow2, vhd, vmdk, raw (by default), ... 
+ location: path or URI + public: "yes" or "no" + metadata: metadata of the image + Returns the image id or raises an exception if failed + """ + self.logger.debug('VIM new_image with args: {}'.format(locals())) + img_id = '{}'.format(uuid.uuid4()) + desc = { + 'name':image_dict.get('name'), + 'uuid':img_id, + 'uri':image_dict.get('location') + } + try: + self.fos_api.image.add(desc) + except fimerrors.FIMAResouceExistingException as free: + raise vimconn.vimconnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free)) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + return img_id + + def get_image_id_from_path(self, path): + + """Get the image id from image path in the VIM database. + Returns the image_id or raises a vimconnNotFoundException + """ + self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals())) + try: + imgs = self.fos_api.image.list() + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + res = [x.get('uuid') for x in imgs if x.get('uri')==path] + if len(res) == 0: + raise vimconn.vimconnNotFoundException("Image with this path was not found") + return res[0] + + def get_image_list(self, filter_dict={}): + """Obtain tenant images from VIM + Filter_dict can be: + name: image name + id: image uuid + checksum: image checksum + location: image path + Returns the image list of dictionaries: + [{}, ...] + List can be empty + """ + self.logger.debug('VIM get_image_list args: {}'.format(locals())) + r = [] + try: + fimgs = self.fos_api.image.list() + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + + filters = [ + partial(self.__name_filter, filter_name=filter_dict.get('name')), + partial(self.__id_filter,filter_id=filter_dict.get('id')), + partial(self.__checksum_filter,filter_checksum=filter_dict.get('checksum')) + ] + + r1 = [] + + for i in fimgs: + match = True + for f in filters: + match = match and f(i) + if match: + r1.append(i) + + for i in r1: + img_info = { + 'name':i.get('name'), + 'id':i.get('uuid'), + 'checksum':i.get('checksum'), + 'location':i.get('uri'), + 'fos':i + } + r.append(img_info) + return r + #raise vimconnNotImplemented( "Should have implemented this" ) + + def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, + availability_zone_index=None, availability_zone_list=None): + """Adds a VM instance to VIM + Params: + 'start': (boolean) indicates if VM must start or created in pause mode. + 'image_id','flavor_id': image and flavor VIM id to use for the VM + 'net_list': list of interfaces, each one is a dictionary with: + 'name': (optional) name for the interface. + 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual + 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities + 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... + 'mac_address': (optional) mac address to assign to this interface + 'ip_address': (optional) IP address to assign to this interface + #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided, + the VLAN tag to be used. 
In case net_id is provided, the internal network vlan is used for tagging VF
+            'type': (mandatory) can be one of:
+                'virtual', in this case always connected to a network of type 'net_type=bridge'
+                'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                    can be created unconnected
+                'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                'VFnotShared' (SRIOV without VLAN tag): same as PF for network connectivity. VF where no other VFs
+                    are allocated on the same physical NIC
+            'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+            'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                or True, it must apply the default VIM behaviour
+            After execution the method will add the key:
+            'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                interface. 'net_list' is modified
+        'cloud_config': (optional) dictionary with:
+            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+            'users': (optional) list of users to be inserted, each item is a dict with:
+                'name': (mandatory) user name,
+                'key-pairs': (optional) list of strings with the public key to be inserted to the user
+            'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+            'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                'dest': (mandatory) string with the destination absolute path
+                'encoding': (optional, by default text). Can be one of:
+                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                'content' (mandatory): string with the content of the file
+                'permissions': (optional) string with file permissions, typically octal notation '0644'
+                'owner': (optional) file owner, string with the format 'owner:group'
+            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+            'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+            'size': (mandatory) string with the size of the disk in GB
+        availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ required
+        availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
+            availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present. An illustrative call is sketched right after this description.
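A minimal sketch of the net_list and cloud_config arguments described above, with every identifier invented for illustration (not part of the patch); it also mimics how the connector annotates each interface with the generated connection-point uuid as 'vim_id':

    import uuid

    net_list = [{
        'name': 'eth0',
        'net_id': 'net-0001',        # id previously returned by new_network (hypothetical)
        'type': 'virtual',
        'model': 'virtio',
    }]
    cloud_config = {
        'key-pairs': ['ssh-rsa AAAAB3Nza EXAMPLE-KEY user@example'],
        'user-data': '#cloud-config\nhostname: example-vnf\n',
    }

    # the connector generates one connection point per interface and writes it back:
    for iface in net_list:
        iface['vim_id'] = str(uuid.uuid4())
    print(net_list[0]['vim_id'])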
+ """ + self.logger.debug('new_vminstance with rgs: {}'.format(locals())) + fdu_uuid = '{}'.format(uuid.uuid4()) + + flv = self.fos_api.flavor.get(flavor_id) + img = self.fos_api.image.get(image_id) + + if flv is None: + raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM".format(flavor_id)) + if img is None: + raise vimconn.vimconnNotFoundException("Image {} not found at VIM".format(image_id)) + + created_items = { + 'fdu_id':'', + 'node_id':'', + 'connection_points':[] + } + + fdu_desc = { + 'name':name, + 'uuid':fdu_uuid, + 'computation_requirements':flv, + 'image':img, + 'hypervisor':self.hv, + 'migration_kind':'LIVE', + 'interfaces':[], + 'io_ports':[], + 'connection_points':[], + 'depends_on':[] + } + + nets = [] + cps = [] + intf_id = 0 + for n in net_list: + cp_id = '{}'.format(uuid.uuid4()) + n.update({'vim_id':cp_id}) + pair_id = n.get('net_id') + + cp_d = { + 'uuid':cp_id, + 'pair_id':pair_id + } + intf_d = { + 'name':n.get('name','eth{}'.format(intf_id)), + 'is_mgmt':False, + 'if_type':'INTERNAL', + 'virtual_interface':{ + 'intf_type':n.get('model','VIRTIO'), + 'vpci':n.get('vpci','0:0:0'), + 'bandwidth':int(n.get('bw', 100)) + } + } + if n.get('mac_address', None) is not None: + intf_d['mac_address'] = n['mac_address'] + + created_items['connection_points'].append(cp_id) + fdu_desc['connection_points'].append(cp_d) + fdu_desc['interfaces'].append(intf_d) + + intf_id = intf_id + 1 + + if cloud_config is not None: + configuration = { + 'conf_type':'CLOUD_INIT' + } + if cloud_config.get('user-data') is not None: + configuration.update({'script':cloud_config.get('user-data')}) + if cloud_config.get('key-pairs') is not None: + configuration.update({'ssh_keys':cloud_config.get('key-pairs')}) + + if 'script' in configuration: + fdu_desc.update({'configuration':configuration}) + + ### NODE Selection ### + # Infrastructure info + # nodes dict with + # uuid -> node uuid + # computational capabilities -> cpu, ram, and disk available + # hypervisors -> list of available hypervisors (eg. 
KVM, LXD, BARE) + # + # + + # UPDATING AVAILABLE INFRASTRUCTURE + + if len(self.nodes) == 0: + nodes_id = self.fos_api.node.list() + else: + nodes_id = self.nodes + nodes = [] + for n in nodes_id: + n_info = self.fos_api.node.info(n) + if n_info is None: + continue + n_plugs = [] + for p in self.fos_api.node.plugins(n): + n_plugs.append(self.fos_api.plugin.info(n,p)) + + n_cpu_number = len(n_info.get('cpu')) + n_cpu_arch = n_info.get('cpu')[0].get('arch') + n_cpu_freq = n_info.get('cpu')[0].get('frequency') + n_ram = n_info.get('ram').get('size') + n_disk_size = sorted(list(filter(lambda x: 'sda' in x['local_address'], n_info.get('disks'))), key= lambda k: k['dimension'])[-1].get('dimension') + + hvs = [] + for p in n_plugs: + if p.get('type') == 'runtime': + hvs.append(p.get('name')) + + ni = { + 'uuid':n, + 'computational_capabilities':{ + 'cpu_count':n_cpu_number, + 'cpu_arch':n_cpu_arch, + 'cpu_freq':n_cpu_freq, + 'ram_size':n_ram, + 'disk_size':n_disk_size + }, + 'hypervisors':hvs + } + nodes.append(ni) + + # NODE SELECTION + compatible_nodes = [] + for n in nodes: + if fdu_desc.get('hypervisor') in n.get('hypervisors'): + n_comp = n.get('computational_capabilities') + f_comp = fdu_desc.get('computation_requirements') + if f_comp.get('cpu_arch') == n_comp.get('cpu_arch'): + if f_comp.get('cpu_min_count') <= n_comp.get('cpu_count') and f_comp.get('ram_size_mb') <= n_comp.get('ram_size'): + if f_comp.get('disk_size_gb') <= n_comp.get('disk_size'): + compatible_nodes.append(n) + + if len(compatible_nodes) == 0: + raise vimconn.vimconnConflictException("No available nodes at VIM") + selected_node = random.choice(compatible_nodes) + + created_items.update({'fdu_id':fdu_uuid, 'node_id': selected_node.get('uuid')}) + + self.logger.debug('FOS Node {} FDU Descriptor: {}'.format(selected_node.get('uuid'), fdu_desc)) + + try: + self.fos_api.fdu.onboard(fdu_desc) + instanceid = self.fos_api.fdu.instantiate(fdu_uuid, selected_node.get('uuid')) + created_items.update({'instance_id':instanceid}) + + self.fdu_node_map.update({instanceid: selected_node.get('uuid')}) + self.logger.debug('new_vminstance return: {}'.format((fdu_uuid, created_items))) + return (instanceid, created_items) + except fimerrors.FIMAResouceExistingException as free: + raise vimconn.vimconnConflictException("VM already exists at VIM. Error {}".format(free)) + except Exception as e: + raise vimconn.vimconnException("Error while instantiating VM {}. Error {}".format(name, e)) + + + def get_vminstance(self,vm_id): + """Returns the VM instance information from VIM""" + self.logger.debug('VIM get_vminstance with args: {}'.format(locals())) + + try: + intsinfo = self.fos_api.fdu.instance_info(vm_id) + except Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) + if intsinfo is None: + raise vimconn.vimconnNotFoundException('VM with id {} not found!'.format(vm_id)) + return intsinfo + + + def delete_vminstance(self, vm_id, created_items=None): + """ + Removes a VM instance from VIM and each associate elements + :param vm_id: VIM identifier of the VM, provided by method new_vminstance + :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method + action_vminstance + :return: None or the same vm_id. 
Raises an exception on fail + """ + self.logger.debug('FOS delete_vminstance with args: {}'.format(locals())) + fduid = created_items.get('fdu_id') + try: + self.fos_api.fdu.terminate(vm_id) + self.fos_api.fdu.offload(fduid) + except Exception as e: + raise vimconn.vimconnException("Error on deletting VM with id {}. Error {}".format(vm_id,e)) + return vm_id + + #raise vimconnNotImplemented( "Should have implemented this" ) + + def refresh_vms_status(self, vm_list): + """Get the status of the virtual machines and their interfaces/ports + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: #VIM id of this Virtual Machine + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address + # + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. Each item a dictionary with: + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + mac_address: #Text format XX:XX:XX:XX:XX:XX + vim_net_id: #network id where this interface is connected, if provided at creation + vim_interface_id: #interface/port VIM id + ip_address: #null, or text with IPv4, IPv6 address + compute_node: #identification of compute node where PF,VF interface is allocated + pci: #PCI address of the NIC that hosts the PF,VF + vlan: #physical VLAN used for VF + """ + self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals())) + fos2osm_status = { + 'DEFINE':'OTHER', + 'CONFIGURE':'INACTIVE', + 'RUN':'ACTIVE', + 'PAUSE':'PAUSED', + 'ERROR':'ERROR' + } + + r = {} + + for vm in vm_list: + self.logger.debug('FOS refresh_vms_status for {}'.format(vm)) + + info = {} + nid = self.fdu_node_map.get(vm) + if nid is None: + r.update({vm:{ + 'status':'VIM_ERROR', + 'error_msg':'Not compute node associated for VM' + }}) + continue + + try: + vm_info = self.fos_api.fdu.instance_info(vm) + except: + r.update({vm:{ + 'status':'VIM_ERROR', + 'error_msg':'unable to connect to VIM' + }}) + continue + + if vm_info is None: + r.update({vm:{'status':'DELETED'}}) + continue + + + desc = self.fos_api.fdu.info(vm_info['fdu_uuid']) + osm_status = fos2osm_status.get(vm_info.get('status')) + + self.logger.debug('FOS status info {}'.format(vm_info)) + self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status)) + info.update({'status':osm_status}) + if vm_info.get('status') == 'ERROR': + info.update({'error_msg':vm_info.get('error_code')}) + info.update({'vim_info':yaml.safe_dump(vm_info)}) + faces = [] + i = 0 + for intf_name in vm_info.get('hypervisor_info').get('network',[]): + intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name) + face = {} + face['compute_node'] = nid + face['vim_info'] = yaml.safe_dump(intf_info) + face['mac_address'] = intf_info.get('hwaddr') + addrs = [] + for a in intf_info.get('addresses'): + addrs.append(a.get('address')) + if len(addrs) >= 0: + face['ip_address'] = ','.join(addrs) + else: + face['ip_address'] = '' + face['pci'] = '0:0:0.0' + # getting net id by CP + try: + cp_info = vm_info.get('connection_points')[i] + except IndexError: + cp_info = None + if 
cp_info is not None:
+                    cp_id = cp_info['cp_uuid']
+                    cps_d = desc['connection_points']
+                    matches = [x for x in cps_d if x['uuid'] == cp_id]
+                    if len(matches) > 0:
+                        cpd = matches[0]
+                        face['vim_net_id'] = cpd.get('pair_id', '')
+                    else:
+                        face['vim_net_id'] = ''
+                    face['vim_interface_id'] = cp_id
+                    # cp_info.get('uuid')
+                else:
+                    face['vim_net_id'] = ''
+                    face['vim_interface_id'] = intf_name
+                faces.append(face)
+                i += 1
+
+            info.update({'interfaces': faces})
+            r.update({vm: info})
+            self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info))
+        self.logger.debug('FOS refresh_vms_status res is {}'.format(r))
+        return r
+
+        # raise vimconnNotImplemented( "Should have implemented this" )
+
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        """
+        Send an action over a VM instance. Returns created_items if the action was successfully sent to the VIM.
+        created_items is a dictionary with items that
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param action_dict: dictionary with the action to perform
+        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
+            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
+            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
+            method can modify this value
+        :return: None, or a console dict
+        """
+        self.logger.debug('VIM action_vminstance with args: {}'.format(locals()))
+        nid = self.fdu_node_map.get(vm_id)
+        if nid is None:
+            raise vimconn.vimconnNotFoundException('No node for this VM')
+        try:
+            fdu_info = self.fos_api.fdu.instance_info(vm_id)
+            if "start" in action_dict:
+                if fdu_info.get('status') == 'CONFIGURE':
+                    self.fos_api.fdu.start(vm_id)
+                elif fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot start from this state")
+            elif "pause" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.pause(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot pause from this state")
+            elif "resume" in action_dict:
+                if fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot resume from this state")
+            elif "shutoff" in action_dict or "shutdown" in action_dict or "forceOff" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot shutoff from this state")
+            elif "terminate" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                elif fdu_info.get('status') == 'CONFIGURE':
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                elif fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot terminate from this state")
+            elif "rebuild" in action_dict:
+                raise vimconn.vimconnNotImplemented("Rebuild not implemented")
+            elif "reboot" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.start(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot reboot from this state")
+        except 
Exception as e: + raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e)) diff --git a/RO-VIM-fos/requirements.txt b/RO-VIM-fos/requirements.txt new file mode 100644 index 00000000..0164a303 --- /dev/null +++ b/RO-VIM-fos/requirements.txt @@ -0,0 +1,20 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +PyYAML +requests +netaddr +fog05rest>=0.0.4 +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO diff --git a/RO-VIM-fos/setup.py b/RO-VIM-fos/setup.py new file mode 100644 index 00000000..95d97ca3 --- /dev/null +++ b/RO-VIM-fos/setup.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +from setuptools import setup + +_name = "osm_rovim_fos" + +README = """ +=========== +osm-rovim_fos +=========== + +osm-ro pluging for fos VIM +""" + +setup( + name=_name, + description='OSM ro vim plugin for fos', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + # TODO py3 author_email='', + maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3 + # TODO py3 maintainer_email='', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=[ + "requests", "netaddr", "PyYAML", "osm-ro", "fog05rest>=0.0.4" + ], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rovim.plugins': ['rovim_fos = osm_rovim_fos.vimconn_fos'], + }, +) diff --git a/RO-VIM-fos/stdeb.cfg b/RO-VIM-fos/stdeb.cfg new file mode 100644 index 00000000..cf4b3539 --- /dev/null +++ b/RO-VIM-fos/stdeb.cfg @@ -0,0 +1,18 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
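The setup.py above registers the connector under the 'osm_rovim.plugins' entry point group. How the RO engine actually loads it is outside this diff; purely as a generic illustration of how such setuptools entry points are typically discovered (consumer code below is hypothetical):

    import pkg_resources

    # hypothetical consumer code, not part of this patch
    for ep in pkg_resources.iter_entry_points(group='osm_rovim.plugins'):
        module = ep.load()                 # e.g. 'rovim_fos' -> osm_rovim_fos.vimconn_fos
        print(ep.name, module.__name__)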
+# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-pip, python3-requests, python3-netaddr, python3-yaml, python3-osm-ro diff --git a/RO-VIM-fos/tox.ini b/RO-VIM-fos/tox.ini new file mode 100644 index 00000000..297800b0 --- /dev/null +++ b/RO-VIM-fos/tox.ini @@ -0,0 +1,41 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rovim_fos --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rovim_fos.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-VIM-opennebula/Makefile b/RO-VIM-opennebula/Makefile new file mode 100644 index 00000000..2ec6a44d --- /dev/null +++ b/RO-VIM-opennebula/Makefile @@ -0,0 +1,26 @@ +## +# Copyright 2017 Telefonica Digital Spain S.L.U. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rovim_opennebula-*.tar.gz osm_rovim_opennebula.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cp debian/python3-osm-rovim-opennebula.postinst deb_dist/osm-rovim-opennebula*/debian/ + cd deb_dist/osm-rovim-opennebula*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst b/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst new file mode 100755 index 00000000..27aacc74 --- /dev/null +++ b/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst @@ -0,0 +1,26 @@ +#!/bin/bash + +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: OSM_TECH@list.etsi.org +## + +echo "POST INSTALL OSM-ROVIM-OPENNEBULA" + +#Pip packages required for opennebula connector +python3 -m pip install -e git+https://github.com/python-oca/python-oca#egg=oca +python3 -m pip install untangle +python3 -m pip install pyone + diff --git a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py new file mode 100644 index 00000000..d788dcbb --- /dev/null +++ b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py @@ -0,0 +1,687 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2017 Telefonica Digital Spain S.L.U. +# This file is part of ETSI OSM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: patent-office@telefonica.com +## + +""" +vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API. +""" +__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \ + "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation " +__date__ = "$13-dec-2017 11:09:29$" +from osm_ro import vimconn +import requests +import logging +import oca +import untangle +import math +import random +import pyone + +class vimconnector(vimconn.vimconnector): + def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, + log_level="DEBUG", config={}, persistent_info={}): + + """Constructor of VIM + Params: + 'uuid': id asigned to this VIM + 'name': name assigned to this VIM, can be used for logging + 'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used + 'url_admin': (optional), url used for administrative tasks + 'user', 'passwd': credentials of the VIM user + 'log_level': provider if it should use a different log_level than the general one + 'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config + at creation and particular VIM config at teh attachment + 'persistent_info': dict where the class can store information that will be available among class + destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an + empty dict. 
Useful to store login/tokens information to speed up communication
+
+        Returns: Raises an exception if some needed parameter is missing, but it must not do any connectivity
+            check against the VIM
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config)
+
+    def _new_one_connection(self):
+        return pyone.OneServer(self.url, session=self.user + ':' + self.passwd)
+
+    def new_tenant(self, tenant_name, tenant_description):
+        # '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+        try:
+            client = oca.Client(self.user + ':' + self.passwd, self.url)
+            group_list = oca.GroupPool(client)
+            user_list = oca.UserPool(client)
+            group_list.info()
+            user_list.info()
+            create_primarygroup = 1
+            # create group-tenant
+            for group in group_list:
+                if str(group.name) == str(tenant_name):
+                    create_primarygroup = 0
+                    break
+            if create_primarygroup == 1:
+                oca.Group.allocate(client, tenant_name)
+            group_list.info()
+            # set to primary_group the tenant_group and oneadmin to secondary_group
+            for group in group_list:
+                if str(group.name) == str(tenant_name):
+                    for user in user_list:
+                        if str(user.name) == str(self.user):
+                            if user.name == "oneadmin":
+                                return str(0)
+                            else:
+                                self._add_secondarygroup(user.id, group.id)
+                                user.chgrp(group.id)
+                                return str(group.id)
+        except Exception as e:
+            self.logger.error("Create new tenant error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant from VIM. Returns the old tenant identifier"""
+        try:
+            client = oca.Client(self.user + ':' + self.passwd, self.url)
+            group_list = oca.GroupPool(client)
+            user_list = oca.UserPool(client)
+            group_list.info()
+            user_list.info()
+            for group in group_list:
+                if str(group.id) == str(tenant_id):
+                    for user in user_list:
+                        if str(user.name) == str(self.user):
+                            self._delete_secondarygroup(user.id, group.id)
+                            group.delete(client)
+                    return None
+            raise vimconn.vimconnNotFoundException("Group {} not found".format(tenant_id))
+        except Exception as e:
+            self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def _add_secondarygroup(self, id_user, id_group):
+        # change secondary_group to primary_group
+        params = '<?xml version="1.0"?> \
+                   <methodCall>\
+                   <methodName>one.user.addgroup</methodName>\
+                   <params>\
+                   <param>\
+                   <value><string>{}:{}</string></value>\
+                   </param>\
+                   <param>\
+                   <value><i4>{}</i4></value>\
+                   </param>\
+                   <param>\
+                   <value><i4>{}</i4></value>\
+                   </param>\
+                   </params>\
+                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+        requests.post(self.url, params)
+
+    def _delete_secondarygroup(self, id_user, id_group):
+        params = '<?xml version="1.0"?> \
+                   <methodCall>\
+                   <methodName>one.user.delgroup</methodName>\
+                   <params>\
+                   <param>\
+                   <value><string>{}:{}</string></value>\
+                   </param>\
+                   <param>\
+                   <value><i4>{}</i4></value>\
+                   </param>\
+                   <param>\
+                   <value><i4>{}</i4></value>\
+                   </param>\
+                   </params>\
+                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+        requests.post(self.url, params)
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):  # , **vim_specific):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. 
X.X.X.X[,X,X,X,X] + 'dhcp_enabled': True or False + 'dhcp_start_address': ip_schema, first IP to grant + 'dhcp_count': number of IPs to grant. + 'shared': if this network can be seen/use by other tenants/organization + 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk} + Returns a tuple with the network identifier and created_items, or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_network. Can be used to store created segments, created l2gw connections, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. + """ + + # oca library method cannot be used in this case (problem with cluster parameters) + try: + vlan = None + if provider_network_profile: + vlan = provider_network_profile.get("segmentation-id") + created_items = {} + one = self._new_one_connection() + size = "254" + if ip_profile is None: + subnet_rand = random.randint(0, 255) + ip_start = "192.168.{}.1".format(subnet_rand) + else: + index = ip_profile["subnet_address"].find("/") + ip_start = ip_profile["subnet_address"][:index] + if "dhcp_count" in ip_profile and ip_profile["dhcp_count"] is not None: + size = str(ip_profile["dhcp_count"]) + elif "dhcp_count" not in ip_profile and ip_profile["ip_version"] == "IPv4": + prefix = ip_profile["subnet_address"][index + 1:] + size = int(math.pow(2, 32 - prefix)) + if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None: + ip_start = str(ip_profile["dhcp_start_address"]) + if ip_profile["ip_version"] == "IPv6": + ip_prefix_type = "GLOBAL_PREFIX" + + if vlan is not None: + vlan_id = vlan + else: + vlan_id = str(random.randint(100, 4095)) + #if "internal" in net_name: + # OpenNebula not support two networks with same name + random_net_name = str(random.randint(1, 1000000)) + net_name = net_name + random_net_name + net_id = one.vn.allocate({ + 'NAME': net_name, + 'VN_MAD': '802.1Q', + 'PHYDEV': self.config["network"]["phydev"], + 'VLAN_ID': vlan_id + }, self.config["cluster"]["id"]) + arpool = {'AR_POOL': { + 'AR': { + 'TYPE': 'IP4', + 'IP': ip_start, + 'SIZE': size + } + } + } + one.vn.add_ar(net_id, arpool) + return net_id, created_items + except Exception as e: + self.logger.error("Create new network error: " + str(e)) + raise vimconn.vimconnException(e) + + def get_network_list(self, filter_dict={}): + """Obtain tenant networks of VIM + Params: + 'filter_dict' (optional) contains entries to return only networks that matches ALL entries: + name: string => returns only networks with this name + id: string => returns networks with this VIM id, this imply returns one network at most + shared: boolean >= returns only networks that are (or are not) shared + tenant_id: sting => returns only networks that belong to this tenant/project + ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active + #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status + Returns the network list of dictionaries. 
each dictionary contains: + 'id': (mandatory) VIM network id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'network_type': (optional) can be 'vxlan', 'vlan' or 'flat' + 'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity, + authorization, or some other unspecific error + """ + + try: + one = self._new_one_connection() + net_pool = one.vnpool.info(-2, -1, -1).VNET + response = [] + if "name" in filter_dict: + network_name_filter = filter_dict["name"] + else: + network_name_filter = None + if "id" in filter_dict: + network_id_filter = filter_dict["id"] + else: + network_id_filter = None + for network in net_pool: + if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter): + net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"} + response.append(net_dict) + return response + except Exception as e: + self.logger.error("Get network list error: " + str(e)) + raise vimconn.vimconnException(e) + + def get_network(self, net_id): + """Obtain network details from the 'net_id' VIM network + Return a dict that contains: + 'id': (mandatory) VIM network id, that is, net_id + 'name': (mandatory) VIM network name + 'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER' + 'error_msg': (optional) text that explains the ERROR status + other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param + Raises an exception upon error or when network is not found + """ + try: + one = self._new_one_connection() + net_pool = one.vnpool.info(-2, -1, -1).VNET + net = {} + for network in net_pool: + if str(network.ID) == str(net_id): + net['id'] = network.ID + net['name'] = network.NAME + net['status'] = "ACTIVE" + break + if net: + return net + else: + raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id)) + except Exception as e: + self.logger.error("Get network " + str(net_id) + " error): " + str(e)) + raise vimconn.vimconnException(e) + + def delete_network(self, net_id, created_items=None): + """ + Removes a tenant network from VIM and its associated elements + :param net_id: VIM identifier of the network, provided by method new_network + :param created_items: dictionary with extra items to be deleted. provided by method new_network + Returns the network identifier or raises an exception upon error or when network is not found + """ + try: + + one = self._new_one_connection() + one.vn.delete(int(net_id)) + return net_id + except Exception as e: + self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e)) + raise vimconn.vimconnException(e) + + def refresh_nets_status(self, net_list): + """Get the status of the networks + Params: + 'net_list': a list with the VIM network id to be get the status + Returns a dictionary with: + 'net_id': #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...) 
+ # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + 'net_id2': ... + """ + net_dict = {} + try: + for net_id in net_list: + net = {} + try: + net_vim = self.get_network(net_id) + net["status"] = net_vim["status"] + net["vim_info"] = None + except vimconn.vimconnNotFoundException as e: + self.logger.error("Exception getting net status: {}".format(str(e))) + net['status'] = "DELETED" + net['error_msg'] = str(e) + except vimconn.vimconnException as e: + self.logger.error(e) + net["status"] = "VIM_ERROR" + net["error_msg"] = str(e) + net_dict[net_id] = net + return net_dict + except vimconn.vimconnException as e: + self.logger.error(e) + for k in net_dict: + net_dict[k]["status"] = "VIM_ERROR" + net_dict[k]["error_msg"] = str(e) + return net_dict + + def get_flavor(self, flavor_id): # Esta correcto + """Obtain flavor details from the VIM + Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } + Raises an exception upon error or if not found + """ + try: + + one = self._new_one_connection() + template = one.template.info(int(flavor_id)) + if template is not None: + return {'id': template.ID, 'name': template.NAME} + raise vimconn.vimconnNotFoundException("Flavor {} not found".format(flavor_id)) + except Exception as e: + self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e)) + raise vimconn.vimconnException(e) + + def new_flavor(self, flavor_data): + """Adds a tenant flavor to VIM + flavor_data contains a dictionary with information, keys: + name: flavor name + ram: memory (cloud type) in MBytes + vpcus: cpus (cloud type) + extended: EPA parameters + - numas: #items requested in same NUMA + memory: number of 1G huge pages memory + paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads + interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa + - name: interface name + dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC + bandwidth: X Gbps; requested guarantee bandwidth + vpci: requested virtual PCI address + disk: disk size + is_public: + #TODO to concrete + Returns the flavor identifier""" + + disk_size = str(int(flavor_data["disk"])*1024) + + try: + one = self._new_one_connection() + template_id = one.template.allocate({ + 'TEMPLATE': { + 'NAME': flavor_data["name"], + 'CPU': flavor_data["vcpus"], + 'VCPU': flavor_data["vcpus"], + 'MEMORY': flavor_data["ram"], + 'DISK': { + 'SIZE': disk_size + }, + 'CONTEXT': { + 'NETWORK': "YES", + 'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]' + }, + 'GRAPHICS': { + 'LISTEN': '0.0.0.0', + 'TYPE': 'VNC' + }, + 'CLUSTER_ID': self.config["cluster"]["id"] + } + }) + return template_id + + except Exception as e: + self.logger.error("Create new flavor error: " + str(e)) + raise vimconn.vimconnException(e) + + def delete_flavor(self, flavor_id): + """ Deletes a tenant flavor from VIM + Returns the old flavor_id + """ + try: + one = self._new_one_connection() + one.template.delete(int(flavor_id), False) + return flavor_id + except Exception as e: + self.logger.error("Error deleting flavor " + str(flavor_id) + ". 
Flavor not found") + raise vimconn.vimconnException(e) + + def get_image_list(self, filter_dict={}): + """Obtain tenant images from VIM + Filter_dict can be: + name: image name + id: image uuid + checksum: image checksum + location: image path + Returns the image list of dictionaries: + [{}, ...] + List can be empty + """ + try: + one = self._new_one_connection() + image_pool = one.imagepool.info(-2, -1, -1).IMAGE + images = [] + if "name" in filter_dict: + image_name_filter = filter_dict["name"] + else: + image_name_filter = None + if "id" in filter_dict: + image_id_filter = filter_dict["id"] + else: + image_id_filter = None + for image in image_pool: + if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter): + images_dict = {"name": image.NAME, "id": str(image.ID)} + images.append(images_dict) + return images + except Exception as e: + self.logger.error("Get image list error: " + str(e)) + raise vimconn.vimconnException(e) + + def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, + availability_zone_index=None, availability_zone_list=None): + + """Adds a VM instance to VIM + Params: + 'start': (boolean) indicates if VM must start or created in pause mode. + 'image_id','flavor_id': image and flavor VIM id to use for the VM + 'net_list': list of interfaces, each one is a dictionary with: + 'name': (optional) name for the interface. + 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual + 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities + 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ... + 'mac_address': (optional) mac address to assign to this interface + 'ip_address': (optional) IP address to assign to this interface + #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided, + the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF + 'type': (mandatory) can be one of: + 'virtual', in this case always connected to a network of type 'net_type=bridge' + 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it + can created unconnected + 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity. + 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs + are allocated on the same physical NIC + 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS + 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing + or True, it must apply the default VIM behaviour + After execution the method will add the key: + 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this + interface. 
'net_list' is modified
+        'cloud_config': (optional) dictionary with:
+            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+            'users': (optional) list of users to be inserted, each item is a dict with:
+                'name': (mandatory) user name,
+                'key-pairs': (optional) list of strings with the public key to be inserted to the user
+            'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+            'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                'dest': (mandatory) string with the destination absolute path
+                'encoding': (optional, by default text). Can be one of:
+                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                'content' (mandatory): string with the content of the file
+                'permissions': (optional) string with file permissions, typically octal notation '0644'
+                'owner': (optional) file owner, string with the format 'owner:group'
+            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+            'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+            'size': (mandatory) string with the size of the disk in GB
+        availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ required
+        availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
+            availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present. A sketch of the template string this method assembles follows this description.
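To make the template handling concrete, the sketch below assembles the kind of OpenNebula template string this method builds before calling one.template.instantiate(); all names, ids and keys are invented for illustration and this is not part of the patch:

    nets = [{'name': 'mgmt-net', 'uname': 'oneadmin'}]      # as read from one.vn.info()
    image_id, disk_size = 42, '10240'
    ssh_keys = ['ssh-rsa AAAAB3Nza EXAMPLE-KEY']

    template = ''
    for net in nets:
        template += 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format(net['name'], net['uname'])
    template += 'DISK = [ IMAGE_ID = {},\n SIZE = {}]'.format(image_id, disk_size)
    context = 'CONTEXT = [\n NETWORK = "YES",\n SSH_PUBLIC_KEY = "'
    for key in ssh_keys:
        context += key + '\n'
    context += '"]'
    template += context
    print(template)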
+ """ + self.logger.debug( + "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list))) + try: + one = self._new_one_connection() + template_vim = one.template.info(int(flavor_id), True) + disk_size = str(template_vim.TEMPLATE["DISK"]["SIZE"]) + + one = self._new_one_connection() + template_updated = "" + for net in net_list: + net_in_vim = one.vn.info(int(net["net_id"])) + net["vim_id"] = str(net_in_vim.ID) + network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format( + net_in_vim.NAME, net_in_vim.UNAME) + template_updated += network + + template_updated += "DISK = [ IMAGE_ID = {},\n SIZE = {}]".format(image_id, disk_size) + + if isinstance(cloud_config, dict): + if cloud_config.get("key-pairs"): + context = 'CONTEXT = [\n NETWORK = "YES",\n SSH_PUBLIC_KEY = "' + for key in cloud_config["key-pairs"]: + context += key + '\n' + # if False: + # context += '"\n USERNAME = ' + context += '"]' + template_updated += context + + vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated) + self.logger.info( + "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id)) + return str(vm_instance_id), None + except pyone.OneNoExistsException as e: + self.logger.error("Network with id " + str(e) + " not found: " + str(e)) + raise vimconn.vimconnNotFoundException(e) + except Exception as e: + self.logger.error("Create new vm instance error: " + str(e)) + raise vimconn.vimconnException(e) + + def get_vminstance(self, vm_id): + """Returns the VM instance information from VIM""" + try: + one = self._new_one_connection() + vm = one.vm.info(int(vm_id)) + return vm + except Exception as e: + self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found") + raise vimconn.vimconnException(e) + + def delete_vminstance(self, vm_id, created_items=None): + """ + Removes a VM instance from VIM and its associated elements + :param vm_id: VIM identifier of the VM, provided by method new_vminstance + :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method + action_vminstance + :return: None or the same vm_id. Raises an exception on fail + """ + try: + one = self._new_one_connection() + one.vm.recover(int(vm_id), 3) + vm = None + while True: + if vm is not None and vm.LCM_STATE == 0: + break + else: + vm = one.vm.info(int(vm_id)) + + except pyone.OneNoExistsException as e: + self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted") + raise vimconn.vimconnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id)) + except Exception as e: + self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e)) + raise vimconn.vimconnException(e) + + def refresh_vms_status(self, vm_list): + """Get the status of the virtual machines and their interfaces/ports + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: #VIM id of this Virtual Machine + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address + # + error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. Each item a dictionary with: + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + mac_address: #Text format XX:XX:XX:XX:XX:XX + vim_net_id: #network id where this interface is connected, if provided at creation + vim_interface_id: #interface/port VIM id + ip_address: #null, or text with IPv4, IPv6 address + compute_node: #identification of compute node where PF,VF interface is allocated + pci: #PCI address of the NIC that hosts the PF,VF + vlan: #physical VLAN used for VF + """ + vm_dict = {} + try: + for vm_id in vm_list: + vm = {} + if self.get_vminstance(vm_id) is not None: + vm_element = self.get_vminstance(vm_id) + else: + self.logger.info("The vm " + str(vm_id) + " does not exist.") + vm['status'] = "DELETED" + vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.") + continue + vm["vim_info"] = None + vm_status = vm_element.LCM_STATE + if vm_status == 3: + vm['status'] = "ACTIVE" + elif vm_status == 36: + vm['status'] = "ERROR" + vm['error_msg'] = "VM failure" + else: + vm['status'] = "BUILD" + + if vm_element is not None: + interfaces = self._get_networks_vm(vm_element) + vm["interfaces"] = interfaces + vm_dict[vm_id] = vm + return vm_dict + except Exception as e: + self.logger.error(e) + for k in vm_dict: + vm_dict[k]["status"] = "VIM_ERROR" + vm_dict[k]["error_msg"] = str(e) + return vm_dict + + def _get_networks_vm(self, vm_element): + interfaces = [] + try: + if isinstance(vm_element.TEMPLATE["NIC"], list): + for net in vm_element.TEMPLATE["NIC"]: + interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), + "vim_interface_id": str(net["NETWORK_ID"])} + # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 + if u'IP' in net: + interface["ip_address"] = str(net["IP"]) + if u'IP6_GLOBAL' in net: + interface["ip_address"] = str(net["IP6_GLOBAL"]) + interfaces.append(interface) + else: + net = vm_element.TEMPLATE["NIC"] + interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), + "vim_interface_id": str(net["NETWORK_ID"])} + # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 + if u'IP' in net: + interface["ip_address"] = str(net["IP"]) + if u'IP6_GLOBAL' in net: + interface["ip_address"] = str(net["IP6_GLOBAL"]) + interfaces.append(interface) + return interfaces + except Exception as e: + self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID)) diff --git a/RO-VIM-opennebula/requirements.txt b/RO-VIM-opennebula/requirements.txt new file mode 100644 index 00000000..71b09d85 --- /dev/null +++ b/RO-VIM-opennebula/requirements.txt @@ -0,0 +1,23 @@ +## +# Copyright 2017 Telefonica Digital Spain S.L.U. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +PyYAML +requests +netaddr +untangle +pyone +git+https://github.com/python-oca/python-oca#egg=oca +git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO diff --git a/RO-VIM-opennebula/setup.py b/RO-VIM-opennebula/setup.py new file mode 100644 index 00000000..c27bca3d --- /dev/null +++ b/RO-VIM-opennebula/setup.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +## +# Copyright 2017 Telefonica Digital Spain S.L.U. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +from setuptools import setup + +_name = "osm_rovim_opennebula" + +README = """ +=========== +osm-rovim_opennebula +=========== + +osm-ro pluging for opennebula VIM +""" + +setup( + name=_name, + description='OSM ro vim plugin for opennebula', + long_description=README, + version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + # version=VERSION, + # python_requires='>3.5.0', + author='ETSI OSM', + # TODO py3 author_email='', + maintainer='OSM_TECH@LIST.ETSI.ORG', # TODO py3 + # TODO py3 maintainer_email='', + url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', + license='Apache 2.0', + + packages=[_name], + include_package_data=True, + dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + install_requires=["requests", "netaddr", "PyYAML", "osm-ro",], + setup_requires=['setuptools-version-command'], + entry_points={ + 'osm_rovim.plugins': ['rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula'], + }, +) diff --git a/RO-VIM-opennebula/stdeb.cfg b/RO-VIM-opennebula/stdeb.cfg new file mode 100644 index 00000000..00071bd6 --- /dev/null +++ b/RO-VIM-opennebula/stdeb.cfg @@ -0,0 +1,20 @@ +# +# Copyright 2017 Telefonica Digital Spain S.L.U. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip + diff --git a/RO-VIM-opennebula/tox.ini b/RO-VIM-opennebula/tox.ini new file mode 100644 index 00000000..6fb9d372 --- /dev/null +++ b/RO-VIM-opennebula/tox.ini @@ -0,0 +1,42 @@ +## +# Copyright 2017 Telefonica Digital Spain S.L.U. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +[tox] +envlist = py3 +toxworkdir={homedir}/.tox + +[testenv] +basepython = python3 +install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +# deps = -r{toxinidir}/test-requirements.txt +commands=python3 -m unittest discover -v + +[testenv:flake8] +basepython = python3 +deps = flake8 +commands = flake8 osm_rovim_opennebula --max-line-length 120 \ + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + +[testenv:unittest] +basepython = python3 +commands = python3 -m unittest osm_rovim_opennebula.tests + +[testenv:build] +basepython = python3 +deps = stdeb + setuptools-version-command +commands = python3 setup.py --command-packages=stdeb.command bdist_deb + diff --git a/RO-VIM-openstack/Makefile b/RO-VIM-openstack/Makefile new file mode 100644 index 00000000..dfafea33 --- /dev/null +++ b/RO-VIM-openstack/Makefile @@ -0,0 +1,25 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +all: clean package + +clean: + rm -rf dist deb_dist osm_rovim_openstack-*.tar.gz osm_rovim_openstack.egg-info .eggs + +package: + python3 setup.py --command-packages=stdeb.command sdist_dsc + cp debian/python3-osm-rovim-openstack.postinst deb_dist/osm-rovim-openstack*/debian/ + cd deb_dist/osm-rovim-openstack*/ && dpkg-buildpackage -rfakeroot -uc -us + diff --git a/RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst b/RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst new file mode 100755 index 00000000..055d4a5e --- /dev/null +++ b/RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst @@ -0,0 +1,23 @@ +#!/bin/bash + +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: OSM_TECH@list.etsi.org +## + +echo "POST INSTALL OSM-ROVIM-OPENSTACK" + +#Pip packages required for openstack connector +python3 -m pip install networking-l2gw diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py new file mode 100644 index 00000000..5eb23f07 --- /dev/null +++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py @@ -0,0 +1,854 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +""" +This module contains unit tests for the OpenStack VIM connector +Run this directly with python2 or python3. +""" + +import copy +import unittest + +import mock +from neutronclient.v2_0.client import Client + +from osm_ro import vimconn +from osm_ro.vimconn_openstack import vimconnector + + +__author__ = "Igor D.C." +__date__ = "$23-aug-2017 23:59:59$" + + +class TestSfcOperations(unittest.TestCase): + def setUp(self): + # instantiate dummy VIM connector so we can test it + self.vimconn = vimconnector( + '123', 'openstackvim', '456', '789', 'http://dummy.url', None, + 'user', 'pass') + + def _test_new_sfi(self, create_sfc_port_pair, sfc_encap, + ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'], + egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']): + # input to VIM connector + name = 'osm_sfi' + # + ingress_ports + # + egress_ports + # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround) + correlation = 'nsh' + if sfc_encap is not None: + if not sfc_encap: + correlation = None + + # what OpenStack is assumed to respond (patch OpenStack's return value) + dict_from_neutron = {'port_pair': { + 'id': '3d7ddc13-923c-4332-971e-708ed82902ce', + 'name': name, + 'description': '', + 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'ingress': ingress_ports[0] if len(ingress_ports) else None, + 'egress': egress_ports[0] if len(egress_ports) else None, + 'service_function_parameters': {'correlation': correlation} + }} + create_sfc_port_pair.return_value = dict_from_neutron + + # what the VIM connector is expected to + # send to OpenStack based on the input + dict_to_neutron = {'port_pair': { + 'name': name, + 'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'egress': '230cdf1b-de37-4891-bc07-f9010cf1f967', + 'service_function_parameters': {'correlation': correlation} + }} + + # call the VIM connector + if sfc_encap is None: + result = self.vimconn.new_sfi(name, ingress_ports, egress_ports) + else: + result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, + sfc_encap) + + # assert that the VIM connector made the expected call to OpenStack + create_sfc_port_pair.assert_called_with(dict_to_neutron) + # assert that the VIM 
connector had the expected result / return value + self.assertEqual(result, dict_from_neutron['port_pair']['id']) + + def _test_new_sf(self, create_sfc_port_pair_group): + # input to VIM connector + name = 'osm_sf' + instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd', + '12ba215e-3987-4892-bd3a-d0fd91eecf98', + 'e25a7c79-14c8-469a-9ae1-f601c9371ffd'] + + # what OpenStack is assumed to respond (patch OpenStack's return value) + dict_from_neutron = {'port_pair_group': { + 'id': '3d7ddc13-923c-4332-971e-708ed82902ce', + 'name': name, + 'description': '', + 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'port_pairs': instances, + 'group_id': 1, + 'port_pair_group_parameters': { + "lb_fields": [], + "ppg_n_tuple_mapping": { + "ingress_n_tuple": {}, + "egress_n_tuple": {} + }} + }} + create_sfc_port_pair_group.return_value = dict_from_neutron + + # what the VIM connector is expected to + # send to OpenStack based on the input + dict_to_neutron = {'port_pair_group': { + 'name': name, + 'port_pairs': ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd', + '12ba215e-3987-4892-bd3a-d0fd91eecf98', + 'e25a7c79-14c8-469a-9ae1-f601c9371ffd'] + }} + + # call the VIM connector + result = self.vimconn.new_sf(name, instances) + + # assert that the VIM connector made the expected call to OpenStack + create_sfc_port_pair_group.assert_called_with(dict_to_neutron) + # assert that the VIM connector had the expected result / return value + self.assertEqual(result, dict_from_neutron['port_pair_group']['id']) + + def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi): + # input to VIM connector + name = 'osm_sfp' + classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19', + '00f23389-bdfa-43c2-8b16-5815f2582fa8'] + sfs = ['2314daec-c262-414a-86e3-69bb6fa5bc16', + 'd8bfdb5d-195e-4f34-81aa-6135705317df'] + + # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround) + correlation = 'nsh' + chain_id = 33 + if spi: + chain_id = spi + + # what OpenStack is assumed to respond (patch OpenStack's return value) + dict_from_neutron = {'port_chain': { + 'id': '5bc05721-079b-4b6e-a235-47cac331cbb6', + 'name': name, + 'description': '', + 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', + 'chain_id': chain_id, + 'flow_classifiers': classifications, + 'port_pair_groups': sfs, + 'chain_parameters': {'correlation': correlation} + }} + create_sfc_port_chain.return_value = dict_from_neutron + + # what the VIM connector is expected to + # send to OpenStack based on the input + dict_to_neutron = {'port_chain': { + 'name': name, + 'flow_classifiers': ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19', + '00f23389-bdfa-43c2-8b16-5815f2582fa8'], + 'port_pair_groups': ['2314daec-c262-414a-86e3-69bb6fa5bc16', + 'd8bfdb5d-195e-4f34-81aa-6135705317df'], + 'chain_parameters': {'correlation': correlation} + }} + if spi: + dict_to_neutron['port_chain']['chain_id'] = spi + + # call the VIM connector + if sfc_encap is None: + if spi is None: + result = self.vimconn.new_sfp(name, classifications, sfs) + else: + result = self.vimconn.new_sfp(name, classifications, sfs, + spi=spi) + else: + if spi is None: + result = self.vimconn.new_sfp(name, classifications, sfs, + sfc_encap) + else: + result = self.vimconn.new_sfp(name, classifications, sfs, + sfc_encap, spi) + + # assert that the VIM connector made the expected call to OpenStack + create_sfc_port_chain.assert_called_with(dict_to_neutron) + # assert that the VIM connector had 
the expected result / return value + self.assertEqual(result, dict_from_neutron['port_chain']['id']) + + def _test_new_classification(self, create_sfc_flow_classifier, ctype): + # input to VIM connector + name = 'osm_classification' + definition = {'ethertype': 'IPv4', + 'logical_source_port': + 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', + 'protocol': 'tcp', + 'source_ip_prefix': '192.168.2.0/24', + 'source_port_range_max': 99, + 'source_port_range_min': 50} + + # what OpenStack is assumed to respond (patch OpenStack's return value) + dict_from_neutron = {'flow_classifier': copy.copy(definition)} + dict_from_neutron['flow_classifier'][ + 'id'] = '7735ec2c-fddf-4130-9712-32ed2ab6a372' + dict_from_neutron['flow_classifier']['name'] = name + dict_from_neutron['flow_classifier']['description'] = '' + dict_from_neutron['flow_classifier'][ + 'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c' + dict_from_neutron['flow_classifier'][ + 'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c' + create_sfc_flow_classifier.return_value = dict_from_neutron + + # what the VIM connector is expected to + # send to OpenStack based on the input + dict_to_neutron = {'flow_classifier': copy.copy(definition)} + dict_to_neutron['flow_classifier']['name'] = 'osm_classification' + + # call the VIM connector + result = self.vimconn.new_classification(name, ctype, definition) + + # assert that the VIM connector made the expected call to OpenStack + create_sfc_flow_classifier.assert_called_with(dict_to_neutron) + # assert that the VIM connector had the expected result / return value + self.assertEqual(result, dict_from_neutron['flow_classifier']['id']) + + @mock.patch.object(Client, 'create_sfc_flow_classifier') + def test_new_classification(self, create_sfc_flow_classifier): + self._test_new_classification(create_sfc_flow_classifier, + 'legacy_flow_classifier') + + @mock.patch.object(Client, 'create_sfc_flow_classifier') + def test_new_classification_unsupported_type(self, create_sfc_flow_classifier): + self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_classification, + create_sfc_flow_classifier, 'h265') + + @mock.patch.object(Client, 'create_sfc_port_pair') + def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair): + self._test_new_sfi(create_sfc_port_pair, True) + + @mock.patch.object(Client, 'create_sfc_port_pair') + def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair): + self._test_new_sfi(create_sfc_port_pair, False) + + @mock.patch.object(Client, 'create_sfc_port_pair') + def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair): + self._test_new_sfi(create_sfc_port_pair, None) + + @mock.patch.object(Client, 'create_sfc_port_pair') + def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair): + ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'a0273f64-82c9-11e7-b08f-6328e53f0fa7'] + self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, True, ingress_ports=ingress_ports) + ingress_ports = [] + self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, True, ingress_ports=ingress_ports) + + @mock.patch.object(Client, 'create_sfc_port_pair') + def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair): + egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967', + 'b41228fe-82c9-11e7-9b44-17504174320b'] + self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, True, egress_ports=egress_ports) + egress_ports = [] + 
self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, True, egress_ports=egress_ports) + + @mock.patch.object(vimconnector, 'get_sfi') + @mock.patch.object(Client, 'create_sfc_port_pair_group') + def test_new_sf(self, create_sfc_port_pair_group, get_sfi): + get_sfi.return_value = {'sfc_encap': True} + self._test_new_sf(create_sfc_port_pair_group) + + @mock.patch.object(vimconnector, 'get_sfi') + @mock.patch.object(Client, 'create_sfc_port_pair_group') + def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, + get_sfi): + get_sfi.return_value = {'sfc_encap': 'nsh'} + self.assertRaises(vimconn.vimconnNotSupportedException, + self._test_new_sf, create_sfc_port_pair_group) + + @mock.patch.object(Client, 'create_sfc_port_chain') + def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain): + self._test_new_sfp(create_sfc_port_chain, True, None) + + @mock.patch.object(Client, 'create_sfc_port_chain') + def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain): + self._test_new_sfp(create_sfc_port_chain, False, None) + self._test_new_sfp(create_sfc_port_chain, False, 25) + + @mock.patch.object(Client, 'create_sfc_port_chain') + def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain): + self._test_new_sfp(create_sfc_port_chain, None, None) + + @mock.patch.object(Client, 'create_sfc_port_chain') + def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain): + self._test_new_sfp(create_sfc_port_chain, True, 25) + + @mock.patch.object(Client, 'create_sfc_port_chain') + def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain): + self._test_new_sfp(create_sfc_port_chain, None, 25) + + @mock.patch.object(Client, 'list_sfc_flow_classifiers') + def test_get_classification_list(self, list_sfc_flow_classifiers): + # what OpenStack is assumed to return to the VIM connector + list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ + {'source_port_range_min': 2000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 'udp', + 'description': '', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 2000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'destination_port_range_max': None, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', + 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', + 'name': 'fc1'}]} + + # call the VIM connector + filter_dict = {'protocol': 'tcp', 'ethertype': 'IPv4'} + result = self.vimconn.get_classification_list(filter_dict.copy()) + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_flow_classifiers.assert_called_with(**filter_dict) + # assert that the VIM connector successfully + # translated and returned the OpenStack result + self.assertEqual(result, [ + {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', + 'name': 'fc1', + 'description': '', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'ctype': 'legacy_flow_classifier', + 'definition': { + 'source_port_range_min': 2000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 'udp', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 2000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'destination_port_range_max': None, + 'logical_source_port': 
'aaab0ab0-1452-4636-bb3b-11dca833fa2b'} + }]) + + def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap): + # what OpenStack is assumed to return to the VIM connector + list_port_pair.return_value = {'port_pairs': [ + {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'service_function_parameters': {'correlation': correlation}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', + 'name': 'osm_sfi'}]} + + # call the VIM connector + filter_dict = {'name': 'osm_sfi', 'description': ''} + result = self.vimconn.get_sfi_list(filter_dict.copy()) + + # assert that VIM connector called OpenStack with the expected filter + list_port_pair.assert_called_with(**filter_dict) + # assert that the VIM connector successfully + # translated and returned the OpenStack result + self.assertEqual(result, [ + {'ingress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'egress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'], + 'sfc_encap': sfc_encap, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', + 'name': 'osm_sfi'}]) + + @mock.patch.object(Client, 'list_sfc_port_pairs') + def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs): + self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True) + + @mock.patch.object(Client, 'list_sfc_port_pairs') + def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs): + self._test_get_sfi_list(list_sfc_port_pairs, None, False) + + @mock.patch.object(Client, 'list_sfc_port_pair_groups') + def test_get_sf_list(self, list_sfc_port_pair_groups): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ + {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2', + '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'port_pair_group_parameters': {}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f', + 'name': 'osm_sf'}]} + + # call the VIM connector + filter_dict = {'name': 'osm_sf', 'description': ''} + result = self.vimconn.get_sf_list(filter_dict.copy()) + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pair_groups.assert_called_with(**filter_dict) + # assert that the VIM connector successfully + # translated and returned the OpenStack result + self.assertEqual(result, [ + {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2', + '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f', + 'name': 'osm_sf'}]) + + def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_chains.return_value = {'port_chains': [ + {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25', + '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'], + 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e', + '1387ab44-82d7-11e7-9bb0-476337183905'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'chain_parameters': {'correlation': correlation}, + 'chain_id': 40, + 'project_id': 
'8f3019ef06374fa880a0144ad4bc1d7b', + 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', + 'name': 'osm_sfp'}]} + + # call the VIM connector + filter_dict = {'name': 'osm_sfp', 'description': ''} + result = self.vimconn.get_sfp_list(filter_dict.copy()) + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_chains.assert_called_with(**filter_dict) + # assert that the VIM connector successfully + # translated and returned the OpenStack result + self.assertEqual(result, [ + {'service_functions': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25', + '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'], + 'classifications': ['1333c2f4-82d7-11e7-a5df-9327f33d104e', + '1387ab44-82d7-11e7-9bb0-476337183905'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'sfc_encap': sfc_encap, + 'spi': 40, + 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', + 'name': 'osm_sfp'}]) + + @mock.patch.object(Client, 'list_sfc_port_chains') + def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains): + self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True) + + @mock.patch.object(Client, 'list_sfc_port_chains') + def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains): + self._test_get_sfp_list(list_sfc_port_chains, None, False) + + @mock.patch.object(Client, 'list_sfc_flow_classifiers') + def test_get_classification(self, list_sfc_flow_classifiers): + # what OpenStack is assumed to return to the VIM connector + list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ + {'source_port_range_min': 2000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 'udp', + 'description': '', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 2000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'destination_port_range_max': None, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', + 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', + 'name': 'fc1'} + ]} + + # call the VIM connector + result = self.vimconn.get_classification( + '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_flow_classifiers.assert_called_with( + id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d') + # assert that VIM connector successfully returned the OpenStack result + self.assertEqual(result, + {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', + 'name': 'fc1', + 'description': '', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'ctype': 'legacy_flow_classifier', + 'definition': { + 'source_port_range_min': 2000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 'udp', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 2000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'destination_port_range_max': None, + 'logical_source_port': + 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'} + }) + + @mock.patch.object(Client, 'list_sfc_flow_classifiers') + def test_get_classification_many_results(self, list_sfc_flow_classifiers): + # what OpenStack is assumed to return to the VIM connector + list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ + {'source_port_range_min': 2000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 
'udp', + 'description': '', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 2000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'destination_port_range_max': None, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', + 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', + 'name': 'fc1'}, + {'source_port_range_min': 1000, + 'destination_ip_prefix': '192.168.3.0/24', + 'protocol': 'udp', + 'description': '', + 'ethertype': 'IPv4', + 'l7_parameters': {}, + 'source_port_range_max': 1000, + 'destination_port_range_min': 3000, + 'source_ip_prefix': '192.168.2.0/24', + 'logical_destination_port': None, + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'destination_port_range_max': None, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', + 'id': '3196bafc-82dd-11e7-a205-9bf6c14b0721', + 'name': 'fc2'} + ]} + + # call the VIM connector + self.assertRaises(vimconn.vimconnConflictException, + self.vimconn.get_classification, + '3196bafc-82dd-11e7-a205-9bf6c14b0721') + + # assert the VIM connector called OpenStack with the expected filter + list_sfc_flow_classifiers.assert_called_with( + id='3196bafc-82dd-11e7-a205-9bf6c14b0721') + + @mock.patch.object(Client, 'list_sfc_flow_classifiers') + def test_get_classification_no_results(self, list_sfc_flow_classifiers): + # what OpenStack is assumed to return to the VIM connector + list_sfc_flow_classifiers.return_value = {'flow_classifiers': []} + + # call the VIM connector + self.assertRaises(vimconn.vimconnNotFoundException, + self.vimconn.get_classification, + '3196bafc-82dd-11e7-a205-9bf6c14b0721') + + # assert the VIM connector called OpenStack with the expected filter + list_sfc_flow_classifiers.assert_called_with( + id='3196bafc-82dd-11e7-a205-9bf6c14b0721') + + @mock.patch.object(Client, 'list_sfc_port_pairs') + def test_get_sfi(self, list_sfc_port_pairs): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pairs.return_value = {'port_pairs': [ + {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'service_function_parameters': {'correlation': 'nsh'}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', + 'name': 'osm_sfi1'}, + ]} + + # call the VIM connector + result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966') + + # assert the VIM connector called OpenStack with the expected filter + list_sfc_port_pairs.assert_called_with( + id='c121ebdd-7f2d-4213-b933-3325298a6966') + # assert the VIM connector successfully returned the OpenStack result + self.assertEqual(result, + {'ingress_ports': [ + '5311c75d-d718-4369-bbda-cdcc6da60fcc'], + 'egress_ports': [ + '5311c75d-d718-4369-bbda-cdcc6da60fcc'], + 'sfc_encap': True, + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', + 'name': 'osm_sfi1'}) + + @mock.patch.object(Client, 'list_sfc_port_pairs') + def test_get_sfi_many_results(self, list_sfc_port_pairs): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pairs.return_value = {'port_pairs': [ + {'ingress': 
'5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'service_function_parameters': {'correlation': 'nsh'}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', + 'name': 'osm_sfi1'}, + {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', + 'service_function_parameters': {'correlation': 'nsh'}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'c0436d92-82db-11e7-8f9c-5fa535f1261f', + 'name': 'osm_sfi2'} + ]} + + # call the VIM connector + self.assertRaises(vimconn.vimconnConflictException, + self.vimconn.get_sfi, + 'c0436d92-82db-11e7-8f9c-5fa535f1261f') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pairs.assert_called_with( + id='c0436d92-82db-11e7-8f9c-5fa535f1261f') + + @mock.patch.object(Client, 'list_sfc_port_pairs') + def test_get_sfi_no_results(self, list_sfc_port_pairs): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pairs.return_value = {'port_pairs': []} + + # call the VIM connector + self.assertRaises(vimconn.vimconnNotFoundException, + self.vimconn.get_sfi, + 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pairs.assert_called_with( + id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + @mock.patch.object(Client, 'list_sfc_port_pair_groups') + def test_get_sf(self, list_sfc_port_pair_groups): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ + {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'port_pair_group_parameters': {}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', + 'name': 'osm_sf1'} + ]} + + # call the VIM connector + result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pair_groups.assert_called_with( + id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + # assert that VIM connector successfully returned the OpenStack result + self.assertEqual(result, + {'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], + 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', + 'name': 'osm_sf1'}) + + @mock.patch.object(Client, 'list_sfc_port_pair_groups') + def test_get_sf_many_results(self, list_sfc_port_pair_groups): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ + {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'port_pair_group_parameters': {}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', + 'name': 'osm_sf1'}, + {'port_pairs': ['0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'port_pair_group_parameters': {}, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': 'b22892fc-82d9-11e7-ae85-0fea6a3b3757', + 'name': 'osm_sf2'} + ]} + + # call the VIM 
connector + self.assertRaises(vimconn.vimconnConflictException, + self.vimconn.get_sf, + 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pair_groups.assert_called_with( + id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + @mock.patch.object(Client, 'list_sfc_port_pair_groups') + def test_get_sf_no_results(self, list_sfc_port_pair_groups): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_pair_groups.return_value = {'port_pair_groups': []} + + # call the VIM connector + self.assertRaises(vimconn.vimconnNotFoundException, + self.vimconn.get_sf, + 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_pair_groups.assert_called_with( + id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + + @mock.patch.object(Client, 'list_sfc_port_chains') + def test_get_sfp(self, list_sfc_port_chains): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_chains.return_value = {'port_chains': [ + {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], + 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'chain_parameters': {'correlation': 'nsh'}, + 'chain_id': 40, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', + 'name': 'osm_sfp1'}]} + + # call the VIM connector + result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_chains.assert_called_with( + id='821bc9be-82d7-11e7-8ce3-23a08a27ab47') + # assert that VIM connector successfully returned the OpenStack result + self.assertEqual(result, + {'service_functions': [ + '7d8e3bf8-82d6-11e7-a032-8ff028839d25'], + 'classifications': [ + '1333c2f4-82d7-11e7-a5df-9327f33d104e'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'sfc_encap': True, + 'spi': 40, + 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', + 'name': 'osm_sfp1'}) + + @mock.patch.object(Client, 'list_sfc_port_chains') + def test_get_sfp_many_results(self, list_sfc_port_chains): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_chains.return_value = {'port_chains': [ + {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], + 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'chain_parameters': {'correlation': 'nsh'}, + 'chain_id': 40, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', + 'name': 'osm_sfp1'}, + {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], + 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], + 'description': '', + 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'chain_parameters': {'correlation': 'nsh'}, + 'chain_id': 50, + 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', + 'id': '5d002f38-82de-11e7-a770-f303f11ce66a', + 'name': 'osm_sfp2'} + ]} + + # call the VIM connector + self.assertRaises(vimconn.vimconnConflictException, + self.vimconn.get_sfp, + '5d002f38-82de-11e7-a770-f303f11ce66a') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_chains.assert_called_with( + id='5d002f38-82de-11e7-a770-f303f11ce66a') + + @mock.patch.object(Client, 
'list_sfc_port_chains') + def test_get_sfp_no_results(self, list_sfc_port_chains): + # what OpenStack is assumed to return to the VIM connector + list_sfc_port_chains.return_value = {'port_chains': []} + + # call the VIM connector + self.assertRaises(vimconn.vimconnNotFoundException, + self.vimconn.get_sfp, + '5d002f38-82de-11e7-a770-f303f11ce66a') + + # assert that VIM connector called OpenStack with the expected filter + list_sfc_port_chains.assert_called_with( + id='5d002f38-82de-11e7-a770-f303f11ce66a') + + @mock.patch.object(Client, 'delete_sfc_flow_classifier') + def test_delete_classification(self, delete_sfc_flow_classifier): + result = self.vimconn.delete_classification( + '638f957c-82df-11e7-b7c8-132706021464') + delete_sfc_flow_classifier.assert_called_with( + '638f957c-82df-11e7-b7c8-132706021464') + self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + + @mock.patch.object(Client, 'delete_sfc_port_pair') + def test_delete_sfi(self, delete_sfc_port_pair): + result = self.vimconn.delete_sfi( + '638f957c-82df-11e7-b7c8-132706021464') + delete_sfc_port_pair.assert_called_with( + '638f957c-82df-11e7-b7c8-132706021464') + self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + + @mock.patch.object(Client, 'delete_sfc_port_pair_group') + def test_delete_sf(self, delete_sfc_port_pair_group): + result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464') + delete_sfc_port_pair_group.assert_called_with( + '638f957c-82df-11e7-b7c8-132706021464') + self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + + @mock.patch.object(Client, 'delete_sfc_port_chain') + def test_delete_sfp(self, delete_sfc_port_chain): + result = self.vimconn.delete_sfp( + '638f957c-82df-11e7-b7c8-132706021464') + delete_sfc_port_chain.assert_called_with( + '638f957c-82df-11e7-b7c8-132706021464') + self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + + +if __name__ == '__main__': + unittest.main() diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py new file mode 100644 index 00000000..15ef7133 --- /dev/null +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -0,0 +1,2227 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +''' +osconnector implements all the methods to interact with openstack using the python-neutronclient. 
+ +For the VNF forwarding graph, The OpenStack VIM connector calls the +networking-sfc Neutron extension methods, whose resources are mapped +to the VIM connector's SFC resources as follows: +- Classification (OSM) -> Flow Classifier (Neutron) +- Service Function Instance (OSM) -> Port Pair (Neutron) +- Service Function (OSM) -> Port Pair Group (Neutron) +- Service Function Path (OSM) -> Port Chain (Neutron) +''' +__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa" +__date__ = "$22-sep-2017 23:59:59$" + +from osm_ro import vimconn +# import json +import logging +import netaddr +import time +import yaml +import random +import re +import copy +from pprint import pformat + +from novaclient import client as nClient, exceptions as nvExceptions +from keystoneauth1.identity import v2, v3 +from keystoneauth1 import session +import keystoneclient.exceptions as ksExceptions +import keystoneclient.v3.client as ksClient_v3 +import keystoneclient.v2_0.client as ksClient_v2 +from glanceclient import client as glClient +import glanceclient.exc as gl1Exceptions +from cinderclient import client as cClient +from http.client import HTTPException # TODO py3 check that this base exception matches python2 httplib.HTTPException +from neutronclient.neutron import client as neClient +from neutronclient.common import exceptions as neExceptions +from requests.exceptions import ConnectionError + + +"""contain the openstack virtual machine status to openmano status""" +vmStatus2manoFormat={'ACTIVE':'ACTIVE', + 'PAUSED':'PAUSED', + 'SUSPENDED': 'SUSPENDED', + 'SHUTOFF':'INACTIVE', + 'BUILD':'BUILD', + 'ERROR':'ERROR','DELETED':'DELETED' + } +netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED' + } + +supportedClassificationTypes = ['legacy_flow_classifier'] + +#global var to have a timeout creating and deleting volumes +volume_timeout = 600 +server_timeout = 600 + + +class SafeDumper(yaml.SafeDumper): + def represent_data(self, data): + # Openstack APIs use custom subclasses of dict and YAML safe dumper + # is designed to not handle that (reference issue 142 of pyyaml) + if isinstance(data, dict) and data.__class__ != dict: + # A simple solution is to convert those items back to dicts + data = dict(data.items()) + + return super(SafeDumper, self).represent_data(data) + + +class vimconnector(vimconn.vimconnector): + def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, + log_level=None, config={}, persistent_info={}): + '''using common constructor parameters. In this case + 'url' is the keystone authorization url, + 'url_admin' is not use + ''' + api_version = config.get('APIversion') + if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'): + raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. " + "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)) + vim_type = config.get('vim_type') + if vim_type and vim_type not in ('vio', 'VIO'): + raise vimconn.vimconnException("Invalid value '{}' for config:vim_type." 
+ "Allowed values are 'vio' or 'VIO'".format(vim_type)) + + if config.get('dataplane_net_vlan_range') is not None: + #validate vlan ranges provided by user + self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range') + + if config.get('multisegment_vlan_range') is not None: + #validate vlan ranges provided by user + self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range') + + vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, + config) + + if self.config.get("insecure") and self.config.get("ca_cert"): + raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive") + self.verify = True + if self.config.get("insecure"): + self.verify = False + if self.config.get("ca_cert"): + self.verify = self.config.get("ca_cert") + + if not url: + raise TypeError('url param can not be NoneType') + self.persistent_info = persistent_info + self.availability_zone = persistent_info.get('availability_zone', None) + self.session = persistent_info.get('session', {'reload_client': True}) + self.my_tenant_id = self.session.get('my_tenant_id') + self.nova = self.session.get('nova') + self.neutron = self.session.get('neutron') + self.cinder = self.session.get('cinder') + self.glance = self.session.get('glance') + # self.glancev1 = self.session.get('glancev1') + self.keystone = self.session.get('keystone') + self.api_version3 = self.session.get('api_version3') + self.vim_type = self.config.get("vim_type") + if self.vim_type: + self.vim_type = self.vim_type.upper() + if self.config.get("use_internal_endpoint"): + self.endpoint_type = "internalURL" + else: + self.endpoint_type = None + + self.logger = logging.getLogger('openmano.vim.openstack') + + # allow security_groups to be a list or a single string + if isinstance(self.config.get('security_groups'), str): + self.config['security_groups'] = [self.config['security_groups']] + self.security_groups_id = None + + ####### VIO Specific Changes ######### + if self.vim_type == "VIO": + self.logger = logging.getLogger('openmano.vim.vio') + + if log_level: + self.logger.setLevel( getattr(logging, log_level)) + + def __getitem__(self, index): + """Get individuals parameters. + Throw KeyError""" + if index == 'project_domain_id': + return self.config.get("project_domain_id") + elif index == 'user_domain_id': + return self.config.get("user_domain_id") + else: + return vimconn.vimconnector.__getitem__(self, index) + + def __setitem__(self, index, value): + """Set individuals parameters and it is marked as dirty so to force connection reload. + Throw KeyError""" + if index == 'project_domain_id': + self.config["project_domain_id"] = value + elif index == 'user_domain_id': + self.config["user_domain_id"] = value + else: + vimconn.vimconnector.__setitem__(self, index, value) + self.session['reload_client'] = True + + def serialize(self, value): + """Serialization of python basic types. + + In the case value is not serializable a message will be logged and a + simple representation of the data that cannot be converted back to + python is returned. 
+ """ + if isinstance(value, str): + return value + + try: + return yaml.dump(value, Dumper=SafeDumper, + default_flow_style=True, width=256) + except yaml.representer.RepresenterError: + self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value), + exc_info=True) + return str(value) + + def _reload_connection(self): + '''Called before any operation, it check if credentials has changed + Throw keystoneclient.apiclient.exceptions.AuthorizationFailure + ''' + #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) + if self.session['reload_client']: + if self.config.get('APIversion'): + self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3' + else: # get from ending auth_url that end with v3 or with v2.0 + self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/") + self.session['api_version3'] = self.api_version3 + if self.api_version3: + if self.config.get('project_domain_id') or self.config.get('project_domain_name'): + project_domain_id_default = None + else: + project_domain_id_default = 'default' + if self.config.get('user_domain_id') or self.config.get('user_domain_name'): + user_domain_id_default = None + else: + user_domain_id_default = 'default' + auth = v3.Password(auth_url=self.url, + username=self.user, + password=self.passwd, + project_name=self.tenant_name, + project_id=self.tenant_id, + project_domain_id=self.config.get('project_domain_id', project_domain_id_default), + user_domain_id=self.config.get('user_domain_id', user_domain_id_default), + project_domain_name=self.config.get('project_domain_name'), + user_domain_name=self.config.get('user_domain_name')) + else: + auth = v2.Password(auth_url=self.url, + username=self.user, + password=self.passwd, + tenant_name=self.tenant_name, + tenant_id=self.tenant_id) + sess = session.Session(auth=auth, verify=self.verify) + # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX + region_name = self.config.get('region_name') + if self.api_version3: + self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + else: + self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type) + self.session['keystone'] = self.keystone + # In order to enable microversion functionality an explicit microversion must be specified in 'config'. + # This implementation approach is due to the warning message in + # https://developer.openstack.org/api-guide/compute/microversions.html + # where it is stated that microversion backwards compatibility is not guaranteed and clients should + # always require an specific microversion. 
+ # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config + version = self.config.get("microversion") + if not version: + version = "2.1" + # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX + self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name) + try: + self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id() + except Exception as e: + self.logger.error("Cannot get project_id from session", exc_info=True) + if self.endpoint_type == "internalURL": + glance_service_id = self.keystone.services.list(name="glance")[0].id + glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url + else: + glance_endpoint = None + self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint) + # using version 1 of glance client in new_image() + # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess, + # endpoint=glance_endpoint) + self.session['reload_client'] = False + self.persistent_info['session'] = self.session + # add availablity zone info inside self.persistent_info + self._set_availablity_zones() + self.persistent_info['availability_zone'] = self.availability_zone + self.security_groups_id = None # force to get again security_groups_ids next time they are needed + + def __net_os2mano(self, net_list_dict): + '''Transform the net openstack format to mano format + net_list_dict can be a list of dict or a single dict''' + if type(net_list_dict) is dict: + net_list_=(net_list_dict,) + elif type(net_list_dict) is list: + net_list_=net_list_dict + else: + raise TypeError("param net_list_dict must be a list or a dictionary") + for net in net_list_: + if net.get('provider:network_type') == "vlan": + net['type']='data' + else: + net['type']='bridge' + + def __classification_os2mano(self, class_list_dict): + """Transform the openstack format (Flow Classifier) to mano format + (Classification) class_list_dict can be a list of dict or a single dict + """ + if isinstance(class_list_dict, dict): + class_list_ = [class_list_dict] + elif isinstance(class_list_dict, list): + class_list_ = class_list_dict + else: + raise TypeError( + "param class_list_dict must be a list or a dictionary") + for classification in class_list_: + id = classification.pop('id') + name = classification.pop('name') + description = classification.pop('description') + project_id = classification.pop('project_id') + tenant_id = classification.pop('tenant_id') + original_classification = copy.deepcopy(classification) + classification.clear() + classification['ctype'] = 'legacy_flow_classifier' + classification['definition'] = original_classification + classification['id'] = id + classification['name'] = name + classification['description'] = description + classification['project_id'] = project_id + classification['tenant_id'] = tenant_id + + def __sfi_os2mano(self, sfi_list_dict): + """Transform the openstack format (Port Pair) to mano format (SFI) + sfi_list_dict can be a list of dict or a single dict + """ + if isinstance(sfi_list_dict, dict): + sfi_list_ = 
[sfi_list_dict] + elif isinstance(sfi_list_dict, list): + sfi_list_ = sfi_list_dict + else: + raise TypeError( + "param sfi_list_dict must be a list or a dictionary") + for sfi in sfi_list_: + sfi['ingress_ports'] = [] + sfi['egress_ports'] = [] + if sfi.get('ingress'): + sfi['ingress_ports'].append(sfi['ingress']) + if sfi.get('egress'): + sfi['egress_ports'].append(sfi['egress']) + del sfi['ingress'] + del sfi['egress'] + params = sfi.get('service_function_parameters') + sfc_encap = False + if params: + correlation = params.get('correlation') + if correlation: + sfc_encap = True + sfi['sfc_encap'] = sfc_encap + del sfi['service_function_parameters'] + + def __sf_os2mano(self, sf_list_dict): + """Transform the openstack format (Port Pair Group) to mano format (SF) + sf_list_dict can be a list of dict or a single dict + """ + if isinstance(sf_list_dict, dict): + sf_list_ = [sf_list_dict] + elif isinstance(sf_list_dict, list): + sf_list_ = sf_list_dict + else: + raise TypeError( + "param sf_list_dict must be a list or a dictionary") + for sf in sf_list_: + del sf['port_pair_group_parameters'] + sf['sfis'] = sf['port_pairs'] + del sf['port_pairs'] + + def __sfp_os2mano(self, sfp_list_dict): + """Transform the openstack format (Port Chain) to mano format (SFP) + sfp_list_dict can be a list of dict or a single dict + """ + if isinstance(sfp_list_dict, dict): + sfp_list_ = [sfp_list_dict] + elif isinstance(sfp_list_dict, list): + sfp_list_ = sfp_list_dict + else: + raise TypeError( + "param sfp_list_dict must be a list or a dictionary") + for sfp in sfp_list_: + params = sfp.pop('chain_parameters') + sfc_encap = False + if params: + correlation = params.get('correlation') + if correlation: + sfc_encap = True + sfp['sfc_encap'] = sfc_encap + sfp['spi'] = sfp.pop('chain_id') + sfp['classifications'] = sfp.pop('flow_classifiers') + sfp['service_functions'] = sfp.pop('port_pair_groups') + + # placeholder for now; read TODO note below + def _validate_classification(self, type, definition): + # only legacy_flow_classifier Type is supported at this point + return True + # TODO(igordcard): this method should be an abstract method of an + # abstract Classification class to be implemented by the specific + # Types. Also, abstract vimconnector should call the validation + # method before the implemented VIM connectors are called. 
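# --- Editor's illustrative sketch (not part of this changeset) -----------------
# Standalone rendition of the "OpenStack -> MANO" in-place mapping that the
# __sfi_os2mano() helper above performs on a networking-sfc Port Pair: the
# ingress/egress ports become lists, and the 'correlation' service function
# parameter is folded into a boolean 'sfc_encap' flag. The sample dict below is
# invented for illustration only.
def sfi_os2mano(sfi: dict) -> dict:
    sfi['ingress_ports'] = [sfi['ingress']] if sfi.get('ingress') else []
    sfi['egress_ports'] = [sfi['egress']] if sfi.get('egress') else []
    sfi.pop('ingress', None)
    sfi.pop('egress', None)
    params = sfi.pop('service_function_parameters', None) or {}
    sfi['sfc_encap'] = bool(params.get('correlation'))
    return sfi

sample_port_pair = {                      # invented example data
    'id': 'pp-1', 'name': 'firewall-pp',
    'ingress': 'port-aaa', 'egress': 'port-bbb',
    'service_function_parameters': {'correlation': 'nsh'},
}
print(sfi_os2mano(sample_port_pair))
# {'id': 'pp-1', 'name': 'firewall-pp', 'ingress_ports': ['port-aaa'],
#  'egress_ports': ['port-bbb'], 'sfc_encap': True}
# -------------------------------------------------------------------------------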
+
+ def _format_exception(self, exception):
+ '''Transform a keystone, nova, neutron exception into a vimconn exception'''
+
+ message_error = str(exception)
+
+ if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
+ gl1Exceptions.HTTPNotFound)):
+ raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
+ elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+ ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+ elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+ raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
+ elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
+ neExceptions.NeutronException)):
+ raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
+ elif isinstance(exception, nvExceptions.Conflict):
+ raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
+ elif isinstance(exception, vimconn.vimconnException):
+ raise exception
+ else: # ()
+ self.logger.error("General Exception " + message_error, exc_info=True)
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+
+ def _get_ids_from_name(self):
+ """
+ Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
+ :return: None
+ """
+ # get tenant_id if only tenant_name is supplied
+ self._reload_connection()
+ if not self.my_tenant_id:
+ raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+ format(self.tenant_name, self.tenant_id))
+ if self.config.get('security_groups') and not self.security_groups_id:
+ # convert from name to id
+ neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+
+ self.security_groups_id = []
+ for sg in self.config.get('security_groups'):
+ for neutron_sg in neutron_sg_list:
+ if sg in (neutron_sg["id"], neutron_sg["name"]):
+ self.security_groups_id.append(neutron_sg["id"])
+ break
+ else:
+ self.security_groups_id = None
+ raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
+
+ def check_vim_connectivity(self):
+ # just get network list to check connectivity and credentials
+ self.get_network_list(filter_dict={})
+
+ def get_tenant_list(self, filter_dict={}):
+ '''Obtain tenants of VIM
+ filter_dict can contain the following keys:
+ name: filter by tenant name
+ id: filter by tenant uuid/id
+
+ Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...]
+ '''
+ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.api_version3:
+ project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
+ else:
+ project_class_list = self.keystone.tenants.findall(**filter_dict)
+ project_list=[]
+ for project in project_class_list:
+ if filter_dict.get('id') and filter_dict["id"] != project.id:
+ continue
+ project_list.append(project.to_dict())
+ return project_list
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def new_tenant(self, tenant_name, tenant_description):
+ '''Adds a new tenant to openstack VIM.
Returns the tenant identifier''' + self.logger.debug("Adding a new tenant name: %s", tenant_name) + try: + self._reload_connection() + if self.api_version3: + project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"), + description=tenant_description, is_domain=False) + else: + project = self.keystone.tenants.create(tenant_name, tenant_description) + return project.id + except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError) as e: + self._format_exception(e) + + def delete_tenant(self, tenant_id): + '''Delete a tenant from openstack VIM. Returns the old tenant identifier''' + self.logger.debug("Deleting tenant %s from VIM", tenant_id) + try: + self._reload_connection() + if self.api_version3: + self.keystone.projects.delete(tenant_id) + else: + self.keystone.tenants.delete(tenant_id) + return tenant_id + except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError) as e: + self._format_exception(e) + + def new_network(self,net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + """Adds a tenant network to VIM + Params: + 'net_name': name of the network + 'net_type': one of: + 'bridge': overlay isolated network + 'data': underlay E-LAN network for Passthrough and SRIOV interfaces + 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces. + 'ip_profile': is a dict containing the IP parameters of the network + 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented) + 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y + 'gateway_address': (Optional) ip_schema, that is X.X.X.X + 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X] + 'dhcp_enabled': True or False + 'dhcp_start_address': ip_schema, first IP to grant + 'dhcp_count': number of IPs to grant. + 'shared': if this network can be seen/use by other tenants/organization + 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk} + Returns a tuple with the network identifier and created_items, or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_network. Can be used to store created segments, created l2gw connections, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. 
+ """ + self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type) + # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile)) + + try: + vlan = None + if provider_network_profile: + vlan = provider_network_profile.get("segmentation-id") + new_net = None + created_items = {} + self._reload_connection() + network_dict = {'name': net_name, 'admin_state_up': True} + if net_type=="data" or net_type=="ptp": + if self.config.get('dataplane_physical_net') == None: + raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network") + if not self.config.get('multisegment_support'): + network_dict["provider:physical_network"] = self.config[ + 'dataplane_physical_net'] # "physnet_sriov" #TODO physical + network_dict["provider:network_type"] = "vlan" + if vlan!=None: + network_dict["provider:network_type"] = vlan + else: + ###### Multi-segment case ###### + segment_list = [] + segment1_dict = {} + segment1_dict["provider:physical_network"] = '' + segment1_dict["provider:network_type"] = 'vxlan' + segment_list.append(segment1_dict) + segment2_dict = {} + segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net'] + segment2_dict["provider:network_type"] = "vlan" + if self.config.get('multisegment_vlan_range'): + vlanID = self._generate_multisegment_vlanID() + segment2_dict["provider:segmentation_id"] = vlanID + # else + # raise vimconn.vimconnConflictException( + # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network") + segment_list.append(segment2_dict) + network_dict["segments"] = segment_list + + ####### VIO Specific Changes ######### + if self.vim_type == "VIO": + if vlan is not None: + network_dict["provider:segmentation_id"] = vlan + else: + if self.config.get('dataplane_net_vlan_range') is None: + raise vimconn.vimconnConflictException("You must provide "\ + "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\ + "at config value before creating sriov network with vlan tag") + + network_dict["provider:segmentation_id"] = self._generate_vlanID() + + network_dict["shared"] = shared + if self.config.get("disable_network_port_security"): + network_dict["port_security_enabled"] = False + new_net = self.neutron.create_network({'network':network_dict}) + # print new_net + # create subnetwork, even if there is no profile + if not ip_profile: + ip_profile = {} + if not ip_profile.get('subnet_address'): + #Fake subnet is required + subnet_rand = random.randint(0, 255) + ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand) + if 'ip_version' not in ip_profile: + ip_profile['ip_version'] = "IPv4" + subnet = {"name": net_name+"-subnet", + "network_id": new_net["network"]["id"], + "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6, + "cidr": ip_profile['subnet_address'] + } + # Gateway should be set to None if not needed. 
Otherwise openstack assigns one by default + if ip_profile.get('gateway_address'): + subnet['gateway_ip'] = ip_profile['gateway_address'] + else: + subnet['gateway_ip'] = None + if ip_profile.get('dns_address'): + subnet['dns_nameservers'] = ip_profile['dns_address'].split(";") + if 'dhcp_enabled' in ip_profile: + subnet['enable_dhcp'] = False if \ + ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True + if ip_profile.get('dhcp_start_address'): + subnet['allocation_pools'] = [] + subnet['allocation_pools'].append(dict()) + subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address'] + if ip_profile.get('dhcp_count'): + #parts = ip_profile['dhcp_start_address'].split('.') + #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) + ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address'])) + ip_int += ip_profile['dhcp_count'] - 1 + ip_str = str(netaddr.IPAddress(ip_int)) + subnet['allocation_pools'][0]['end'] = ip_str + #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) + self.neutron.create_subnet({"subnet": subnet} ) + + if net_type == "data" and self.config.get('multisegment_support'): + if self.config.get('l2gw_support'): + l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ()) + for l2gw in l2gw_list: + l2gw_conn = {} + l2gw_conn["l2_gateway_id"] = l2gw["id"] + l2gw_conn["network_id"] = new_net["network"]["id"] + l2gw_conn["segmentation_id"] = str(vlanID) + new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn}) + created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True + return new_net["network"]["id"], created_items + except Exception as e: + #delete l2gw connections (if any) before deleting the network + for k, v in created_items.items(): + if not v: # skip already deleted + continue + try: + k_item, _, k_id = k.partition(":") + if k_item == "l2gwconn": + self.neutron.delete_l2_gateway_connection(k_id) + except Exception as e2: + self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2)) + if new_net: + self.neutron.delete_network(new_net['network']['id']) + self._format_exception(e) + + def get_network_list(self, filter_dict={}): + '''Obtain tenant networks of VIM + Filter_dict can be: + name: network name + id: network uuid + shared: boolean + tenant_id: tenant + admin_state_up: boolean + status: 'ACTIVE' + Returns the network list of dictionaries + ''' + self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) + try: + self._reload_connection() + filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') #T ODO check + net_dict = self.neutron.list_networks(**filter_dict_os) + net_list = net_dict["networks"] + self.__net_os2mano(net_list) + return net_list + except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: + self._format_exception(e) + + def get_network(self, net_id): + '''Obtain details of network from VIM + Returns the network information from a network id''' + self.logger.debug(" Getting tenant network %s from VIM", net_id) + filter_dict={"id": net_id} + net_list = self.get_network_list(filter_dict) + if len(net_list)==0: + raise vimconn.vimconnNotFoundException("Network '{}' not found".format(net_id)) + elif len(net_list)>1: + raise 
vimconn.vimconnConflictException("Found more than one network with this criteria") + net = net_list[0] + subnets=[] + for subnet_id in net.get("subnets", () ): + try: + subnet = self.neutron.show_subnet(subnet_id) + except Exception as e: + self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e))) + subnet = {"id": subnet_id, "fault": str(e)} + subnets.append(subnet) + net["subnets"] = subnets + net["encapsulation"] = net.get('provider:network_type') + net["encapsulation_type"] = net.get('provider:network_type') + net["segmentation_id"] = net.get('provider:segmentation_id') + net["encapsulation_id"] = net.get('provider:segmentation_id') + return net + + def delete_network(self, net_id, created_items=None): + """ + Removes a tenant network from VIM and its associated elements + :param net_id: VIM identifier of the network, provided by method new_network + :param created_items: dictionary with extra items to be deleted. provided by method new_network + Returns the network identifier or raises an exception upon error or when network is not found + """ + self.logger.debug("Deleting network '%s' from VIM", net_id) + if created_items == None: + created_items = {} + try: + self._reload_connection() + #delete l2gw connections (if any) before deleting the network + for k, v in created_items.items(): + if not v: # skip already deleted + continue + try: + k_item, _, k_id = k.partition(":") + if k_item == "l2gwconn": + self.neutron.delete_l2_gateway_connection(k_id) + except Exception as e: + self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e)) + #delete VM ports attached to this networks before the network + ports = self.neutron.list_ports(network_id=net_id) + for p in ports['ports']: + try: + self.neutron.delete_port(p["id"]) + except Exception as e: + self.logger.error("Error deleting port %s: %s", p["id"], str(e)) + self.neutron.delete_network(net_id) + return net_id + except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException, + ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: + self._format_exception(e) + + def refresh_nets_status(self, net_list): + '''Get the status of the networks + Params: the list of network identifiers + Returns a dictionary with: + net_id: #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + # + error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + + ''' + net_dict={} + for net_id in net_list: + net = {} + try: + net_vim = self.get_network(net_id) + if net_vim['status'] in netStatus2manoFormat: + net["status"] = netStatus2manoFormat[ net_vim['status'] ] + else: + net["status"] = "OTHER" + net["error_msg"] = "VIM status reported " + net_vim['status'] + + if net['status'] == "ACTIVE" and not net_vim['admin_state_up']: + net['status'] = 'DOWN' + + net['vim_info'] = self.serialize(net_vim) + + if net_vim.get('fault'): #TODO + net['error_msg'] = str(net_vim['fault']) + except vimconn.vimconnNotFoundException as e: + self.logger.error("Exception getting net status: %s", str(e)) + net['status'] = "DELETED" + net['error_msg'] = str(e) + except vimconn.vimconnException as e: + self.logger.error("Exception getting net status: %s", str(e)) + net['status'] = "VIM_ERROR" + net['error_msg'] = str(e) + net_dict[net_id] = net + return net_dict + + def get_flavor(self, flavor_id): + '''Obtain flavor details from the VIM. Returns the flavor dict details''' + self.logger.debug("Getting flavor '%s'", flavor_id) + try: + self._reload_connection() + flavor = self.nova.flavors.find(id=flavor_id) + #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) + return flavor.to_dict() + except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e: + self._format_exception(e) + + def get_flavor_id_from_data(self, flavor_dict): + """Obtain flavor id that match the flavor description + Returns the flavor_id or raises a vimconnNotFoundException + flavor_dict: contains the required ram, vcpus, disk + If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus + and disk is returned. 
Otherwise a flavor with exactly the same ram, vcpus and disk is returned, or a
+ vimconnNotFoundException is raised
+ """
+ exact_match = False if self.config.get('use_existing_flavors') else True
+ try:
+ self._reload_connection()
+ flavor_candidate_id = None
+ flavor_candidate_data = (10000, 10000, 10000)
+ flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+ # numa=None
+ extended = flavor_dict.get("extended", {})
+ if extended:
+ #TODO
+ raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
+ # if len(numas) > 1:
+ # raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+ # numa=numas[0]
+ # numas = extended.get("numas")
+ for flavor in self.nova.flavors.list():
+ epa = flavor.get_keys()
+ if epa:
+ continue
+ # TODO
+ flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+ if flavor_data == flavor_target:
+ return flavor.id
+ elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+ flavor_candidate_id = flavor.id
+ flavor_candidate_data = flavor_data
+ if not exact_match and flavor_candidate_id:
+ return flavor_candidate_id
+ raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+ except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def process_resource_quota(self, quota, prefix, extra_specs):
+ """
+ :param quota:
+ :param prefix:
+ :param extra_specs:
+ :return:
+ """
+ if 'limit' in quota:
+ extra_specs["quota:" + prefix + "_limit"] = quota['limit']
+ if 'reserve' in quota:
+ extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
+ if 'shares' in quota:
+ extra_specs["quota:" + prefix + "_shares_level"] = "custom"
+ extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+
+ def new_flavor(self, flavor_data, change_name_if_used=True):
+ '''Adds a tenant flavor to openstack VIM
+ if change_name_if_used is True, it will change the name in case of conflict, because name repetition is not supported
+ Returns the flavor identifier
+ '''
+ self.logger.debug("Adding flavor '%s'", str(flavor_data))
+ retry=0
+ max_retries=3
+ name_suffix = 0
+ try:
+ name=flavor_data['name']
+ while retry < max_retries:
+ retry += 1
+ try:
+ self._reload_connection()
+ if change_name_if_used:
+ #get used names
+ fl_names = []
+ fl = self.nova.flavors.list()
+ for f in fl:
+ fl_names.append(f.name)
+ while name in fl_names:
+ name_suffix += 1
+ name = flavor_data['name']+"-" + str(name_suffix)
+
+ ram = flavor_data.get('ram', 64)
+ vcpus = flavor_data.get('vcpus', 1)
+ extra_specs = {}
+
+ extended = flavor_data.get("extended")
+ if extended:
+ numas = extended.get("numas")
+ if numas:
+ numa_nodes = len(numas)
+ if numa_nodes > 1:
+ return -1, "Can not add flavor with more than one numa"
+ extra_specs["hw:numa_nodes"] = str(numa_nodes)
+ extra_specs["hw:mem_page_size"] = "large"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ extra_specs["hw:numa_mempolicy"] = "strict"
+ if self.vim_type == "VIO":
+ extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+ extra_specs["vmware:latency_sensitivity_level"] = "high"
+ for numa in numas:
+ #overwrite ram and vcpus
+ #check if key 'memory' is present in numa else use ram value at flavor
+ if 'memory' in numa:
+ ram = numa['memory']*1024
+ #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+ extra_specs["hw:cpu_sockets"] = 1
+ if 'paired-threads' in numa:
+ vcpus = numa['paired-threads']*2
+ #cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+ extra_specs["hw:cpu_thread_policy"] = "require"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ elif 'cores' in numa:
+ vcpus = numa['cores']
+ # cpu_thread_policy "isolate" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+ extra_specs["hw:cpu_thread_policy"] = "isolate"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ elif
'threads' in numa: + vcpus = numa['threads'] + # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture + extra_specs["hw:cpu_thread_policy"] = "prefer" + extra_specs["hw:cpu_policy"] = "dedicated" + # for interface in numa.get("interfaces",() ): + # if interface["dedicated"]=="yes": + # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable) + # #TODO, add the key 'pci_passthrough:alias"="
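# --- Editor's illustrative sketch (not part of this changeset) -----------------
# Pure-function rendition of the flavor extra_specs that new_flavor() above
# derives from a single-NUMA EPA description: 'paired-threads' pin two hardware
# threads per vCPU pair ("require"), 'cores' forbid SMT sharing ("isolate") and
# 'threads' merely prefer an SMT host ("prefer"). The sample descriptor passed
# at the end is invented for illustration.
def numa_extra_specs(numa: dict) -> dict:
    specs = {"hw:numa_nodes": "1",
             "hw:mem_page_size": "large",
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
             "hw:cpu_sockets": 1}
    if 'paired-threads' in numa:
        vcpus = numa['paired-threads'] * 2
        specs["hw:cpu_thread_policy"] = "require"
    elif 'cores' in numa:
        vcpus = numa['cores']
        specs["hw:cpu_thread_policy"] = "isolate"
    elif 'threads' in numa:
        vcpus = numa['threads']
        specs["hw:cpu_thread_policy"] = "prefer"
    else:
        vcpus = 1
    return {"vcpus": vcpus, "extra_specs": specs}

print(numa_extra_specs({'paired-threads': 4, 'memory': 8}))   # -> 8 vCPUs, "require" policy
# -------------------------------------------------------------------------------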