From c0e42e20731ff8eb24b4856b9ddb8fd5342e07f1 Mon Sep 17 00:00:00 2001 From: tierno Date: Fri, 11 May 2018 11:36:10 +0200 Subject: [PATCH] Initial LCM contribution Change-Id: I13835e7db4febfc4f625297936ab343e02d99402 Signed-off-by: tierno --- .gitignore-common | 35 + Dockerfile | 12 + Dockerfile.fromdeb | 65 ++ Dockerfile.local | 66 ++ LICENSE | 201 +++++ Makefile | 14 + README.rst | 6 + devops-stages/stage-archive.sh | 10 + devops-stages/stage-build.sh | 4 + devops-stages/stage-test.sh | 2 + osm_lcm/ROclient.py | 1000 +++++++++++++++++++++ osm_lcm/lcm.cfg | 54 ++ osm_lcm/lcm.py | 1482 ++++++++++++++++++++++++++++++++ python3-osm-lcm.postinst | 26 + requirements.txt | 4 + setup.py | 46 + tox.ini | 20 + 17 files changed, 3047 insertions(+) create mode 100644 .gitignore-common create mode 100644 Dockerfile create mode 100644 Dockerfile.fromdeb create mode 100644 Dockerfile.local create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.rst create mode 100755 devops-stages/stage-archive.sh create mode 100755 devops-stages/stage-build.sh create mode 100755 devops-stages/stage-test.sh create mode 100644 osm_lcm/ROclient.py create mode 100644 osm_lcm/lcm.cfg create mode 100644 osm_lcm/lcm.py create mode 100755 python3-osm-lcm.postinst create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 tox.ini diff --git a/.gitignore-common b/.gitignore-common new file mode 100644 index 0000000..f0f10bd --- /dev/null +++ b/.gitignore-common @@ -0,0 +1,35 @@ +# This is a template with common files to be igonored, after clone make a copy to .gitignore +# cp .gitignore-common .gitignore + +*.pyc +*.pyo + +#auto-ignore +.gitignore + +#logs +logs + +#pycharm +.idea + +#eclipse +.project +.pydevproject +.settings + +#local stuff files that end in ".local" or folders called "local" +local +osm_nbi/local +osm_nbi/test/local + +#local stuff files that end in ".temp" or folders called "temp" +*.temp +osm_nbi/temp +osm_nbi/test/temp + +#distribution and package generation +build +dist +*.egg-info + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..0000ba9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,12 @@ +# This Dockerfile is intented for devops and deb package generation +# +# Use Dockerfile.local for running osm/LCM in a docker container from source +# Use Dockerfile.fromdeb for running osm/LCM in a docker container from last stable package + + +FROM ubuntu:16.04 + +RUN apt-get update && apt-get -y install git make python python3 \ + libcurl4-gnutls-dev libgnutls-dev tox python-dev python3-dev \ + debhelper python-setuptools python-all python3-all apt-utils + diff --git a/Dockerfile.fromdeb b/Dockerfile.fromdeb new file mode 100644 index 0000000..25f6c66 --- /dev/null +++ b/Dockerfile.fromdeb @@ -0,0 +1,65 @@ +# This creates som/LCM docker from from last stable package + +FROM ubuntu:16.04 + +# Set the working directory to /app +WORKDIR /app/osm_lcm + +# avoid extra information from packages +RUN echo 'path-exclude /usr/share/doc/*\n\ +path-include /usr/share/doc/*/copyright\n\ +path-exclude /usr/share/man/*\n\ +path-exclude /usr/share/groff/*\n\ +path-exclude /usr/share/info/*\n\ +path-exclude /usr/share/lintian/*\n\ +path-exclude /usr/share/linda/*\n'\ +> /etc/dpkg/dpkg.cfg.d/01_nodoc && \ + echo 'APT::Install-Recommends "false";\n\ +APT::AutoRemove::RecommendsImportant "false";\n\ +APT::AutoRemove::SuggestsImportant "false";\n'\ +> /etc/apt/apt.conf.d/99_norecommends + + +RUN apt-get update && apt-get install -y curl software-properties-common 
\
+    && add-apt-repository -y "deb http://osm-download.etsi.org/repository/osm/debian/ReleaseFOUR testing common LCM" \
+    && curl "http://osm-download.etsi.org/repository/osm/debian/ReleaseFOUR/OSM%20ETSI%20Release%20Key.gpg" | apt-key add - \
+    && apt-get update && apt-get install -y python3-osm-lcm python3-osm-common \
+    && rm -rf /var/lib/apt/lists/*
+
+EXPOSE 9999
+
+LABEL Maintainer="alfonso.tiernosepulveda@telefonica.com" \
+      Description="This implements the life cycle management engine for OSM" \
+      Version="1.0" \
+      Author="Alfonso Tierno"
+
+# Used for local storage
+VOLUME /app/storage
+# Used for logs
+VOLUME /app/log
+
+# The following ENV can be added with "docker run -e xxx" to configure LCM
+ENV OSMLCM_RO_HOST ro
+ENV OSMLCM_RO_PORT 9090
+ENV OSMLCM_RO_TENANT osm
+
+# VCA
+ENV OSMLCM_VCA_HOST vca
+ENV OSMLCM_VCA_PORT 17070
+ENV OSMLCM_VCA_USER admin
+ENV OSMLCM_VCA_SECRET secret
+
+# database
+ENV OSMLCM_DATABASE_DRIVER mongo
+ENV OSMLCM_DATABASE_HOST mongo
+ENV OSMLCM_DATABASE_PORT 27017
+ENV OSMLCM_STORAGE_DRIVER local
+ENV OSMLCM_STORAGE_PATH /app/storage
+
+# message
+ENV OSMLCM_MESSAGE_DRIVER kafka
+ENV OSMLCM_MESSAGE_HOST kafka
+ENV OSMLCM_MESSAGE_PORT 9092
+
+# Run lcm.py when the container launches
+CMD ["python3", "lcm.py"]
diff --git a/Dockerfile.local b/Dockerfile.local
new file mode 100644
index 0000000..c2a3b48
--- /dev/null
+++ b/Dockerfile.local
@@ -0,0 +1,66 @@
+FROM ubuntu:16.04
+
+# Set the working directory to /app
+WORKDIR /app/osm_lcm
+
+# Copy the current directory contents into the container at /app
+ADD . /app
+
+RUN apt-get update && apt-get install -y git python3 \
+    python3-pip python3-pymongo python3-yaml python3-aiohttp \
+    python3-stdeb python3-setuptools python3-all python-all \
+    python3-bitarray python3-regex python3-lxml debhelper dh-python tox wget \
+    python3-cffi \
+    && pip3 install pip==9.0.3 \
+    && pip3 install -U aiokafka pyang lxml six enum34
+
+RUN git clone https://osm.etsi.org/gerrit/osm/N2VC.git \
+    && cd N2VC \
+    && cd modules/libjuju && python3 setup.py develop && cd ../.. \
+    && pip3 install -U -r requirements.txt \
+    && python3 setup.py develop \
+    && cd ..
+
+RUN git clone https://osm.etsi.org/gerrit/osm/common.git \
+    && cd common && python3 setup.py develop && cd ..
+#    && pip3 install -U -r requirements.txt \
+#    && cd ..
+
+RUN mkdir -p /app/storage/kafka && mkdir -p /app/log
+
+
+LABEL Maintainer="alfonso.tiernosepulveda@telefonica.com" \
+      Description="This implements the life cycle management engine for OSM" \
+      Version="1.0" \
+      Author="Alfonso Tierno"
+
+# Used for local storage
+VOLUME /app/storage
+# Used for logs
+VOLUME /app/log
+
+# The following ENV can be added with "docker run -e xxx" to configure LCM
+ENV OSMLCM_RO_HOST ro
+ENV OSMLCM_RO_PORT 9090
+ENV OSMLCM_RO_TENANT osm
+
+# VCA
+ENV OSMLCM_VCA_HOST vca
+ENV OSMLCM_VCA_PORT 17070
+ENV OSMLCM_VCA_USER admin
+ENV OSMLCM_VCA_SECRET secret
+
+# database
+ENV OSMLCM_DATABASE_DRIVER mongo
+ENV OSMLCM_DATABASE_HOST mongo
+ENV OSMLCM_DATABASE_PORT 27017
+ENV OSMLCM_STORAGE_DRIVER local
+ENV OSMLCM_STORAGE_PATH /app/storage
+
+# message
+ENV OSMLCM_MESSAGE_DRIVER kafka
+ENV OSMLCM_MESSAGE_HOST kafka
+ENV OSMLCM_MESSAGE_PORT 9092
+
+# Run lcm.py when the container launches
+CMD ["python3", "lcm.py"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
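A note referring back to the Dockerfiles above: the OSMLCM_* variables they declare are plain defaults, so they can be overridden at container start with "docker run -e", as the comment in both files indicates. A minimal sketch of such an invocation follows; the image tag, volume names and overridden values are illustrative and not part of this patch, and the container must be able to resolve the ro, kafka, mongo and vca hostnames it is given:

    docker run -d --name lcm \
        -e OSMLCM_RO_HOST=ro -e OSMLCM_RO_PORT=9090 \
        -e OSMLCM_DATABASE_HOST=mongo -e OSMLCM_MESSAGE_HOST=kafka \
        -e OSMLCM_VCA_HOST=vca -e OSMLCM_VCA_SECRET=secret \
        -v lcm_storage:/app/storage -v lcm_log:/app/log \
        osm/lcm

The same settings can instead be written into osm_lcm/lcm.cfg, which appears later in this patch with matching defaults.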
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..b439211
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,14 @@
+
+clean:
+	rm -rf dist deb_dist .build osm_lcm-*.tar.gz osm_lcm.egg-info eggs
+
+package:
+	python3 setup.py --command-packages=stdeb.command sdist_dsc
+	cp python3-osm-lcm.postinst deb_dist/osm-lcm*/debian
+	cd deb_dist/osm-lcm*/debian && echo "osm-common python3-osm-common" > py3dist-overrides
+	# cd deb_dist/osm-lcm*/debian && echo "pip3 python3-pip" >> py3dist-overrides
+	cd deb_dist/osm-lcm*/ && dpkg-buildpackage -rfakeroot -uc -us
+	mkdir -p .build
+	cp deb_dist/python3-osm-lcm*.deb .build/
+
+
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..466163f
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,6 @@
+===========
+osm-lcm
+===========
+
+osm-lcm is the Life Cycle Management (LCM) module of the OSM lightweight build. It interacts with the RO module for resource orchestration and with N2VC for VNF configuration.
+
diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh
new file mode 100755
index 0000000..5e8e072
--- /dev/null
+++ b/devops-stages/stage-archive.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+MDG=LCM
+rm -rf pool
+rm -rf dists
+mkdir -p pool/$MDG
+mv deb_dist/*.deb pool/$MDG/
+mkdir -p dists/unstable/$MDG/binary-amd64/
+apt-ftparchive packages pool/$MDG > dists/unstable/$MDG/binary-amd64/Packages
+gzip -9fk dists/unstable/$MDG/binary-amd64/Packages
+echo "dists/**,pool/$MDG/*.deb"
diff --git a/devops-stages/stage-build.sh b/devops-stages/stage-build.sh
new file mode 100755
index 0000000..58af160
--- /dev/null
+++ b/devops-stages/stage-build.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+rm -rf deb_dist
+tox -e build
+#make clean package
diff --git a/devops-stages/stage-test.sh b/devops-stages/stage-test.sh
new file mode 100755
index 0000000..0333d84
--- /dev/null
+++ b/devops-stages/stage-test.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+#tox
diff --git a/osm_lcm/ROclient.py b/osm_lcm/ROclient.py
new file mode 100644
index 0000000..b6983d3
--- /dev/null
+++ b/osm_lcm/ROclient.py
@@ -0,0 +1,1000 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +""" +asyncio RO python client to interact with RO-server +""" + +import asyncio +import aiohttp + +import json +import yaml +import logging +import sys +from urllib.parse import quote +from uuid import UUID +from copy import deepcopy + +__author__ = "Alfonso Tierno, Pablo Montes" +__date__ = "$09-Jan-2018 09:09:48$" +__version__ = "0.1.0-r470" +version_date = "Jan 2018" +requests = None + +class ROClientException(Exception): + def __init__(self, message, http_code=400): + self.http_code = http_code + Exception.__init__(self, message) + """Common Exception for all openmano client exceptions""" + + +def remove_envelop(item, indata=None): + """ + Obtain the useful data removing the envelop. It goes through the vnfd or nsd catalog and returns the + vnfd or nsd content + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' + :param indata: Content to be inspected + :return: the useful part of indata (a reference, not a new dictionay) + """ + clean_indata = indata + if not indata: + return {} + if item == "vnfd": + if clean_indata.get('vnfd:vnfd-catalog'): + clean_indata = clean_indata['vnfd:vnfd-catalog'] + elif clean_indata.get('vnfd-catalog'): + clean_indata = clean_indata['vnfd-catalog'] + if clean_indata.get('vnfd'): + if not isinstance(clean_indata['vnfd'], list) or len(clean_indata['vnfd']) != 1: + raise ROClientException("'vnfd' must be a list only one element") + clean_indata = clean_indata['vnfd'][0] + elif item == "nsd": + if clean_indata.get('nsd:nsd-catalog'): + clean_indata = clean_indata['nsd:nsd-catalog'] + elif clean_indata.get('nsd-catalog'): + clean_indata = clean_indata['nsd-catalog'] + if clean_indata.get('nsd'): + if not isinstance(clean_indata['nsd'], list) or len(clean_indata['nsd']) != 1: + raise ROClientException("'nsd' must be a list only one element") + clean_indata = clean_indata['nsd'][0] + elif item == "sdn": + if len(indata) == 1 and "sdn_controller" in indata: + clean_indata = indata["sdn_controller"] + elif item == "tenant": + if len(indata) == 1 and "tenant" in indata: + clean_indata = indata["tenant"] + elif item in ("vim", "vim_account", "datacenters"): + if len(indata) == 1 and "datacenter" in indata: + clean_indata = indata["datacenter"] + elif item == "ns" or item == "instances": + if len(indata) == 1 and "instance" in indata: + clean_indata = indata["instance"] + else: + assert False, "remove_envelop with unknown item {}".format(item) + + return clean_indata + + +class ROClient: + headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'} + client_to_RO = {'tenant': 'tenants', 'vim': 'datacenters', 'vim_account': 'datacenters', 'sdn': 'sdn_controllers', + 'vnfd': 'vnfs', 'nsd': 'scenarios', + 'ns': 'instances'} + mandatory_for_create = { + 'tenant': ("name", ), + 'vnfd': ("name", "id", "connection-point", "vdu"), + 'nsd': ("name", "id", "constituent-vnfd"), + 'ns': ("name", "scenario", "datacenter"), + 'vim': ("name", "vim_url"), + 'vim_account': (), + 'sdn': ("name", "port", 'ip', 'dpid', 'type'), + } + timeout_large = 120 + timeout_short = 30 + + def __init__(self, loop, endpoint_url, **kwargs): + self.loop = loop + self.endpoint_url = endpoint_url + + self.username = kwargs.get("username") + self.password = kwargs.get("password") + self.tenant_id_name = kwargs.get("tenant") + self.tenant = None + self.datacenter_id_name = kwargs.get("datacenter") + self.datacenter = None + logger_name = kwargs.get('logger_name', 
'ROClient') + self.logger = logging.getLogger(logger_name) + if kwargs.get("loglevel"): + self.logger.setLevel(kwargs["loglevel"]) + global requests + requests = kwargs.get("TODO remove") + + def __getitem__(self, index): + if index == 'tenant': + return self.tenant_id_name + elif index == 'datacenter': + return self.datacenter_id_name + elif index == 'username': + return self.username + elif index == 'password': + return self.password + elif index == 'endpoint_url': + return self.endpoint_url + else: + raise KeyError("Invalid key '%s'" %str(index)) + + def __setitem__(self,index, value): + if index == 'tenant': + self.tenant_id_name = value + elif index == 'datacenter' or index == 'vim': + self.datacenter_id_name = value + elif index == 'username': + self.username = value + elif index == 'password': + self.password = value + elif index == 'endpoint_url': + self.endpoint_url = value + else: + raise KeyError("Invalid key '{}'".format(index)) + self.tenant = None # force to reload tenant with different credentials + self.datacenter = None # force to reload datacenter with different credentials + + def _parse(self, descriptor, descriptor_format, response=False): + #try yaml + if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml": + raise ROClientException("'descriptor_format' must be a 'json' or 'yaml' text") + if descriptor_format != "json": + try: + return yaml.load(descriptor) + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1) + error_text = "yaml format error" + error_pos + elif descriptor_format != "yaml": + try: + return json.loads(descriptor) + except Exception as e: + if response: + error_text = "json format error" + str(e) + + if response: + raise ROClientException(error_text) + raise ROClientException(error_text) + + def _parse_yaml(self, descriptor, response=False): + try: + return yaml.load(descriptor) + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1) + error_text = "yaml format error" + error_pos + if response: + raise ROClientException(error_text) + raise ROClientException(error_text) + + @staticmethod + def check_if_uuid(uuid_text): + """ + Check if text correspond to an uuid foramt + :param uuid_text: + :return: True if it is an uuid False if not + """ + try: + UUID(uuid_text) + return True + except (ValueError, TypeError): + return False + + @staticmethod + def _create_envelop(item, indata=None): + """ + Returns a new dict that incledes indata with the expected envelop + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' + :param indata: Content to be enveloped + :return: a new dic with {: {indata} } where envelop can be e.g. tenant, datacenter, ... 
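+        Example (derived from the mapping below): _create_envelop("tenant", {"name": "osm"}) returns {"tenant": {"name": "osm"}}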
+ """ + if item == "vnfd": + return {'vnfd-catalog': {'vnfd': [indata]}} + elif item == "nsd": + return {'nsd-catalog': {'nsd': [indata]}} + elif item == "tenant": + return {'tenant': indata} + elif item in ("vim", "vim_account", "datacenter"): + return {'datacenter': indata} + elif item == "ns" or item == "instances": + return {'instance': indata} + elif item == "sdn": + return {'sdn_controller': indata} + else: + assert False, "_create_envelop with unknown item {}".format(item) + + @staticmethod + def update_descriptor(desc, kwargs): + desc = deepcopy(desc) # do not modify original descriptor + try: + for k, v in kwargs.items(): + update_content = desc + kitem_old = None + klist = k.split(".") + for kitem in klist: + if kitem_old is not None: + update_content = update_content[kitem_old] + if isinstance(update_content, dict): + kitem_old = kitem + elif isinstance(update_content, list): + kitem_old = int(kitem) + else: + raise ROClientException( + "Invalid query string '{}'. Descriptor is not a list nor dict at '{}'".format(k, kitem)) + if v == "__DELETE__": + del update_content[kitem_old] + else: + update_content[kitem_old] = v + return desc + except KeyError: + raise ROClientException( + "Invalid query string '{}'. Descriptor does not contain '{}'".format(k, kitem_old)) + except ValueError: + raise ROClientException("Invalid query string '{}'. Expected integer index list instead of '{}'".format( + k, kitem)) + except IndexError: + raise ROClientException( + "Invalid query string '{}'. Index '{}' out of range".format(k, kitem_old)) + + @staticmethod + def check_ns_status(ns_descriptor): + """ + Inspect RO instance descriptor and indicates the status + :param ns_descriptor: instance descriptor obtained with self.show("ns", ) + :return: status, message: status can be BUILD,ACTIVE,ERROR, message is a text message + """ + net_total = 0 + vm_total = 0 + net_done = 0 + vm_done = 0 + + for net in ns_descriptor["nets"]: + net_total += 1 + if net["status"] in ("ERROR", "VIM_ERROR"): + return "ERROR", net["error_msg"] + elif net["status"] == "ACTIVE": + net_done += 1 + for vnf in ns_descriptor["vnfs"]: + for vm in vnf["vms"]: + vm_total += 1 + if vm["status"] in ("ERROR", "VIM_ERROR"): + return "ERROR", vm["error_msg"] + elif vm["status"] == "ACTIVE": + vm_done += 1 + + if net_total == net_done and vm_total == vm_done: + return "ACTIVE", "VMs {}, networks: {}".format(vm_total, net_total) + else: + return "BUILD", "VMs: {}/{}, networks: {}/{}".format(vm_done, vm_total, net_done, net_total) + + @staticmethod + def get_ns_vnf_info(ns_descriptor): + """ + Get a dict with the VIM_id, ip_addresses, mac_addresses of every vnf and vdu + :param ns_descriptor: instance descriptor obtained with self.show("ns", ) + :return: dict with {: {ip_address: XXXX, vdur:{ip_address: XXX, vim_id: XXXX}}} + """ + ns_info = {} + for vnf in ns_descriptor["vnfs"]: + if not vnf.get("ip_address"): + raise ROClientException("No ip_address returned for ns member_vnf_index '{}'".format( + vnf["member_vnf_index"]), http_code=500) + vnfr_info = { + "ip_address": vnf.get("ip_address"), + "vdur": {} + } + for vm in vnf["vms"]: + vdur = { + "vim_id": vm.get("vim_vm_id"), + "ip_address": vm.get("ip_address") + } + vnfr_info["vdur"][vm["vdu_osm_id"]] = vdur + ns_info[str(vnf["member_vnf_index"])] = vnfr_info + return ns_info + + + async def _get_item_uuid(self, session, item, item_id_name, all_tenants=False): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await 
self._get_tenant(session) + tenant_text = "/" + self.tenant + + item_id = 0 + url = "{}{}/{}".format(self.endpoint_url, tenant_text, item) + if self.check_if_uuid(item_id_name): + item_id = item_id_name + url += "/" + item_id_name + elif item_id_name and item_id_name.startswith("'") and item_id_name.endswith("'"): + item_id_name = item_id_name[1:-1] + self.logger.debug("openmano GET %s", url) + with aiohttp.Timeout(self.timeout_short): + async with session.get(url, headers=self.headers_req) as response: + response_text = await response.read() + self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status == 404: # NOT_FOUND + raise ROClientException("No {} found with id '{}'".format(item[:-1], item_id_name), + http_code=404) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + content = self._parse_yaml(response_text, response=True) + + if item_id: + return item_id + desc = content[item] + assert isinstance(desc, list), "_get_item_uuid get a non dict with a list inside {}".format(type(desc)) + uuid = None + for i in desc: + if item_id_name and i["name"] != item_id_name: + continue + if uuid: # found more than one + raise ROClientException( + "Found more than one {} with name '{}'. uuid must be used".format(item, item_id_name), + http_code=404) + uuid = i["uuid"] + if not uuid: + raise ROClientException("No {} found with name '{}'".format(item[:-1], item_id_name), http_code=404) + return uuid + + async def _get_item(self, session, item, item_id_name, all_tenants=False): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await self._get_tenant(session) + tenant_text = "/" + self.tenant + + if self.check_if_uuid(item_id_name): + uuid = item_id_name + else: + # check that exist + uuid = await self._get_item_uuid(session, item, item_id_name, all_tenants) + + url = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid) + self.logger.debug("GET %s", url ) + with aiohttp.Timeout(self.timeout_short): + async with session.get(url, headers=self.headers_req) as response: + response_text = await response.read() + self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + + return self._parse_yaml(response_text, response=True) + + async def _get_tenant(self, session): + if not self.tenant: + self.tenant = await self._get_item_uuid(session, "tenants", self.tenant_id_name, None) + return self.tenant + + async def _get_datacenter(self, session): + if not self.tenant: + await self._get_tenant(session) + if not self.datacenter: + self.datacenter = await self._get_item_uuid(session, "datacenters", self.datacenter_id_name, True) + return self.datacenter + + async def _create_item(self, session, item, descriptor, item_id_name=None, action=None, all_tenants=False): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await self._get_tenant(session) + tenant_text = "/" + self.tenant + payload_req = yaml.safe_dump(descriptor) + #print payload_req + + api_version_text = "" + if item == "vnfs": + # assumes version v3 only + api_version_text = "/v3" + item = "vnfd" + elif item == "scenarios": + # assumes version v3 only + api_version_text = "/v3" + item = "nsd" + + if not item_id_name: + uuid="" + elif self.check_if_uuid(item_id_name): + uuid = 
"/{}".format(item_id_name) + else: + # check that exist + uuid = await self._get_item_uuid(session, item, item_id_name, all_tenants) + uuid = "/{}".format(uuid) + if not action: + action = "" + else: + action = "/".format(action) + + url = "{}{apiver}{tenant}/{item}{id}{action}".format(self.endpoint_url, apiver=api_version_text, tenant=tenant_text, + item=item, id=uuid, action=action) + self.logger.debug("openmano POST %s %s", url, payload_req) + with aiohttp.Timeout(self.timeout_large): + async with session.post(url, headers=self.headers_req, data=payload_req) as response: + response_text = await response.read() + self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + + return self._parse_yaml(response_text, response=True) + + async def _del_item(self, session, item, item_id_name, all_tenants=False): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await self._get_tenant(session) + tenant_text = "/" + self.tenant + if not self.check_if_uuid(item_id_name): + # check that exist + _all_tenants = all_tenants + if item == "datacenters": + _all_tenants = True + uuid = await self._get_item_uuid(session, item, item_id_name, all_tenants=_all_tenants) + else: + uuid = item_id_name + + url = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid) + self.logger.debug("DELETE %s", url) + with aiohttp.Timeout(self.timeout_short): + async with session.delete(url, headers=self.headers_req) as response: + response_text = await response.read() + self.logger.debug("DELETE {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + return self._parse_yaml(response_text, response=True) + + async def _list_item(self, session, item, all_tenants=False, filter_dict=None): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await self._get_tenant(session) + tenant_text = "/" + self.tenant + + url = "{}{}/{}".format(self.endpoint_url, tenant_text, item) + separator = "?" 
+ if filter_dict: + for k in filter_dict: + url += separator + quote(str(k)) + "=" + quote(str(filter_dict[k])) + separator = "&" + self.logger.debug("openmano GET %s", url) + with aiohttp.Timeout(self.timeout_short): + async with session.get(url, headers=self.headers_req) as response: + response_text = await response.read() + self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + return self._parse_yaml(response_text, response=True) + + async def _edit_item(self, session, item, item_id, descriptor, all_tenants=False): + if all_tenants: + tenant_text = "/any" + elif all_tenants is None: + tenant_text = "" + else: + if not self.tenant: + await self._get_tenant(session) + tenant_text = "/" + self.tenant + + payload_req = yaml.safe_dump(descriptor) + + #print payload_req + + url = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, item_id) + self.logger.debug("openmano PUT %s %s", url, payload_req) + with aiohttp.Timeout(self.timeout_large): + async with session.put(url, headers=self.headers_req, data=payload_req) as response: + response_text = await response.read() + self.logger.debug("PUT {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + return self._parse_yaml(response_text, response=True) + + async def get_list(self, item, all_tenants=False, filter_by=None): + """ + Obtain a list of items filtering by the specigy filter_by. + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' + :param all_tenants: True if not filtering by tenant. Only allowed for admin + :param filter_by: dictionary with filtering + :return: a list of dict. It can be empty. Raises ROClientException on Error, + """ + try: + if item not in self.client_to_RO: + raise ROClientException("Invalid item {}".format(item)) + if item == 'tenant': + all_tenants = None + with aiohttp.ClientSession(loop=self.loop) as session: + content = await self._list_item(session, self.client_to_RO[item], all_tenants=all_tenants, + filter_dict=filter_by) + if isinstance(content, dict): + if len(content) == 1: + for _, v in content.items(): + return v + return content.values()[0] + else: + raise ROClientException("Output not a list neither dict with len equal 1", http_code=500) + return content + except aiohttp.errors.ClientOSError as e: + raise ROClientException(e, http_code=504) + + async def show(self, item, item_id_name=None, all_tenants=False): + """ + Obtain the information of an item from its id or name + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' + :param item_id_name: RO id or name of the item. Raise and exception if more than one found + :param all_tenants: True if not filtering by tenant. 
Only allowed for admin + :return: dictionary with the information or raises ROClientException on Error, NotFound, found several + """ + try: + if item not in self.client_to_RO: + raise ROClientException("Invalid item {}".format(item)) + if item == 'tenant': + all_tenants = None + elif item == 'vim': + all_tenants = True + elif item == 'vim_account': + all_tenants = False + + with aiohttp.ClientSession(loop=self.loop) as session: + content = await self._get_item(session, self.client_to_RO[item], item_id_name, all_tenants=all_tenants) + return remove_envelop(item, content) + except aiohttp.errors.ClientOSError as e: + raise ROClientException(e, http_code=504) + + async def delete(self, item, item_id_name=None, all_tenants=False): + """ + Delete the information of an item from its id or name + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' + :param item_id_name: RO id or name of the item. Raise and exception if more than one found + :param all_tenants: True if not filtering by tenant. Only allowed for admin + :return: dictionary with the information or raises ROClientException on Error, NotFound, found several + """ + try: + if item not in self.client_to_RO: + raise ROClientException("Invalid item {}".format(item)) + if item == 'tenant' or item == 'vim': + all_tenants = None + + with aiohttp.ClientSession(loop=self.loop) as session: + return await self._del_item(session, self.client_to_RO[item], item_id_name, all_tenants=all_tenants) + except aiohttp.errors.ClientOSError as e: + raise ROClientException(e, http_code=504) + + async def edit(self, item, item_id_name, descriptor=None, descriptor_format=None, **kwargs): + """ Edit an item + :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns', 'vim' + :param descriptor: can be a dict, or a yaml/json text. Autodetect unless descriptor_format is provided + :param descriptor_format: Can be 'json' or 'yaml' + :param kwargs: Overrides descriptor with values as name, description, vim_url, vim_url_admin, vim_type + keys can be a dot separated list to specify elements inside dict + :return: dictionary with the information or raises ROClientException on Error + """ + try: + if isinstance(descriptor, str): + descriptor = self._parse(descriptor, descriptor_format) + elif descriptor: + pass + else: + descriptor = {} + + if item not in self.client_to_RO: + raise ROClientException("Invalid item {}".format(item)) + desc = remove_envelop(item, descriptor) + + # Override descriptor with kwargs + if kwargs: + desc = self.update_descriptor(desc, kwargs) + all_tenants = False + if item in ('tenant', 'vim'): + all_tenants = None + + create_desc = self._create_envelop(item, desc) + + with aiohttp.ClientSession(loop=self.loop) as session: + _all_tenants = all_tenants + if item == 'vim': + _all_tenants = True + item_id = await self._get_item_uuid(session, self.client_to_RO[item], item_id_name, all_tenants=_all_tenants) + # await self._get_tenant(session) + outdata = await self._edit_item(session, self.client_to_RO[item], item_id, create_desc, all_tenants=all_tenants) + return remove_envelop(item, outdata) + except aiohttp.errors.ClientOSError as e: + raise ROClientException(e, http_code=504) + + async def create(self, item, descriptor=None, descriptor_format=None, **kwargs): + """ + Creates an item from its descriptor + :param item: can be 'tenant', 'vnfd', 'nsd', 'ns', 'vim', 'vim_account', 'sdn' + :param descriptor: can be a dict, or a yaml/json text. 
Autodetect unless descriptor_format is provided + :param descriptor_format: Can be 'json' or 'yaml' + :param kwargs: Overrides descriptor with values as name, description, vim_url, vim_url_admin, vim_type + keys can be a dot separated list to specify elements inside dict + :return: dictionary with the information or raises ROClientException on Error + """ + try: + if isinstance(descriptor, str): + descriptor = self._parse(descriptor, descriptor_format) + elif descriptor: + pass + else: + descriptor = {} + + if item not in self.client_to_RO: + raise ROClientException("Invalid item {}".format(item)) + desc = remove_envelop(item, descriptor) + + # Override descriptor with kwargs + if kwargs: + desc = self.update_descriptor(desc, kwargs) + + for mandatory in self.mandatory_for_create[item]: + if mandatory not in desc: + raise ROClientException("'{}' is mandatory parameter for {}".format(mandatory, item)) + + all_tenants = False + if item in ('tenant', 'vim'): + all_tenants = None + + create_desc = self._create_envelop(item, desc) + + with aiohttp.ClientSession(loop=self.loop) as session: + outdata = await self._create_item(session, self.client_to_RO[item], create_desc, + all_tenants=all_tenants) + return remove_envelop(item, outdata) + except aiohttp.errors.ClientOSError as e: + raise ROClientException(e, http_code=504) + + async def attach_datacenter(self, datacenter=None, descriptor=None, descriptor_format=None, **kwargs): + + if isinstance(descriptor, str): + descriptor = self._parse(descriptor, descriptor_format) + elif descriptor: + pass + else: + descriptor = {} + desc = remove_envelop("vim", descriptor) + + # # check that exist + # uuid = self._get_item_uuid(session, "datacenters", uuid_name, all_tenants=True) + # tenant_text = "/" + self._get_tenant() + if kwargs: + desc = self.update_descriptor(desc, kwargs) + + if not desc.get("vim_tenant_name") and not desc.get("vim_tenant_id"): + raise ROClientException("Wrong descriptor. At least vim_tenant_name or vim_tenant_id must be provided") + create_desc = self._create_envelop("vim", desc) + payload_req = yaml.safe_dump(create_desc) + with aiohttp.ClientSession(loop=self.loop) as session: + # check that exist + item_id = await self._get_item_uuid(session, "datacenters", datacenter, all_tenants=True) + await self._get_tenant(session) + + url = "{}/{tenant}/datacenters/{datacenter}".format(self.endpoint_url, tenant=self.tenant, + datacenter=item_id) + self.logger.debug("openmano POST %s %s", url, payload_req) + with aiohttp.Timeout(self.timeout_large): + async with session.post(url, headers=self.headers_req, data=payload_req) as response: + response_text = await response.read() + self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + + response_desc = self._parse_yaml(response_text, response=True) + desc = remove_envelop("vim", response_desc) + return desc + + async def detach_datacenter(self, datacenter=None): + #TODO replace the code with delete_item(vim_account,...) 
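+        # current flow: resolve the datacenter uuid and the tenant uuid, then send
+        # DELETE <endpoint_url>/<tenant>/datacenters/<datacenter_uuid> to detach it from the tenant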
+ with aiohttp.ClientSession(loop=self.loop) as session: + # check that exist + item_id = await self._get_item_uuid(session, "datacenters", datacenter, all_tenants=False) + tenant = await self._get_tenant(session) + + url = "{}/{tenant}/datacenters/{datacenter}".format(self.endpoint_url, tenant=tenant, + datacenter=item_id) + self.logger.debug("openmano DELETE %s", url) + with aiohttp.Timeout(self.timeout_large): + async with session.delete(url, headers=self.headers_req) as response: + response_text = await response.read() + self.logger.debug("DELETE {} [{}] {}".format(url, response.status, response_text[:100])) + if response.status >= 300: + raise ROClientException(response_text, http_code=response.status) + + response_desc = self._parse_yaml(response_text, response=True) + desc = remove_envelop("vim", response_desc) + return desc + + + # TODO convert to asyncio + + #DATACENTERS + + def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs): + """Edit the parameters of a datacenter + Params: must supply a descriptor or/and a parameter to change + uuid or/and name. If only name is supplied, there must be only one or an exception is raised + descriptor: with format {'datacenter':{params to change info}} + must be a dictionary or a json/yaml text. + parameters to change can be supplyied by the descriptor or as parameters: + new_name: the datacenter name + vim_url: the datacenter URL + vim_url_admin: the datacenter URL for administrative issues + vim_type: the datacenter type, can be openstack or openvim. + public: boolean, available to other tenants + description: datacenter description + Return: Raises an exception on error, not found or found several + Obtain a dictionary with format {'datacenter':{new_datacenter_info}} + """ + + if isinstance(descriptor, str): + descriptor = self.parse(descriptor, descriptor_format) + elif descriptor: + pass + elif kwargs: + descriptor={"datacenter": {}} + else: + raise ROClientException("Missing descriptor") + + if 'datacenter' not in descriptor or len(descriptor)!=1: + raise ROClientException("Descriptor must contain only one 'datacenter' field") + for param in kwargs: + if param=='new_name': + descriptor['datacenter']['name'] = kwargs[param] + else: + descriptor['datacenter'][param] = kwargs[param] + return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None) + + + def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs): + """Edit the parameters of a scenario + Params: must supply a descriptor or/and a parameters to change + uuid or/and name. If only name is supplied, there must be only one or an exception is raised + descriptor: with format {'scenario':{params to change info}} + must be a dictionary or a json/yaml text. + parameters to change can be supplyied by the descriptor or as parameters: + new_name: the scenario name + public: boolean, available to other tenants + description: scenario description + tenant_id. 
Propietary tenant + Return: Raises an exception on error, not found or found several + Obtain a dictionary with format {'scenario':{new_scenario_info}} + """ + + if isinstance(descriptor, str): + descriptor = self.parse(descriptor, descriptor_format) + elif descriptor: + pass + elif kwargs: + descriptor={"scenario": {}} + else: + raise ROClientException("Missing descriptor") + + if 'scenario' not in descriptor or len(descriptor)>2: + raise ROClientException("Descriptor must contain only one 'scenario' field") + for param in kwargs: + if param=='new_name': + descriptor['scenario']['name'] = kwargs[param] + else: + descriptor['scenario'][param] = kwargs[param] + return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None) + + #VIM ACTIONS + def vim_action(self, action, item, uuid=None, all_tenants=False, **kwargs): + """Perform an action over a vim + Params: + action: can be 'list', 'get'/'show', 'delete' or 'create' + item: can be 'tenants' or 'networks' + uuid: uuid of the tenant/net to show or to delete. Ignore otherwise + other parameters: + datacenter_name, datacenter_id: datacenters to act on, if missing uses classes store datacenter + descriptor, descriptor_format: descriptor needed on creation, can be a dict or a yaml/json str + must be a dictionary or a json/yaml text. + name: for created tenant/net Overwrite descriptor name if any + description: tenant descriptor. Overwrite descriptor description if any + + Return: Raises an exception on error + Obtain a dictionary with format {'tenant':{new_tenant_info}} + """ + if item not in ("tenants", "networks", "images"): + raise ROClientException("Unknown value for item '{}', must be 'tenants', 'nets' or " + "images".format(str(item))) + + image_actions = ['list','get','show','delete'] + if item == "images" and action not in image_actions: + raise ROClientException("Only available actions for item '{}' are {}\n" + "Requested action was '{}'".format(item, ', '.join(image_actions), action)) + if all_tenants: + tenant_text = "/any" + else: + tenant_text = "/"+self._get_tenant() + + if "datacenter_id" in kwargs or "datacenter_name" in kwargs: + datacenter = self._get_item_uuid(session, "datacenters", kwargs.get("datacenter"), all_tenants=all_tenants) + else: + datacenter = self.get_datacenter(session) + + if action=="list": + url = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item) + self.logger.debug("GET %s", url ) + mano_response = requests.get(url, headers=self.headers_req) + self.logger.debug("openmano response: %s", mano_response.text ) + content = self._parse_yaml(mano_response.text, response=True) + if mano_response.status_code==200: + return content + else: + raise ROClientException(str(content), http_code=mano_response.status) + elif action=="get" or action=="show": + url = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid) + self.logger.debug("GET %s", url ) + mano_response = requests.get(url, headers=self.headers_req) + self.logger.debug("openmano response: %s", mano_response.text ) + content = self._parse_yaml(mano_response.text, response=True) + if mano_response.status_code==200: + return content + else: + raise ROClientException(str(content), http_code=mano_response.status) + elif action=="delete": + url = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid) + self.logger.debug("DELETE %s", url ) + mano_response = requests.delete(url, headers=self.headers_req) + self.logger.debug("openmano response: %s", mano_response.text ) + 
content = self._parse_yaml(mano_response.text, response=True) + if mano_response.status_code==200: + return content + else: + raise ROClientException(str(content), http_code=mano_response.status) + elif action=="create": + if "descriptor" in kwargs: + if isinstance(kwargs["descriptor"], str): + descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format") ) + else: + descriptor = kwargs["descriptor"] + elif "name" in kwargs: + descriptor={item[:-1]: {"name": kwargs["name"]}} + else: + raise ROClientException("Missing descriptor") + + if item[:-1] not in descriptor or len(descriptor)!=1: + raise ROClientException("Descriptor must contain only one 'tenant' field") + if "name" in kwargs: + descriptor[ item[:-1] ]['name'] = kwargs["name"] + if "description" in kwargs: + descriptor[ item[:-1] ]['description'] = kwargs["description"] + payload_req = yaml.safe_dump(descriptor) + #print payload_req + url = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item) + self.logger.debug("openmano POST %s %s", url, payload_req) + mano_response = requests.post(url, headers = self.headers_req, data=payload_req) + self.logger.debug("openmano response: %s", mano_response.text ) + content = self._parse_yaml(mano_response.text, response=True) + if mano_response.status_code==200: + return content + else: + raise ROClientException(str(content), http_code=mano_response.status) + else: + raise ROClientException("Unknown value for action '{}".format(str(action))) + + +if __name__ == '__main__': + RO_URL = "http://localhost:9090/openmano" + TEST_TENANT = "myTenant" + TEST_VIM1 = "myvim" + TEST_URL1 = "https://localhost:5000/v1" + TEST_TYPE1 = "openstack" + TEST_CONFIG1 = {"use_floating_ip": True} + TEST_VIM2 = "myvim2" + TEST_URL2 = "https://localhost:5000/v2" + TEST_TYPE2 = "openvim" + TEST_CONFIG2 = {"config2": "config2", "config3": True} + + streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s" + logging.basicConfig(format=streamformat) + logger = logging.getLogger("ROClient") + + tenant_id = None + vim_id = False + loop = asyncio.get_event_loop() + myClient = ROClient(endpoint_url=RO_URL, loop=loop, loglevel="DEBUG") + try: + # test tenant + content = loop.run_until_complete(myClient.get_list("tenant")) + print("tenants", content) + content = loop.run_until_complete(myClient.create("tenant", name=TEST_TENANT)) + tenant_id = True + content = loop.run_until_complete(myClient.show("tenant", TEST_TENANT)) + print("tenant", TEST_TENANT, content) + content = loop.run_until_complete(myClient.edit("tenant", TEST_TENANT, description="another description")) + content = loop.run_until_complete(myClient.show("tenant", TEST_TENANT)) + print("tenant edited", TEST_TENANT, content) + myClient["tenant"] = TEST_TENANT + + + # test VIM + content = loop.run_until_complete(myClient.create("vim", name=TEST_VIM1, type=TEST_TYPE1, vim_url=TEST_URL1, config=TEST_CONFIG1)) + vim_id = True + content = loop.run_until_complete(myClient.get_list("vim")) + print("vim", content) + content = loop.run_until_complete(myClient.show("vim", TEST_VIM1)) + print("vim", TEST_VIM1, content) + content = loop.run_until_complete(myClient.edit("vim", TEST_VIM1, description="another description", + name=TEST_VIM2, type=TEST_TYPE2, vim_url=TEST_URL2, + config=TEST_CONFIG2)) + content = loop.run_until_complete(myClient.show("vim", TEST_VIM2)) + print("vim edited", TEST_VIM2, content) + + # test VIM_ACCOUNT + content = loop.run_until_complete(myClient.attach_datacenter(TEST_VIM2, vim_username='user', + 
vim_password='pass', vim_tenant_name='vimtenant1', config=TEST_CONFIG1)) + vim_id = True + content = loop.run_until_complete(myClient.get_list("vim_account")) + print("vim_account", content) + content = loop.run_until_complete(myClient.show("vim_account", TEST_VIM2)) + print("vim_account", TEST_VIM2, content) + content = loop.run_until_complete(myClient.edit("vim_account", TEST_VIM2, vim_username='user2', vim_password='pass2', + vim_tenant_name="vimtenant2", config=TEST_CONFIG2)) + content = loop.run_until_complete(myClient.show("vim_account", TEST_VIM2)) + print("vim_account edited", TEST_VIM2, content) + + myClient["vim"] = TEST_VIM2 + + except Exception as e: + logger.error("Error {}".format(e), exc_info=True) + + for item in (("vim_account", TEST_VIM1), ("vim", TEST_VIM1), + ("vim_account", TEST_VIM2), ("vim", TEST_VIM2), + ("tenant", TEST_TENANT)): + try: + content = loop.run_until_complete(myClient.delete(item[0], item[1])) + print("{} {} deleted; {}".format(item[0], item[1], content)) + except Exception as e: + if e.http_code == 404: + print("{} {} not present or already deleted".format(item[0], item[1])) + else: + logger.error("Error {}".format(e), exc_info=True) + + loop.close() + + diff --git a/osm_lcm/lcm.cfg b/osm_lcm/lcm.cfg new file mode 100644 index 0000000..c62ee25 --- /dev/null +++ b/osm_lcm/lcm.cfg @@ -0,0 +1,54 @@ + +# TODO currently is a pure yaml format. Consider to change it to [ini] style with yaml inside to be coherent with other modules + +#[global] +global: + loglevel: DEBUG + #logfile: /var/log/osm/lcm.log + +#[RO] +RO: + host: ro # hostname or IP + port: 9090 + tenant: osm + loglevel: DEBUG + #logfile: /var/log/osm/lcm-ro.log + +#[VCA] +VCA: + host: vca + port: 17070 + user: admin + secret: secret + loglevel: DEBUG + #logfile: /var/log/osm/lcm-vca.log + +#[database] +database: + driver: mongo # mongo or memory + host: mongo # hostname or IP + port: 27017 + name: osm + user: user + password: password + loglevel: DEBUG + #logfile: /var/log/osm/lcm-database.log + +#[storage] +storage: + driver: local # local filesystem + # for local provide file path + path: /app/storage + loglevel: DEBUG + #logfile: /var/log/osm/lcm-storage.log + +#[message] +message: + driver: kafka # local or kafka + # for local provide file path + path: /app/storage/kafka + # for kafka provide host and port + host: kafka + port: 9092 + loglevel: DEBUG + #logfile: /var/log/osm/lcm-message.log diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py new file mode 100644 index 0000000..69daaa1 --- /dev/null +++ b/osm_lcm/lcm.py @@ -0,0 +1,1482 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import asyncio +import yaml +import ROclient +from osm_common import dbmemory +from osm_common import dbmongo +from osm_common import fslocal +from osm_common import msglocal +from osm_common import msgkafka +import logging +import functools +import sys +from osm_common.dbbase import DbException +from osm_common.fsbase import FsException +from osm_common.msgbase import MsgException +from os import environ +# from vca import DeployApplication, RemoveApplication +from n2vc.vnf import N2VC +from n2vc import version as N2VC_version +# import os.path +# import time + +from copy import deepcopy +from http import HTTPStatus +from time import time + + +class LcmException(Exception): + pass + + +class Lcm: + + def __init__(self, config_file): + """ + Init, Connect to database, filesystem storage, and messaging + :param config: two level dictionary with configuration. 
Top level should contain 'database', 'storage', + :return: None + """ + + self.db = None + self.msg = None + self.fs = None + self.pings_not_received = 1 + + # contains created tasks/futures to be able to cancel + self.lcm_ns_tasks = {} + self.lcm_vim_tasks = {} + self.lcm_sdn_tasks = {} + # logging + self.logger = logging.getLogger('lcm') + # load configuration + config = self.read_config_file(config_file) + self.config = config + self.ro_config={ + "endpoint_url": "http://{}:{}/openmano".format(config["RO"]["host"], config["RO"]["port"]), + "tenant": config.get("tenant", "osm"), + "logger_name": "lcm.ROclient", + "loglevel": "ERROR", + } + + self.vca = config["VCA"] # TODO VCA + self.loop = None + + # logging + log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" + log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S') + config["database"]["logger_name"] = "lcm.db" + config["storage"]["logger_name"] = "lcm.fs" + config["message"]["logger_name"] = "lcm.msg" + if "logfile" in config["global"]: + file_handler = logging.handlers.RotatingFileHandler(config["global"]["logfile"], + maxBytes=100e6, backupCount=9, delay=0) + file_handler.setFormatter(log_formatter_simple) + self.logger.addHandler(file_handler) + else: + str_handler = logging.StreamHandler() + str_handler.setFormatter(log_formatter_simple) + self.logger.addHandler(str_handler) + + if config["global"].get("loglevel"): + self.logger.setLevel(config["global"]["loglevel"]) + + # logging other modules + for k1, logname in {"message": "lcm.msg", "database": "lcm.db", "storage": "lcm.fs"}.items(): + config[k1]["logger_name"] = logname + logger_module = logging.getLogger(logname) + if "logfile" in config[k1]: + file_handler = logging.handlers.RotatingFileHandler(config[k1]["logfile"], + maxBytes=100e6, backupCount=9, delay=0) + file_handler.setFormatter(log_formatter_simple) + logger_module.addHandler(file_handler) + if "loglevel" in config[k1]: + logger_module.setLevel(config[k1]["loglevel"]) + + self.n2vc = N2VC( + log=self.logger, + server=config['VCA']['host'], + port=config['VCA']['port'], + user=config['VCA']['user'], + secret=config['VCA']['secret'], + # TODO: This should point to the base folder where charms are stored, + # if there is a common one (like object storage). Otherwise, leave + # it unset and pass it via DeployCharms + # artifacts=config['VCA'][''], + artifacts=None, + ) + # check version of N2VC + # TODO enhance with int conversion or from distutils.version import LooseVersion + # or with list(map(int, version.split("."))) + if N2VC_version < "0.0.2": + raise LcmException("Not compatible osm/N2VC version '{}'. 
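Reviewer note: the constructor wires one RotatingFileHandler (or a StreamHandler) per logger name, reusing a single formatter, so each subsystem ("lcm", "lcm.db", "lcm.fs", "lcm.msg") can log to its own file. A condensed, self-contained sketch of that pattern; the file name is an example only.

# Condensed sketch of the per-module logger wiring done in Lcm.__init__.
import logging
import logging.handlers   # RotatingFileHandler lives in this submodule

fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s",
                        datefmt='%Y-%m-%dT%H:%M:%S')

for logname, logfile in (("lcm", "lcm-example.log"),        # example path only
                         ("lcm.db", None), ("lcm.fs", None), ("lcm.msg", None)):
    logger = logging.getLogger(logname)
    if logfile:
        handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=100e6, backupCount=9, delay=0)
    else:
        handler = logging.StreamHandler()     # child loggers also propagate to "lcm"
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    logger.setLevel("DEBUG")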
Needed '0.0.2' or higher".format(N2VC_version)) + try: + if config["database"]["driver"] == "mongo": + self.db = dbmongo.DbMongo() + self.db.db_connect(config["database"]) + elif config["database"]["driver"] == "memory": + self.db = dbmemory.DbMemory() + self.db.db_connect(config["database"]) + else: + raise LcmException("Invalid configuration param '{}' at '[database]':'driver'".format( + config["database"]["driver"])) + + if config["storage"]["driver"] == "local": + self.fs = fslocal.FsLocal() + self.fs.fs_connect(config["storage"]) + else: + raise LcmException("Invalid configuration param '{}' at '[storage]':'driver'".format( + config["storage"]["driver"])) + + if config["message"]["driver"] == "local": + self.msg = msglocal.MsgLocal() + self.msg.connect(config["message"]) + elif config["message"]["driver"] == "kafka": + self.msg = msgkafka.MsgKafka() + self.msg.connect(config["message"]) + else: + raise LcmException("Invalid configuration param '{}' at '[message]':'driver'".format( + config["storage"]["driver"])) + except (DbException, FsException, MsgException) as e: + self.logger.critical(str(e), exc_info=True) + raise LcmException(str(e)) + + def update_db(self, item, _id, _desc): + try: + self.db.replace(item, _id, _desc) + except DbException as e: + self.logger.error("Updating {} _id={}: {}".format(item, _id, e)) + + def update_db_2(self, item, _id, _desc): + try: + self.db.set_one(item, {"_id": _id}, _desc) + except DbException as e: + self.logger.error("Updating {} _id={}: {}".format(item, _id, e)) + + async def vim_create(self, vim_content, order_id): + vim_id = vim_content["_id"] + logging_text = "Task vim_create={} ".format(vim_id) + self.logger.debug(logging_text + "Enter") + db_vim = None + exc = None + try: + step = "Getting vim from db" + db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) + if "_admin" not in db_vim: + db_vim["_admin"] = {} + if "deployed" not in db_vim["_admin"]: + db_vim["_admin"]["deployed"] = {} + db_vim["_admin"]["deployed"]["RO"] = None + + step = "Creating vim at RO" + RO = ROclient.ROClient(self.loop, **self.ro_config) + vim_RO = deepcopy(vim_content) + vim_RO.pop("_id", None) + vim_RO.pop("_admin", None) + vim_RO.pop("schema_version", None) + vim_RO.pop("schema_type", None) + vim_RO.pop("vim_tenant_name", None) + vim_RO["type"] = vim_RO.pop("vim_type") + vim_RO.pop("vim_user", None) + vim_RO.pop("vim_password", None) + desc = await RO.create("vim", descriptor=vim_RO) + RO_vim_id = desc["uuid"] + db_vim["_admin"]["deployed"]["RO"] = RO_vim_id + self.update_db("vim_accounts", vim_id, db_vim) + + step = "Attach vim to RO tenant" + vim_RO = {"vim_tenant_name": vim_content["vim_tenant_name"], + "vim_username": vim_content["vim_user"], + "vim_password": vim_content["vim_password"], + "config": vim_content["config"] + } + desc = await RO.attach_datacenter(RO_vim_id , descriptor=vim_RO) + db_vim["_admin"]["operationalState"] = "ENABLED" + self.update_db("vim_accounts", vim_id, db_vim) + + self.logger.debug(logging_text + "Exit Ok RO_vim_id".format(RO_vim_id)) + return RO_vim_id + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_vim: + db_vim["_admin"]["operationalState"] = "ERROR" + db_vim["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + self.update_db("vim_accounts", vim_id, db_vim) + + async def 
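Reviewer note: vim_create sends two different payloads to RO: the datacenter description itself (create) and the tenant credentials (attach_datacenter). A small illustrative helper, not part of lcm.py, showing how the incoming vim_account record is split into those two dictionaries.

# Illustrative helper (not in lcm.py): split an OSM vim_account record into the
# two payloads that vim_create sends to RO.
from copy import deepcopy


def split_vim_content(vim_content):
    vim_ro = deepcopy(vim_content)
    for k in ("_id", "_admin", "schema_version", "schema_type",
              "vim_tenant_name", "vim_user", "vim_password"):
        vim_ro.pop(k, None)
    vim_ro["type"] = vim_ro.pop("vim_type")           # RO calls the field "type"
    attach_ro = {
        "vim_tenant_name": vim_content["vim_tenant_name"],
        "vim_username": vim_content["vim_user"],
        "vim_password": vim_content["vim_password"],
        "config": vim_content.get("config"),
    }
    return vim_ro, attach_ro


vim_ro, attach_ro = split_vim_content({
    "_id": "0000", "name": "myvim", "vim_type": "openstack",
    "vim_url": "https://keystone:5000/v3", "vim_tenant_name": "osm",
    "vim_user": "admin", "vim_password": "secret", "config": {}})
print(vim_ro, attach_ro)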
vim_edit(self, vim_content, order_id): + vim_id = vim_content["_id"] + logging_text = "Task vim_edit={} ".format(vim_id) + self.logger.debug(logging_text + "Enter") + db_vim = None + exc = None + step = "Getting vim from db" + try: + db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) + if db_vim.get("_admin") and db_vim["_admin"].get("deployed") and db_vim["_admin"]["deployed"].get("RO"): + RO_vim_id = db_vim["_admin"]["deployed"]["RO"] + step = "Editing vim at RO" + RO = ROclient.ROClient(self.loop, **self.ro_config) + vim_RO = deepcopy(vim_content) + vim_RO.pop("_id", None) + vim_RO.pop("_admin", None) + vim_RO.pop("schema_version", None) + vim_RO.pop("schema_type", None) + vim_RO.pop("vim_tenant_name", None) + vim_RO["type"] = vim_RO.pop("vim_type") + vim_RO.pop("vim_user", None) + vim_RO.pop("vim_password", None) + if vim_RO: + desc = await RO.edit("vim", RO_vim_id, descriptor=vim_RO) + + step = "Editing vim-account at RO tenant" + vim_RO = {} + for k in ("vim_tenant_name", "vim_password", "config"): + if k in vim_content: + vim_RO[k] = vim_content[k] + if "vim_user" in vim_content: + vim_content["vim_username"] = vim_content["vim_user"] + if vim_RO: + desc = await RO.edit("vim_account", RO_vim_id, descriptor=vim_RO) + db_vim["_admin"]["operationalState"] = "ENABLED" + self.update_db("vim_accounts", vim_id, db_vim) + + self.logger.debug(logging_text + "Exit Ok RO_vim_id".format(RO_vim_id)) + return RO_vim_id + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_vim: + db_vim["_admin"]["operationalState"] = "ERROR" + db_vim["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + self.update_db("vim_accounts", vim_id, db_vim) + + async def vim_delete(self, vim_id, order_id): + logging_text = "Task vim_delete={} ".format(vim_id) + self.logger.debug(logging_text + "Enter") + db_vim = None + exc = None + step = "Getting vim from db" + try: + db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) + if db_vim.get("_admin") and db_vim["_admin"].get("deployed") and db_vim["_admin"]["deployed"].get("RO"): + RO_vim_id = db_vim["_admin"]["deployed"]["RO"] + RO = ROclient.ROClient(self.loop, **self.ro_config) + step = "Detaching vim from RO tenant" + try: + await RO.detach_datacenter(RO_vim_id) + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + self.logger.debug(logging_text + "RO_vim_id={} already detached".format(RO_vim_id)) + else: + raise + + step = "Deleting vim from RO" + try: + await RO.delete("vim", RO_vim_id) + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + self.logger.debug(logging_text + "RO_vim_id={} already deleted".format(RO_vim_id)) + else: + raise + else: + # nothing to delete + self.logger.error(logging_text + "Skipping. 
There is not RO information at database") + self.db.del_one("vim_accounts", {"_id": vim_id}) + self.logger.debug("vim_delete task vim_id={} Exit Ok".format(vim_id)) + return None + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_vim: + db_vim["_admin"]["operationalState"] = "ERROR" + db_vim["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + self.update_db("vim_accounts", vim_id, db_vim) + + async def sdn_create(self, sdn_content, order_id): + sdn_id = sdn_content["_id"] + logging_text = "Task sdn_create={} ".format(sdn_id) + self.logger.debug(logging_text + "Enter") + db_sdn = None + exc = None + try: + step = "Getting sdn from db" + db_sdn = self.db.get_one("sdns", {"_id": sdn_id}) + if "_admin" not in db_sdn: + db_sdn["_admin"] = {} + if "deployed" not in db_sdn["_admin"]: + db_sdn["_admin"]["deployed"] = {} + db_sdn["_admin"]["deployed"]["RO"] = None + + step = "Creating sdn at RO" + RO = ROclient.ROClient(self.loop, **self.ro_config) + sdn_RO = deepcopy(sdn_content) + sdn_RO.pop("_id", None) + sdn_RO.pop("_admin", None) + sdn_RO.pop("schema_version", None) + sdn_RO.pop("schema_type", None) + sdn_RO.pop("description", None) + desc = await RO.create("sdn", descriptor=sdn_RO) + RO_sdn_id = desc["uuid"] + db_sdn["_admin"]["deployed"]["RO"] = RO_sdn_id + db_sdn["_admin"]["operationalState"] = "ENABLED" + self.update_db("sdns", sdn_id, db_sdn) + self.logger.debug(logging_text + "Exit Ok RO_sdn_id".format(RO_sdn_id)) + return RO_sdn_id + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_sdn: + db_sdn["_admin"]["operationalState"] = "ERROR" + db_sdn["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + self.update_db("sdns", sdn_id, db_sdn) + + async def sdn_edit(self, sdn_content, order_id): + sdn_id = sdn_content["_id"] + logging_text = "Task sdn_edit={} ".format(sdn_id) + self.logger.debug(logging_text + "Enter") + db_sdn = None + exc = None + step = "Getting sdn from db" + try: + db_sdn = self.db.get_one("sdns", {"_id": sdn_id}) + if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get("RO"): + RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] + RO = ROclient.ROClient(self.loop, **self.ro_config) + step = "Editing sdn at RO" + sdn_RO = deepcopy(sdn_content) + sdn_RO.pop("_id", None) + sdn_RO.pop("_admin", None) + sdn_RO.pop("schema_version", None) + sdn_RO.pop("schema_type", None) + sdn_RO.pop("description", None) + if sdn_RO: + desc = await RO.edit("sdn", RO_sdn_id, descriptor=sdn_RO) + db_sdn["_admin"]["operationalState"] = "ENABLED" + self.update_db("sdns", sdn_id, db_sdn) + + self.logger.debug(logging_text + "Exit Ok RO_sdn_id".format(RO_sdn_id)) + return RO_sdn_id + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_sdn: + db_sdn["_admin"]["operationalState"] = "ERROR" + db_sdn["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + 
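Reviewer note: deletion against RO is written to be idempotent: a 404 from detach or delete is logged and treated as already done, any other error is re-raised. A hedged sketch of that pattern as a reusable coroutine; ROClientException is the exception class from osm_lcm/ROclient.py, the helper itself is illustrative.

# Illustrative helper: ignore "not found" when cleaning up RO objects, re-raise anything else.
import ROclient


async def delete_if_present(ro_client, item, item_id, logger):
    try:
        await ro_client.delete(item, item_id)
        logger.debug("%s %s deleted", item, item_id)
        return True
    except ROclient.ROClientException as e:
        if e.http_code == 404:                # not found -> nothing to do, keep going
            logger.debug("%s %s already deleted", item, item_id)
            return False
        raise                                 # any other error is a real failure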
self.update_db("sdns", sdn_id, db_sdn) + + async def sdn_delete(self, sdn_id, order_id): + logging_text = "Task sdn_delete={} ".format(sdn_id) + self.logger.debug(logging_text + "Enter") + db_sdn = None + exc = None + step = "Getting sdn from db" + try: + db_sdn = self.db.get_one("sdns", {"_id": sdn_id}) + if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get("RO"): + RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] + RO = ROclient.ROClient(self.loop, **self.ro_config) + step = "Deleting sdn from RO" + try: + await RO.delete("sdn", RO_sdn_id) + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + self.logger.debug(logging_text + "RO_sdn_id={} already deleted".format(RO_sdn_id)) + else: + raise + else: + # nothing to delete + self.logger.error(logging_text + "Skipping. There is not RO information at database") + self.db.del_one("sdns", {"_id": sdn_id}) + self.logger.debug("sdn_delete task sdn_id={} Exit Ok".format(sdn_id)) + return None + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_sdn: + db_sdn["_admin"]["operationalState"] = "ERROR" + db_sdn["_admin"]["detailed-status"] = "ERROR {}: {}".format(step , exc) + self.update_db("sdns", sdn_id, db_sdn) + + def vnfd2RO(self, vnfd, new_id=None): + """ + Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd + :param vnfd: input vnfd + :param new_id: overrides vnf id if provided + :return: copy of vnfd + """ + ci_file = None + try: + vnfd_RO = deepcopy(vnfd) + vnfd_RO.pop("_id", None) + vnfd_RO.pop("_admin", None) + if new_id: + vnfd_RO["id"] = new_id + for vdu in vnfd_RO["vdu"]: + if "cloud-init-file" in vdu: + base_folder = vnfd["_admin"]["storage"] + clout_init_file = "{}/{}/cloud_init/{}".format( + base_folder["folder"], + base_folder["pkg-dir"], + vdu["cloud-init-file"] + ) + ci_file = self.fs.file_open(clout_init_file, "r") + # TODO: detect if binary or text. Propose to read as binary and try to decode to utf8. If fails convert to base 64 or similar + clout_init_content = ci_file.read() + ci_file.close() + ci_file = None + vdu.pop("cloud-init-file", None) + vdu["cloud-init"] = clout_init_content + return vnfd_RO + except FsException as e: + raise LcmException("Error reading file at vnfd {}: {} ".format(vnfd["_id"], e)) + finally: + if ci_file: + ci_file.close() + + def n2vc_callback(self, model_name, application_name, status, message, db_nsr, db_nslcmop, vnf_member_index, task=None): + """ + Callback both for charm status change and task completion + :param model_name: Charm model name + :param application_name: Charm application name + :param status: Can be + - blocked: The unit needs manual intervention + - maintenance: The unit is actively deploying/configuring + - waiting: The unit is waiting for another charm to be ready + - active: The unit is deployed, configured, and ready + - error: The charm has failed and needs attention. 
+ - terminated: The charm has been destroyed + - removing, + - removed + :param message: detailed message error + :param db_nsr: nsr database content + :param db_nslcmop: nslcmop database content + :param vnf_member_index: NSD vnf-member-index + :param task: None for charm status change, or task for completion task callback + :return: + """ + nsr_id = None + nslcmop_id = None + update_nsr = update_nslcmop = False + try: + nsr_id = db_nsr["_id"] + nslcmop_id = db_nslcmop["_id"] + nsr_lcm = db_nsr["_admin"]["deployed"] + ns_action = db_nslcmop["lcmOperationType"] + logging_text = "Task ns={} {}={} [n2vc_callback] vnf_index={}".format(nsr_id, ns_action, nslcmop_id, + vnf_member_index) + + if task: + if task.cancelled(): + self.logger.debug(logging_text + " task Cancelled") + # TODO update db_nslcmop + return + + if task.done(): + exc = task.exception() + if exc: + self.logger.error(logging_text + " task Exception={}".format(exc)) + if ns_action in ("instantiate", "terminate"): + nsr_lcm["VCA"][vnf_member_index]['operational-status'] = "error" + nsr_lcm["VCA"][vnf_member_index]['detailed-status'] = str(exc) + elif ns_action == "action": + db_nslcmop["operationState"] = "FAILED" + db_nslcmop["detailed-status"] = str(exc) + db_nslcmop["statusEnteredTime"] = time() + update_nslcmop = True + return + + else: + self.logger.debug(logging_text + " task Done") + # TODO revise with Adam if action is finished and ok when task is done + if ns_action == "action": + db_nslcmop["operationState"] = "COMPLETED" + db_nslcmop["detailed-status"] = "Done" + db_nslcmop["statusEnteredTime"] = time() + update_nslcmop = True + # task is Done, but callback is still ongoing. So ignore + return + elif status: + self.logger.debug(logging_text + " Enter status={}".format(status)) + if nsr_lcm["VCA"][vnf_member_index]['operational-status'] == status: + return # same status, ignore + nsr_lcm["VCA"][vnf_member_index]['operational-status'] = status + nsr_lcm["VCA"][vnf_member_index]['detailed-status'] = str(message) + else: + self.logger.critical(logging_text + " Enter with bad parameters", exc_info=True) + return + + all_active = True + status_map = {} + n2vc_error_text = [] # contain text error list. 
If empty no one is in error status + for vnf_index, vca_info in nsr_lcm["VCA"].items(): + vca_status = vca_info["operational-status"] + if vca_status not in status_map: + # Initialize it + status_map[vca_status] = 0 + status_map[vca_status] += 1 + + if vca_status != "active": + all_active = False + elif vca_status in ("error", "blocked"): + n2vc_error_text.append("member_vnf_index={} {}: {}".format(vnf_member_index, vca_status, + vca_info["detailed-status"])) + + if all_active: + self.logger.debug("[n2vc_callback] ns_instantiate={} vnf_index={} All active".format(nsr_id, vnf_member_index)) + db_nsr["config-status"] = "configured" + db_nsr["detailed-status"] = "done" + db_nslcmop["operationState"] = "COMPLETED" + db_nslcmop["detailed-status"] = "Done" + db_nslcmop["statusEnteredTime"] = time() + elif n2vc_error_text: + db_nsr["config-status"] = "failed" + error_text = "fail configuring " + ";".join(n2vc_error_text) + db_nsr["detailed-status"] = error_text + db_nslcmop["operationState"] = "FAILED_TEMP" + db_nslcmop["detailed-status"] = error_text + db_nslcmop["statusEnteredTime"] = time() + else: + cs = "configuring: " + separator = "" + for status, num in status_map.items(): + cs += separator + "{}: {}".format(status, num) + separator = ", " + db_nsr["config-status"] = cs + db_nsr["detailed-status"] = cs + db_nslcmop["detailed-status"] = cs + update_nsr = update_nslcmop = True + + except Exception as e: + self.logger.critical("[n2vc_callback] vnf_index={} Exception {}".format(vnf_member_index, e), exc_info=True) + finally: + try: + if update_nslcmop: + self.update_db("nslcmops", nslcmop_id, db_nslcmop) + if update_nsr: + self.update_db("nsrs", nsr_id, db_nsr) + except Exception as e: + self.logger.critical("[n2vc_callback] vnf_index={} Update database Exception {}".format( + vnf_member_index, e), exc_info=True) + + def ns_params_2_RO(self, ns_params): + """ + Creates a RO ns descriptor from OSM ns_instantite params + :param ns_params: OSM instantiate params + :return: The RO ns descriptor + """ + vim_2_RO = {} + def vim_account_2_RO(vim_account): + if vim_account in vim_2_RO: + return vim_2_RO[vim_account] + db_vim = self.db.get_one("vim_accounts", {"_id": vim_account}) + # if db_vim["_admin"]["operationalState"] == "PROCESSING": + # #TODO check if VIM is creating and wait + if db_vim["_admin"]["operationalState"] != "ENABLED": + raise LcmException("VIM={} is not available. 
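Reviewer note: the callback reduces the per-VNF charm states into a single NS config-status: all active means configured, any error/blocked entry fails the operation, anything else is summarized as "configuring: <state>: <count>". The intended reduction can be viewed as a small pure function; this is a sketch of that intent, not the code used above.

# Sketch of the VCA status reduction intended in n2vc_callback.
def summarize_vca(vca_by_vnf):
    status_map = {}
    errors = []
    for vnf_index, info in vca_by_vnf.items():
        status = info["operational-status"]
        status_map[status] = status_map.get(status, 0) + 1
        if status in ("error", "blocked"):
            errors.append("member_vnf_index={} {}: {}".format(
                vnf_index, status, info.get("detailed-status", "")))
    if errors:
        return "failed", "fail configuring " + ";".join(errors)
    if set(status_map) == {"active"}:
        return "configured", "done"
    return "configuring", "configuring: " + ", ".join(
        "{}: {}".format(s, n) for s, n in status_map.items())


print(summarize_vca({"1": {"operational-status": "active"},
                     "2": {"operational-status": "waiting", "detailed-status": "waiting for peer"}}))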
operationalState={}".format( + vim_account, db_vim["_admin"]["operationalState"])) + RO_vim_id = db_vim["_admin"]["deployed"]["RO"] + vim_2_RO[vim_account] = RO_vim_id + return RO_vim_id + + if not ns_params: + return None + RO_ns_params = { + # "name": ns_params["nsName"], + # "description": ns_params.get("nsDescription"), + "datacenter": vim_account_2_RO(ns_params["vimAccountId"]), + # "scenario": ns_params["nsdId"], + "vnfs": {}, + "networks": {}, + } + if ns_params.get("ssh-authorized-key"): + RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh-authorized-key"]} + if ns_params.get("vnf"): + for vnf in ns_params["vnf"]: + RO_vnf = {} + if "vimAccountId" in vnf: + RO_vnf["datacenter"] = vim_account_2_RO(vnf["vimAccountId"]) + if RO_vnf: + RO_ns_params["vnfs"][vnf["member-vnf-index"]] = RO_vnf + if ns_params.get("vld"): + for vld in ns_params["vld"]: + RO_vld = {} + if "ip-profile" in vld: + RO_vld["ip-profile"] = vld["ip-profile"] + if "vim-network-name" in vld: + RO_vld["sites"] = [] + if isinstance(vld["vim-network-name"], dict): + for vim_account, vim_net in vld["vim-network-name"].items(): + RO_vld["sites"].append({ + "netmap-use": vim_net, + "datacenter": vim_account_2_RO(vim_account) + }) + else: #isinstance str + RO_vld["sites"].append({"netmap-use": vld["vim-network-name"]}) + if RO_vld: + RO_ns_params["networks"][vld["name"]] = RO_vld + return RO_ns_params + + async def ns_instantiate(self, nsr_id, nslcmop_id): + logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id) + self.logger.debug(logging_text + "Enter") + # get all needed from database + db_nsr = None + db_nslcmop = None + db_vnfr = {} + exc = None + step = "Getting nsr, nslcmop, RO_vims from db" + try: + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + nsd = db_nsr["nsd"] + nsr_name = db_nsr["name"] # TODO short-name?? + needed_vnfd = {} + vnfr_filter = {"nsr-id-ref": nsr_id, "member-vnf-index-ref": None} + for c_vnf in nsd["constituent-vnfd"]: + vnfd_id = c_vnf["vnfd-id-ref"] + vnfr_filter["member-vnf-index-ref"] = c_vnf["member-vnf-index"] + db_vnfr[c_vnf["member-vnf-index"]] = self.db.get_one("vnfrs", vnfr_filter) + if vnfd_id not in needed_vnfd: + step = "Getting vnfd={} from db".format(vnfd_id) + needed_vnfd[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id}) + + nsr_lcm = db_nsr["_admin"].get("deployed") + if not nsr_lcm: + nsr_lcm = db_nsr["_admin"]["deployed"] = { + "id": nsr_id, + "RO": {"vnfd_id": {}, "nsd_id": None, "nsr_id": None, "nsr_status": "SCHEDULED"}, + "nsr_ip": {}, + "VCA": {}, + } + db_nsr["detailed-status"] = "creating" + db_nsr["operational-status"] = "init" + + RO = ROclient.ROClient(self.loop, **self.ro_config) + + # get vnfds, instantiate at RO + for vnfd_id, vnfd in needed_vnfd.items(): + step = db_nsr["detailed-status"] = "Creating vnfd={} at RO".format(vnfd_id) + self.logger.debug(logging_text + step) + vnfd_id_RO = nsr_id + "." + vnfd_id[:200] + + # look if present + vnfd_list = await RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO}) + if vnfd_list: + nsr_lcm["RO"]["vnfd_id"][vnfd_id] = vnfd_list[0]["uuid"] + self.logger.debug(logging_text + "RO vnfd={} exist. 
Using RO_id={}".format( + vnfd_id, vnfd_list[0]["uuid"])) + else: + vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO) + desc = await RO.create("vnfd", descriptor=vnfd_RO) + nsr_lcm["RO"]["vnfd_id"][vnfd_id] = desc["uuid"] + db_nsr["_admin"]["nsState"] = "INSTANTIATED" + self.update_db("nsrs", nsr_id, db_nsr) + + # create nsd at RO + nsd_id = nsd["id"] + step = db_nsr["detailed-status"] = "Creating nsd={} at RO".format(nsd_id) + self.logger.debug(logging_text + step) + + nsd_id_RO = nsd_id + "." + nsd_id[:200] + nsd_list = await RO.get_list("nsd", filter_by={"osm_id": nsd_id_RO}) + if nsd_list: + nsr_lcm["RO"]["nsd_id"] = nsd_list[0]["uuid"] + self.logger.debug(logging_text + "RO nsd={} exist. Using RO_id={}".format( + nsd_id, nsd_list[0]["uuid"])) + else: + nsd_RO = deepcopy(nsd) + nsd_RO["id"] = nsd_id_RO + nsd_RO.pop("_id", None) + nsd_RO.pop("_admin", None) + for c_vnf in nsd_RO["constituent-vnfd"]: + vnfd_id = c_vnf["vnfd-id-ref"] + c_vnf["vnfd-id-ref"] = nsr_id + "." + vnfd_id[:200] + desc = await RO.create("nsd", descriptor=nsd_RO) + db_nsr["_admin"]["nsState"] = "INSTANTIATED" + nsr_lcm["RO"]["nsd_id"] = desc["uuid"] + self.update_db("nsrs", nsr_id, db_nsr) + + # Crate ns at RO + # if present use it unless in error status + RO_nsr_id = nsr_lcm["RO"].get("nsr_id") + if RO_nsr_id: + try: + step = db_nsr["detailed-status"] = "Looking for existing ns at RO" + self.logger.debug(logging_text + step + " RO_ns_id={}".format(RO_nsr_id)) + desc = await RO.show("ns", RO_nsr_id) + except ROclient.ROClientException as e: + if e.http_code != HTTPStatus.NOT_FOUND: + raise + RO_nsr_id = nsr_lcm["RO"]["nsr_id"] = None + if RO_nsr_id: + ns_status, ns_status_info = RO.check_ns_status(desc) + nsr_lcm["RO"]["nsr_status"] = ns_status + if ns_status == "ERROR": + step = db_nsr["detailed-status"] = "Deleting ns at RO" + self.logger.debug(logging_text + step + " RO_ns_id={}".format(RO_nsr_id)) + await RO.delete("ns", RO_nsr_id) + RO_nsr_id = nsr_lcm["RO"]["nsr_id"] = None + if not RO_nsr_id: + step = db_nsr["detailed-status"] = "Creating ns at RO" + self.logger.debug(logging_text + step) + RO_ns_params = self.ns_params_2_RO(db_nsr.get("instantiate_params")) + desc = await RO.create("ns", descriptor=RO_ns_params, + name=db_nsr["name"], + scenario=nsr_lcm["RO"]["nsd_id"]) + RO_nsr_id = nsr_lcm["RO"]["nsr_id"] = desc["uuid"] + db_nsr["_admin"]["nsState"] = "INSTANTIATED" + nsr_lcm["RO"]["nsr_status"] = "BUILD" + + self.update_db("nsrs", nsr_id, db_nsr) + # update VNFR vimAccount + step = "Updating VNFR vimAcccount" + for vnf_index, vnfr in db_vnfr.items(): + if vnfr.get("vim-account-id"): + continue + if db_nsr["instantiate_params"].get("vnf") and db_nsr["instantiate_params"]["vnf"].get(vnf_index) \ + and db_nsr["instantiate_params"]["vnf"][vnf_index].get("vimAccountId"): + vnfr["vim-account-id"] = db_nsr["instantiate_params"]["vnf"][vnf_index]["vimAccountId"] + else: + vnfr["vim-account-id"] = db_nsr["instantiate_params"]["vimAccountId"] + self.update_db("vnfrs", vnfr["_id"], vnfr) + + # wait until NS is ready + step = ns_status_detailed = "Waiting ns ready at RO" + db_nsr["detailed-status"] = ns_status_detailed + self.logger.debug(logging_text + step + " RO_ns_id={}".format(RO_nsr_id)) + deployment_timeout = 2*3600 # Two hours + while deployment_timeout > 0: + desc = await RO.show("ns", RO_nsr_id) + ns_status, ns_status_info = RO.check_ns_status(desc) + nsr_lcm["RO"]["nsr_status"] = ns_status + if ns_status == "ERROR": + raise ROclient.ROClientException(ns_status_info) + elif ns_status == "BUILD": + 
db_nsr["detailed-status"] = ns_status_detailed + "; {}".format(ns_status_info) + self.update_db("nsrs", nsr_id, db_nsr) + elif ns_status == "ACTIVE": + step = "Getting ns VIM information" + ns_RO_info = nsr_lcm["nsr_ip"] = RO.get_ns_vnf_info(desc) + break + else: + assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status) + + await asyncio.sleep(5, loop=self.loop) + deployment_timeout -= 5 + if deployment_timeout <= 0: + raise ROclient.ROClientException("Timeout waiting ns to be ready") + step = "Updating VNFRs" + for vnf_index, vnfr_deployed in ns_RO_info.items(): + vnfr = db_vnfr[vnf_index] + vnfr["ip-address"] = vnfr_deployed.get("ip_address") + for vdu_id, vdu_deployed in vnfr_deployed["vdur"].items(): + for vdur in vnfr["vdur"]: + if vdur["vdu-id-ref"] == vdu_id: + vdur["vim-id"] = vdu_deployed.get("vim_id") + vdur["ip-address"] = vdu_deployed.get("ip_address") + break + self.update_db("vnfrs", vnfr["_id"], vnfr) + + db_nsr["detailed-status"] = "Configuring vnfr" + self.update_db("nsrs", nsr_id, db_nsr) + + # The parameters we'll need to deploy a charm + number_to_configure = 0 + + def deploy(): + """An inner function to deploy the charm from either vnf or vdu + """ + + # Login to the VCA. + # if number_to_configure == 0: + # self.logger.debug("Logging into N2VC...") + # task = asyncio.ensure_future(self.n2vc.login()) + # yield from asyncio.wait_for(task, 30.0) + # self.logger.debug("Logged into N2VC!") + + ## await self.n2vc.login() + + # Note: The charm needs to exist on disk at the location + # specified by charm_path. + base_folder = vnfd["_admin"]["storage"] + storage_params = self.fs.get_params() + charm_path = "{}{}/{}/charms/{}".format( + storage_params["path"], + base_folder["folder"], + base_folder["pkg-dir"], + proxy_charm + ) + + # Setup the runtime parameters for this VNF + params['rw_mgmt_ip'] = db_vnfr[vnf_index]["ip-address"] + + # ns_name will be ignored in the current version of N2VC + # but will be implemented for the next point release. + model_name = 'default' + application_name = self.n2vc.FormatApplicationName( + nsr_name, + vnf_index, + vnfd['name'], + ) + + nsr_lcm["VCA"][vnf_index] = { + "model": model_name, + "application": application_name, + "operational-status": "init", + "detailed-status": "", + "vnfd_id": vnfd_id, + } + + self.logger.debug("Task create_ns={} Passing artifacts path '{}' for {}".format(nsr_id, charm_path, proxy_charm)) + task = asyncio.ensure_future( + self.n2vc.DeployCharms( + model_name, # The network service name + application_name, # The application name + vnfd, # The vnf descriptor + charm_path, # Path to charm + params, # Runtime params, like mgmt ip + {}, # for native charms only + self.n2vc_callback, # Callback for status changes + db_nsr, # Callback parameter + db_nslcmop, + vnf_index, # Callback parameter + None, # Callback parameter (task) + ) + ) + task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None, None, + db_nsr, db_nslcmop, vnf_index)) + self.lcm_ns_tasks[nsr_id][nslcmop_id]["create_charm:" + vnf_index] = task + + # TODO: Make this call inside deploy() + # Login to the VCA. If there are multiple calls to login(), + # subsequent calls will be a nop and return immediately. 
+ await self.n2vc.login() + + step = "Looking for needed vnfd to configure" + self.logger.debug(logging_text + step) + for c_vnf in nsd["constituent-vnfd"]: + vnfd_id = c_vnf["vnfd-id-ref"] + vnf_index = str(c_vnf["member-vnf-index"]) + vnfd = needed_vnfd[vnfd_id] + + # Check if this VNF has a charm configuration + vnf_config = vnfd.get("vnf-configuration") + + if vnf_config and vnf_config.get("juju"): + proxy_charm = vnf_config["juju"]["charm"] + params = {} + + if proxy_charm: + if 'initial-config-primitive' in vnf_config: + params['initial-config-primitive'] = vnf_config['initial-config-primitive'] + + deploy() + number_to_configure += 1 + + # Deploy charms for each VDU that supports one. + for vdu in vnfd['vdu']: + vdu_config = vdu.get('vdu-configuration') + proxy_charm = None + params = {} + + if vdu_config and vdu_config.get("juju"): + proxy_charm = vdu_config["juju"]["charm"] + + if 'initial-config-primitive' in vdu_config: + params['initial-config-primitive'] = vdu_config['initial-config-primitive'] + + if proxy_charm: + deploy() + number_to_configure += 1 + + if number_to_configure: + db_nsr["config-status"] = "configuring" + db_nsr["detailed-status"] = "configuring: init: {}".format(number_to_configure) + db_nslcmop["detailed-status"] = "configuring: init: {}".format(number_to_configure) + else: + db_nslcmop["operationState"] = "COMPLETED" + db_nslcmop["detailed-status"] = "done" + db_nsr["config-status"] = "configured" + db_nsr["detailed-status"] = "done" + db_nsr["operational-status"] = "running" + self.update_db("nsrs", nsr_id, db_nsr) + self.update_db("nslcmops", nslcmop_id, db_nslcmop) + self.logger.debug("Task ns_instantiate={} Exit Ok".format(nsr_id)) + return nsr_lcm + + except (ROclient.ROClientException, DbException, LcmException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) + exc = e + finally: + if exc: + if db_nsr: + db_nsr["detailed-status"] = "ERROR {}: {}".format(step, exc) + db_nsr["operational-status"] = "failed" + self.update_db("nsrs", nsr_id, db_nsr) + if db_nslcmop: + db_nslcmop["detailed-status"] = "FAILED {}: {}".format(step, exc) + db_nslcmop["operationState"] = "FAILED" + db_nslcmop["statusEnteredTime"] = time() + self.update_db("nslcmops", nslcmop_id, db_nslcmop) + + async def ns_terminate(self, nsr_id, nslcmop_id): + logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id) + self.logger.debug(logging_text + "Enter") + db_nsr = None + db_nslcmop = None + exc = None + step = "Getting nsr, nslcmop from db" + failed_detail = [] # annotates all failed error messages + vca_task_list = [] + vca_task_dict = {} + try: + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + # nsd = db_nsr["nsd"] + nsr_lcm = deepcopy(db_nsr["_admin"]["deployed"]) + if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED": + return + # TODO ALF remove + # db_vim = self.db.get_one("vim_accounts", {"_id": db_nsr["datacenter"]}) + # #TODO check if VIM is creating and wait + # RO_vim_id = db_vim["_admin"]["deployed"]["RO"] + + db_nsr_update = { + "operational-status": "terminating", + "config-status": "terminating", + "detailed-status": "Deleting charms", + } + self.update_db_2("nsrs", nsr_id, db_nsr_update) + + try: + self.logger.debug(logging_text + step) + for vnf_index, deploy_info in nsr_lcm["VCA"].items(): + if deploy_info and 
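Reviewer note: the loop above deploys one proxy charm per VNF that declares vnf-configuration/juju/charm and one per VDU with vdu-configuration/juju/charm, passing any initial-config-primitive as runtime params. A compact sketch of just the "which charms does this VNFD ask for" decision, as an illustrative pure function over the descriptor.

# Illustrative: list the (level, element id, charm, params) tuples a VNFD asks to be deployed.
def charms_in_vnfd(vnfd):
    wanted = []
    vnf_config = vnfd.get("vnf-configuration") or {}
    if vnf_config.get("juju", {}).get("charm"):
        params = {}
        if "initial-config-primitive" in vnf_config:
            params["initial-config-primitive"] = vnf_config["initial-config-primitive"]
        wanted.append(("vnf", vnfd.get("id"), vnf_config["juju"]["charm"], params))
    for vdu in vnfd.get("vdu", ()):
        vdu_config = vdu.get("vdu-configuration") or {}
        if vdu_config.get("juju", {}).get("charm"):
            params = {}
            if "initial-config-primitive" in vdu_config:
                params["initial-config-primitive"] = vdu_config["initial-config-primitive"]
            wanted.append(("vdu", vdu.get("id"), vdu_config["juju"]["charm"], params))
    return wanted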
deploy_info.get("application"): + task = asyncio.ensure_future( + self.n2vc.RemoveCharms( + deploy_info['model'], + deploy_info['application'], + # self.n2vc_callback, + # db_nsr, + # db_nslcmop, + # vnf_index, + ) + ) + vca_task_list.append(task) + vca_task_dict[vnf_index] = task + # task.add_done_callback(functools.partial(self.n2vc_callback, deploy_info['model'], + # deploy_info['application'], None, db_nsr, + # db_nslcmop, vnf_index)) + self.lcm_ns_tasks[nsr_id][nslcmop_id]["delete_charm:" + vnf_index] = task + except Exception as e: + self.logger.debug(logging_text + "Failed while deleting charms: {}".format(e)) + # remove from RO + + RO = ROclient.ROClient(self.loop, **self.ro_config) + # Delete ns + RO_nsr_id = nsr_lcm["RO"].get("nsr_id") + if RO_nsr_id: + try: + step = db_nsr["detailed-status"] = "Deleting ns at RO" + self.logger.debug(logging_text + step) + desc = await RO.delete("ns", RO_nsr_id) + nsr_lcm["RO"]["nsr_id"] = None + nsr_lcm["RO"]["nsr_status"] = "DELETED" + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + nsr_lcm["RO"]["nsr_id"] = None + nsr_lcm["RO"]["nsr_status"] = "DELETED" + self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(RO_nsr_id)) + elif e.http_code == 409: #conflict + failed_detail.append("RO_ns_id={} delete conflict: {}".format(RO_nsr_id, e)) + self.logger.debug(logging_text + failed_detail[-1]) + else: + failed_detail.append("RO_ns_id={} delete error: {}".format(RO_nsr_id, e)) + self.logger.error(logging_text + failed_detail[-1]) + + # Delete nsd + RO_nsd_id = nsr_lcm["RO"]["nsd_id"] + if RO_nsd_id: + try: + step = db_nsr["detailed-status"] = "Deleting nsd at RO" + desc = await RO.delete("nsd", RO_nsd_id) + self.logger.debug(logging_text + "RO_nsd_id={} deleted".format(RO_nsd_id)) + nsr_lcm["RO"]["nsd_id"] = None + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + nsr_lcm["RO"]["nsd_id"] = None + self.logger.debug(logging_text + "RO_nsd_id={} already deleted".format(RO_nsd_id)) + elif e.http_code == 409: #conflict + failed_detail.append("RO_nsd_id={} delete conflict: {}".format(RO_nsd_id, e)) + self.logger.debug(logging_text + failed_detail[-1]) + else: + failed_detail.append("RO_nsd_id={} delete error: {}".format(RO_nsd_id, e)) + self.logger.error(logging_text + failed_detail[-1]) + + for vnf_id, RO_vnfd_id in nsr_lcm["RO"]["vnfd_id"].items(): + if not RO_vnfd_id: + continue + try: + step = db_nsr["detailed-status"] = "Deleting vnfd={} at RO".format(vnf_id) + desc = await RO.delete("vnfd", RO_vnfd_id) + self.logger.debug(logging_text + "RO_vnfd_id={} deleted".format(RO_vnfd_id)) + nsr_lcm["RO"]["vnfd_id"][vnf_id] = None + except ROclient.ROClientException as e: + if e.http_code == 404: # not found + nsr_lcm["RO"]["vnfd_id"][vnf_id] = None + self.logger.debug(logging_text + "RO_vnfd_id={} already deleted ".format(RO_vnfd_id)) + elif e.http_code == 409: #conflict + failed_detail.append("RO_vnfd_id={} delete conflict: {}".format(RO_vnfd_id, e)) + self.logger.debug(logging_text + failed_detail[-1]) + else: + failed_detail.append("RO_vnfd_id={} delete error: {}".format(RO_vnfd_id, e)) + self.logger.error(logging_text + failed_detail[-1]) + + if vca_task_list: + await asyncio.wait(vca_task_list, timeout=300) + for vnf_index, task in vca_task_dict.items(): + if task.cancelled(): + failed_detail.append("VCA[{}] Deletion has been cancelled".format(vnf_index)) + elif task.done(): + exc = task.exception() + if exc: + failed_detail.append("VCA[{}] Deletion exception: 
{}".format(vnf_index, exc)) + else: + nsr_lcm["VCA"][vnf_index] = None + else: # timeout + # TODO Should it be cancelled?!! + task.cancel() + failed_detail.append("VCA[{}] Deletion timeout".format(vnf_index)) + + if failed_detail: + self.logger.error(logging_text + " ;".join(failed_detail)) + db_nsr_update = { + "operational-status": "failed", + "detailed-status": "Deletion errors " + "; ".join(failed_detail), + "_admin": {"deployed": nsr_lcm, } + } + db_nslcmop_update = { + "detailed-status": "; ".join(failed_detail), + "operationState": "FAILED", + "statusEnteredTime": time() + } + elif db_nslcmop["operationParams"].get("autoremove"): + self.db.del_one("nsrs", {"_id": nsr_id}) + self.db.del_list("nslcmops", {"nsInstanceId": nsr_id}) + self.db.del_list("vnfrs", {"nsr-id-ref": nsr_id}) + else: + db_nsr_update = { + "operational-status": "terminated", + "detailed-status": "Done", + "_admin": {"deployed": nsr_lcm, "nsState": "NOT_INSTANTIATED"} + } + db_nslcmop_update = { + "detailed-status": "Done", + "operationState": "COMPLETED", + "statusEnteredTime": time() + } + self.logger.debug(logging_text + "Exit") + + except (ROclient.ROClientException, DbException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + exc = e + finally: + if exc and db_nslcmop: + db_nslcmop_update = { + "detailed-status": "FAILED {}: {}".format(step, exc), + "operationState": "FAILED", + "statusEnteredTime": time(), + } + if db_nslcmop_update: + self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update) + if db_nsr_update: + self.update_db_2("nsrs", nsr_id, db_nsr_update) + + async def ns_action(self, nsr_id, nslcmop_id): + logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id) + self.logger.debug(logging_text + "Enter") + # get all needed from database + db_nsr = None + db_nslcmop = None + db_nslcmop_update = None + exc = None + try: + step = "Getting information from database" + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + nsr_lcm = db_nsr["_admin"].get("deployed") + vnf_index = db_nslcmop["operationParams"]["vnf_member_index"] + + #TODO check if ns is in a proper status + vca_deployed = nsr_lcm["VCA"].get(vnf_index) + if not vca_deployed: + raise LcmException("charm for vnf_member_index={} is not deployed".format(vnf_index)) + model_name = vca_deployed.get("model") + application_name = vca_deployed.get("application") + if not model_name or not application_name: + raise LcmException("charm for vnf_member_index={} is not properly deployed".format(vnf_index)) + if vca_deployed["operational-status"] != "active": + raise LcmException("charm for vnf_member_index={} operational_status={} not 'active'".format( + vnf_index, vca_deployed["operational-status"])) + primitive = db_nslcmop["operationParams"]["primitive"] + primitive_params = db_nslcmop["operationParams"]["primitive_params"] + callback = None # self.n2vc_callback + callback_args = () # [db_nsr, db_nslcmop, vnf_index, None] + await self.n2vc.login() + task = asyncio.ensure_future( + self.n2vc.ExecutePrimitive( + model_name, + application_name, + primitive, callback, + *callback_args, + **primitive_params + ) + ) + # task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None, + # db_nsr, db_nslcmop, vnf_index)) + # self.lcm_ns_tasks[nsr_id][nslcmop_id]["action: " + primitive] = task + # wait until 
completed with timeout + await asyncio.wait((task,), timeout=300) + + result = "FAILED" # by default + result_detail = "" + if task.cancelled(): + db_nslcmop["detailed-status"] = "Task has been cancelled" + elif task.done(): + exc = task.exception() + if exc: + result_detail = str(exc) + else: + self.logger.debug(logging_text + " task Done") + # TODO revise with Adam if action is finished and ok when task is done or callback is needed + result = "COMPLETED" + result_detail = "Done" + else: # timeout + # TODO Should it be cancelled?!! + task.cancel() + result_detail = "timeout" + + db_nslcmop_update = { + "detailed-status": result_detail, + "operationState": result, + "statusEnteredTime": time() + } + self.logger.debug(logging_text + " task Done with result {} {}".format(result, result_detail)) + return # database update is called inside finally + + except (DbException, LcmException) as e: + self.logger.error(logging_text + "Exit Exception {}".format(e)) + exc = e + except Exception as e: + self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) + exc = e + finally: + if exc and db_nslcmop: + db_nslcmop_update = { + "detailed-status": "FAILED {}: {}".format(step, exc), + "operationState": "FAILED", + "statusEnteredTime": time(), + } + if db_nslcmop_update: + self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update) + + async def test(self, param=None): + self.logger.debug("Starting/Ending test task: {}".format(param)) + + def cancel_tasks(self, topic, _id): + """ + Cancel all active tasks of a concrete nsr or vim identified for _id + :param topic: can be ns or vim_account + :param _id: nsr or vim identity + :return: None, or raises an exception if not possible + """ + if topic == "ns": + lcm_tasks = self.lcm_ns_tasks + elif topic== "vim_account": + lcm_tasks = self.lcm_vim_tasks + elif topic== "sdn": + lcm_tasks = self.lcm_sdn_tasks + + if not lcm_tasks.get(_id): + return + for order_id, tasks_set in lcm_tasks[_id].items(): + for task_name, task in tasks_set.items(): + result = task.cancel() + if result: + self.logger.debug("{} _id={} order_id={} task={} cancelled".format(topic, _id, order_id, task_name)) + lcm_tasks[_id] = {} + + async def kafka_ping(self): + self.logger.debug("Task kafka_ping Enter") + consecutive_errors = 0 + first_start = True + kafka_has_received = False + self.pings_not_received = 1 + while True: + try: + await self.msg.aiowrite("admin", "ping", {"from": "lcm", "to": "lcm"}, self.loop) + # time between pings are low when it is not received and at starting + wait_time = 5 if not kafka_has_received else 120 + if not self.pings_not_received: + kafka_has_received = True + self.pings_not_received += 1 + await asyncio.sleep(wait_time, loop=self.loop) + if self.pings_not_received > 10: + raise LcmException("It is not receiving pings from Kafka bus") + consecutive_errors = 0 + first_start = False + except LcmException: + raise + except Exception as e: + # if not first_start is the first time after starting. So leave more time and wait + # to allow kafka starts + if consecutive_errors == 8 if not first_start else 30: + self.logger.error("Task kafka_read task exit error too many errors. 
Exception: {}".format(e)) + raise + consecutive_errors += 1 + self.logger.error("Task kafka_read retrying after Exception {}".format(e)) + wait_time = 1 if not first_start else 5 + await asyncio.sleep(wait_time, loop=self.loop) + + async def kafka_read(self): + self.logger.debug("Task kafka_read Enter") + order_id = 1 + # future = asyncio.Future() + consecutive_errors = 0 + first_start = True + while consecutive_errors < 10: + try: + topics = ("admin", "ns", "vim_account", "sdn") + topic, command, params = await self.msg.aioread(topics, self.loop) + self.logger.debug("Task kafka_read receives {} {}: {}".format(topic, command, params)) + consecutive_errors = 0 + first_start = False + order_id += 1 + if command == "exit": + print("Bye!") + break + elif command.startswith("#"): + continue + elif command == "echo": + # just for test + print(params) + sys.stdout.flush() + continue + elif command == "test": + asyncio.Task(self.test(params), loop=self.loop) + continue + + if topic == "admin": + if command == "ping" and params["to"] == "lcm" and params["from"] == "lcm": + self.pings_not_received = 0 + continue + elif topic == "ns": + if command == "instantiate": + # self.logger.debug("Deploying NS {}".format(nsr_id)) + nslcmop = params + nslcmop_id = nslcmop["_id"] + nsr_id = nslcmop["nsInstanceId"] + task = asyncio.ensure_future(self.ns_instantiate(nsr_id, nslcmop_id)) + if nsr_id not in self.lcm_ns_tasks: + self.lcm_ns_tasks[nsr_id] = {} + self.lcm_ns_tasks[nsr_id][nslcmop_id] = {"ns_instantiate": task} + continue + elif command == "terminate": + # self.logger.debug("Deleting NS {}".format(nsr_id)) + nslcmop = params + nslcmop_id = nslcmop["_id"] + nsr_id = nslcmop["nsInstanceId"] + self.cancel_tasks(topic, nsr_id) + task = asyncio.ensure_future(self.ns_terminate(nsr_id, nslcmop_id)) + if nsr_id not in self.lcm_ns_tasks: + self.lcm_ns_tasks[nsr_id] = {} + self.lcm_ns_tasks[nsr_id][nslcmop_id] = {"ns_terminate": task} + continue + elif command == "action": + # self.logger.debug("Update NS {}".format(nsr_id)) + nslcmop = params + nslcmop_id = nslcmop["_id"] + nsr_id = nslcmop["nsInstanceId"] + task = asyncio.ensure_future(self.ns_action(nsr_id, nslcmop_id)) + if nsr_id not in self.lcm_ns_tasks: + self.lcm_ns_tasks[nsr_id] = {} + self.lcm_ns_tasks[nsr_id][nslcmop_id] = {"ns_action": task} + continue + elif command == "show": + try: + db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + print( + "nsr:\n _id={}\n operational-status: {}\n config-status: {}\n detailed-status: " + "{}\n deploy: {}\n tasks: {}".format( + nsr_id, db_nsr["operational-status"], + db_nsr["config-status"], db_nsr["detailed-status"], + db_nsr["_admin"]["deployed"], self.lcm_ns_tasks.get(nsr_id))) + except Exception as e: + print("nsr {} not found: {}".format(nsr_id, e)) + sys.stdout.flush() + continue + elif command == "deleted": + continue # TODO cleaning of task just in case should be done + elif topic == "vim_account": + vim_id = params["_id"] + if command == "create": + task = asyncio.ensure_future(self.vim_create(params, order_id)) + if vim_id not in self.lcm_vim_tasks: + self.lcm_vim_tasks[vim_id] = {} + self.lcm_vim_tasks[vim_id][order_id] = {"vim_create": task} + continue + elif command == "delete": + self.cancel_tasks(topic, vim_id) + task = asyncio.ensure_future(self.vim_delete(vim_id, order_id)) + if vim_id not in self.lcm_vim_tasks: + self.lcm_vim_tasks[vim_id] = {} + self.lcm_vim_tasks[vim_id][order_id] = {"vim_delete": task} + continue + elif command == "show": + print("not implemented show with vim_account") + 
sys.stdout.flush() + continue + elif command == "edit": + task = asyncio.ensure_future(self.vim_edit(vim_id, order_id)) + if vim_id not in self.lcm_vim_tasks: + self.lcm_vim_tasks[vim_id] = {} + self.lcm_vim_tasks[vim_id][order_id] = {"vim_edit": task} + continue + elif topic == "sdn": + _sdn_id = params["_id"] + if command == "create": + task = asyncio.ensure_future(self.sdn_create(params, order_id)) + if _sdn_id not in self.lcm_sdn_tasks: + self.lcm_sdn_tasks[_sdn_id] = {} + self.lcm_sdn_tasks[_sdn_id][order_id] = {"sdn_create": task} + continue + elif command == "delete": + self.cancel_tasks(topic, _sdn_id) + task = asyncio.ensure_future(self.sdn_delete(_sdn_id, order_id)) + if _sdn_id not in self.lcm_sdn_tasks: + self.lcm_sdn_tasks[_sdn_id] = {} + self.lcm_sdn_tasks[_sdn_id][order_id] = {"sdn_delete": task} + continue + elif command == "edit": + task = asyncio.ensure_future(self.sdn_edit(_sdn_id, order_id)) + if _sdn_id not in self.lcm_sdn_tasks: + self.lcm_sdn_tasks[_sdn_id] = {} + self.lcm_sdn_tasks[_sdn_id][order_id] = {"sdn_edit": task} + continue + self.logger.critical("unknown topic {} and command '{}'".format(topic, command)) + except Exception as e: + # if not first_start is the first time after starting. So leave more time and wait + # to allow kafka starts + if consecutive_errors == 8 if not first_start else 30: + self.logger.error("Task kafka_read task exit error too many errors. Exception: {}".format(e)) + raise + consecutive_errors += 1 + self.logger.error("Task kafka_read retrying after Exception {}".format(e)) + wait_time = 2 if not first_start else 5 + await asyncio.sleep(wait_time, loop=self.loop) + + # self.logger.debug("Task kafka_read terminating") + self.logger.debug("Task kafka_read exit") + + def start(self): + self.loop = asyncio.get_event_loop() + self.loop.run_until_complete(asyncio.gather( + self.kafka_read(), + self.kafka_ping() + )) + # TODO + # self.logger.debug("Terminating cancelling creation tasks") + # self.cancel_tasks("ALL", "create") + # timeout = 200 + # while self.is_pending_tasks(): + # self.logger.debug("Task kafka_read terminating. 
Waiting for tasks termination") + # await asyncio.sleep(2, loop=self.loop) + # timeout -= 2 + # if not timeout: + # self.cancel_tasks("ALL", "ALL") + self.loop.close() + self.loop = None + if self.db: + self.db.db_disconnect() + if self.msg: + self.msg.disconnect() + if self.fs: + self.fs.fs_disconnect() + + + def read_config_file(self, config_file): + # TODO make a [ini] + yaml inside parser + # the configparser library is not suitable, because it does not admit comments at the end of line, + # and not parse integer or boolean + try: + with open(config_file) as f: + conf = yaml.load(f) + for k, v in environ.items(): + if not k.startswith("OSMLCM_"): + continue + k_items = k.lower().split("_") + c = conf + try: + for k_item in k_items[1:-1]: + if k_item in ("ro", "vca"): + # put in capital letter + k_item = k_item.upper() + c = c[k_item] + if k_items[-1] == "port": + c[k_items[-1]] = int(v) + else: + c[k_items[-1]] = v + except Exception as e: + self.logger.warn("skipping environ '{}' on exception '{}'".format(k, e)) + + return conf + except Exception as e: + self.logger.critical("At config file '{}': {}".format(config_file, e)) + exit(1) + + +if __name__ == '__main__': + + config_file = "lcm.cfg" + lcm = Lcm(config_file) + + lcm.start() diff --git a/python3-osm-lcm.postinst b/python3-osm-lcm.postinst new file mode 100755 index 0000000..63e30b1 --- /dev/null +++ b/python3-osm-lcm.postinst @@ -0,0 +1,26 @@ +#!/bin/bash + +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
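Reviewer note: read_config_file lets any configuration value be overridden from the environment: a variable named OSMLCM_<SECTION>_<KEY> is walked into the two-level dict (the RO and VCA sections are upper-cased), and *_PORT values are cast to int. A simplified sketch of that convention applied to a config dict; variable values are examples and missing sections are created here only to keep the example self-contained.

# Simplified sketch of the OSMLCM_<SECTION>_<KEY> environment override convention.
def apply_env_overrides(conf, environ):
    for k, v in environ.items():
        if not k.startswith("OSMLCM_"):
            continue
        k_items = k.lower().split("_")[1:]              # e.g. ["ro", "host"]
        section = k_items[0].upper() if k_items[0] in ("ro", "vca") else k_items[0]
        key = k_items[-1]
        conf.setdefault(section, {})[key] = int(v) if key == "port" else v
    return conf


print(apply_env_overrides({"RO": {"host": "ro", "port": 9090}},
                          {"OSMLCM_RO_HOST": "10.0.0.5", "OSMLCM_RO_PORT": "9091"}))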
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-LCM"
+
+#Creation of log folder
+mkdir -p /var/log/osm
+
+systemctl enable osm-lcm.service
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..825516b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+pymongo
+pyyaml
+n2vc
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..4a46e9f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+import os
+from setuptools import setup
+
+_name = "osm_lcm"
+# version is at first line of osm_lcm/html_public/version
+here = os.path.abspath(os.path.dirname(__file__))
+VERSION = "4.0.1rc1"
+with open(os.path.join(here, 'README.rst')) as readme_file:
+    README = readme_file.read()
+
+setup(
+    name=_name,
+    description='OSM Life Cycle Management (LCM)',
+    long_description=README,
+    # version_command=('git describe --tags --long --dirty', 'pep440-git'),
+    version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    author_email='alfonso.tiernosepulveda@telefonica.com',
+    maintainer='Alfonso Tierno',
+    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
+    url='https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    data_files=[('/etc/osm/', ['osm_lcm/lcm.cfg']),
+                ('/etc/systemd/system/', ['osm_lcm/osm-lcm.service']),
+                ],
+    dependency_links=[
+        "git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common-0.1.4"
+    ],
+    install_requires=[
+        'pymongo', 'PyYAML',
+        # 'osm-common',
+    ],
+    # setup_requires=['setuptools-version-command'],
+    # test_suite='nose.collector',
+    # entry_points='''
+    #     [console_scripts]
+    #     osm=osm_lcm.lcm:lcm
+    #     ''',
+)
+
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..ef1f5eb
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,20 @@
+[tox]
+envlist = py27,py3,flake8
+toxworkdir={homedir}/.tox
+
+[testenv]
+deps=nose
+     mock
+commands=nosetests
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands =
+    flake8 setup.py
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
-- 
2.17.1