# under the License.
##
-version = '8.0.1.post0'
-version_date = '2020-06-29'
+version = "8.0.1.post0"
+version_date = "2020-06-29"
# Obtain installed package version. Ignore if error, e.g. pkg_resources not installed
try:
from pkg_resources import get_distribution
+
version = get_distribution("osm_ng_ro").version
except Exception:
pass
:param response: cherrypy response
:return: string with the html response
"""
- response.headers["Content-Type"] = 'text/html'
+ response.headers["Content-Type"] = "text/html"
+
if response.status == HTTPStatus.UNAUTHORIZED.value:
- if response.headers.get("WWW-Authenticate") and request.config.get("auth.allow_basic_authentication"):
- response.headers["WWW-Authenticate"] = "Basic" + response.headers["WWW-Authenticate"][6:]
+ if response.headers.get("WWW-Authenticate") and request.config.get(
+ "auth.allow_basic_authentication"
+ ):
+ response.headers["WWW-Authenticate"] = (
+ "Basic" + response.headers["WWW-Authenticate"][6:]
+ )
+
return
else:
return html_auth2.format(error=data)
+
if request.path_info in ("/version", "/system"):
- return "<pre>" + yaml.safe_dump(data, explicit_start=False, indent=4, default_flow_style=False) + "</pre>"
+ return (
+ "<pre>"
+ + yaml.safe_dump(
+ data, explicit_start=False, indent=4, default_flow_style=False
+ )
+ + "</pre>"
+ )
+
body = html_body.format(item=request.path_info)
+
if response.status and response.status > 202:
- body += html_body_error.format(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False))
+ body += html_body_error.format(
+ yaml.safe_dump(
+ data, explicit_start=True, indent=4, default_flow_style=False
+ )
+ )
elif isinstance(data, (list, tuple)):
# if request.path_info == "/ns/v1/deploy":
# body += html_upload_body.format(request.path_info + "_content", "VNFD")
data_id = k.pop("_id", None)
elif isinstance(k, str):
data_id = k
+
if request.path_info == "/ns/v1/deploy":
- body += '<p> <a href="/ro/{url}/{id}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25"' \
- ' width="25"> </a><a href="/ro/{url}/{id}">{id}</a>: {t} </p>' \
- .format(url=request.path_info, id=data_id, t=html_escape(str(k)))
+ body += (
+ '<p> <a href="/ro/{url}/{id}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25"'
+ ' width="25"> </a><a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(
+ url=request.path_info, id=data_id, t=html_escape(str(k))
+ )
+ )
else:
- body += '<p> <a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(url=request.path_info, id=data_id,
- t=html_escape(str(k)))
+ body += '<p> <a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(
+ url=request.path_info, id=data_id, t=html_escape(str(k))
+ )
elif isinstance(data, dict):
if "Location" in response.headers:
body += '<a href="{}"> show </a>'.format(response.headers["Location"])
else:
- body += '<a href="/ro/{}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25" width="25"> </a>'\
- .format(request.path_info[:request.path_info.rfind("/")])
- if request.path_info.startswith("/nslcm/v1/ns_instances_content/") or \
- request.path_info.startswith("/nslcm/v1/ns_instances/"):
- _id = request.path_info[request.path_info.rfind("/")+1:]
+ body += (
+ '<a href="/ro/{}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25" width="25"> </a>'
+ ).format(request.path_info[: request.path_info.rfind("/")])
+
+ if request.path_info.startswith(
+ "/nslcm/v1/ns_instances_content/"
+ ) or request.path_info.startswith("/nslcm/v1/ns_instances/"):
+ _id = request.path_info[request.path_info.rfind("/") + 1 :]
body += html_nslcmop_body.format(id=_id)
- elif request.path_info.startswith("/nsilcm/v1/netslice_instances_content/") or \
- request.path_info.startswith("/nsilcm/v1/netslice_instances/"):
- _id = request.path_info[request.path_info.rfind("/")+1:]
+ elif request.path_info.startswith(
+ "/nsilcm/v1/netslice_instances_content/"
+ ) or request.path_info.startswith("/nsilcm/v1/netslice_instances/"):
+ _id = request.path_info[request.path_info.rfind("/") + 1 :]
body += html_nsilcmop_body.format(id=_id)
- body += "<pre>" + html_escape(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)) + \
- "</pre>"
+
+ body += (
+ "<pre>"
+ + html_escape(
+ yaml.safe_dump(
+ data, explicit_start=True, indent=4, default_flow_style=False
+ )
+ )
+ + "</pre>"
+ )
elif data is None:
if request.method == "DELETE" or "METHOD=DELETE" in request.query_string:
body += "<pre> deleted </pre>"
else:
body = html_escape(str(data))
+
user_text = " "
+
if toke_info:
if toke_info.get("username"):
user_text += "user: {}".format(toke_info.get("username"))
+
if toke_info.get("project_id"):
user_text += ", project: {}".format(toke_info.get("project_name"))
+
return html_start.format(user_text) + body + html_end
# yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)
# tags=False,
# limitations under the License.
##
-import logging
# import yaml
+import logging
from traceback import format_exc as traceback_format_exc
from osm_ng_ro.ns_thread import NsWorker, NsWorkerException, deep_get
from osm_ng_ro.validation import validate_input, deploy_schema
-from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
+from osm_common import (
+ dbmongo,
+ dbmemory,
+ fslocal,
+ fsmongo,
+ msglocal,
+ msgkafka,
+ version as common_version,
+)
from osm_common.dbbase import DbException
from osm_common.fsbase import FsException
from osm_common.msgbase import MsgException
from threading import Lock
from random import choice as random_choice
from time import time
-from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
+from jinja2 import (
+ Environment,
+ TemplateError,
+ TemplateNotFound,
+ StrictUndefined,
+ UndefinedError,
+)
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
class NsException(Exception):
-
def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
self.http_code = http_code
super(Exception, self).__init__(message)
text_id_ = f.readline()
_, _, text_id = text_id_.rpartition("/")
text_id = text_id.replace("\n", "")[:12]
+
if text_id:
return text_id
except Exception:
pass
+
# Return a random id
return "".join(random_choice("0123456789abcdef") for _ in range(12))
def versiontuple(v):
"""utility for compare dot separate versions. Fills with zeros to proper number comparison"""
filled = []
+
for point in v.split("."):
filled.append(point.zfill(8))
+
return tuple(filled)
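To see why the zero fill matters (a minimal sketch, not part of this change): a plain string comparison ranks "10" below "9", while the padded tuples built above compare as intended.

# Illustrative only: lexicographic strings vs. the zero-filled tuples
assert "10.0.0" < "9.0.0"                               # string order is misleading
assert versiontuple("10.0.0") > versiontuple("9.0.0")   # ("00000010", ...) > ("00000009", ...)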
class Ns(object):
-
def __init__(self):
self.db = None
self.fs = None
self.config = config
self.config["process_id"] = get_process_id() # used for HA identity
self.logger = logging.getLogger("ro.ns")
+
# check right version of common
if versiontuple(common_version) < versiontuple(min_common_version):
- raise NsException("Not compatible osm/common version '{}'. Needed '{}' or higher".format(
- common_version, min_common_version))
+ raise NsException(
+ "Not compatible osm/common version '{}'. Needed '{}' or higher".format(
+ common_version, min_common_version
+ )
+ )
try:
if not self.db:
self.db = dbmemory.DbMemory()
self.db.db_connect(config["database"])
else:
- raise NsException("Invalid configuration param '{}' at '[database]':'driver'".format(
- config["database"]["driver"]))
+ raise NsException(
+ "Invalid configuration param '{}' at '[database]':'driver'".format(
+ config["database"]["driver"]
+ )
+ )
+
if not self.fs:
if config["storage"]["driver"] == "local":
self.fs = fslocal.FsLocal()
elif config["storage"]["driver"] is None:
pass
else:
- raise NsException("Invalid configuration param '{}' at '[storage]':'driver'".format(
- config["storage"]["driver"]))
+ raise NsException(
+ "Invalid configuration param '{}' at '[storage]':'driver'".format(
+ config["storage"]["driver"]
+ )
+ )
+
if not self.msg:
if config["message"]["driver"] == "local":
self.msg = msglocal.MsgLocal()
self.msg = msgkafka.MsgKafka()
self.msg.connect(config["message"])
else:
- raise NsException("Invalid configuration param '{}' at '[message]':'driver'".format(
- config["message"]["driver"]))
+ raise NsException(
+ "Invalid configuration param '{}' at '[message]':'driver'".format(
+ config["message"]["driver"]
+ )
+ )
# TODO load workers to deal with existing database tasks
self.write_lock = Lock()
except (DbException, FsException, MsgException) as e:
raise NsException(str(e), http_code=e.http_code)
-
+
def get_assigned_vims(self):
return list(self.vims_assigned.keys())
try:
if self.db:
self.db.db_disconnect()
+
if self.fs:
self.fs.fs_disconnect()
+
if self.msg:
self.msg.disconnect()
+
self.write_lock = None
except (DbException, FsException, MsgException) as e:
raise NsException(str(e), http_code=e.http_code)
+
for worker in self.workers:
worker.insert_task(("terminate",))
return the index of the assigned worker thread. Worker threads are stored in self.workers
"""
# Look for a thread in idle status
- worker_id = next((i for i in range(len(self.workers)) if self.workers[i] and self.workers[i].idle), None)
+ worker_id = next(
+ (
+ i
+ for i in range(len(self.workers))
+ if self.workers[i] and self.workers[i].idle
+ ),
+ None,
+ )
+
if worker_id is not None:
# unset idle status to avoid race conditions
self.workers[worker_id].idle = False
else:
worker_id = len(self.workers)
+
if worker_id < self.config["global"]["server.ns_threads"]:
# create a new worker
- self.workers.append(NsWorker(worker_id, self.config, self.plugins, self.db))
+ self.workers.append(
+ NsWorker(worker_id, self.config, self.plugins, self.db)
+ )
self.workers[worker_id].start()
else:
# reached maximum number of threads, assign VIM to an existing one
worker_id = self.next_worker
- self.next_worker = (self.next_worker + 1) % self.config["global"]["server.ns_threads"]
+ self.next_worker = (self.next_worker + 1) % self.config["global"][
+ "server.ns_threads"
+ ]
+
return worker_id
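A short illustration of the assignment policy above (numbers invented): with server.ns_threads set to 3, up to three worker threads are created on demand; once all three exist and none is idle, new targets are shared round-robin.

# next_worker = (next_worker + 1) % 3 yields worker ids 2, 0, 1, 2, 0, ... for subsequent targets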
def assign_vim(self, target_id):
def unload_unused_vims(self):
with self.write_lock:
vims_to_unload = []
+
for target_id in self.vims_assigned:
- if not self.db.get_one("ro_tasks",
- q_filter={"target_id": target_id,
- "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED']},
- fail_on_empty=False):
+ if not self.db.get_one(
+ "ro_tasks",
+ q_filter={
+ "target_id": target_id,
+ "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+ },
+ fail_on_empty=False,
+ ):
vims_to_unload.append(target_id)
+
for target_id in vims_to_unload:
self._unload_vim(target_id)
vnfd_id, _, other = where.partition(":")
_type, _, name = other.partition(":")
vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+
if _type == "file":
base_folder = vnfd["_admin"]["storage"]
- cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], name)
+ cloud_init_file = "{}/{}/cloud_init/{}".format(
+ base_folder["folder"], base_folder["pkg-dir"], name
+ )
+
if not self.fs:
- raise NsException("Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver"
- .format(cloud_init_file))
+ raise NsException(
+ "Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver".format(
+ cloud_init_file
+ )
+ )
+
with self.fs.file_open(cloud_init_file, "r") as ci_file:
cloud_init_content = ci_file.read()
elif _type == "vdu":
cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"]
else:
raise NsException("Mismatch descriptor for cloud init: {}".format(where))
+
return cloud_init_content
def _parse_jinja2(self, cloud_init_content, params, context):
-
try:
env = Environment(undefined=StrictUndefined)
template = env.from_string(cloud_init_content)
+
return template.render(params or {})
except UndefinedError as e:
raise NsException(
"Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters"
- "inside the 'additionalParamsForVnf' block".format(e, context))
+                " inside the 'additionalParamsForVnf' block".format(e, context)
+ )
except (TemplateError, TemplateNotFound) as e:
- raise NsException("Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(context, e))
+ raise NsException(
+ "Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(
+ context, e
+ )
+ )
def _create_db_ro_nsrs(self, nsr_id, now):
try:
key = rsa.generate_private_key(
- backend=crypto_default_backend(),
- public_exponent=65537,
- key_size=2048
+ backend=crypto_default_backend(), public_exponent=65537, key_size=2048
)
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
- crypto_serialization.NoEncryption())
+ crypto_serialization.NoEncryption(),
+ )
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
- crypto_serialization.PublicFormat.OpenSSH
+ crypto_serialization.PublicFormat.OpenSSH,
)
- private_key = private_key.decode('utf8')
+ private_key = private_key.decode("utf8")
# Change first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
i = private_key.find("\n")
private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
- public_key = public_key.decode('utf8')
+ public_key = public_key.decode("utf8")
except Exception as e:
raise NsException("Cannot create ssh-keys: {}".format(e))
schema_version = "1.1"
- private_key_encrypted = self.db.encrypt(private_key, schema_version=schema_version, salt=nsr_id)
+ private_key_encrypted = self.db.encrypt(
+ private_key, schema_version=schema_version, salt=nsr_id
+ )
db_content = {
"_id": nsr_id,
"_admin": {
"created": now,
"modified": now,
- "schema_version": schema_version
+ "schema_version": schema_version,
},
"public_key": public_key,
"private_key": private_key_encrypted,
- "actions": []
+ "actions": [],
}
self.db.create("ro_nsrs", db_content)
+
return db_content
def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
action_id = indata.get("action_id", str(uuid4()))
task_index = 0
# get current deployment
- db_nsr_update = {} # update operation on nsrs
+ db_nsr_update = {} # update operation on nsrs
db_vnfrs_update = {}
- db_vnfrs = {} # vnf's info indexed by _id
+ db_vnfrs = {} # vnf's info indexed by _id
nb_ro_tasks = 0 # for logging
vdu2cloud_init = indata.get("cloud_init_content") or {}
- step = ''
+ step = ""
logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
self.logger.debug(logging_text + "Enter")
+
try:
step = "Getting ns and vnfr record from db"
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
# read from db: vnf's of this ns
step = "Getting vnfrs from db"
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+
if not db_vnfrs_list:
raise NsException("Cannot obtain associated VNF for ns")
+
for vnfr in db_vnfrs_list:
db_vnfrs[vnfr["_id"]] = vnfr
db_vnfrs_update[vnfr["_id"]] = {}
+
now = time()
db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
+
if not db_ro_nsr:
db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)
+
ro_nsr_public_key = db_ro_nsr["public_key"]
# check that action_id is not in the list of actions. Suffixed with :index
if action_id in db_ro_nsr["actions"]:
index = 1
+
while True:
new_action_id = "{}:{}".format(action_id, index)
+
if new_action_id not in db_ro_nsr["actions"]:
action_id = new_action_id
- self.logger.debug(logging_text + "Changing action_id in use to {}".format(action_id))
+ self.logger.debug(
+ logging_text
+ + "Changing action_id in use to {}".format(action_id)
+ )
break
+
index += 1
- def _create_task(target_id, item, action, target_record, target_record_id, extra_dict=None):
+ def _create_task(
+ target_id,
+ item,
+ action,
+ target_record,
+ target_record_id,
+ extra_dict=None,
+ ):
nonlocal task_index
nonlocal action_id
nonlocal nsr_id
"target_record": target_record,
"target_record_id": target_record_id,
}
+
if extra_dict:
- task.update(extra_dict) # params, find_params, depends_on
+ task.update(extra_dict) # params, find_params, depends_on
+
task_index += 1
+
return task
def _create_ro_task(target_id, task):
"to_check_at": now,
"tasks": [task],
}
+
return db_ro_task
def _process_image_params(target_image, vim_info, target_record_id):
find_params = {}
+
if target_image.get("image"):
find_params["filter_dict"] = {"name": target_image.get("image")}
+
if target_image.get("vim_image_id"):
- find_params["filter_dict"] = {"id": target_image.get("vim_image_id")}
+ find_params["filter_dict"] = {
+ "id": target_image.get("vim_image_id")
+ }
+
if target_image.get("image_checksum"):
- find_params["filter_dict"] = {"checksum": target_image.get("image_checksum")}
+ find_params["filter_dict"] = {
+ "checksum": target_image.get("image_checksum")
+ }
+
return {"find_params": find_params}
def _process_flavor_params(target_flavor, vim_info, target_record_id):
-
def _get_resource_allocation_params(quota_descriptor):
"""
read the quota_descriptor from vnfd and fetch the resource allocation properties from the
:return: quota params for limit, reserve, shares from the descriptor object
"""
quota = {}
+
if quota_descriptor.get("limit"):
quota["limit"] = int(quota_descriptor["limit"])
+
if quota_descriptor.get("reserve"):
quota["reserve"] = int(quota_descriptor["reserve"])
+
if quota_descriptor.get("shares"):
quota["shares"] = int(quota_descriptor["shares"])
+
return quota
flavor_data = {
}
numa = {}
extended = {}
+
if target_flavor.get("guest-epa"):
extended = {}
epa_vcpu_set = False
+
if target_flavor["guest-epa"].get("numa-node-policy"):
- numa_node_policy = target_flavor["guest-epa"].get("numa-node-policy")
+ numa_node_policy = target_flavor["guest-epa"].get(
+ "numa-node-policy"
+ )
+
if numa_node_policy.get("node"):
numa_node = numa_node_policy["node"][0]
+
if numa_node.get("num-cores"):
numa["cores"] = numa_node["num-cores"]
epa_vcpu_set = True
+
if numa_node.get("paired-threads"):
- if numa_node["paired-threads"].get("num-paired-threads"):
- numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
+ if numa_node["paired-threads"].get(
+ "num-paired-threads"
+ ):
+ numa["paired-threads"] = int(
+ numa_node["paired-threads"][
+ "num-paired-threads"
+ ]
+ )
epa_vcpu_set = True
- if len(numa_node["paired-threads"].get("paired-thread-ids")):
+
+ if len(
+ numa_node["paired-threads"].get("paired-thread-ids")
+ ):
numa["paired-threads-id"] = []
- for pair in numa_node["paired-threads"]["paired-thread-ids"]:
+
+ for pair in numa_node["paired-threads"][
+ "paired-thread-ids"
+ ]:
numa["paired-threads-id"].append(
- (str(pair["thread-a"]), str(pair["thread-b"]))
+ (
+ str(pair["thread-a"]),
+ str(pair["thread-b"]),
+ )
)
+
if numa_node.get("num-threads"):
numa["threads"] = int(numa_node["num-threads"])
epa_vcpu_set = True
+
if numa_node.get("memory-mb"):
- numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
+ numa["memory"] = max(
+ int(numa_node["memory-mb"] / 1024), 1
+ )
+
if target_flavor["guest-epa"].get("mempage-size"):
- extended["mempage-size"] = target_flavor["guest-epa"].get("mempage-size")
- if target_flavor["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
- if target_flavor["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
- if target_flavor["guest-epa"].get("cpu-thread-pinning-policy") and \
- target_flavor["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
+ extended["mempage-size"] = target_flavor["guest-epa"].get(
+ "mempage-size"
+ )
+
+ if (
+ target_flavor["guest-epa"].get("cpu-pinning-policy")
+ and not epa_vcpu_set
+ ):
+ if (
+ target_flavor["guest-epa"]["cpu-pinning-policy"]
+ == "DEDICATED"
+ ):
+ if (
+ target_flavor["guest-epa"].get(
+ "cpu-thread-pinning-policy"
+ )
+ and target_flavor["guest-epa"][
+ "cpu-thread-pinning-policy"
+ ]
+ != "PREFER"
+ ):
numa["cores"] = max(flavor_data["vcpus"], 1)
else:
numa["threads"] = max(flavor_data["vcpus"], 1)
+
epa_vcpu_set = True
+
if target_flavor["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
- cpuquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("cpu-quota"))
+ cpuquota = _get_resource_allocation_params(
+ target_flavor["guest-epa"].get("cpu-quota")
+ )
+
if cpuquota:
extended["cpu-quota"] = cpuquota
+
if target_flavor["guest-epa"].get("mem-quota"):
- vduquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("mem-quota"))
+ vduquota = _get_resource_allocation_params(
+ target_flavor["guest-epa"].get("mem-quota")
+ )
+
if vduquota:
extended["mem-quota"] = vduquota
+
if target_flavor["guest-epa"].get("disk-io-quota"):
- diskioquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("disk-io-quota"))
+ diskioquota = _get_resource_allocation_params(
+ target_flavor["guest-epa"].get("disk-io-quota")
+ )
+
if diskioquota:
extended["disk-io-quota"] = diskioquota
+
if target_flavor["guest-epa"].get("vif-quota"):
- vifquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("vif-quota"))
+ vifquota = _get_resource_allocation_params(
+ target_flavor["guest-epa"].get("vif-quota")
+ )
+
if vifquota:
extended["vif-quota"] = vifquota
+
if numa:
extended["numas"] = [numa]
+
if extended:
flavor_data["extended"] = extended
flavor_data_name = flavor_data.copy()
flavor_data_name["name"] = target_flavor["name"]
extra_dict["params"] = {"flavor_data": flavor_data_name}
+
return extra_dict
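For concreteness, a hypothetical quota descriptor and the dictionary that the nested _get_resource_allocation_params helper above would derive from it (values invented for illustration):

# e.g. taken from a vnfd "guest-epa" block such as "cpu-quota"
quota_descriptor = {"limit": "1000", "reserve": "500", "shares": "10"}
# _get_resource_allocation_params(quota_descriptor) -> {"limit": 1000, "reserve": 500, "shares": 10}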
def _ip_profile_2_ro(ip_profile):
if not ip_profile:
return None
+
ro_ip_profile = {
- "ip_version": "IPv4" if "v4" in ip_profile.get("ip-version", "ipv4") else "IPv6",
+ "ip_version": "IPv4"
+ if "v4" in ip_profile.get("ip-version", "ipv4")
+ else "IPv6",
"subnet_address": ip_profile.get("subnet-address"),
"gateway_address": ip_profile.get("gateway-address"),
"dhcp_enabled": ip_profile["dhcp-params"].get("enabled", True)
- if "dhcp_params" in ip_profile else False,
+            if "dhcp-params" in ip_profile
+ else False,
"dhcp_start_address": ip_profile["dhcp-params"].get("start-address")
- if "dhcp_params" in ip_profile else None,
- "dhcp_count": ip_profile["dhcp-params"].get("count") if "dhcp_params" in ip_profile else None,
+ if "dhcp_params" in ip_profile
+ else None,
+ "dhcp_count": ip_profile["dhcp-params"].get("count")
+            if "dhcp-params" in ip_profile
+ else None,
}
+
if ip_profile.get("dns-server"):
- ro_ip_profile["dns_address"] = ";".join([v["address"] for v in ip_profile["dns-server"]])
- if ip_profile.get('security-group'):
- ro_ip_profile["security_group"] = ip_profile['security-group']
+ ro_ip_profile["dns_address"] = ";".join(
+ [v["address"] for v in ip_profile["dns-server"]]
+ )
+
+ if ip_profile.get("security-group"):
+ ro_ip_profile["security_group"] = ip_profile["security-group"]
+
return ro_ip_profile
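A sketch of the mapping performed above with invented values, assuming the "dhcp-params" key is used consistently by both the membership checks and the accesses:

ip_profile_in = {
    "ip-version": "ipv4",
    "subnet-address": "192.168.10.0/24",
    "gateway-address": "192.168.10.1",
    "dns-server": [{"address": "8.8.8.8"}],
    "dhcp-params": {"enabled": True, "start-address": "192.168.10.10", "count": 50},
}
# _ip_profile_2_ro(ip_profile_in) would return:
# {"ip_version": "IPv4", "subnet_address": "192.168.10.0/24", "gateway_address": "192.168.10.1",
#  "dhcp_enabled": True, "dhcp_start_address": "192.168.10.10", "dhcp_count": 50,
#  "dns_address": "8.8.8.8"}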
def _process_net_params(target_vld, vim_info, target_record_id):
if vim_info.get("sdn"):
# vnf_preffix = "vnfrs:{}".format(vnfr_id)
# ns_preffix = "nsrs:{}".format(nsr_id)
- vld_target_record_id, _, _ = target_record_id.rpartition(".") # remove the ending ".sdn
- extra_dict["params"] = {k: vim_info[k] for k in ("sdn-ports", "target_vim", "vlds", "type")
- if vim_info.get(k)}
+            # remove the ending ".sdn"
+ vld_target_record_id, _, _ = target_record_id.rpartition(".")
+ extra_dict["params"] = {
+ k: vim_info[k]
+ for k in ("sdn-ports", "target_vim", "vlds", "type")
+ if vim_info.get(k)
+ }
+
# TODO needed to add target_id in the dependency.
if vim_info.get("target_vim"):
- extra_dict["depends_on"] = [vim_info.get("target_vim") + " " + vld_target_record_id]
+ extra_dict["depends_on"] = [
+ vim_info.get("target_vim") + " " + vld_target_record_id
+ ]
+
return extra_dict
if vim_info.get("vim_network_name"):
- extra_dict["find_params"] = {"filter_dict": {"name": vim_info.get("vim_network_name")}}
+ extra_dict["find_params"] = {
+ "filter_dict": {"name": vim_info.get("vim_network_name")}
+ }
elif vim_info.get("vim_network_id"):
- extra_dict["find_params"] = {"filter_dict": {"id": vim_info.get("vim_network_id")}}
+ extra_dict["find_params"] = {
+ "filter_dict": {"id": vim_info.get("vim_network_id")}
+ }
elif target_vld.get("mgmt-network"):
extra_dict["find_params"] = {"mgmt": True, "name": target_vld["id"]}
else:
# create
extra_dict["params"] = {
- "net_name": "{}-{}".format(indata["name"][:16], target_vld.get("name", target_vld["id"])[:16]),
- "ip_profile": _ip_profile_2_ro(vim_info.get('ip_profile')),
- "provider_network_profile": vim_info.get('provider_network'),
+ "net_name": "{}-{}".format(
+ indata["name"][:16],
+ target_vld.get("name", target_vld["id"])[:16],
+ ),
+ "ip_profile": _ip_profile_2_ro(vim_info.get("ip_profile")),
+ "provider_network_profile": vim_info.get("provider_network"),
}
+
if not target_vld.get("underlay"):
extra_dict["params"]["net_type"] = "bridge"
else:
- extra_dict["params"]["net_type"] = "ptp" if target_vld.get("type") == "ELINE" else "data"
+ extra_dict["params"]["net_type"] = (
+ "ptp" if target_vld.get("type") == "ELINE" else "data"
+ )
+
return extra_dict
def _process_vdu_params(target_vdu, vim_info, target_record_id):
nonlocal vnfr
nonlocal vdu2cloud_init
nonlocal tasks_by_target_record_id
+
vnf_preffix = "vnfrs:{}".format(vnfr_id)
ns_preffix = "nsrs:{}".format(nsr_id)
image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
extra_dict = {"depends_on": [image_text, flavor_text]}
net_list = []
+
for iface_index, interface in enumerate(target_vdu["interfaces"]):
if interface.get("ns-vld-id"):
net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
elif interface.get("vnf-vld-id"):
net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
else:
- self.logger.error("Interface {} from vdu {} not connected to any vld".format(
- iface_index, target_vdu["vdu-name"]))
- continue # interface not connected to any vld
+ self.logger.error(
+ "Interface {} from vdu {} not connected to any vld".format(
+ iface_index, target_vdu["vdu-name"]
+ )
+ )
+
+ continue # interface not connected to any vld
+
extra_dict["depends_on"].append(net_text)
- net_item = {x: v for x, v in interface.items() if x in
- ("name", "vpci", "port_security", "port_security_disable_strategy", "floating_ip")}
+ net_item = {
+ x: v
+ for x, v in interface.items()
+ if x
+ in (
+ "name",
+ "vpci",
+ "port_security",
+ "port_security_disable_strategy",
+ "floating_ip",
+ )
+ }
net_item["net_id"] = "TASK-" + net_text
net_item["type"] = "virtual"
+
# TODO mac_address: used for SR-IOV ifaces #TODO for other types
# TODO floating_ip: True/False (or it can be None)
if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
# mark the net create task as type data
- if deep_get(tasks_by_target_record_id, net_text, "params", "net_type"):
- tasks_by_target_record_id[net_text]["params"]["net_type"] = "data"
+ if deep_get(
+ tasks_by_target_record_id, net_text, "params", "net_type"
+ ):
+ tasks_by_target_record_id[net_text]["params"][
+ "net_type"
+ ] = "data"
+
net_item["use"] = "data"
net_item["model"] = interface["type"]
net_item["type"] = interface["type"]
- elif interface.get("type") == "OM-MGMT" or interface.get("mgmt-interface") or \
- interface.get("mgmt-vnf"):
+ elif (
+ interface.get("type") == "OM-MGMT"
+ or interface.get("mgmt-interface")
+ or interface.get("mgmt-vnf")
+ ):
net_item["use"] = "mgmt"
- else: # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+ else:
+ # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
net_item["use"] = "bridge"
net_item["model"] = interface.get("type")
+
if interface.get("ip-address"):
net_item["ip_address"] = interface["ip-address"]
+
if interface.get("mac-address"):
net_item["mac_address"] = interface["mac-address"]
+
net_list.append(net_item)
+
if interface.get("mgmt-vnf"):
extra_dict["mgmt_vnf_interface"] = iface_index
elif interface.get("mgmt-interface"):
extra_dict["mgmt_vdu_interface"] = iface_index
+
# cloud config
cloud_config = {}
+
if target_vdu.get("cloud-init"):
if target_vdu["cloud-init"] not in vdu2cloud_init:
- vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(target_vdu["cloud-init"])
+ vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(
+ target_vdu["cloud-init"]
+ )
+
cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
- cloud_config["user-data"] = self._parse_jinja2(cloud_content_, target_vdu.get("additionalParams"),
- target_vdu["cloud-init"])
+ cloud_config["user-data"] = self._parse_jinja2(
+ cloud_content_,
+ target_vdu.get("additionalParams"),
+ target_vdu["cloud-init"],
+ )
+
if target_vdu.get("boot-data-drive"):
cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
+
ssh_keys = []
+
if target_vdu.get("ssh-keys"):
ssh_keys += target_vdu.get("ssh-keys")
+
if target_vdu.get("ssh-access-required"):
ssh_keys.append(ro_nsr_public_key)
+
if ssh_keys:
cloud_config["key-pairs"] = ssh_keys
extra_dict["params"] = {
- "name": "{}-{}-{}-{}".format(indata["name"][:16], vnfr["member-vnf-index-ref"][:16],
- target_vdu["vdu-name"][:32], target_vdu.get("count-index") or 0),
+ "name": "{}-{}-{}-{}".format(
+ indata["name"][:16],
+ vnfr["member-vnf-index-ref"][:16],
+ target_vdu["vdu-name"][:32],
+ target_vdu.get("count-index") or 0,
+ ),
"description": target_vdu["vdu-name"],
"start": True,
"image_id": "TASK-" + image_text,
"availability_zone_index": None, # TODO
"availability_zone_list": None, # TODO
}
+
return extra_dict
- def _process_items(target_list, existing_list, db_record, db_update, db_path, item, process_params):
+ def _process_items(
+ target_list,
+ existing_list,
+ db_record,
+ db_update,
+ db_path,
+ item,
+ process_params,
+ ):
nonlocal db_new_tasks
nonlocal tasks_by_target_record_id
nonlocal task_index
# step 1 items (networks,vdus,...) to be deleted/updated
for item_index, existing_item in enumerate(existing_list):
- target_item = next((t for t in target_list if t["id"] == existing_item["id"]), None)
- for target_vim, existing_viminfo in existing_item.get("vim_info", {}).items():
+ target_item = next(
+ (t for t in target_list if t["id"] == existing_item["id"]), None
+ )
+
+ for target_vim, existing_viminfo in existing_item.get(
+ "vim_info", {}
+ ).items():
if existing_viminfo is None:
continue
+
if target_item:
- target_viminfo = target_item.get("vim_info", {}).get(target_vim)
+ target_viminfo = target_item.get("vim_info", {}).get(
+ target_vim
+ )
else:
target_viminfo = None
+
if target_viminfo is None:
# must be deleted
self._assign_vim(target_vim)
- target_record_id = "{}.{}".format(db_record, existing_item["id"])
+ target_record_id = "{}.{}".format(
+ db_record, existing_item["id"]
+ )
item_ = item
+
if target_vim.startswith("sdn"):
# item must be sdn-net instead of net if target_vim is a sdn
item_ = "sdn_net"
target_record_id += ".sdn"
+
task = _create_task(
- target_vim, item_, "DELETE",
- target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
- target_record_id=target_record_id)
+ target_vim,
+ item_,
+ "DELETE",
+ target_record="{}.{}.vim_info.{}".format(
+ db_record, item_index, target_vim
+ ),
+ target_record_id=target_record_id,
+ )
tasks_by_target_record_id[target_record_id] = task
db_new_tasks.append(task)
# TODO delete
# step 2 items (networks,vdus,...) to be created
for target_item in target_list:
item_index = -1
+
for item_index, existing_item in enumerate(existing_list):
if existing_item["id"] == target_item["id"]:
break
existing_list.append(target_item)
existing_item = None
- for target_vim, target_viminfo in target_item.get("vim_info", {}).items():
+ for target_vim, target_viminfo in target_item.get(
+ "vim_info", {}
+ ).items():
existing_viminfo = None
+
if existing_item:
- existing_viminfo = existing_item.get("vim_info", {}).get(target_vim)
+ existing_viminfo = existing_item.get("vim_info", {}).get(
+ target_vim
+ )
+
# TODO check if different. Delete and create???
# TODO delete if not exist
if existing_viminfo is not None:
target_record_id = "{}.{}".format(db_record, target_item["id"])
item_ = item
+
if target_vim.startswith("sdn"):
# item must be sdn-net instead of net if target_vim is a sdn
item_ = "sdn_net"
target_record_id += ".sdn"
- extra_dict = process_params(target_item, target_viminfo, target_record_id)
+ extra_dict = process_params(
+ target_item, target_viminfo, target_record_id
+ )
self._assign_vim(target_vim)
task = _create_task(
- target_vim, item_, "CREATE",
- target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
+ target_vim,
+ item_,
+ "CREATE",
+ target_record="{}.{}.vim_info.{}".format(
+ db_record, item_index, target_vim
+ ),
target_record_id=target_record_id,
- extra_dict=extra_dict)
+ extra_dict=extra_dict,
+ )
tasks_by_target_record_id[target_record_id] = task
db_new_tasks.append(task)
+
if target_item.get("common_id"):
task["common_id"] = target_item["common_id"]
key = indata["action"].get("key")
user = indata["action"].get("user")
password = indata["action"].get("password")
+
for vnf in indata.get("vnf", ()):
if vnf["_id"] not in db_vnfrs:
raise NsException("Invalid vnf={}".format(vnf["_id"]))
+
db_vnfr = db_vnfrs[vnf["_id"]]
+
for target_vdu in vnf.get("vdur", ()):
- vdu_index, vdur = next((i_v for i_v in enumerate(db_vnfr["vdur"]) if
- i_v[1]["id"] == target_vdu["id"]), (None, None))
+ vdu_index, vdur = next(
+ (
+ i_v
+ for i_v in enumerate(db_vnfr["vdur"])
+ if i_v[1]["id"] == target_vdu["id"]
+ ),
+ (None, None),
+ )
+
if not vdur:
- raise NsException("Invalid vdu vnf={}.{}".format(vnf["_id"], target_vdu["id"]))
- target_vim, vim_info = next(k_v for k_v in vdur["vim_info"].items())
+ raise NsException(
+ "Invalid vdu vnf={}.{}".format(
+ vnf["_id"], target_vdu["id"]
+ )
+ )
+
+ target_vim, vim_info = next(
+ k_v for k_v in vdur["vim_info"].items()
+ )
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(vnf["_id"], vdu_index)
+ target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(
+ vnf["_id"], vdu_index
+ )
extra_dict = {
- "depends_on": ["vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])],
+ "depends_on": [
+ "vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])
+ ],
"params": {
"ip_address": vdur.get("ip-address"),
"user": user,
"password": password,
"private_key": db_ro_nsr["private_key"],
"salt": db_ro_nsr["_id"],
- "schema_version": db_ro_nsr["_admin"]["schema_version"]
- }
+ "schema_version": db_ro_nsr["_admin"][
+ "schema_version"
+ ],
+ },
}
- task = _create_task(target_vim, "vdu", "EXEC",
- target_record=target_record,
- target_record_id=None,
- extra_dict=extra_dict)
+ task = _create_task(
+ target_vim,
+ "vdu",
+ "EXEC",
+ target_record=target_record,
+ target_record_id=None,
+ extra_dict=extra_dict,
+ )
db_new_tasks.append(task)
with self.write_lock:
# compute network differences
# NS.vld
step = "process NS VLDs"
- _process_items(target_list=indata["ns"]["vld"] or [], existing_list=db_nsr.get("vld") or [],
- db_record="nsrs:{}:vld".format(nsr_id), db_update=db_nsr_update,
- db_path="vld", item="net", process_params=_process_net_params)
+ _process_items(
+ target_list=indata["ns"]["vld"] or [],
+ existing_list=db_nsr.get("vld") or [],
+ db_record="nsrs:{}:vld".format(nsr_id),
+ db_update=db_nsr_update,
+ db_path="vld",
+ item="net",
+ process_params=_process_net_params,
+ )
step = "process NS images"
- _process_items(target_list=indata.get("image") or [], existing_list=db_nsr.get("image") or [],
- db_record="nsrs:{}:image".format(nsr_id),
- db_update=db_nsr_update, db_path="image", item="image",
- process_params=_process_image_params)
+ _process_items(
+ target_list=indata.get("image") or [],
+ existing_list=db_nsr.get("image") or [],
+ db_record="nsrs:{}:image".format(nsr_id),
+ db_update=db_nsr_update,
+ db_path="image",
+ item="image",
+ process_params=_process_image_params,
+ )
step = "process NS flavors"
- _process_items(target_list=indata.get("flavor") or [], existing_list=db_nsr.get("flavor") or [],
- db_record="nsrs:{}:flavor".format(nsr_id),
- db_update=db_nsr_update, db_path="flavor", item="flavor",
- process_params=_process_flavor_params)
+ _process_items(
+ target_list=indata.get("flavor") or [],
+ existing_list=db_nsr.get("flavor") or [],
+ db_record="nsrs:{}:flavor".format(nsr_id),
+ db_update=db_nsr_update,
+ db_path="flavor",
+ item="flavor",
+ process_params=_process_flavor_params,
+ )
# VNF.vld
for vnfr_id, vnfr in db_vnfrs.items():
# vnfr_id needs to be set as a global variable for, among others, the nested method _process_vdu_params
step = "process VNF={} VLDs".format(vnfr_id)
- target_vnf = next((vnf for vnf in indata.get("vnf", ()) if vnf["_id"] == vnfr_id), None)
+ target_vnf = next(
+ (
+ vnf
+ for vnf in indata.get("vnf", ())
+ if vnf["_id"] == vnfr_id
+ ),
+ None,
+ )
target_list = target_vnf.get("vld") if target_vnf else None
- _process_items(target_list=target_list or [], existing_list=vnfr.get("vld") or [],
- db_record="vnfrs:{}:vld".format(vnfr_id), db_update=db_vnfrs_update[vnfr["_id"]],
- db_path="vld", item="net", process_params=_process_net_params)
+ _process_items(
+ target_list=target_list or [],
+ existing_list=vnfr.get("vld") or [],
+ db_record="vnfrs:{}:vld".format(vnfr_id),
+ db_update=db_vnfrs_update[vnfr["_id"]],
+ db_path="vld",
+ item="net",
+ process_params=_process_net_params,
+ )
target_list = target_vnf.get("vdur") if target_vnf else None
step = "process VNF={} VDUs".format(vnfr_id)
- _process_items(target_list=target_list or [], existing_list=vnfr.get("vdur") or [],
- db_record="vnfrs:{}:vdur".format(vnfr_id),
- db_update=db_vnfrs_update[vnfr["_id"]], db_path="vdur", item="vdu",
- process_params=_process_vdu_params)
+ _process_items(
+ target_list=target_list or [],
+ existing_list=vnfr.get("vdur") or [],
+ db_record="vnfrs:{}:vdur".format(vnfr_id),
+ db_update=db_vnfrs_update[vnfr["_id"]],
+ db_path="vdur",
+ item="vdu",
+ process_params=_process_vdu_params,
+ )
for db_task in db_new_tasks:
step = "Updating database, Appending tasks to ro_tasks"
target_id = db_task.pop("target_id")
common_id = db_task.get("common_id")
+
if common_id:
- if self.db.set_one("ro_tasks",
- q_filter={"target_id": target_id,
- "tasks.common_id": common_id},
- update_dict={"to_check_at": now, "modified_at": now},
- push={"tasks": db_task}, fail_on_empty=False):
+ if self.db.set_one(
+ "ro_tasks",
+ q_filter={
+ "target_id": target_id,
+ "tasks.common_id": common_id,
+ },
+ update_dict={"to_check_at": now, "modified_at": now},
+ push={"tasks": db_task},
+ fail_on_empty=False,
+ ):
continue
- if not self.db.set_one("ro_tasks",
- q_filter={"target_id": target_id,
- "tasks.target_record": db_task["target_record"]},
- update_dict={"to_check_at": now, "modified_at": now},
- push={"tasks": db_task}, fail_on_empty=False):
+
+ if not self.db.set_one(
+ "ro_tasks",
+ q_filter={
+ "target_id": target_id,
+ "tasks.target_record": db_task["target_record"],
+ },
+ update_dict={"to_check_at": now, "modified_at": now},
+ push={"tasks": db_task},
+ fail_on_empty=False,
+ ):
# Create a ro_task
step = "Updating database, Creating ro_tasks"
db_ro_task = _create_ro_task(target_id, db_task)
nb_ro_tasks += 1
self.db.create("ro_tasks", db_ro_task)
+
step = "Updating database, nsrs"
if db_nsr_update:
self.db.set_one("nsrs", {"_id": nsr_id}, db_nsr_update)
+
for vnfr_id, db_vnfr_update in db_vnfrs_update.items():
if db_vnfr_update:
step = "Updating database, vnfrs={}".format(vnfr_id)
self.db.set_one("vnfrs", {"_id": vnfr_id}, db_vnfr_update)
- self.logger.debug(logging_text + "Exit. Created {} ro_tasks; {} tasks".format(nb_ro_tasks,
- len(db_new_tasks)))
- return {"status": "ok", "nsr_id": nsr_id, "action_id": action_id}, action_id, True
+ self.logger.debug(
+ logging_text
+ + "Exit. Created {} ro_tasks; {} tasks".format(
+ nb_ro_tasks, len(db_new_tasks)
+ )
+ )
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
+ )
except Exception as e:
if isinstance(e, (DbException, NsException)):
- self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e))
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
else:
e = traceback_format_exc()
- self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(step, e), exc_info=True)
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+
raise NsException(e)
def delete(self, session, indata, version, nsr_id, *args, **kwargs):
self.logger.debug("ns.delete version={} nsr_id={}".format(version, nsr_id))
# self.db.del_list({"_id": ro_task["_id"], "tasks.nsr_id.ne": nsr_id})
+
with self.write_lock:
try:
NsWorker.delete_db_tasks(self.db, nsr_id, None)
except NsWorkerException as e:
raise NsException(e)
+
return None, None, True
def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
ro_tasks = self.db.get_list("ro_tasks", {"tasks.action_id": action_id})
global_status = "DONE"
details = []
+
for ro_task in ro_tasks:
for task in ro_task["tasks"]:
if task and task["action_id"] == action_id:
task_list.append(task)
total += 1
+
if task["status"] == "FAILED":
global_status = "FAILED"
- error_text = "Error at {} {}: {}".format(task["action"].lower(), task["item"],
- ro_task["vim_info"].get("vim_details") or "unknown")
+ error_text = "Error at {} {}: {}".format(
+ task["action"].lower(),
+ task["item"],
+ ro_task["vim_info"].get("vim_details") or "unknown",
+ )
details.append(error_text)
elif task["status"] in ("SCHEDULED", "BUILD"):
if global_status != "FAILED":
global_status = "BUILD"
else:
done += 1
+
return_data = {
"status": global_status,
- "details": ". ".join(details) if details else "progress {}/{}".format(done, total),
+ "details": ". ".join(details)
+ if details
+ else "progress {}/{}".format(done, total),
"nsr_id": nsr_id,
"action_id": action_id,
- "tasks": task_list
+ "tasks": task_list,
}
+
return return_data, None, True
def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
- print("ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(session, indata, version,
- nsr_id, action_id))
+ print(
+ "ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(
+ session, indata, version, nsr_id, action_id
+ )
+ )
+
return None, None, True
def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
nsrs = self.db.get_list("nsrs", {})
return_data = []
+
for ns in nsrs:
return_data.append({"_id": ns["_id"], "name": ns["name"]})
+
return return_data, None, True
def get_actions(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
return_data = []
+
for ro_task in ro_tasks:
for task in ro_task["tasks"]:
if task["action_id"] not in return_data:
return_data.append(task["action_id"])
+
return return_data, None, True
A ro_task can contain several 'tasks', each one with a target record where its results are stored
"""
+import logging
+import queue
import threading
import time
-import queue
-import logging
import yaml
+from copy import deepcopy
+from http import HTTPStatus
+from os import mkdir
from pkg_resources import iter_entry_points
+from shutil import rmtree
+from unittest.mock import Mock
+
# from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
from osm_common.dbbase import DbException
from osm_ro_plugin.vim_dummy import VimDummyConnector
from osm_ro_plugin.sdn_dummy import SdnDummyConnector
from osm_ro_plugin import vimconn, sdnconn
from osm_ng_ro.vim_admin import LockRenew
-from copy import deepcopy
-from unittest.mock import Mock
-from http import HTTPStatus
-from os import mkdir
-from shutil import rmtree
+
__author__ = "Alfonso Tierno"
__date__ = "$28-Sep-2017 12:07:15$"
class FailingConnector:
def __init__(self, error_msg):
self.error_msg = error_msg
+
for method in dir(vimconn.VimConnector):
if method[0] != "_":
- setattr(self, method, Mock(side_effect=vimconn.VimConnException(error_msg)))
+ setattr(
+ self, method, Mock(side_effect=vimconn.VimConnException(error_msg))
+ )
+
for method in dir(sdnconn.SdnConnectorBase):
if method[0] != "_":
- setattr(self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg)))
+ setattr(
+ self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg))
+ )
class NsWorkerExceptionNotFound(NsWorkerException):
class VimInteractionBase:
- """ Base class to call VIM/SDN for creating, deleting and refresh networks, VMs, flavors, ...
+    """Base class to call VIM/SDN for creating, deleting and refreshing networks, VMs, flavors, ...
It implements methods that do nothing and return ok"""
+
def __init__(self, db, my_vims, db_vims, logger):
self.db = db
self.logger = logger
"""skip calling VIM to get image, flavor status. Assumes ok"""
if ro_task["vim_info"]["vim_status"] == "VIM_ERROR":
return "FAILED", {}
+
return "DONE", {}
def delete(self, ro_task, task_index):
class VimInteractionNet(VimInteractionBase):
-
def new(self, ro_task, task_index, task_depends):
vim_net_id = None
task = ro_task["tasks"][task_index]
created = False
created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
+
try:
# FIND
if task.get("find_params"):
# if management, get configuration of VIM
if task["find_params"].get("filter_dict"):
vim_filter = task["find_params"]["filter_dict"]
- elif task["find_params"].get("mgmt"): # mamagement network
- if deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_id"):
- vim_filter = {"id": self.db_vims[ro_task["target_id"]]["config"]["management_network_id"]}
- elif deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_name"):
- vim_filter = {"name": self.db_vims[ro_task["target_id"]]["config"]["management_network_name"]}
+            # management network
+ elif task["find_params"].get("mgmt"):
+ if deep_get(
+ self.db_vims[ro_task["target_id"]],
+ "config",
+ "management_network_id",
+ ):
+ vim_filter = {
+ "id": self.db_vims[ro_task["target_id"]]["config"][
+ "management_network_id"
+ ]
+ }
+ elif deep_get(
+ self.db_vims[ro_task["target_id"]],
+ "config",
+ "management_network_name",
+ ):
+ vim_filter = {
+ "name": self.db_vims[ro_task["target_id"]]["config"][
+ "management_network_name"
+ ]
+ }
else:
vim_filter = {"name": task["find_params"]["name"]}
else:
- raise NsWorkerExceptionNotFound("Invalid find_params for new_net {}".format(task["find_params"]))
+ raise NsWorkerExceptionNotFound(
+ "Invalid find_params for new_net {}".format(task["find_params"])
+ )
vim_nets = target_vim.get_network_list(vim_filter)
if not vim_nets and not task.get("params"):
- raise NsWorkerExceptionNotFound("Network not found with this criteria: '{}'".format(
- task.get("find_params")))
+ raise NsWorkerExceptionNotFound(
+ "Network not found with this criteria: '{}'".format(
+ task.get("find_params")
+ )
+ )
elif len(vim_nets) > 1:
raise NsWorkerException(
- "More than one network found with this criteria: '{}'".format(task["find_params"]))
+ "More than one network found with this criteria: '{}'".format(
+ task["find_params"]
+ )
+ )
+
if vim_nets:
vim_net_id = vim_nets[0]["id"]
else:
vim_net_id, created_items = target_vim.new_network(**params)
created = True
- ro_vim_item_update = {"vim_id": vim_net_id,
- "vim_status": "BUILD",
- "created": created,
- "created_items": created_items,
- "vim_details": None}
+ ro_vim_item_update = {
+ "vim_id": vim_net_id,
+ "vim_status": "BUILD",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ }
self.logger.debug(
- "task={} {} new-net={} created={}".format(task_id, ro_task["target_id"], vim_net_id, created))
+ "task={} {} new-net={} created={}".format(
+ task_id, ro_task["target_id"], vim_net_id, created
+ )
+ )
+
return "BUILD", ro_vim_item_update
except (vimconn.VimConnException, NsWorkerException) as e:
- self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "created": created,
- "vim_details": str(e)}
+ self.logger.error(
+ "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_details": str(e),
+ }
+
return "FAILED", ro_vim_item_update
def refresh(self, ro_task):
"""Call VIM to get network status"""
ro_task_id = ro_task["_id"]
target_vim = self.my_vims[ro_task["target_id"]]
-
vim_id = ro_task["vim_info"]["vim_id"]
net_to_refresh_list = [vim_id]
+
try:
vim_dict = target_vim.refresh_nets_status(net_to_refresh_list)
vim_info = vim_dict[vim_id]
+
if vim_info["status"] == "ACTIVE":
task_status = "DONE"
elif vim_info["status"] == "BUILD":
task_status = "FAILED"
except vimconn.VimConnException as e:
# Mark all tasks at VIM_ERROR status
- self.logger.error("ro_task={} vim={} get-net={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e))
+ self.logger.error(
+ "ro_task={} vim={} get-net={}: {}".format(
+ ro_task_id, ro_task["target_id"], vim_id, e
+ )
+ )
vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
task_status = "FAILED"
ro_vim_item_update = {}
if ro_task["vim_info"]["vim_status"] != vim_info["status"]:
ro_vim_item_update["vim_status"] = vim_info["status"]
+
if ro_task["vim_info"]["vim_name"] != vim_info.get("name"):
ro_vim_item_update["vim_name"] = vim_info.get("name")
+
if vim_info["status"] in ("ERROR", "VIM_ERROR"):
if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"):
ro_vim_item_update["vim_details"] = vim_info.get("error_msg")
else:
if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]:
ro_vim_item_update["vim_details"] = vim_info["vim_info"]
+
if ro_vim_item_update:
- self.logger.debug("ro_task={} {} get-net={}: status={} {}".format(
- ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"),
- ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else ''))
+ self.logger.debug(
+ "ro_task={} {} get-net={}: status={} {}".format(
+ ro_task_id,
+ ro_task["target_id"],
+ vim_id,
+ ro_vim_item_update.get("vim_status"),
+ ro_vim_item_update.get("vim_details")
+ if ro_vim_item_update.get("vim_status") != "ACTIVE"
+ else "",
+ )
+ )
+
return task_status, ro_vim_item_update
def delete(self, ro_task, task_index):
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
net_vim_id = ro_task["vim_info"]["vim_id"]
- ro_vim_item_update_ok = {"vim_status": "DELETED",
- "created": False,
- "vim_details": "DELETED",
- "vim_id": None}
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_details": "DELETED",
+ "vim_id": None,
+ }
+
try:
if net_vim_id or ro_task["vim_info"]["created_items"]:
target_vim = self.my_vims[ro_task["target_id"]]
- target_vim.delete_network(net_vim_id, ro_task["vim_info"]["created_items"])
-
+ target_vim.delete_network(
+ net_vim_id, ro_task["vim_info"]["created_items"]
+ )
except vimconn.VimConnNotFoundException:
ro_vim_item_update_ok["vim_details"] = "already deleted"
-
except vimconn.VimConnException as e:
- self.logger.error("ro_task={} vim={} del-net={}: {}".format(ro_task["_id"], ro_task["target_id"],
- net_vim_id, e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "vim_details": "Error while deleting: {}".format(e)}
+ self.logger.error(
+ "ro_task={} vim={} del-net={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], net_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_details": "Error while deleting: {}".format(e),
+ }
+
return "FAILED", ro_vim_item_update
- self.logger.debug("task={} {} del-net={} {}".format(task_id, ro_task["target_id"], net_vim_id,
- ro_vim_item_update_ok.get("vim_details", "")))
+ self.logger.debug(
+ "task={} {} del-net={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ net_vim_id,
+ ro_vim_item_update_ok.get("vim_details", ""),
+ )
+ )
+
return "DONE", ro_vim_item_update_ok
class VimInteractionVdu(VimInteractionBase):
- max_retries_inject_ssh_key = 20 # 20 times
- time_retries_inject_ssh_key = 30 # wevery 30 seconds
+ max_retries_inject_ssh_key = 20 # 20 times
+    time_retries_inject_ssh_key = 30  # every 30 seconds
def new(self, ro_task, task_index, task_depends):
task = ro_task["tasks"][task_index]
created = False
created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
+
try:
created = True
params = task["params"]
params_copy = deepcopy(params)
net_list = params_copy["net_list"]
+
for net in net_list:
- if "net_id" in net and net["net_id"].startswith("TASK-"): # change task_id into network_id
+ # change task_id into network_id
+ if "net_id" in net and net["net_id"].startswith("TASK-"):
network_id = task_depends[net["net_id"]]
+
if not network_id:
- raise NsWorkerException("Cannot create VM because depends on a network not created or found "
- "for {}".format(net["net_id"]))
+ raise NsWorkerException(
+ "Cannot create VM because depends on a network not created or found "
+ "for {}".format(net["net_id"])
+ )
+
net["net_id"] = network_id
+
if params_copy["image_id"].startswith("TASK-"):
params_copy["image_id"] = task_depends[params_copy["image_id"]]
+
if params_copy["flavor_id"].startswith("TASK-"):
params_copy["flavor_id"] = task_depends[params_copy["flavor_id"]]
vim_vm_id, created_items = target_vim.new_vminstance(**params_copy)
interfaces = [iface["vim_id"] for iface in params_copy["net_list"]]
- ro_vim_item_update = {"vim_id": vim_vm_id,
- "vim_status": "BUILD",
- "created": created,
- "created_items": created_items,
- "vim_details": None,
- "interfaces_vim_ids": interfaces,
- "interfaces": [],
- }
+ ro_vim_item_update = {
+ "vim_id": vim_vm_id,
+ "vim_status": "BUILD",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ "interfaces_vim_ids": interfaces,
+ "interfaces": [],
+ }
self.logger.debug(
- "task={} {} new-vm={} created={}".format(task_id, ro_task["target_id"], vim_vm_id, created))
+ "task={} {} new-vm={} created={}".format(
+ task_id, ro_task["target_id"], vim_vm_id, created
+ )
+ )
+
return "BUILD", ro_vim_item_update
except (vimconn.VimConnException, NsWorkerException) as e:
- self.logger.error("task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "created": created,
- "vim_details": str(e)}
+ self.logger.error(
+ "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_details": str(e),
+ }
+
return "FAILED", ro_vim_item_update
def delete(self, ro_task, task_index):
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
vm_vim_id = ro_task["vim_info"]["vim_id"]
- ro_vim_item_update_ok = {"vim_status": "DELETED",
- "created": False,
- "vim_details": "DELETED",
- "vim_id": None}
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_details": "DELETED",
+ "vim_id": None,
+ }
+
try:
if vm_vim_id or ro_task["vim_info"]["created_items"]:
target_vim = self.my_vims[ro_task["target_id"]]
- target_vim.delete_vminstance(vm_vim_id, ro_task["vim_info"]["created_items"])
-
+ target_vim.delete_vminstance(
+ vm_vim_id, ro_task["vim_info"]["created_items"]
+ )
except vimconn.VimConnNotFoundException:
ro_vim_item_update_ok["vim_details"] = "already deleted"
-
except vimconn.VimConnException as e:
- self.logger.error("ro_task={} vim={} del-vm={}: {}".format(ro_task["_id"], ro_task["target_id"],
- vm_vim_id, e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "vim_details": "Error while deleting: {}".format(e)}
+ self.logger.error(
+ "ro_task={} vim={} del-vm={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], vm_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_details": "Error while deleting: {}".format(e),
+ }
+
return "FAILED", ro_vim_item_update
- self.logger.debug("task={} {} del-vm={} {}".format(task_id, ro_task["target_id"], vm_vim_id,
- ro_vim_item_update_ok.get("vim_details", "")))
+ self.logger.debug(
+ "task={} {} del-vm={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ vm_vim_id,
+ ro_vim_item_update_ok.get("vim_details", ""),
+ )
+ )
+
return "DONE", ro_vim_item_update_ok
def refresh(self, ro_task):
"""Call VIM to get vm status"""
ro_task_id = ro_task["_id"]
target_vim = self.my_vims[ro_task["target_id"]]
-
vim_id = ro_task["vim_info"]["vim_id"]
+
if not vim_id:
return None, None
+
vm_to_refresh_list = [vim_id]
try:
vim_dict = target_vim.refresh_vms_status(vm_to_refresh_list)
vim_info = vim_dict[vim_id]
+
if vim_info["status"] == "ACTIVE":
task_status = "DONE"
elif vim_info["status"] == "BUILD":
task_status = "BUILD"
else:
task_status = "FAILED"
+
# try to load and parse vim_information
try:
vim_info_info = yaml.safe_load(vim_info["vim_info"])
pass
except vimconn.VimConnException as e:
# Mark all tasks at VIM_ERROR status
- self.logger.error("ro_task={} vim={} get-vm={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e))
+ self.logger.error(
+ "ro_task={} vim={} get-vm={}: {}".format(
+ ro_task_id, ro_task["target_id"], vim_id, e
+ )
+ )
vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
task_status = "FAILED"
ro_vim_item_update = {}
+
# Interfaces cannot be present if e.g. VM is not present, that is status=DELETED
vim_interfaces = []
if vim_info.get("interfaces"):
for vim_iface_id in ro_task["vim_info"]["interfaces_vim_ids"]:
- iface = next((iface for iface in vim_info["interfaces"] if vim_iface_id == iface["vim_interface_id"]),
- None)
+ iface = next(
+ (
+ iface
+ for iface in vim_info["interfaces"]
+ if vim_iface_id == iface["vim_interface_id"]
+ ),
+ None,
+ )
# if iface:
# iface.pop("vim_info", None)
vim_interfaces.append(iface)
- task_create = next(t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and t["status"] != "FINISHED")
+ task_create = next(
+ t
+ for t in ro_task["tasks"]
+ if t and t["action"] == "CREATE" and t["status"] != "FINISHED"
+ )
if vim_interfaces and task_create.get("mgmt_vnf_interface") is not None:
- vim_interfaces[task_create["mgmt_vnf_interface"]]["mgmt_vnf_interface"] = True
- mgmt_vdu_iface = task_create.get("mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0))
+ vim_interfaces[task_create["mgmt_vnf_interface"]][
+ "mgmt_vnf_interface"
+ ] = True
+
+ mgmt_vdu_iface = task_create.get(
+ "mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0)
+ )
if vim_interfaces:
vim_interfaces[mgmt_vdu_iface]["mgmt_vdu_interface"] = True
if ro_task["vim_info"]["interfaces"] != vim_interfaces:
ro_vim_item_update["interfaces"] = vim_interfaces
+
if ro_task["vim_info"]["vim_status"] != vim_info["status"]:
ro_vim_item_update["vim_status"] = vim_info["status"]
+
if ro_task["vim_info"]["vim_name"] != vim_info.get("name"):
ro_vim_item_update["vim_name"] = vim_info.get("name")
+
if vim_info["status"] in ("ERROR", "VIM_ERROR"):
if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"):
ro_vim_item_update["vim_details"] = vim_info.get("error_msg")
else:
if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]:
ro_vim_item_update["vim_details"] = vim_info["vim_info"]
+
if ro_vim_item_update:
- self.logger.debug("ro_task={} {} get-vm={}: status={} {}".format(
- ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"),
- ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else ''))
+ self.logger.debug(
+ "ro_task={} {} get-vm={}: status={} {}".format(
+ ro_task_id,
+ ro_task["target_id"],
+ vim_id,
+ ro_vim_item_update.get("vim_status"),
+ ro_vim_item_update.get("vim_details")
+ if ro_vim_item_update.get("vim_status") != "ACTIVE"
+ else "",
+ )
+ )
+
return task_status, ro_vim_item_update
def exec(self, ro_task, task_index, task_depends):
target_vim = self.my_vims[ro_task["target_id"]]
db_task_update = {"retries": 0}
retries = task.get("retries", 0)
+
try:
params = task["params"]
params_copy = deepcopy(params)
- params_copy["ro_key"] = self.db.decrypt(params_copy.pop("private_key"),
- params_copy.pop("schema_version"), params_copy.pop("salt"))
+ params_copy["ro_key"] = self.db.decrypt(
+ params_copy.pop("private_key"),
+ params_copy.pop("schema_version"),
+ params_copy.pop("salt"),
+ )
params_copy["ip_addr"] = params_copy.pop("ip_address")
target_vim.inject_user_key(**params_copy)
self.logger.debug(
- "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"]))
- return "DONE", None, db_task_update, # params_copy["key"]
+ "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"])
+ )
+
+ return (
+ "DONE",
+ None,
+ db_task_update,
+ ) # params_copy["key"]
except (vimconn.VimConnException, NsWorkerException) as e:
retries += 1
+
if retries < self.max_retries_inject_ssh_key:
- return "BUILD", None, {"retries": retries, "next_retry": self.time_retries_inject_ssh_key}
- self.logger.error("task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e))
+ return (
+ "BUILD",
+ None,
+ {
+ "retries": retries,
+ "next_retry": self.time_retries_inject_ssh_key,
+ },
+ )
+
+ self.logger.error(
+ "task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e)
+ )
ro_vim_item_update = {"vim_details": str(e)}
+
return "FAILED", ro_vim_item_update, db_task_update
class VimInteractionImage(VimInteractionBase):
-
def new(self, ro_task, task_index, task_depends):
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
created = False
created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
+
try:
# FIND
if task.get("find_params"):
vim_images = target_vim.get_image_list(**task["find_params"])
+
if not vim_images:
- raise NsWorkerExceptionNotFound("Image not found with this criteria: '{}'".format(
- task["find_params"]))
+ raise NsWorkerExceptionNotFound(
+ "Image not found with this criteria: '{}'".format(
+ task["find_params"]
+ )
+ )
elif len(vim_images) > 1:
raise NsWorkerException(
- "More than one network found with this criteria: '{}'".format(task["find_params"]))
+                        "More than one image found with this criteria: '{}'".format(
+ task["find_params"]
+ )
+ )
else:
vim_image_id = vim_images[0]["id"]
- ro_vim_item_update = {"vim_id": vim_image_id,
- "vim_status": "DONE",
- "created": created,
- "created_items": created_items,
- "vim_details": None}
+ ro_vim_item_update = {
+ "vim_id": vim_image_id,
+ "vim_status": "DONE",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ }
self.logger.debug(
- "task={} {} new-image={} created={}".format(task_id, ro_task["target_id"], vim_image_id, created))
+ "task={} {} new-image={} created={}".format(
+ task_id, ro_task["target_id"], vim_image_id, created
+ )
+ )
+
return "DONE", ro_vim_item_update
except (NsWorkerException, vimconn.VimConnException) as e:
- self.logger.error("task={} {} new-image: {}".format(task_id, ro_task["target_id"], e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "created": created,
- "vim_details": str(e)}
+ self.logger.error(
+ "task={} {} new-image: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_details": str(e),
+ }
+
return "FAILED", ro_vim_item_update
class VimInteractionFlavor(VimInteractionBase):
-
def delete(self, ro_task, task_index):
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
flavor_vim_id = ro_task["vim_info"]["vim_id"]
- ro_vim_item_update_ok = {"vim_status": "DELETED",
- "created": False,
- "vim_details": "DELETED",
- "vim_id": None}
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_details": "DELETED",
+ "vim_id": None,
+ }
+
try:
if flavor_vim_id:
target_vim = self.my_vims[ro_task["target_id"]]
target_vim.delete_flavor(flavor_vim_id)
-
except vimconn.VimConnNotFoundException:
ro_vim_item_update_ok["vim_details"] = "already deleted"
-
except vimconn.VimConnException as e:
- self.logger.error("ro_task={} vim={} del-flavor={}: {}".format(
- ro_task["_id"], ro_task["target_id"], flavor_vim_id, e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "vim_details": "Error while deleting: {}".format(e)}
+ self.logger.error(
+ "ro_task={} vim={} del-flavor={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], flavor_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_details": "Error while deleting: {}".format(e),
+ }
+
return "FAILED", ro_vim_item_update
- self.logger.debug("task={} {} del-flavor={} {}".format(
- task_id, ro_task["target_id"], flavor_vim_id, ro_vim_item_update_ok.get("vim_details", "")))
+ self.logger.debug(
+ "task={} {} del-flavor={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ flavor_vim_id,
+ ro_vim_item_update_ok.get("vim_details", ""),
+ )
+ )
+
return "DONE", ro_vim_item_update_ok
def new(self, ro_task, task_index, task_depends):
created = False
created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
+
try:
# FIND
vim_flavor_id = None
+
if task.get("find_params"):
try:
flavor_data = task["find_params"]["flavor_data"]
vim_flavor_id = target_vim.new_flavor(flavor_data)
created = True
- ro_vim_item_update = {"vim_id": vim_flavor_id,
- "vim_status": "DONE",
- "created": created,
- "created_items": created_items,
- "vim_details": None}
+ ro_vim_item_update = {
+ "vim_id": vim_flavor_id,
+ "vim_status": "DONE",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ }
self.logger.debug(
- "task={} {} new-flavor={} created={}".format(task_id, ro_task["target_id"], vim_flavor_id, created))
+ "task={} {} new-flavor={} created={}".format(
+ task_id, ro_task["target_id"], vim_flavor_id, created
+ )
+ )
+
return "DONE", ro_vim_item_update
except (vimconn.VimConnException, NsWorkerException) as e:
- self.logger.error("task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "created": created,
- "vim_details": str(e)}
+ self.logger.error(
+ "task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_details": str(e),
+ }
+
return "FAILED", ro_vim_item_update
class VimInteractionSdnNet(VimInteractionBase):
-
@staticmethod
def _match_pci(port_pci, mapping):
"""
pci_index = 0
while True:
bracket_start = mapping.find("[", mapping_index)
+
if bracket_start == -1:
break
+
bracket_end = mapping.find("]", bracket_start)
if bracket_end == -1:
break
+
length = bracket_start - mapping_index
- if length and port_pci[pci_index:pci_index + length] != mapping[mapping_index:bracket_start]:
+ if (
+ length
+ and port_pci[pci_index : pci_index + length]
+ != mapping[mapping_index:bracket_start]
+ ):
return False
- if port_pci[pci_index + length] not in mapping[bracket_start+1:bracket_end]:
+
+ if (
+ port_pci[pci_index + length]
+ not in mapping[bracket_start + 1 : bracket_end]
+ ):
return False
+
pci_index += length + 1
mapping_index = bracket_end + 1
if port_pci[pci_index:] != mapping[mapping_index:]:
return False
+
return True
def _get_interfaces(self, vlds_to_connect, vim_account_id):
:return:
"""
interfaces = []
+
for vld in vlds_to_connect:
table, _, db_id = vld.partition(":")
db_id, _, vld = db_id.partition(":")
_, _, vld_id = vld.partition(".")
+
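+            # a VLD reference may come from a vnfrs record (vnf-vld-id) or an nsrs record (ns-vld-id);
+            # pick the interface key and the vnfrs query filter accordingly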
if table == "vnfrs":
q_filter = {"vim-account-id": vim_account_id, "_id": db_id}
iface_key = "vnf-vld-id"
else: # table == "nsrs"
q_filter = {"vim-account-id": vim_account_id, "nsr-id-ref": db_id}
iface_key = "ns-vld-id"
+
db_vnfrs = self.db.get_list("vnfrs", q_filter=q_filter)
+
for db_vnfr in db_vnfrs:
for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
for iface_index, interface in enumerate(vdur["interfaces"]):
- if interface.get(iface_key) == vld_id and \
- interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+ if interface.get(iface_key) == vld_id and interface.get(
+ "type"
+ ) in ("SR-IOV", "PCI-PASSTHROUGH"):
                            # only SR-IOV or PT
interface_ = interface.copy()
- interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format(db_vnfr["_id"], vdu_index,
- iface_index)
+ interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format(
+ db_vnfr["_id"], vdu_index, iface_index
+ )
+
if vdur.get("status") == "ERROR":
interface_["status"] = "ERROR"
+
interfaces.append(interface_)
+
return interfaces
def refresh(self, ro_task):
# look for task create
- task_create_index, _ = next(i_t for i_t in enumerate(ro_task["tasks"])
- if i_t[1] and i_t[1]["action"] == "CREATE" and i_t[1]["status"] != "FINISHED")
+ task_create_index, _ = next(
+ i_t
+ for i_t in enumerate(ro_task["tasks"])
+ if i_t[1]
+ and i_t[1]["action"] == "CREATE"
+ and i_t[1]["status"] != "FINISHED"
+ )
return self.new(ro_task, task_create_index, None)
created = ro_task["vim_info"].get("created", False)
try:
-
# CREATE
params = task["params"]
vlds_to_connect = params["vlds"]
associated_vim = params["target_vim"]
- additional_ports = params.get("sdn-ports") or () # external additional ports
+ # external additional ports
+ additional_ports = params.get("sdn-ports") or ()
_, _, vim_account_id = associated_vim.partition(":")
+
if associated_vim:
# get associated VIM
if associated_vim not in self.db_vims:
- self.db_vims[associated_vim] = self.db.get_one("vim_accounts", {"_id": vim_account_id})
+ self.db_vims[associated_vim] = self.db.get_one(
+ "vim_accounts", {"_id": vim_account_id}
+ )
+
db_vim = self.db_vims[associated_vim]
# look for ports to connect
pending_ports = error_ports = 0
vlan_used = None
sdn_need_update = False
+
for port in ports:
vlan_used = port.get("vlan") or vlan_used
+
# TODO. Do not connect if already done
if not port.get("compute_node") or not port.get("pci"):
if port.get("status") == "ERROR":
else:
pending_ports += 1
continue
+
pmap = None
- compute_node_mappings = next((c for c in db_vim["config"].get("sdn-port-mapping", ())
- if c and c["compute_node"] == port["compute_node"]), None)
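+                # look up the sdn-port-mapping entry configured in the VIM account for this port's compute node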
+ compute_node_mappings = next(
+ (
+ c
+ for c in db_vim["config"].get("sdn-port-mapping", ())
+ if c and c["compute_node"] == port["compute_node"]
+ ),
+ None,
+ )
+
if compute_node_mappings:
# process port_mapping pci of type 0000:af:1[01].[1357]
- pmap = next((p for p in compute_node_mappings["ports"]
- if self._match_pci(port["pci"], p.get("pci"))), None)
+ pmap = next(
+ (
+ p
+ for p in compute_node_mappings["ports"]
+ if self._match_pci(port["pci"], p.get("pci"))
+ ),
+ None,
+ )
+
if not pmap:
if not db_vim["config"].get("mapping_not_needed"):
- error_list.append("Port mapping not found for compute_node={} pci={}".format(
- port["compute_node"], port["pci"]))
+ error_list.append(
+ "Port mapping not found for compute_node={} pci={}".format(
+ port["compute_node"], port["pci"]
+ )
+ )
continue
+
pmap = {}
service_endpoint_id = "{}:{}".format(port["compute_node"], port["pci"])
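+                # build the connectivity-service endpoint for this port, preferring values from the
+                # port-mapping entry (pmap) over the raw VIM port data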
new_port = {
- "service_endpoint_id": pmap.get("service_endpoint_id") or service_endpoint_id,
- "service_endpoint_encapsulation_type": "dot1q" if port["type"] == "SR-IOV" else None,
+ "service_endpoint_id": pmap.get("service_endpoint_id")
+ or service_endpoint_id,
+ "service_endpoint_encapsulation_type": "dot1q"
+ if port["type"] == "SR-IOV"
+ else None,
"service_endpoint_encapsulation_info": {
"vlan": port.get("vlan"),
"mac": port.get("mac_address"),
- "device_id": pmap.get("device_id") or port["compute_node"], # device_id
- "device_interface_id": pmap.get("device_interface_id") or port["pci"],
+ "device_id": pmap.get("device_id") or port["compute_node"],
+ "device_interface_id": pmap.get("device_interface_id")
+ or port["pci"],
"switch_dpid": pmap.get("switch_id") or pmap.get("switch_dpid"),
"switch_port": pmap.get("switch_port"),
"service_mapping_info": pmap.get("service_mapping_info"),
- }
+ },
}
# TODO
sdn_ports.append(new_port)
if error_ports:
- error_list.append("{} interfaces have not been created as VDU is on ERROR status".format(error_ports))
+ error_list.append(
+ "{} interfaces have not been created as VDU is on ERROR status".format(
+ error_ports
+ )
+ )
# connect external ports
for index, additional_port in enumerate(additional_ports):
- additional_port_id = additional_port.get("service_endpoint_id") or "external-{}".format(index)
- sdn_ports.append({
- "service_endpoint_id": additional_port_id,
- "service_endpoint_encapsulation_type": additional_port.get("service_endpoint_encapsulation_type",
- "dot1q"),
- "service_endpoint_encapsulation_info": {
- "vlan": additional_port.get("vlan") or vlan_used,
- "mac": additional_port.get("mac_address"),
- "device_id": additional_port.get("device_id"),
- "device_interface_id": additional_port.get("device_interface_id"),
- "switch_dpid": additional_port.get("switch_dpid") or additional_port.get("switch_id"),
- "switch_port": additional_port.get("switch_port"),
- "service_mapping_info": additional_port.get("service_mapping_info"),
- }})
+ additional_port_id = additional_port.get(
+ "service_endpoint_id"
+ ) or "external-{}".format(index)
+ sdn_ports.append(
+ {
+ "service_endpoint_id": additional_port_id,
+ "service_endpoint_encapsulation_type": additional_port.get(
+ "service_endpoint_encapsulation_type", "dot1q"
+ ),
+ "service_endpoint_encapsulation_info": {
+ "vlan": additional_port.get("vlan") or vlan_used,
+ "mac": additional_port.get("mac_address"),
+ "device_id": additional_port.get("device_id"),
+ "device_interface_id": additional_port.get(
+ "device_interface_id"
+ ),
+ "switch_dpid": additional_port.get("switch_dpid")
+ or additional_port.get("switch_id"),
+ "switch_port": additional_port.get("switch_port"),
+ "service_mapping_info": additional_port.get(
+ "service_mapping_info"
+ ),
+ },
+ }
+ )
new_connected_ports.append(additional_port_id)
sdn_info = ""
+
# if there are more ports to connect or they have been modified, call create/update
if error_list:
sdn_status = "ERROR"
sdn_info = "; ".join(error_list)
elif set(connected_ports) != set(new_connected_ports) or sdn_need_update:
last_update = time.time()
+
if not sdn_net_id:
if len(sdn_ports) < 2:
sdn_status = "ACTIVE"
+
if not pending_ports:
- self.logger.debug("task={} {} new-sdn-net done, less than 2 ports".
- format(task_id, ro_task["target_id"]))
+ self.logger.debug(
+ "task={} {} new-sdn-net done, less than 2 ports".format(
+ task_id, ro_task["target_id"]
+ )
+ )
else:
net_type = params.get("type") or "ELAN"
- sdn_net_id, created_items = target_vim.create_connectivity_service(
- net_type, sdn_ports)
+ (
+ sdn_net_id,
+ created_items,
+ ) = target_vim.create_connectivity_service(net_type, sdn_ports)
created = True
- self.logger.debug("task={} {} new-sdn-net={} created={}".
- format(task_id, ro_task["target_id"], sdn_net_id, created))
+ self.logger.debug(
+ "task={} {} new-sdn-net={} created={}".format(
+ task_id, ro_task["target_id"], sdn_net_id, created
+ )
+ )
else:
created_items = target_vim.edit_connectivity_service(
- sdn_net_id, conn_info=created_items, connection_points=sdn_ports)
+ sdn_net_id, conn_info=created_items, connection_points=sdn_ports
+ )
created = True
- self.logger.debug("task={} {} update-sdn-net={} created={}".
- format(task_id, ro_task["target_id"], sdn_net_id, created))
+ self.logger.debug(
+ "task={} {} update-sdn-net={} created={}".format(
+ task_id, ro_task["target_id"], sdn_net_id, created
+ )
+ )
+
connected_ports = new_connected_ports
elif sdn_net_id:
- wim_status_dict = target_vim.get_connectivity_service_status(sdn_net_id, conn_info=created_items)
+ wim_status_dict = target_vim.get_connectivity_service_status(
+ sdn_net_id, conn_info=created_items
+ )
sdn_status = wim_status_dict["sdn_status"]
+
if wim_status_dict.get("sdn_info"):
sdn_info = str(wim_status_dict.get("sdn_info")) or ""
+
if wim_status_dict.get("error_msg"):
sdn_info = wim_status_dict.get("error_msg") or ""
if pending_ports:
if sdn_status != "ERROR":
sdn_info = "Waiting for getting interfaces location from VIM. Obtained '{}' of {}".format(
- len(ports)-pending_ports, len(ports))
+ len(ports) - pending_ports, len(ports)
+ )
+
if sdn_status == "ACTIVE":
sdn_status = "BUILD"
- ro_vim_item_update = {"vim_id": sdn_net_id,
- "vim_status": sdn_status,
- "created": created,
- "created_items": created_items,
- "connected_ports": connected_ports,
- "vim_details": sdn_info,
- "last_update": last_update}
+ ro_vim_item_update = {
+ "vim_id": sdn_net_id,
+ "vim_status": sdn_status,
+ "created": created,
+ "created_items": created_items,
+ "connected_ports": connected_ports,
+ "vim_details": sdn_info,
+ "last_update": last_update,
+ }
+
return sdn_status, ro_vim_item_update
except Exception as e:
- self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e),
- exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException)))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "created": created,
- "vim_details": str(e)}
+ self.logger.error(
+ "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e),
+ exc_info=not isinstance(
+ e, (sdnconn.SdnConnectorError, vimconn.VimConnException)
+ ),
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_details": str(e),
+ }
+
return "FAILED", ro_vim_item_update
def delete(self, ro_task, task_index):
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
sdn_vim_id = ro_task["vim_info"].get("vim_id")
- ro_vim_item_update_ok = {"vim_status": "DELETED",
- "created": False,
- "vim_details": "DELETED",
- "vim_id": None}
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_details": "DELETED",
+ "vim_id": None,
+ }
+
try:
if sdn_vim_id:
target_vim = self.my_vims[ro_task["target_id"]]
- target_vim.delete_connectivity_service(sdn_vim_id, ro_task["vim_info"].get("created_items"))
+ target_vim.delete_connectivity_service(
+ sdn_vim_id, ro_task["vim_info"].get("created_items")
+ )
except Exception as e:
- if isinstance(e, sdnconn.SdnConnectorError) and e.http_code == HTTPStatus.NOT_FOUND.value:
+ if (
+ isinstance(e, sdnconn.SdnConnectorError)
+ and e.http_code == HTTPStatus.NOT_FOUND.value
+ ):
ro_vim_item_update_ok["vim_details"] = "already deleted"
else:
- self.logger.error("ro_task={} vim={} del-sdn-net={}: {}".format(ro_task["_id"], ro_task["target_id"],
- sdn_vim_id, e),
- exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException)))
- ro_vim_item_update = {"vim_status": "VIM_ERROR",
- "vim_details": "Error while deleting: {}".format(e)}
+ self.logger.error(
+ "ro_task={} vim={} del-sdn-net={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], sdn_vim_id, e
+ ),
+ exc_info=not isinstance(
+ e, (sdnconn.SdnConnectorError, vimconn.VimConnException)
+ ),
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_details": "Error while deleting: {}".format(e),
+ }
+
return "FAILED", ro_vim_item_update
- self.logger.debug("task={} {} del-sdn-net={} {}".format(task_id, ro_task["target_id"], sdn_vim_id,
- ro_vim_item_update_ok.get("vim_details", "")))
+ self.logger.debug(
+ "task={} {} del-sdn-net={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ sdn_vim_id,
+ ro_vim_item_update_ok.get("vim_details", ""),
+ )
+ )
+
return "DONE", ro_vim_item_update_ok
self.config = config
self.plugins = plugins
self.plugin_name = "unknown"
- self.logger = logging.getLogger('ro.worker{}'.format(worker_index))
+ self.logger = logging.getLogger("ro.worker{}".format(worker_index))
self.worker_index = worker_index
self.task_queue = queue.Queue(self.QUEUE_SIZE)
- self.my_vims = {} # targetvim: vimplugin class
- self.db_vims = {} # targetvim: vim information from database
- self.vim_targets = [] # targetvim list
+ # targetvim: vimplugin class
+ self.my_vims = {}
+ # targetvim: vim information from database
+ self.db_vims = {}
+ # targetvim list
+ self.vim_targets = []
self.my_id = config["process_id"] + ":" + str(worker_index)
self.db = db
self.item2class = {
"net": VimInteractionNet(self.db, self.my_vims, self.db_vims, self.logger),
"vdu": VimInteractionVdu(self.db, self.my_vims, self.db_vims, self.logger),
- "image": VimInteractionImage(self.db, self.my_vims, self.db_vims, self.logger),
- "flavor": VimInteractionFlavor(self.db, self.my_vims, self.db_vims, self.logger),
- "sdn_net": VimInteractionSdnNet(self.db, self.my_vims, self.db_vims, self.logger),
+ "image": VimInteractionImage(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
+ "flavor": VimInteractionFlavor(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
+ "sdn_net": VimInteractionSdnNet(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
}
self.time_last_task_processed = None
- self.tasks_to_delete = [] # lists of tasks to delete because nsrs or vnfrs has been deleted from db
- self.idle = True # it is idle when there are not vim_targets associated
+        # list of tasks to delete because their nsrs or vnfrs have been deleted from db
+ self.tasks_to_delete = []
+        # it is idle when there are no vim_targets associated
+ self.idle = True
self.task_locked_time = config["global"]["task_locked_time"]
def insert_task(self, task):
"""
if not db_vim.get("config"):
return
+
file_name = ""
+
try:
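+            # write the inline CA certificate to a per-worker file and make config["ca_cert"] point to it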
if db_vim["config"].get("ca_cert_content"):
file_name = "{}:{}".format(target_id, self.worker_index)
+
try:
mkdir(file_name)
except FileExistsError:
pass
+
file_name = file_name + "/ca_cert"
+
with open(file_name, "w") as f:
f.write(db_vim["config"]["ca_cert_content"])
del db_vim["config"]["ca_cert_content"]
db_vim["config"]["ca_cert"] = file_name
except Exception as e:
- raise NsWorkerException("Error writing to file '{}': {}".format(file_name, e))
+ raise NsWorkerException(
+ "Error writing to file '{}': {}".format(file_name, e)
+ )
def _load_plugin(self, name, type="vim"):
# type can be vim or sdn
if "rovim_dummy" not in self.plugins:
self.plugins["rovim_dummy"] = VimDummyConnector
+
if "rosdn_dummy" not in self.plugins:
self.plugins["rosdn_dummy"] = SdnDummyConnector
+
if name in self.plugins:
return self.plugins[name]
+
try:
- for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+ for v in iter_entry_points("osm_ro{}.plugins".format(type), name):
self.plugins[name] = v.load()
except Exception as e:
raise NsWorkerException("Cannot load plugin osm_{}: {}".format(name, e))
+
if name and name not in self.plugins:
- raise NsWorkerException("Plugin 'osm_{n}' has not been installed".format(n=name))
+ raise NsWorkerException(
+ "Plugin 'osm_{n}' has not been installed".format(n=name)
+ )
+
return self.plugins[name]
def _unload_vim(self, target_id):
try:
self.db_vims.pop(target_id, None)
self.my_vims.pop(target_id, None)
+
if target_id in self.vim_targets:
self.vim_targets.remove(target_id)
+
self.logger.info("Unloaded {}".format(target_id))
rmtree("{}:{}".format(target_id, self.worker_index))
except FileNotFoundError:
op_text = ""
step = ""
loaded = target_id in self.vim_targets
- target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns"
+ target_database = (
+ "vim_accounts"
+ if target == "vim"
+ else "wim_accounts"
+ if target == "wim"
+ else "sdns"
+ )
+
try:
step = "Getting {} from db".format(target_id)
db_vim = self.db.get_one(target_database, {"_id": _id})
- for op_index, operation in enumerate(db_vim["_admin"].get("operations", ())):
+
+ for op_index, operation in enumerate(
+ db_vim["_admin"].get("operations", ())
+ ):
if operation["operationState"] != "PROCESSING":
continue
+
locked_at = operation.get("locked_at")
+
if locked_at is not None and locked_at >= now - self.task_locked_time:
# some other thread is doing this operation
return
+
# lock
op_text = "_admin.operations.{}.".format(op_index)
- if not self.db.set_one(target_database,
- q_filter={"_id": _id,
- op_text + "operationState": "PROCESSING",
- op_text + "locked_at": locked_at
- },
- update_dict={op_text + "locked_at": now,
- "admin.current_operation": op_index},
- fail_on_empty=False):
+
+ if not self.db.set_one(
+ target_database,
+ q_filter={
+ "_id": _id,
+ op_text + "operationState": "PROCESSING",
+ op_text + "locked_at": locked_at,
+ },
+ update_dict={
+ op_text + "locked_at": now,
+ "admin.current_operation": op_index,
+ },
+ fail_on_empty=False,
+ ):
return
+
unset_dict[op_text + "locked_at"] = None
unset_dict["current_operation"] = None
step = "Loading " + target_id
error_text = self._load_vim(target_id)
+
if not error_text:
step = "Checking connectivity"
- if target == 'vim':
+
+ if target == "vim":
self.my_vims[target_id].check_vim_connectivity()
else:
self.my_vims[target_id].check_credentials()
+
update_dict["_admin.operationalState"] = "ENABLED"
update_dict["_admin.detailed-status"] = ""
unset_dict[op_text + "detailed-status"] = None
update_dict[op_text + "operationState"] = "COMPLETED"
+
return
except Exception as e:
unset_dict.pop(op_text + "detailed-status", None)
update_dict["_admin.operationalState"] = "ERROR"
update_dict["_admin.detailed-status"] = error_text
+
if op_text:
update_dict[op_text + "statusEnteredTime"] = now
- self.db.set_one(target_database, q_filter={"_id": _id}, update_dict=update_dict, unset=unset_dict,
- fail_on_empty=False)
+
+ self.db.set_one(
+ target_database,
+ q_filter={"_id": _id},
+ update_dict=update_dict,
+ unset=unset_dict,
+ fail_on_empty=False,
+ )
+
if not loaded:
self._unload_vim(target_id)
:return: None if ok, descriptive text if error
"""
target, _, _id = target_id.partition(":")
- target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns"
+ target_database = (
+ "vim_accounts"
+ if target == "vim"
+ else "wim_accounts"
+ if target == "wim"
+ else "sdns"
+ )
plugin_name = ""
vim = None
+
try:
step = "Getting {}={} from db".format(target, _id)
# TODO process for wim, sdnc, ...
step = "Decrypting password"
schema_version = vim.get("schema_version")
- self.db.encrypt_decrypt_fields(vim, "decrypt", fields=('password', 'secret'),
- schema_version=schema_version, salt=_id)
+ self.db.encrypt_decrypt_fields(
+ vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version=schema_version,
+ salt=_id,
+ )
self._process_vim_config(target_id, vim)
+
if target == "vim":
plugin_name = "rovim_" + vim["vim_type"]
step = "Loading plugin '{}'".format(plugin_name)
vim_module_conn = self._load_plugin(plugin_name)
step = "Loading {}'".format(target_id)
self.my_vims[target_id] = vim_module_conn(
- uuid=vim['_id'], name=vim['name'],
- tenant_id=vim.get('vim_tenant_id'), tenant_name=vim.get('vim_tenant_name'),
- url=vim['vim_url'], url_admin=None,
- user=vim['vim_user'], passwd=vim['vim_password'],
- config=vim.get('config') or {}, persistent_info={}
+ uuid=vim["_id"],
+ name=vim["name"],
+ tenant_id=vim.get("vim_tenant_id"),
+ tenant_name=vim.get("vim_tenant_name"),
+ url=vim["vim_url"],
+ url_admin=None,
+ user=vim["vim_user"],
+ passwd=vim["vim_password"],
+ config=vim.get("config") or {},
+ persistent_info={},
)
else: # sdn
plugin_name = "rosdn_" + vim["type"]
wim_config = wim.pop("config", {}) or {}
wim["uuid"] = wim["_id"]
wim["wim_url"] = wim["url"]
+
if wim.get("dpid"):
wim_config["dpid"] = wim.pop("dpid")
+
if wim.get("switch_id"):
wim_config["switch_id"] = wim.pop("switch_id")
- self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config) # wim, wim_account, config
+
+ # wim, wim_account, config
+ self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config)
self.db_vims[target_id] = vim
self.error_status = None
- self.logger.info("Connector loaded for {}, plugin={}".format(target_id, plugin_name))
+
+ self.logger.info(
+ "Connector loaded for {}, plugin={}".format(target_id, plugin_name)
+ )
except Exception as e:
- self.logger.error("Cannot load {} plugin={}: {} {}".format(
- target_id, plugin_name, step, e))
+ self.logger.error(
+ "Cannot load {} plugin={}: {} {}".format(
+ target_id, plugin_name, step, e
+ )
+ )
+
self.db_vims[target_id] = vim or {}
            self.my_vims[target_id] = FailingConnector(str(e))
error_status = "{} Error: {}".format(step, e)
+
return error_status
finally:
if target_id not in self.vim_targets:
:return: None
"""
now = time.time()
+
if not self.time_last_task_processed:
self.time_last_task_processed = now
+
try:
while True:
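+                # atomically claim one pending ro_task for this worker (set locked_by/locked_at), then read it back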
locked = self.db.set_one(
"ro_tasks",
- q_filter={"target_id": self.vim_targets,
- "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
- "locked_at.lt": now - self.task_locked_time,
- "to_check_at.lt": self.time_last_task_processed},
+ q_filter={
+ "target_id": self.vim_targets,
+ "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+ "locked_at.lt": now - self.task_locked_time,
+ "to_check_at.lt": self.time_last_task_processed,
+ },
update_dict={"locked_by": self.my_id, "locked_at": now},
- fail_on_empty=False)
+ fail_on_empty=False,
+ )
+
if locked:
# read and return
ro_task = self.db.get_one(
"ro_tasks",
- q_filter={"target_id": self.vim_targets,
- "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
- "locked_at": now})
+ q_filter={
+ "target_id": self.vim_targets,
+ "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+ "locked_at": now,
+ },
+ )
return ro_task
+
if self.time_last_task_processed == now:
self.time_last_task_processed = None
return None
except DbException as e:
self.logger.error("Database exception at _get_db_task: {}".format(e))
except Exception as e:
- self.logger.critical("Unexpected exception at _get_db_task: {}".format(e), exc_info=True)
+ self.logger.critical(
+ "Unexpected exception at _get_db_task: {}".format(e), exc_info=True
+ )
+
return None
def _delete_task(self, ro_task, task_index, task_depends, db_update):
"""
my_task = ro_task["tasks"][task_index]
task_id = my_task["task_id"]
- needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get("created_items", False)
+ needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get(
+ "created_items", False
+ )
+
if my_task["status"] == "FAILED":
return None, None # TODO need to be retry??
+
try:
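+            # mark sibling CREATE tasks over the same target_record as FINISHED; if another CREATE task
+            # is still pending, the VIM resource is shared and must not be deleted here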
for index, task in enumerate(ro_task["tasks"]):
if index == task_index or not task:
continue # own task
- if my_task["target_record"] == task["target_record"] and task["action"] == "CREATE":
+
+ if (
+ my_task["target_record"] == task["target_record"]
+ and task["action"] == "CREATE"
+ ):
# set to finished
- db_update["tasks.{}.status".format(index)] = task["status"] = "FINISHED"
- elif task["action"] == "CREATE" and task["status"] not in ("FINISHED", "SUPERSEDED"):
+ db_update["tasks.{}.status".format(index)] = task[
+ "status"
+ ] = "FINISHED"
+ elif task["action"] == "CREATE" and task["status"] not in (
+ "FINISHED",
+ "SUPERSEDED",
+ ):
needed_delete = False
+
if needed_delete:
return self.item2class[my_task["item"]].delete(ro_task, task_index)
else:
return "SUPERSEDED", None
except Exception as e:
if not isinstance(e, NsWorkerException):
- self.logger.critical("Unexpected exception at _delete_task task={}: {}".format(task_id, e),
- exc_info=True)
+ self.logger.critical(
+ "Unexpected exception at _delete_task task={}: {}".format(
+ task_id, e
+ ),
+ exc_info=True,
+ )
+
return "FAILED", {"vim_status": "VIM_ERROR", "vim_details": str(e)}
def _create_task(self, ro_task, task_index, task_depends, db_update):
my_task = ro_task["tasks"][task_index]
task_id = my_task["task_id"]
task_status = None
+
if my_task["status"] == "FAILED":
return None, None # TODO need to be retry??
elif my_task["status"] == "SCHEDULED":
for index, task in enumerate(ro_task["tasks"]):
if index == task_index or not task:
continue # own task
- if task["action"] == "CREATE" and task["status"] not in ("SCHEDULED", "FINISHED", "SUPERSEDED"):
+
+ if task["action"] == "CREATE" and task["status"] not in (
+ "SCHEDULED",
+ "FINISHED",
+ "SUPERSEDED",
+ ):
return task["status"], "COPY_VIM_INFO"
try:
task_status, ro_vim_item_update = self.item2class[my_task["item"]].new(
- ro_task, task_index, task_depends)
+ ro_task, task_index, task_depends
+ )
# TODO update other CREATE tasks
except Exception as e:
if not isinstance(e, NsWorkerException):
- self.logger.error("Error executing task={}: {}".format(task_id, e), exc_info=True)
+ self.logger.error(
+ "Error executing task={}: {}".format(task_id, e), exc_info=True
+ )
+
task_status = "FAILED"
ro_vim_item_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)}
# TODO update ro_vim_item_update
+
return task_status, ro_vim_item_update
else:
return None, None
:param target_id:
:return: database ro_task plus index of task
"""
- if task_id.startswith("vim:") or task_id.startswith("sdn:") or task_id.startswith("wim:"):
+ if (
+ task_id.startswith("vim:")
+ or task_id.startswith("sdn:")
+ or task_id.startswith("wim:")
+ ):
target_id, _, task_id = task_id.partition(" ")
if task_id.startswith("nsrs:") or task_id.startswith("vnfrs:"):
ro_task_dependency = self.db.get_one(
"ro_tasks",
- q_filter={"target_id": target_id,
- "tasks.target_record_id": task_id
- },
- fail_on_empty=False)
+ q_filter={"target_id": target_id, "tasks.target_record_id": task_id},
+ fail_on_empty=False,
+ )
+
if ro_task_dependency:
for task_index, task in enumerate(ro_task_dependency["tasks"]):
if task["target_record_id"] == task_id:
for task_index, task in enumerate(ro_task["tasks"]):
if task and task["task_id"] == task_id:
return ro_task, task_index
+
ro_task_dependency = self.db.get_one(
"ro_tasks",
- q_filter={"tasks.ANYINDEX.task_id": task_id,
- "tasks.ANYINDEX.target_record.ne": None
- },
- fail_on_empty=False)
+ q_filter={
+ "tasks.ANYINDEX.task_id": task_id,
+ "tasks.ANYINDEX.target_record.ne": None,
+ },
+ fail_on_empty=False,
+ )
+
if ro_task_dependency:
            for task_index, task in enumerate(ro_task_dependency["tasks"]):
if task["task_id"] == task_id:
def _process_pending_tasks(self, ro_task):
ro_task_id = ro_task["_id"]
now = time.time()
- next_check_at = now + (24*60*60) # one day
+ # one day
+ next_check_at = now + (24 * 60 * 60)
db_ro_task_update = {}
def _update_refresh(new_status):
nonlocal ro_task
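+            # schedule the next status refresh: images/flavors use REFRESH_IMAGE, items in BUILD use
+            # REFRESH_ACTIVE, anything else uses REFRESH_ERROR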
next_refresh = time.time()
+
if task["item"] in ("image", "flavor"):
next_refresh += self.REFRESH_IMAGE
elif new_status == "BUILD":
next_refresh += self.REFRESH_ACTIVE
else:
next_refresh += self.REFRESH_ERROR
+
next_check_at = min(next_check_at, next_refresh)
db_ro_task_update["vim_info.refresh_at"] = next_refresh
ro_task["vim_info"]["refresh_at"] = next_refresh
# 0: get task_status_create
lock_object = None
task_status_create = None
- task_create = next((t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and
- t["status"] in ("BUILD", "DONE")), None)
+ task_create = next(
+ (
+ t
+ for t in ro_task["tasks"]
+ if t
+ and t["action"] == "CREATE"
+ and t["status"] in ("BUILD", "DONE")
+ ),
+ None,
+ )
+
if task_create:
task_status_create = task_create["status"]
+
        # 1: look for tasks in status SCHEDULED, or with action CREATE whose status is DONE or BUILD
for task_action in ("DELETE", "CREATE", "EXEC"):
db_vim_update = None
new_status = None
+
for task_index, task in enumerate(ro_task["tasks"]):
if not task:
continue # task deleted
+
task_depends = {}
target_update = None
- if (task_action in ("DELETE", "EXEC") and task["status"] not in ("SCHEDULED", "BUILD")) or \
- task["action"] != task_action or \
- (task_action == "CREATE" and task["status"] in ("FINISHED", "SUPERSEDED")):
+
+ if (
+ (
+ task_action in ("DELETE", "EXEC")
+ and task["status"] not in ("SCHEDULED", "BUILD")
+ )
+ or task["action"] != task_action
+ or (
+ task_action == "CREATE"
+ and task["status"] in ("FINISHED", "SUPERSEDED")
+ )
+ ):
continue
+
task_path = "tasks.{}.status".format(task_index)
try:
db_vim_info_update = None
+
if task["status"] == "SCHEDULED":
# check if tasks that this depends on have been completed
dependency_not_completed = False
- for dependency_task_id in (task.get("depends_on") or ()):
- dependency_ro_task, dependency_task_index = \
- self._get_dependency(dependency_task_id, target_id=ro_task["target_id"])
- dependency_task = dependency_ro_task["tasks"][dependency_task_index]
+
+ for dependency_task_id in task.get("depends_on") or ():
+ (
+ dependency_ro_task,
+ dependency_task_index,
+ ) = self._get_dependency(
+ dependency_task_id, target_id=ro_task["target_id"]
+ )
+ dependency_task = dependency_ro_task["tasks"][
+ dependency_task_index
+ ]
+
if dependency_task["status"] == "SCHEDULED":
dependency_not_completed = True
- next_check_at = min(next_check_at, dependency_ro_task["to_check_at"])
+ next_check_at = min(
+ next_check_at, dependency_ro_task["to_check_at"]
+ )
break
elif dependency_task["status"] == "FAILED":
error_text = "Cannot {} {} because depends on failed {} {} id={}): {}".format(
- task["action"], task["item"], dependency_task["action"],
- dependency_task["item"], dependency_task_id,
- dependency_ro_task["vim_info"].get("vim_details"))
- self.logger.error("task={} {}".format(task["task_id"], error_text))
+ task["action"],
+ task["item"],
+ dependency_task["action"],
+ dependency_task["item"],
+ dependency_task_id,
+ dependency_ro_task["vim_info"].get(
+ "vim_details"
+ ),
+ )
+ self.logger.error(
+ "task={} {}".format(task["task_id"], error_text)
+ )
raise NsWorkerException(error_text)
- task_depends[dependency_task_id] = dependency_ro_task["vim_info"]["vim_id"]
- task_depends["TASK-{}".format(dependency_task_id)] = \
- dependency_ro_task["vim_info"]["vim_id"]
+ task_depends[dependency_task_id] = dependency_ro_task[
+ "vim_info"
+ ]["vim_id"]
+ task_depends[
+ "TASK-{}".format(dependency_task_id)
+ ] = dependency_ro_task["vim_info"]["vim_id"]
+
if dependency_not_completed:
# TODO set at vim_info.vim_details that it is waiting
continue
+
                        # before calling the VIM plugin (it can take more than task_locked_time), register with
                        # LockRenew the task of renewing this lock; it will update the database locked_at periodically
if not lock_object:
- lock_object = LockRenew.add_lock_object("ro_tasks", ro_task, self)
+ lock_object = LockRenew.add_lock_object(
+ "ro_tasks", ro_task, self
+ )
+
if task["action"] == "DELETE":
- new_status, db_vim_info_update = self._delete_task(ro_task, task_index,
- task_depends, db_ro_task_update)
- new_status = "FINISHED" if new_status == "DONE" else new_status
+ (new_status, db_vim_info_update,) = self._delete_task(
+ ro_task, task_index, task_depends, db_ro_task_update
+ )
+ new_status = (
+ "FINISHED" if new_status == "DONE" else new_status
+ )
# ^with FINISHED instead of DONE it will not be refreshing
+
if new_status in ("FINISHED", "SUPERSEDED"):
target_update = "DELETE"
elif task["action"] == "EXEC":
- new_status, db_vim_info_update, db_task_update = self.item2class[task["item"]].exec(
- ro_task, task_index, task_depends)
- new_status = "FINISHED" if new_status == "DONE" else new_status
+ (
+ new_status,
+ db_vim_info_update,
+ db_task_update,
+ ) = self.item2class[task["item"]].exec(
+ ro_task, task_index, task_depends
+ )
+ new_status = (
+ "FINISHED" if new_status == "DONE" else new_status
+ )
# ^with FINISHED instead of DONE it will not be refreshing
+
if db_task_update:
# load into database the modified db_task_update "retries" and "next_retry"
if db_task_update.get("retries"):
- db_ro_task_update["tasks.{}.retries".format(task_index)] = db_task_update["retries"]
- next_check_at = time.time() + db_task_update.get("next_retry", 60)
+ db_ro_task_update[
+ "tasks.{}.retries".format(task_index)
+ ] = db_task_update["retries"]
+
+ next_check_at = time.time() + db_task_update.get(
+ "next_retry", 60
+ )
target_update = None
elif task["action"] == "CREATE":
if task["status"] == "SCHEDULED":
new_status = task_status_create
target_update = "COPY_VIM_INFO"
else:
- new_status, db_vim_info_update = \
- self.item2class[task["item"]].new(ro_task, task_index, task_depends)
+ new_status, db_vim_info_update = self.item2class[
+ task["item"]
+ ].new(ro_task, task_index, task_depends)
# self._create_task(ro_task, task_index, task_depends, db_ro_task_update)
_update_refresh(new_status)
else:
- if ro_task["vim_info"]["refresh_at"] and now > ro_task["vim_info"]["refresh_at"]:
- new_status, db_vim_info_update = self.item2class[task["item"]].refresh(ro_task)
+ if (
+ ro_task["vim_info"]["refresh_at"]
+ and now > ro_task["vim_info"]["refresh_at"]
+ ):
+ new_status, db_vim_info_update = self.item2class[
+ task["item"]
+ ].refresh(ro_task)
_update_refresh(new_status)
+
except Exception as e:
new_status = "FAILED"
- db_vim_info_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)}
- if not isinstance(e, (NsWorkerException, vimconn.VimConnException)):
- self.logger.error("Unexpected exception at _delete_task task={}: {}".
- format(task["task_id"], e), exc_info=True)
+ db_vim_info_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_details": str(e),
+ }
+
+ if not isinstance(
+ e, (NsWorkerException, vimconn.VimConnException)
+ ):
+ self.logger.error(
+                            "Unexpected exception at _process_pending_tasks task={}: {}".format(
+ task["task_id"], e
+ ),
+ exc_info=True,
+ )
try:
if db_vim_info_update:
db_vim_update = db_vim_info_update.copy()
- db_ro_task_update.update({"vim_info." + k: v for k, v in db_vim_info_update.items()})
+ db_ro_task_update.update(
+ {
+ "vim_info." + k: v
+ for k, v in db_vim_info_update.items()
+ }
+ )
ro_task["vim_info"].update(db_vim_info_update)
if new_status:
if task_action == "CREATE":
task_status_create = new_status
db_ro_task_update[task_path] = new_status
- if target_update or db_vim_update:
+ if target_update or db_vim_update:
if target_update == "DELETE":
self._update_target(task, None)
elif target_update == "COPY_VIM_INFO":
self._update_target(task, db_vim_update)
except Exception as e:
- if isinstance(e, DbException) and e.http_code == HTTPStatus.NOT_FOUND:
+ if (
+ isinstance(e, DbException)
+ and e.http_code == HTTPStatus.NOT_FOUND
+ ):
# if the vnfrs or nsrs has been removed from database, this task must be removed
- self.logger.debug("marking to delete task={}".format(task["task_id"]))
+ self.logger.debug(
+ "marking to delete task={}".format(task["task_id"])
+ )
self.tasks_to_delete.append(task)
else:
- self.logger.error("Unexpected exception at _update_target task={}: {}".
- format(task["task_id"], e), exc_info=True)
+ self.logger.error(
+ "Unexpected exception at _update_target task={}: {}".format(
+ task["task_id"], e
+ ),
+ exc_info=True,
+ )
locked_at = ro_task["locked_at"]
+
if lock_object:
- locked_at = [lock_object["locked_at"], lock_object["locked_at"] + self.task_locked_time]
+ locked_at = [
+ lock_object["locked_at"],
+ lock_object["locked_at"] + self.task_locked_time,
+ ]
                # locked_at contains two times to avoid a race condition. In case the lock has been renewed, it will
# contain exactly locked_at + self.task_locked_time
LockRenew.remove_lock_object(lock_object)
- q_filter = {"_id": ro_task["_id"], "to_check_at": ro_task["to_check_at"], "locked_at": locked_at}
+
+ q_filter = {
+ "_id": ro_task["_id"],
+ "to_check_at": ro_task["to_check_at"],
+ "locked_at": locked_at,
+ }
            # modify own task. Filter by to_check_at to handle the race condition: if to_check_at has been modified
            # outside this task (by ro_nbi), do not update it
db_ro_task_update["locked_by"] = None
db_ro_task_update["locked_at"] = int(now - self.task_locked_time)
db_ro_task_update["modified_at"] = now
db_ro_task_update["to_check_at"] = next_check_at
- if not self.db.set_one("ro_tasks",
- update_dict=db_ro_task_update,
- q_filter=q_filter,
- fail_on_empty=False):
+
+ if not self.db.set_one(
+ "ro_tasks",
+ update_dict=db_ro_task_update,
+ q_filter=q_filter,
+ fail_on_empty=False,
+ ):
del db_ro_task_update["to_check_at"]
del q_filter["to_check_at"]
- self.db.set_one("ro_tasks",
- q_filter=q_filter,
- update_dict=db_ro_task_update,
- fail_on_empty=True)
+ self.db.set_one(
+ "ro_tasks",
+ q_filter=q_filter,
+ update_dict=db_ro_task_update,
+ fail_on_empty=True,
+ )
except DbException as e:
- self.logger.error("ro_task={} Error updating database {}".format(ro_task_id, e))
+ self.logger.error(
+ "ro_task={} Error updating database {}".format(ro_task_id, e)
+ )
except Exception as e:
- self.logger.error("Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True)
+ self.logger.error(
+ "Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True
+ )
def _update_target(self, task, ro_vim_item_update):
table, _, temp = task["target_record"].partition(":")
_id, _, path_vim_status = temp.partition(":")
- path_item = path_vim_status[:path_vim_status.rfind(".")]
- path_item = path_item[:path_item.rfind(".")]
+ path_item = path_vim_status[: path_vim_status.rfind(".")]
+ path_item = path_item[: path_item.rfind(".")]
# path_vim_status: dot separated list targeting vim information, e.g. "vdur.10.vim_info.vim:id"
# path_item: dot separated list targeting record information, e.g. "vdur.10"
+
if ro_vim_item_update:
- update_dict = {path_vim_status + "." + k: v for k, v in ro_vim_item_update.items() if k in
- ('vim_id', 'vim_details', 'vim_name', 'vim_status', 'interfaces')}
+ update_dict = {
+ path_vim_status + "." + k: v
+ for k, v in ro_vim_item_update.items()
+ if k
+ in ("vim_id", "vim_details", "vim_name", "vim_status", "interfaces")
+ }
+
if path_vim_status.startswith("vdur."):
# for backward compatibility, add vdur.name apart from vdur.vim_name
if ro_vim_item_update.get("vim_name"):
update_dict[path_item + ".name"] = ro_vim_item_update["vim_name"]
+
# for backward compatibility, add vdur.vim-id apart from vdur.vim_id
if ro_vim_item_update.get("vim_id"):
update_dict[path_item + ".vim-id"] = ro_vim_item_update["vim_id"]
+
# update general status
if ro_vim_item_update.get("vim_status"):
- update_dict[path_item + ".status"] = ro_vim_item_update["vim_status"]
+ update_dict[path_item + ".status"] = ro_vim_item_update[
+ "vim_status"
+ ]
+
if ro_vim_item_update.get("interfaces"):
path_interfaces = path_item + ".interfaces"
+
for i, iface in enumerate(ro_vim_item_update.get("interfaces")):
if iface:
- update_dict.update({path_interfaces + ".{}.".format(i) + k: v for k, v in iface.items() if
- k in ('vlan', 'compute_node', 'pci')})
+ update_dict.update(
+ {
+ path_interfaces + ".{}.".format(i) + k: v
+ for k, v in iface.items()
+ if k in ("vlan", "compute_node", "pci")
+ }
+ )
+
                        # also expose ip_address and mac_address as ip-address and mac-address
- if iface.get('ip_address'):
- update_dict[path_interfaces + ".{}.".format(i) + "ip-address"] = iface['ip_address']
- if iface.get('mac_address'):
- update_dict[path_interfaces + ".{}.".format(i) + "mac-address"] = iface['mac_address']
+ if iface.get("ip_address"):
+ update_dict[
+ path_interfaces + ".{}.".format(i) + "ip-address"
+ ] = iface["ip_address"]
+
+ if iface.get("mac_address"):
+ update_dict[
+ path_interfaces + ".{}.".format(i) + "mac-address"
+ ] = iface["mac_address"]
+
if iface.get("mgmt_vnf_interface") and iface.get("ip_address"):
- update_dict["ip-address"] = iface.get("ip_address").split(";")[0]
+ update_dict["ip-address"] = iface.get("ip_address").split(
+ ";"
+ )[0]
+
if iface.get("mgmt_vdu_interface") and iface.get("ip_address"):
- update_dict[path_item + ".ip-address"] = iface.get("ip_address").split(";")[0]
+ update_dict[path_item + ".ip-address"] = iface.get(
+ "ip_address"
+ ).split(";")[0]
self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict)
else:
update_dict = {path_item + ".status": "DELETED"}
- self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict, unset={path_vim_status: None})
+ self.db.set_one(
+ table,
+ q_filter={"_id": _id},
+ update_dict=update_dict,
+ unset={path_vim_status: None},
+ )
def _process_delete_db_tasks(self):
"""
task = self.tasks_to_delete[0]
vnfrs_deleted = None
nsr_id = task["nsr_id"]
+
if task["target_record"].startswith("vnfrs:"):
# check if nsrs is present
if self.db.get_one("nsrs", {"_id": nsr_id}, fail_on_empty=False):
vnfrs_deleted = task["target_record"].split(":")[1]
+
try:
self.delete_db_tasks(self.db, nsr_id, vnfrs_deleted)
except Exception as e:
- self.logger.error("Error deleting task={}: {}".format(task["task_id"], e))
+ self.logger.error(
+ "Error deleting task={}: {}".format(task["task_id"], e)
+ )
self.tasks_to_delete.pop(0)
@staticmethod
ro_tasks = db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
now = time.time()
conflict = False
+
for ro_task in ro_tasks:
db_update = {}
to_delete_ro_task = True
+
for index, task in enumerate(ro_task["tasks"]):
if not task:
pass
- elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or \
- (vnfrs_deleted and task["target_record"].startswith("vnfrs:"+vnfrs_deleted)):
+ elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or (
+ vnfrs_deleted
+ and task["target_record"].startswith("vnfrs:" + vnfrs_deleted)
+ ):
db_update["tasks.{}".format(index)] = None
else:
- to_delete_ro_task = False # used by other nsr, ro_task cannot be deleted
+                    # used by another nsr, so this ro_task cannot be deleted
+ to_delete_ro_task = False
+
            # delete or update only if nobody has changed ro_task meanwhile; use modified_at to detect changes
if to_delete_ro_task:
- if not db.del_one("ro_tasks",
- q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]},
- fail_on_empty=False):
+ if not db.del_one(
+ "ro_tasks",
+ q_filter={
+ "_id": ro_task["_id"],
+ "modified_at": ro_task["modified_at"],
+ },
+ fail_on_empty=False,
+ ):
conflict = True
elif db_update:
db_update["modified_at"] = now
- if not db.set_one("ro_tasks",
- q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]},
- update_dict=db_update,
- fail_on_empty=False):
+ if not db.set_one(
+ "ro_tasks",
+ q_filter={
+ "_id": ro_task["_id"],
+ "modified_at": ro_task["modified_at"],
+ },
+ update_dict=db_update,
+ fail_on_empty=False,
+ ):
conflict = True
if not conflict:
return
if isinstance(e, queue.Empty):
pass
else:
- self.logger.critical("Error processing task: {}".format(e), exc_info=True)
+ self.logger.critical(
+ "Error processing task: {}".format(e), exc_info=True
+ )
            # step 2: process pending_tasks, delete tasks that are no longer needed
try:
if not busy:
time.sleep(5)
except Exception as e:
- self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+ self.logger.critical(
+ "Unexpected exception at run: " + str(e), exc_info=True
+ )
self.logger.info("Finishing")
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
-__version__ = "0.1." # file version, not NBI version
+__version__ = "0.1." # file version, not NBI version
version_date = "May 2020"
-database_version = '1.2'
-auth_database_version = '1.0'
-ro_server = None # instance of Server class
-vim_admin_thread = None # instance of VimAdminThread class
+database_version = "1.2"
+auth_database_version = "1.0"
+ro_server = None # instance of Server class
+vim_admin_thread = None # instance of VimAdminThread class
# vim_threads = None # instance of VimThread class
"tokens": {
"METHODS": ("POST",),
"ROLE_PERMISSION": "tokens:",
- "<ID>": {
- "METHODS": ("DELETE",),
- "ROLE_PERMISSION": "tokens:id:"
- }
+ "<ID>": {"METHODS": ("DELETE",), "ROLE_PERMISSION": "tokens:id:"},
},
}
},
"cancel": {
"METHODS": ("POST",),
"ROLE_PERMISSION": "deploy:id:id:cancel",
- }
- }
- }
+ },
+ },
+ },
},
}
},
class RoException(Exception):
-
def __init__(self, message, http_code=HTTPStatus.METHOD_NOT_ALLOWED):
Exception.__init__(self, message)
self.http_code = http_code
class Authenticator:
-
def __init__(self, valid_url_methods, valid_query_string):
self.valid_url_methods = valid_url_methods
self.valid_query_string = valid_query_string
def authorize(self, *args, **kwargs):
return {"token": "ok", "id": "ok"}
-
+
def new_token(self, token_info, indata, remote):
- return {"token": "ok",
- "id": "ok",
- "remote": remote}
+ return {"token": "ok", "id": "ok", "remote": remote}
def del_token(self, token_id):
pass
def _format_in(self, kwargs):
try:
indata = None
+
if cherrypy.request.body.length:
error_text = "Invalid input format "
cherrypy.request.headers.pop("Content-File-MD5", None)
elif "application/yaml" in cherrypy.request.headers["Content-Type"]:
error_text = "Invalid yaml format "
- indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
+ indata = yaml.load(
+ cherrypy.request.body, Loader=yaml.SafeLoader
+ )
cherrypy.request.headers.pop("Content-File-MD5", None)
- elif "application/binary" in cherrypy.request.headers["Content-Type"] or \
- "application/gzip" in cherrypy.request.headers["Content-Type"] or \
- "application/zip" in cherrypy.request.headers["Content-Type"] or \
- "text/plain" in cherrypy.request.headers["Content-Type"]:
+ elif (
+ "application/binary" in cherrypy.request.headers["Content-Type"]
+ or "application/gzip"
+ in cherrypy.request.headers["Content-Type"]
+ or "application/zip" in cherrypy.request.headers["Content-Type"]
+ or "text/plain" in cherrypy.request.headers["Content-Type"]
+ ):
indata = cherrypy.request.body # .read()
- elif "multipart/form-data" in cherrypy.request.headers["Content-Type"]:
+ elif (
+ "multipart/form-data"
+ in cherrypy.request.headers["Content-Type"]
+ ):
if "descriptor_file" in kwargs:
filecontent = kwargs.pop("descriptor_file")
+
if not filecontent.file:
- raise RoException("empty file or content", HTTPStatus.BAD_REQUEST)
+ raise RoException(
+ "empty file or content", HTTPStatus.BAD_REQUEST
+ )
+
indata = filecontent.file # .read()
+
if filecontent.content_type.value:
- cherrypy.request.headers["Content-Type"] = filecontent.content_type.value
+ cherrypy.request.headers[
+ "Content-Type"
+ ] = filecontent.content_type.value
else:
# raise cherrypy.HTTPError(HTTPStatus.Not_Acceptable,
# "Only 'Content-Type' of type 'application/json' or
# 'application/yaml' for input format are available")
error_text = "Invalid yaml format "
- indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
+ indata = yaml.load(
+ cherrypy.request.body, Loader=yaml.SafeLoader
+ )
cherrypy.request.headers.pop("Content-File-MD5", None)
else:
error_text = "Invalid yaml format "
indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
cherrypy.request.headers.pop("Content-File-MD5", None)
+
if not indata:
indata = {}
kwargs[k] = yaml.load(v, Loader=yaml.SafeLoader)
except Exception:
pass
- elif k.endswith(".gt") or k.endswith(".lt") or k.endswith(".gte") or k.endswith(".lte"):
+ elif (
+ k.endswith(".gt")
+ or k.endswith(".lt")
+ or k.endswith(".gte")
+ or k.endswith(".lte")
+ ):
try:
kwargs[k] = int(v)
except Exception:
:return: None
"""
accept = cherrypy.request.headers.get("Accept")
+
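+        # negotiate the output format from the Accept header: JSON, HTML, or YAML (the default)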
if data is None:
if accept and "text/html" in accept:
- return html.format(data, cherrypy.request, cherrypy.response, token_info)
+ return html.format(
+ data, cherrypy.request, cherrypy.response, token_info
+ )
+
# cherrypy.response.status = HTTPStatus.NO_CONTENT.value
return
elif hasattr(data, "read"): # file object
if _format:
cherrypy.response.headers["Content-Type"] = _format
            elif "b" in data.mode:  # binary, assuming zip
- cherrypy.response.headers["Content-Type"] = 'application/zip'
+ cherrypy.response.headers["Content-Type"] = "application/zip"
else:
- cherrypy.response.headers["Content-Type"] = 'text/plain'
+ cherrypy.response.headers["Content-Type"] = "text/plain"
+
            # TODO check that cherrypy closes the file. If not, implement the pending per-thread close handling
return data
+
if accept:
if "application/json" in accept:
- cherrypy.response.headers["Content-Type"] = 'application/json; charset=utf-8'
+ cherrypy.response.headers[
+ "Content-Type"
+ ] = "application/json; charset=utf-8"
a = json.dumps(data, indent=4) + "\n"
+
return a.encode("utf8")
elif "text/html" in accept:
- return html.format(data, cherrypy.request, cherrypy.response, token_info)
-
- elif "application/yaml" in accept or "*/*" in accept or "text/plain" in accept:
+ return html.format(
+ data, cherrypy.request, cherrypy.response, token_info
+ )
+ elif (
+ "application/yaml" in accept
+ or "*/*" in accept
+ or "text/plain" in accept
+ ):
pass
            # if there is no valid accept, raise an error. But if the response is already an error, format it in yaml
elif cherrypy.response.status >= 400:
- raise cherrypy.HTTPError(HTTPStatus.NOT_ACCEPTABLE.value,
- "Only 'Accept' of type 'application/json' or 'application/yaml' "
- "for output format are available")
- cherrypy.response.headers["Content-Type"] = 'application/yaml'
- return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False,
- encoding='utf-8', allow_unicode=True) # , canonical=True, default_style='"'
+ raise cherrypy.HTTPError(
+ HTTPStatus.NOT_ACCEPTABLE.value,
+ "Only 'Accept' of type 'application/json' or 'application/yaml' "
+ "for output format are available",
+ )
+
+ cherrypy.response.headers["Content-Type"] = "application/yaml"
+
+ return yaml.safe_dump(
+ data,
+ explicit_start=True,
+ indent=4,
+ default_flow_style=False,
+ tags=False,
+ encoding="utf-8",
+ allow_unicode=True,
+ ) # , canonical=True, default_style='"'
@cherrypy.expose
def index(self, *args, **kwargs):
token_info = None
+
try:
if cherrypy.request.method == "GET":
token_info = self.authenticator.authorize()
- outdata = token_info # Home page
+ outdata = token_info # Home page
else:
- raise cherrypy.HTTPError(HTTPStatus.METHOD_NOT_ALLOWED.value,
- "Method {} not allowed for tokens".format(cherrypy.request.method))
+ raise cherrypy.HTTPError(
+ HTTPStatus.METHOD_NOT_ALLOWED.value,
+ "Method {} not allowed for tokens".format(cherrypy.request.method),
+ )
return self._format_out(outdata, token_info)
-
except (NsException, AuthException) as e:
# cherrypy.log("index Exception {}".format(e))
cherrypy.response.status = e.http_code.value
+
return self._format_out("Welcome to OSM!", token_info)
@cherrypy.expose
    # TODO consider removing this and providing the version using the static version file
try:
if cherrypy.request.method != "GET":
- raise RoException("Only method GET is allowed", HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "Only method GET is allowed",
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
elif args or kwargs:
- raise RoException("Invalid URL or query string for version", HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "Invalid URL or query string for version",
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
# TODO include version of other modules, pick up from some kafka admin message
osm_ng_ro_version = {"version": ro_version, "date": ro_version_date}
+
return self._format_out(osm_ng_ro_version)
except RoException as e:
cherrypy.response.status = e.http_code.value
"status": e.http_code.value,
"detail": str(e),
}
+
return self._format_out(problem_details, None)
def new_token(self, engine_session, indata, *args, **kwargs):
token_info = self.authenticator.authorize()
except Exception:
token_info = None
+
if kwargs:
indata.update(kwargs)
+
# This is needed to log the user when authentication fails
cherrypy.request.login = "{}".format(indata.get("username", "-"))
- token_info = self.authenticator.new_token(token_info, indata, cherrypy.request.remote)
- cherrypy.session['Authorization'] = token_info["id"]
+ token_info = self.authenticator.new_token(
+ token_info, indata, cherrypy.request.remote
+ )
+ cherrypy.session["Authorization"] = token_info["id"]
self._set_location_header("admin", "v1", "tokens", token_info["id"])
# for logging
# cherrypy.response.cookie["Authorization"] = outdata["id"]
# cherrypy.response.cookie["Authorization"]['expires'] = 3600
+
return token_info, token_info["id"], True
def del_token(self, engine_session, indata, version, _id, *args, **kwargs):
token_id = _id
+
if not token_id and "id" in kwargs:
token_id = kwargs["id"]
elif not token_id:
token_info = self.authenticator.authorize()
# for logging
token_id = token_info["id"]
+
self.authenticator.del_token(token_id)
token_info = None
- cherrypy.session['Authorization'] = "logout"
+ cherrypy.session["Authorization"] = "logout"
# cherrypy.response.cookie["Authorization"] = token_id
# cherrypy.response.cookie["Authorization"]['expires'] = 0
+
return None, None, True
-
+
@cherrypy.expose
def test(self, *args, **kwargs):
- if not cherrypy.config.get("server.enable_test") or (isinstance(cherrypy.config["server.enable_test"], str) and
- cherrypy.config["server.enable_test"].lower() == "false"):
+ if not cherrypy.config.get("server.enable_test") or (
+ isinstance(cherrypy.config["server.enable_test"], str)
+ and cherrypy.config["server.enable_test"].lower() == "false"
+ ):
cherrypy.response.status = HTTPStatus.METHOD_NOT_ALLOWED.value
+
return "test URL is disabled"
+
thread_info = None
- if args and args[0] == "help":
- return "<html><pre>\ninit\nfile/<name> download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"\
- "sleep/<time>\nmessage/topic\n</pre></html>"
+ if args and args[0] == "help":
+ return (
+ "<html><pre>\ninit\nfile/<name> download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"
+ "sleep/<time>\nmessage/topic\n</pre></html>"
+ )
elif args and args[0] == "init":
try:
# self.ns.load_dbase(cherrypy.request.app.config)
self.ns.create_admin()
+
return "Done. User 'admin', password 'admin' created"
except Exception:
cherrypy.response.status = HTTPStatus.FORBIDDEN.value
+
return self._format_out("Database already initialized")
elif args and args[0] == "file":
- return cherrypy.lib.static.serve_file(cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1],
- "text/plain", "attachment")
+ return cherrypy.lib.static.serve_file(
+ cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1],
+ "text/plain",
+ "attachment",
+ )
elif args and args[0] == "file2":
- f_path = cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1]
+ f_path = cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1]
f = open(f_path, "r")
cherrypy.response.headers["Content-type"] = "text/plain"
return f
folders = (args[1],)
else:
folders = self.ns.fs.dir_ls(".")
+
for folder in folders:
self.ns.fs.file_delete(folder)
+
return ",".join(folders) + " folders deleted\n"
elif args and args[0] == "login":
if not cherrypy.request.headers.get("Authorization"):
- cherrypy.response.headers["WWW-Authenticate"] = 'Basic realm="Access to OSM site", charset="UTF-8"'
+ cherrypy.response.headers[
+ "WWW-Authenticate"
+ ] = 'Basic realm="Access to OSM site", charset="UTF-8"'
cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
elif args and args[0] == "login2":
if not cherrypy.request.headers.get("Authorization"):
- cherrypy.response.headers["WWW-Authenticate"] = 'Bearer realm="Access to OSM site"'
+ cherrypy.response.headers[
+ "WWW-Authenticate"
+ ] = 'Bearer realm="Access to OSM site"'
cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
elif args and args[0] == "sleep":
sleep_time = 5
+
try:
sleep_time = int(args[1])
except Exception:
cherrypy.response.status = HTTPStatus.FORBIDDEN.value
return self._format_out("Database already initialized")
+
thread_info = cherrypy.thread_data
print(thread_info)
time.sleep(sleep_time)
elif len(args) >= 2 and args[0] == "message":
main_topic = args[1]
return_text = "<html><pre>{} ->\n".format(main_topic)
+
try:
- if cherrypy.request.method == 'POST':
+ if cherrypy.request.method == "POST":
to_send = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
for k, v in to_send.items():
self.ns.msg.write(main_topic, k, v)
return_text += " {}: {}\n".format(k, v)
- elif cherrypy.request.method == 'GET':
+ elif cherrypy.request.method == "GET":
for k, v in kwargs.items():
- self.ns.msg.write(main_topic, k, yaml.load(v, Loader=yaml.SafeLoader))
- return_text += " {}: {}\n".format(k, yaml.load(v, Loader=yaml.SafeLoader))
+ self.ns.msg.write(
+ main_topic, k, yaml.load(v, Loader=yaml.SafeLoader)
+ )
+ return_text += " {}: {}\n".format(
+ k, yaml.load(v, Loader=yaml.SafeLoader)
+ )
except Exception as e:
return_text += "Error: " + str(e)
+
return_text += "</pre></html>\n"
+
return return_text
return_text = (
- "<html><pre>\nheaders:\n args: {}\n".format(args) +
- " kwargs: {}\n".format(kwargs) +
- " headers: {}\n".format(cherrypy.request.headers) +
- " path_info: {}\n".format(cherrypy.request.path_info) +
- " query_string: {}\n".format(cherrypy.request.query_string) +
- " session: {}\n".format(cherrypy.session) +
- " cookie: {}\n".format(cherrypy.request.cookie) +
- " method: {}\n".format(cherrypy.request.method) +
- " session: {}\n".format(cherrypy.session.get('fieldname')) +
- " body:\n")
+ "<html><pre>\nheaders:\n args: {}\n".format(args)
+ + " kwargs: {}\n".format(kwargs)
+ + " headers: {}\n".format(cherrypy.request.headers)
+ + " path_info: {}\n".format(cherrypy.request.path_info)
+ + " query_string: {}\n".format(cherrypy.request.query_string)
+ + " session: {}\n".format(cherrypy.session)
+ + " cookie: {}\n".format(cherrypy.request.cookie)
+ + " method: {}\n".format(cherrypy.request.method)
+ + " session: {}\n".format(cherrypy.session.get("fieldname"))
+ + " body:\n"
+ )
return_text += " length: {}\n".format(cherrypy.request.body.length)
+
if cherrypy.request.body.length:
return_text += " content: {}\n".format(
- str(cherrypy.request.body.read(int(cherrypy.request.headers.get('Content-Length', 0)))))
+ str(
+ cherrypy.request.body.read(
+ int(cherrypy.request.headers.get("Content-Length", 0))
+ )
+ )
+ )
+
if thread_info:
return_text += "thread: {}\n".format(thread_info)
+
return_text += "</pre></html>"
+
return return_text
@staticmethod
def _check_valid_url_method(method, *args):
if len(args) < 3:
- raise RoException("URL must contain at least 'main_topic/version/topic'", HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "URL must contain at least 'main_topic/version/topic'",
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
reference = valid_url_methods
for arg in args:
if arg is None:
break
+
if not isinstance(reference, dict):
- raise RoException("URL contains unexpected extra items '{}'".format(arg),
- HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "URL contains unexpected extra items '{}'".format(arg),
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
if arg in reference:
reference = reference[arg]
# reference = reference["*"]
break
else:
- raise RoException("Unexpected URL item {}".format(arg), HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "Unexpected URL item {}".format(arg),
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
if "TODO" in reference and method in reference["TODO"]:
- raise RoException("Method {} not supported yet for this URL".format(method), HTTPStatus.NOT_IMPLEMENTED)
+ raise RoException(
+ "Method {} not supported yet for this URL".format(method),
+ HTTPStatus.NOT_IMPLEMENTED,
+ )
elif "METHODS" not in reference or method not in reference["METHODS"]:
- raise RoException("Method {} not supported for this URL".format(method), HTTPStatus.METHOD_NOT_ALLOWED)
+ raise RoException(
+ "Method {} not supported for this URL".format(method),
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
return reference["ROLE_PERMISSION"] + method.lower()
@staticmethod
:return: None
"""
        # Use cherrypy.request.base for the absolute path and make use of the HOST request header in case we are behind a NAT
- cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format(main_topic, version, topic, id)
+ cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format(
+ main_topic, version, topic, id
+ )
+
return
@cherrypy.expose
- def default(self, main_topic=None, version=None, topic=None, _id=None, _id2=None, *args, **kwargs):
+ def default(
+ self,
+ main_topic=None,
+ version=None,
+ topic=None,
+ _id=None,
+ _id2=None,
+ *args,
+ **kwargs,
+ ):
token_info = None
outdata = None
_format = None
method = "DONE"
rollback = []
engine_session = None
+
try:
if not main_topic or not version or not topic:
- raise RoException("URL must contain at least 'main_topic/version/topic'",
- HTTPStatus.METHOD_NOT_ALLOWED)
- if main_topic not in ("admin", "ns",):
- raise RoException("URL main_topic '{}' not supported".format(main_topic),
- HTTPStatus.METHOD_NOT_ALLOWED)
- if version != 'v1':
- raise RoException("URL version '{}' not supported".format(version), HTTPStatus.METHOD_NOT_ALLOWED)
-
- if kwargs and "METHOD" in kwargs and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH"):
+ raise RoException(
+ "URL must contain at least 'main_topic/version/topic'",
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
+ if main_topic not in (
+ "admin",
+ "ns",
+ ):
+ raise RoException(
+ "URL main_topic '{}' not supported".format(main_topic),
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
+ if version != "v1":
+ raise RoException(
+ "URL version '{}' not supported".format(version),
+ HTTPStatus.METHOD_NOT_ALLOWED,
+ )
+
+ if (
+ kwargs
+ and "METHOD" in kwargs
+ and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH")
+ ):
method = kwargs.pop("METHOD")
else:
method = cherrypy.request.method
- role_permission = self._check_valid_url_method(method, main_topic, version, topic, _id, _id2, *args,
- **kwargs)
+ role_permission = self._check_valid_url_method(
+ method, main_topic, version, topic, _id, _id2, *args, **kwargs
+ )
# skip token validation if requesting a token
indata = self._format_in(kwargs)
+
if main_topic != "admin" or topic != "tokens":
token_info = self.authenticator.authorize(role_permission, _id)
+
outdata, created_id, done = self.map_operation[role_permission](
- engine_session, indata, version, _id, _id2, *args, *kwargs)
+ engine_session, indata, version, _id, _id2, *args, *kwargs
+ )
+
if created_id:
self._set_location_header(main_topic, version, topic, _id)
- cherrypy.response.status = HTTPStatus.ACCEPTED.value if not done else HTTPStatus.OK.value if \
- outdata is not None else HTTPStatus.NO_CONTENT.value
+
+ cherrypy.response.status = (
+ HTTPStatus.ACCEPTED.value
+ if not done
+ else HTTPStatus.OK.value
+ if outdata is not None
+ else HTTPStatus.NO_CONTENT.value
+ )
+
return self._format_out(outdata, token_info, _format)
except Exception as e:
- if isinstance(e, (RoException, NsException, DbException, FsException, MsgException, AuthException,
- ValidationError)):
+ if isinstance(
+ e,
+ (
+ RoException,
+ NsException,
+ DbException,
+ FsException,
+ MsgException,
+ AuthException,
+ ValidationError,
+ ),
+ ):
http_code_value = cherrypy.response.status = e.http_code.value
http_code_name = e.http_code.name
cherrypy.log("Exception {}".format(e))
else:
- http_code_value = cherrypy.response.status = HTTPStatus.BAD_REQUEST.value # INTERNAL_SERVER_ERROR
+ http_code_value = (
+ cherrypy.response.status
+ ) = HTTPStatus.BAD_REQUEST.value # INTERNAL_SERVER_ERROR
cherrypy.log("CRITICAL: Exception {}".format(e), traceback=True)
http_code_name = HTTPStatus.BAD_REQUEST.name
+
if hasattr(outdata, "close"): # is an open file
outdata.close()
+
error_text = str(e)
rollback.reverse()
+
for rollback_item in rollback:
try:
if rollback_item.get("operation") == "set":
- self.ns.db.set_one(rollback_item["topic"], {"_id": rollback_item["_id"]},
- rollback_item["content"], fail_on_empty=False)
+ self.ns.db.set_one(
+ rollback_item["topic"],
+ {"_id": rollback_item["_id"]},
+ rollback_item["content"],
+ fail_on_empty=False,
+ )
else:
- self.ns.db.del_one(rollback_item["topic"], {"_id": rollback_item["_id"]},
- fail_on_empty=False)
+ self.ns.db.del_one(
+ rollback_item["topic"],
+ {"_id": rollback_item["_id"]},
+ fail_on_empty=False,
+ )
except Exception as e2:
- rollback_error_text = "Rollback Exception {}: {}".format(rollback_item, e2)
+ rollback_error_text = "Rollback Exception {}: {}".format(
+ rollback_item, e2
+ )
cherrypy.log(rollback_error_text)
error_text += ". " + rollback_error_text
+
# if isinstance(e, MsgException):
# error_text = "{} has been '{}' but other modules cannot be informed because an error on bus".format(
# engine_topic[:-1], method, error_text)
"status": http_code_value,
"detail": error_text,
}
+
return self._format_out(problem_details, token_info)
# raise cherrypy.HTTPError(e.http_code.value, str(e))
finally:
if method in ("PUT", "PATCH", "POST") and isinstance(outdata, dict):
for logging_id in ("id", "op_id", "nsilcmop_id", "nslcmop_id"):
if outdata.get(logging_id):
- cherrypy.request.login += ";{}={}".format(logging_id, outdata[logging_id][:36])
+ cherrypy.request.login += ";{}={}".format(
+ logging_id, outdata[logging_id][:36]
+ )
def _start_service():
cherrypy.log.error("Starting osm_ng_ro")
# update general cherrypy configuration
update_dict = {}
-    engine_config = cherrypy.tree.apps['/ro'].config
+    engine_config = cherrypy.tree.apps["/ro"].config
for k, v in environ.items():
if not k.startswith("OSMRO_"):
continue
+
k1, _, k2 = k[6:].lower().partition("_")
+
if not k2:
continue
+
try:
if k1 in ("server", "test", "auth", "log"):
# update [global] configuration
- update_dict[k1 + '.' + k2] = yaml.safe_load(v)
+ update_dict[k1 + "." + k2] = yaml.safe_load(v)
elif k1 == "static":
# update [/static] configuration
engine_config["/static"]["tools.staticdir." + k2] = yaml.safe_load(v)
elif k1 == "tools":
# update [/] configuration
- engine_config["/"]["tools." + k2.replace('_', '.')] = yaml.safe_load(v)
+ engine_config["/"]["tools." + k2.replace("_", ".")] = yaml.safe_load(v)
elif k1 in ("message", "database", "storage", "authentication"):
engine_config[k1][k2] = yaml.safe_load(v)
engine_config["global"].update(update_dict)
# logging cherrypy
- log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
- log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
+ log_format_simple = (
+ "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
+ )
+ log_formatter_simple = logging.Formatter(
+ log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
+ )
logger_server = logging.getLogger("cherrypy.error")
logger_access = logging.getLogger("cherrypy.access")
logger_cherry = logging.getLogger("cherrypy")
logger = logging.getLogger("ro")
if "log.file" in engine_config["global"]:
- file_handler = logging.handlers.RotatingFileHandler(engine_config["global"]["log.file"],
- maxBytes=100e6, backupCount=9, delay=0)
+ file_handler = logging.handlers.RotatingFileHandler(
+ engine_config["global"]["log.file"], maxBytes=100e6, backupCount=9, delay=0
+ )
file_handler.setFormatter(log_formatter_simple)
logger_cherry.addHandler(file_handler)
logger.addHandler(file_handler)
+
    # always log to standard output
- for format_, logger in {"ro.server %(filename)s:%(lineno)s": logger_server,
- "ro.access %(filename)s:%(lineno)s": logger_access,
- "%(name)s %(filename)s:%(lineno)s": logger
- }.items():
+ for format_, logger in {
+ "ro.server %(filename)s:%(lineno)s": logger_server,
+ "ro.access %(filename)s:%(lineno)s": logger_access,
+ "%(name)s %(filename)s:%(lineno)s": logger,
+ }.items():
log_format_cherry = "%(asctime)s %(levelname)s {} %(message)s".format(format_)
- log_formatter_cherry = logging.Formatter(log_format_cherry, datefmt='%Y-%m-%dT%H:%M:%S')
+ log_formatter_cherry = logging.Formatter(
+ log_format_cherry, datefmt="%Y-%m-%dT%H:%M:%S"
+ )
str_handler = logging.StreamHandler()
str_handler.setFormatter(log_formatter_cherry)
logger.addHandler(str_handler)
if engine_config["global"].get("log.level"):
logger_cherry.setLevel(engine_config["global"]["log.level"])
logger.setLevel(engine_config["global"]["log.level"])
+
# logging other modules
- for k1, logname in {"message": "ro.msg", "database": "ro.db", "storage": "ro.fs"}.items():
+ for k1, logname in {
+ "message": "ro.msg",
+ "database": "ro.db",
+ "storage": "ro.fs",
+ }.items():
engine_config[k1]["logger_name"] = logname
logger_module = logging.getLogger(logname)
+
if "logfile" in engine_config[k1]:
- file_handler = logging.handlers.RotatingFileHandler(engine_config[k1]["logfile"],
- maxBytes=100e6, backupCount=9, delay=0)
+ file_handler = logging.handlers.RotatingFileHandler(
+ engine_config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+ )
file_handler.setFormatter(log_formatter_simple)
logger_module.addHandler(file_handler)
+
if "loglevel" in engine_config[k1]:
logger_module.setLevel(engine_config[k1]["loglevel"])
# TODO add more entries, e.g.: storage
engine_config["assignment"] = {}
    # ^ each VIM/SDNc will be assigned one worker id; the Ns class will add items and VimThread will auto-assign
- cherrypy.tree.apps['/ro'].root.ns.start(engine_config)
- cherrypy.tree.apps['/ro'].root.authenticator.start(engine_config)
- cherrypy.tree.apps['/ro'].root.ns.init_db(target_version=database_version)
+ cherrypy.tree.apps["/ro"].root.ns.start(engine_config)
+ cherrypy.tree.apps["/ro"].root.authenticator.start(engine_config)
+ cherrypy.tree.apps["/ro"].root.ns.init_db(target_version=database_version)
# # start subscriptions thread:
vim_admin_thread = VimAdminThread(config=engine_config, engine=ro_server.ns)
TODO: Ending database connections.
"""
global vim_admin_thread
+
# terminate vim_admin_thread
if vim_admin_thread:
vim_admin_thread.terminate()
+
vim_admin_thread = None
- cherrypy.tree.apps['/ro'].root.ns.stop()
+ cherrypy.tree.apps["/ro"].root.ns.stop()
cherrypy.log.error("Stopping osm_ng_ro")
def ro_main(config_file):
global ro_server
+
ro_server = Server()
- cherrypy.engine.subscribe('start', _start_service)
- cherrypy.engine.subscribe('stop', _stop_service)
- cherrypy.quickstart(ro_server, '/ro', config_file)
+ cherrypy.engine.subscribe("start", _start_service)
+ cherrypy.engine.subscribe("stop", _stop_service)
+ cherrypy.quickstart(ro_server, "/ro", config_file)
def usage():
- print("""Usage: {} [options]
+ print(
+ """Usage: {} [options]
-c|--config [configuration_file]: loads the configuration file (default: ./ro.cfg)
-h|--help: shows this help
- """.format(sys.argv[0]))
+ """.format(
+ sys.argv[0]
+ )
+ )
# --log-socket-host HOST: send logs to this host")
# --log-socket-port PORT: send logs using this port (default: 9022)")
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
# load parameters and configuration
opts, args = getopt.getopt(sys.argv[1:], "hvc:", ["config=", "help"])
# TODO add "log-socket-host=", "log-socket-port=", "log-file="
config_file = None
+
for o, a in opts:
if o in ("-h", "--help"):
usage()
config_file = a
else:
assert False, "Unhandled option"
+
if config_file:
if not path.isfile(config_file):
- print("configuration file '{}' that not exist".format(config_file), file=sys.stderr)
+ print(
+                "configuration file '{}' does not exist".format(config_file),
+ file=sys.stderr,
+ )
exit(1)
else:
- for config_file in (path.dirname(__file__) + "/ro.cfg", "./ro.cfg", "/etc/osm/ro.cfg"):
+ for config_file in (
+ path.dirname(__file__) + "/ro.cfg",
+ "./ro.cfg",
+ "/etc/osm/ro.cfg",
+ ):
if path.isfile(config_file):
break
else:
- print("No configuration file 'ro.cfg' found neither at local folder nor at /etc/osm/", file=sys.stderr)
+ print(
+                "No configuration file 'ro.cfg' found, neither in the local folder nor at /etc/osm/",
+ file=sys.stderr,
+ )
exit(1)
+
ro_main(config_file)
except KeyboardInterrupt:
print("KeyboardInterrupt. Finishing", file=sys.stderr)
"""
# Basis schemas
-name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
+name_schema = {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 255,
+ "pattern": "^[^,;()'\"]+$",
+}
string_schema = {"type": "string", "minLength": 1, "maxLength": 255}
ssh_key_schema = {"type": "string", "minLength": 1}
-id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+id_schema = {
+ "type": "string",
+ "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$",
+}
bool_schema = {"type": "boolean"}
null_schema = {"type": "null"}
object_schema = {"type": "object"}
"vim_info": object_schema,
"common_id": string_schema,
},
- "additionalProperties": True
+ "additionalProperties": True,
}
deploy_item_list = {
"type": "object",
"properties": {
"vld": deploy_item_list,
- }
+ },
},
},
- "additionalProperties": False
+ "additionalProperties": False,
}
try:
if schema_to_use:
js_v(indata, schema_to_use)
+
return None
except js_e.ValidationError as e:
if e.path:
error_pos = "at '" + ":".join(map(str, e.path)) + "'"
else:
error_pos = ""
+
raise ValidationError("Format error {} '{}' ".format(error_pos, e.message))
except js_e.SchemaError:
- raise ValidationError("Bad json schema {}".format(schema_to_use), http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise ValidationError(
+ "Bad json schema {}".format(schema_to_use),
+ http_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ )
class VimAdminException(Exception):
-
def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
self.http_code = http_code
Exception.__init__(self, message)
"initial_lock_time": database_object["locked_at"],
"locked_at": database_object["locked_at"],
"thread": thread_object,
- "unlocked": False # True when it is not needed any more
+ "unlocked": False, # True when it is not needed any more
}
LockRenew.renew_list.append(lock_object)
+
return lock_object
@staticmethod
async def renew_locks(self):
while not self.to_terminate:
if not self.renew_list:
- await asyncio.sleep(self.task_locked_time - self.task_relock_time, loop=self.loop)
+ await asyncio.sleep(
+ self.task_locked_time - self.task_relock_time, loop=self.loop
+ )
continue
+
lock_object = self.renew_list[0]
- if lock_object["unlocked"] or not lock_object["thread"] or not lock_object["thread"].is_alive():
+
+ if (
+ lock_object["unlocked"]
+ or not lock_object["thread"]
+ or not lock_object["thread"].is_alive()
+ ):
                # task has finished or the locker thread is dead; no need to re-lock it.
self.renew_list.pop(0)
continue
locked_at = lock_object["locked_at"]
now = time()
- time_to_relock = locked_at + self.task_locked_time - self.task_relock_time - now
+ time_to_relock = (
+ locked_at + self.task_locked_time - self.task_relock_time - now
+ )
+
if time_to_relock < 1:
if lock_object["initial_lock_time"] + self.task_max_locked_time < now:
self.renew_list.pop(0)
# re-lock
new_locked_at = locked_at + self.task_locked_time
+
try:
- if self.db.set_one(lock_object["table"],
- update_dict={"locked_at": new_locked_at, "modified_at": now},
- q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
- fail_on_empty=False):
- self.logger.debug("Renew lock for {}.{}".format(lock_object["table"], lock_object["_id"]))
+ if self.db.set_one(
+ lock_object["table"],
+ update_dict={
+ "locked_at": new_locked_at,
+ "modified_at": now,
+ },
+ q_filter={
+ "_id": lock_object["_id"],
+ "locked_at": locked_at,
+ },
+ fail_on_empty=False,
+ ):
+ self.logger.debug(
+ "Renew lock for {}.{}".format(
+ lock_object["table"], lock_object["_id"]
+ )
+ )
lock_object["locked_at"] = new_locked_at
self.renew_list.append(lock_object)
else:
- self.logger.info("Cannot renew lock for {}.{}".format(lock_object["table"],
- lock_object["_id"]))
+ self.logger.info(
+ "Cannot renew lock for {}.{}".format(
+ lock_object["table"], lock_object["_id"]
+ )
+ )
except Exception as e:
- self.logger.error("Exception when trying to renew lock for {}.{}: {}".format(
- lock_object["table"], lock_object["_id"], e))
+ self.logger.error(
+ "Exception when trying to renew lock for {}.{}: {}".format(
+ lock_object["table"], lock_object["_id"], e
+ )
+ )
else:
# wait until it is time to re-lock it
await asyncio.sleep(time_to_relock, loop=self.loop)
def stop(self):
# unlock all locked items
now = time()
+
for lock_object in self.renew_list:
locked_at = lock_object["locked_at"]
+
if not lock_object["unlocked"] or locked_at + self.task_locked_time >= now:
- self.db.set_one(lock_object["table"], update_dict={"locked_at": 0},
- q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
- fail_on_empty=False)
+ self.db.set_one(
+ lock_object["table"],
+ update_dict={"locked_at": 0},
+ q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
+ fail_on_empty=False,
+ )
class VimAdminThread(threading.Thread):
self.last_rotask_time = 0
self.next_check_unused_vim = time() + self.TIME_CHECK_UNUSED_VIM
self.logger = logging.getLogger("ro.vimadmin")
- self.aiomain_task_kafka = None # asyncio task for receiving vim actions from kafka bus
- self.aiomain_task_vim = None # asyncio task for watching ro_tasks not processed by nobody
+ # asyncio task for receiving vim actions from kafka bus
+ self.aiomain_task_kafka = None
+        # asyncio task for watching ro_tasks not processed by anybody
+ self.aiomain_task_vim = None
self.aiomain_task_renew_lock = None
        # ^asyncio task to keep an ro_task locked when the VIM plugin takes too much time processing an order
self.lock_renew = LockRenew(config, self.logger)
self.task_locked_time = config["global"]["task_locked_time"]
async def vim_watcher(self):
- """ Reads database periodically looking for tasks not processed by nobody because of a reboot
+        """Reads the database periodically looking for tasks not processed by anybody because of a reboot,
in order to load this vim"""
        # first, read VIMs not yet processed
for target_database in ("vim_accounts", "wim_accounts", "sdns"):
- unattended_targets = self.db.get_list(target_database,
- q_filter={"_admin.operations.operationState": "PROCESSING"})
+ unattended_targets = self.db.get_list(
+ target_database,
+ q_filter={"_admin.operations.operationState": "PROCESSING"},
+ )
+
for target in unattended_targets:
target_id = "{}:{}".format(target_database[:3], target["_id"])
self.logger.info("ordered to check {}".format(target_id))
while not self.to_terminate:
now = time()
processed_vims = []
+
if not self.last_rotask_time:
self.last_rotask_time = 0
- ro_tasks = self.db.get_list("ro_tasks",
- q_filter={"target_id.ncont": self.engine.get_assigned_vims(),
- "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
- "locked_at.lt": now - self.task_locked_time,
- "to_check_at.gt": self.last_rotask_time,
- "to_check_at.lte": now - self.MAX_TIME_UNATTENDED})
+
+ ro_tasks = self.db.get_list(
+ "ro_tasks",
+ q_filter={
+ "target_id.ncont": self.engine.get_assigned_vims(),
+ "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+ "locked_at.lt": now - self.task_locked_time,
+ "to_check_at.gt": self.last_rotask_time,
+ "to_check_at.lte": now - self.MAX_TIME_UNATTENDED,
+ },
+ )
self.last_rotask_time = now - self.MAX_TIME_UNATTENDED
+
for ro_task in ro_tasks:
# if already checked ignore
if ro_task["target_id"] in processed_vims:
continue
+
processed_vims.append(ro_task["target_id"])
+
# if already assigned ignore
if ro_task["target_id"] in self.engine.get_assigned_vims():
continue
+
# if there is some task locked on this VIM, there is an RO working on it, so ignore
- if self.db.get_list("ro_tasks",
- q_filter={"target_id": ro_task["target_id"],
- "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
- "locked_at.gt": now - self.task_locked_time}):
+ if self.db.get_list(
+ "ro_tasks",
+ q_filter={
+ "target_id": ro_task["target_id"],
+ "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+ "locked_at.gt": now - self.task_locked_time,
+ },
+ ):
continue
+
# unattended, assign vim
self.engine.assign_vim(ro_task["target_id"])
- self.logger.debug("ordered to load {}. Inactivity detected".format(ro_task["target_id"]))
+ self.logger.debug(
+ "ordered to load {}. Inactivity detected".format(
+ ro_task["target_id"]
+ )
+ )
            # every 2 hours, check if there are VIMs without any ro_task and unload them
if now > self.next_check_unused_vim:
self.next_check_unused_vim = now + self.TIME_CHECK_UNUSED_VIM
self.engine.unload_unused_vims()
+
await asyncio.sleep(self.MAX_TIME_UNATTENDED, loop=self.loop)
async def aiomain(self):
try:
if not self.aiomain_task_kafka:
# await self.msg.aiowrite("admin", "echo", "dummy message", loop=self.loop)
- await self.msg.aiowrite("vim_account", "echo", "dummy message", loop=self.loop)
+ await self.msg.aiowrite(
+ "vim_account", "echo", "dummy message", loop=self.loop
+ )
kafka_working = True
self.logger.debug("Starting vim_account subscription task")
self.aiomain_task_kafka = asyncio.ensure_future(
- self.msg.aioread(self.kafka_topics, loop=self.loop, group_id=False,
- aiocallback=self._msg_callback),
- loop=self.loop)
+ self.msg.aioread(
+ self.kafka_topics,
+ loop=self.loop,
+ group_id=False,
+ aiocallback=self._msg_callback,
+ ),
+ loop=self.loop,
+ )
+
if not self.aiomain_task_vim:
self.aiomain_task_vim = asyncio.ensure_future(
- self.vim_watcher(),
- loop=self.loop)
+ self.vim_watcher(), loop=self.loop
+ )
+
if not self.aiomain_task_renew_lock:
- self.aiomain_task_renew_lock = asyncio.ensure_future(self.lock_renew.renew_locks(), loop=self.loop)
+ self.aiomain_task_renew_lock = asyncio.ensure_future(
+ self.lock_renew.renew_locks(), loop=self.loop
+ )
done, _ = await asyncio.wait(
- [self.aiomain_task_kafka, self.aiomain_task_vim, self.aiomain_task_renew_lock],
- timeout=None, loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
+ [
+ self.aiomain_task_kafka,
+ self.aiomain_task_vim,
+ self.aiomain_task_renew_lock,
+ ],
+ timeout=None,
+ loop=self.loop,
+ return_when=asyncio.FIRST_COMPLETED,
+ )
+
try:
if self.aiomain_task_kafka in done:
exc = self.aiomain_task_kafka.exception()
- self.logger.error("kafka subscription task exception: {}".format(exc))
+ self.logger.error(
+ "kafka subscription task exception: {}".format(exc)
+ )
self.aiomain_task_kafka = None
+
if self.aiomain_task_vim in done:
exc = self.aiomain_task_vim.exception()
- self.logger.error("vim_account watcher task exception: {}".format(exc))
+ self.logger.error(
+ "vim_account watcher task exception: {}".format(exc)
+ )
self.aiomain_task_vim = None
+
if self.aiomain_task_renew_lock in done:
exc = self.aiomain_task_renew_lock.exception()
self.logger.error("renew_locks task exception: {}".format(exc))
except Exception as e:
if self.to_terminate:
return
+
if kafka_working:
# logging only first time
- self.logger.critical("Error accessing kafka '{}'. Retrying ...".format(e))
+ self.logger.critical(
+ "Error accessing kafka '{}'. Retrying ...".format(e)
+ )
kafka_working = False
+
await asyncio.sleep(10, loop=self.loop)
def run(self):
self.db = dbmemory.DbMemory()
self.db.db_connect(self.config["database"])
else:
- raise VimAdminException("Invalid configuration param '{}' at '[database]':'driver'".format(
- self.config["database"]["driver"]))
+ raise VimAdminException(
+ "Invalid configuration param '{}' at '[database]':'driver'".format(
+ self.config["database"]["driver"]
+ )
+ )
+
self.lock_renew.start(self.db, self.loop)
if not self.msg:
config_msg = self.config["message"].copy()
config_msg["loop"] = self.loop
+
if config_msg["driver"] == "local":
self.msg = msglocal.MsgLocal()
self.msg.connect(config_msg)
self.msg = msgkafka.MsgKafka()
self.msg.connect(config_msg)
else:
- raise VimAdminException("Invalid configuration param '{}' at '[message]':'driver'".format(
- config_msg["driver"]))
+ raise VimAdminException(
+ "Invalid configuration param '{}' at '[message]':'driver'".format(
+ config_msg["driver"]
+ )
+ )
except (DbException, MsgException) as e:
raise VimAdminException(str(e), http_code=e.http_code)
self.logger.info("Starting")
while not self.to_terminate:
try:
- self.loop.run_until_complete(asyncio.ensure_future(self.aiomain(), loop=self.loop))
+ self.loop.run_until_complete(
+ asyncio.ensure_future(self.aiomain(), loop=self.loop)
+ )
# except asyncio.CancelledError:
# break # if cancelled it should end, breaking loop
except Exception as e:
if not self.to_terminate:
- self.logger.exception("Exception '{}' at messaging read loop".format(e), exc_info=True)
+ self.logger.exception(
+ "Exception '{}' at messaging read loop".format(e), exc_info=True
+ )
self.logger.info("Finishing")
self._stop()
try:
if command == "echo":
return
+
if topic in self.kafka_topics:
- target = topic[0:3] # vim, wim or sdn
+ target = topic[0:3] # vim, wim or sdn
target_id = target + ":" + params["_id"]
+
if command in ("edited", "edit"):
self.engine.reload_vim(target_id)
self.logger.debug("ordered to reload {}".format(target_id))
elif command in ("create", "created"):
self.engine.check_vim(target_id)
self.logger.debug("ordered to check {}".format(target_id))
-
except (DbException, MsgException) as e:
- self.logger.error("Error while processing topic={} command={}: {}".format(topic, command, e))
+ self.logger.error(
+ "Error while processing topic={} command={}: {}".format(
+ topic, command, e
+ )
+ )
except Exception as e:
- self.logger.exception("Exception while processing topic={} command={}: {}".format(topic, command, e),
- exc_info=True)
+ self.logger.exception(
+ "Exception while processing topic={} command={}: {}".format(
+ topic, command, e
+ ),
+ exc_info=True,
+ )
def _stop(self):
"""
try:
if self.db:
self.db.db_disconnect()
+
if self.msg:
self.msg.disconnect()
except (DbException, MsgException) as e:
"""
self.to_terminate = True
self.lock_renew.to_terminate = True
+
if self.aiomain_task_kafka:
self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel)
+
if self.aiomain_task_vim:
self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel)
+
if self.aiomain_task_renew_lock:
self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel)
+
self.lock_renew.stop()
_readme = "osm-ng-ro is the New Generation Resource Orchestrator for OSM"
setup(
name=_name,
- description='OSM Resource Orchestrator',
+ description="OSM Resource Orchestrator",
long_description=_readme,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=find_packages(exclude=["temp", "local"]),
include_package_data=True,
install_requires=[
- 'CherryPy==18.1.2',
- 'osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common',
- 'jsonschema',
- 'PyYAML',
- 'requests',
- 'cryptography', # >=2.5 installed right version with the debian post-install script
+ "CherryPy==18.1.2",
+ "osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common",
+ "jsonschema",
+ "PyYAML",
+ "requests",
+ "cryptography", # >=2.5 installed right version with the debian post-install script
"osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
)
basepython = python3
deps = flake8
commands = flake8 osm_ng_ro --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:build]
basepython = python3
switchport_def = self._int_SRIOV.format(service=s_type, vlan_id=vlan_id)\r
else:\r
switchport_def = self._int_PASSTROUGH.format(vlan_id=vlan_id)\r
- return self._basic_int.format(uuid=uuid,\r
- interface=interface,\r
- type=i_type,\r
- switchport_def=switchport_def)\r
+\r
+ return self._basic_int.format(\r
+ uuid=uuid, interface=interface, type=i_type, switchport_def=switchport_def\r
+ )\r
\r
def getElan_sriov(self, uuid, interface, vlan_id, index):\r
return self._get_interface(uuid, interface, vlan_id, "ELAN", index, "trunk")\r
return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "trunk")\r
\r
def getElan_passthrough(self, uuid, interface, vlan_id, index):\r
- return self._get_interface(uuid, interface, vlan_id, "ELAN", index, "dot1q-tunnel")\r
+ return self._get_interface(\r
+ uuid, interface, vlan_id, "ELAN", index, "dot1q-tunnel"\r
+ )\r
\r
def getEline_passthrough(self, uuid, interface, vlan_id, index):\r
- return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel")\r
+ return self._get_interface(\r
+ uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel"\r
+ )\r
\r
_basic_vlan = """\r
vlan {vlan}\r
def _get_vlan(self, uuid, vlan_id, vni_id, s_type):\r
if self.topology == self._VLAN:\r
return self._configLet_VLAN.format(service=s_type, vlan=vlan_id, uuid=uuid)\r
+\r
if self.topology == self._VLAN_MLAG:\r
- return self._configLet_VLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid)\r
+ return self._configLet_VLAN_MLAG.format(\r
+ service=s_type, vlan=vlan_id, uuid=uuid\r
+ )\r
+\r
if self.topology == self._VXLAN:\r
- return self._configLet_VXLAN.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id)\r
+ return self._configLet_VXLAN.format(\r
+ service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id\r
+ )\r
+\r
if self.topology == self._VXLAN_MLAG:\r
- return self._configLet_VXLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id)\r
+ return self._configLet_VXLAN_MLAG.format(\r
+ service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id\r
+ )\r
\r
def getElan_vlan(self, uuid, vlan_id, vni_id):\r
return self._get_vlan(uuid, vlan_id, vni_id, "ELAN")\r
\r
def _get_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp, s_type):\r
if self.topology == self._VXLAN or self.topology == self._VXLAN_MLAG:\r
- return self._configLet_BGP.format(uuid=uuid,\r
- bgp=bgp,\r
- vlan=vlan_id,\r
- loopback=loopback0,\r
- vni=vni_id)\r
+ return self._configLet_BGP.format(\r
+ uuid=uuid, bgp=bgp, vlan=vlan_id, loopback=loopback0, vni=vni_id\r
+ )\r
\r
def getElan_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp):\r
return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELAN")\r
def __apply_state(self, task, state):\r
t_id = self.__get_id(task)\r
self.cvpClientApi.add_note_to_task(t_id, "Executed by OSM")\r
+\r
if state == "executed":\r
return self.__execute_task(t_id)\r
elif state == "cancelled":\r
\r
def update_all_tasks(self, data):\r
new_data = dict()\r
+\r
for task_id in data.keys():\r
res = self.cvpClientApi.get_task_by_id(task_id)\r
new_data[task_id] = res\r
+\r
return new_data\r
\r
def get_pending_tasks(self):\r
- return self.cvpClientApi.get_tasks_by_status('Pending')\r
+ return self.cvpClientApi.get_tasks_by_status("Pending")\r
\r
def get_pending_tasks_old(self):\r
taskList = []\r
- tasksField = {'workOrderId': 'workOrderId',\r
- 'workOrderState': 'workOrderState',\r
- 'currentTaskName': 'currentTaskName',\r
- 'description': 'description',\r
- 'workOrderUserDefinedStatus':\r
- 'workOrderUserDefinedStatus',\r
- 'note': 'note',\r
- 'taskStatus': 'taskStatus',\r
- 'workOrderDetails': 'workOrderDetails'}\r
- tasks = self.cvpClientApi.get_tasks_by_status('Pending')\r
+ tasksField = {\r
+ "workOrderId": "workOrderId",\r
+ "workOrderState": "workOrderState",\r
+ "currentTaskName": "currentTaskName",\r
+ "description": "description",\r
+ "workOrderUserDefinedStatus": "workOrderUserDefinedStatus",\r
+ "note": "note",\r
+ "taskStatus": "taskStatus",\r
+ "workOrderDetails": "workOrderDetails",\r
+ }\r
+ tasks = self.cvpClientApi.get_tasks_by_status("Pending")\r
+\r
# Reduce task data to required fields\r
for task in tasks:\r
taskFacts = {}\r
for field in task.keys():\r
if field in tasksField:\r
taskFacts[tasksField[field]] = task[field]\r
+\r
taskList.append(taskFacts)\r
+\r
return taskList\r
\r
def task_action(self, tasks, wait, state):\r
now = time.time()\r
while (now - start) < wait:\r
data = self.update_all_tasks(data)\r
+\r
if all([self.__terminal(self.__get_state(t)) for t in data.values()]):\r
break\r
+\r
time.sleep(1)\r
now = time.time()\r
\r
if wait:\r
for i, task in data.items():\r
if not self.__terminal(self.__get_state(task)):\r
- warnings.append("Task {} has not completed in {} seconds".\r
- format(i, wait))\r
+ warnings.append(\r
+ "Task {} has not completed in {} seconds".format(i, wait)\r
+ )\r
\r
return changed, data, warnings\r
#
# This work has been performed in the context of Arista Telefonica OSM PoC.
##
+
from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
import re
import socket
+
# Required by compare function
import difflib
+
# Library that uses Levenshtein Distance to calculate the differences
# between strings.
# from fuzzywuzzy import fuzz
class SdnError(Enum):
- UNREACHABLE = 'Unable to reach the WIM url, connect error.',
- TIMEOUT = 'Unable to reach the WIM url, timeout.',
- VLAN_INCONSISTENT = \
- 'VLAN value inconsistent between the connection points',
- VLAN_NOT_PROVIDED = 'VLAN value not provided',
- CONNECTION_POINTS_SIZE = \
- 'Unexpected number of connection points: 2 expected.',
- ENCAPSULATION_TYPE = \
- 'Unexpected service_endpoint_encapsulation_type. ' \
- 'Only "dotq1" is accepted.',
- BANDWIDTH = 'Unable to get the bandwidth.',
- STATUS = 'Unable to get the status for the service.',
- DELETE = 'Unable to delete service.',
- CLEAR_ALL = 'Unable to clear all the services',
- UNKNOWN_ACTION = 'Unknown action invoked.',
- BACKUP = 'Unable to get the backup parameter.',
- UNSUPPORTED_FEATURE = "Unsupported feature",
- UNAUTHORIZED = "Failed while authenticating",
+ UNREACHABLE = "Unable to reach the WIM url, connect error."
+ TIMEOUT = "Unable to reach the WIM url, timeout."
+ VLAN_INCONSISTENT = "VLAN value inconsistent between the connection points"
+ VLAN_NOT_PROVIDED = "VLAN value not provided"
+ CONNECTION_POINTS_SIZE = "Unexpected number of connection points: 2 expected."
+ ENCAPSULATION_TYPE = (
+ 'Unexpected service_endpoint_encapsulation_type. Only "dotq1" is accepted.'
+ )
+ BANDWIDTH = "Unable to get the bandwidth."
+ STATUS = "Unable to get the status for the service."
+ DELETE = "Unable to delete service."
+ CLEAR_ALL = "Unable to clear all the services"
+ UNKNOWN_ACTION = "Unknown action invoked."
+ BACKUP = "Unable to get the backup parameter."
+ UNSUPPORTED_FEATURE = "Unsupported feature"
+ UNAUTHORIZED = "Failed while authenticating"
INTERNAL_ERROR = "Internal error"
-- All created services identification is stored in a generic ConfigLet 'OSM_metadata'
to keep track of the managed resources by OSM in the Arista deployment.
"""
+
__supported_service_types = ["ELINE (L2)", "ELINE", "ELAN"]
__service_types_ELAN = "ELAN"
__service_types_ELINE = "ELINE"
__ELINE_num_connection_points = 2
__supported_service_types = ["ELINE", "ELAN"]
__supported_encapsulation_types = ["dot1q"]
- __WIM_LOGGER = 'ro.sdn.arista'
- __SERVICE_ENDPOINT_MAPPING = 'service_endpoint_mapping'
+ __WIM_LOGGER = "ro.sdn.arista"
+ __SERVICE_ENDPOINT_MAPPING = "service_endpoint_mapping"
__ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
__ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
__BACKUP_PARAM = "backup"
__SW_PORT_PARAM = "switch_port"
__VLAN_PARAM = "vlan"
__VNI_PARAM = "vni"
- __SEPARATOR = '_'
- __MANAGED_BY_OSM = '## Managed by OSM '
+ __SEPARATOR = "_"
+ __MANAGED_BY_OSM = "## Managed by OSM "
__OSM_PREFIX = "osm_"
__OSM_METADATA = "OSM_metadata"
- __METADATA_PREFIX = '!## Service'
+ __METADATA_PREFIX = "!## Service"
__EXC_TASK_EXEC_WAIT = 10
__ROLLB_TASK_EXEC_WAIT = 10
__API_REQUEST_TOUT = 60
- __SWITCH_TAG_NAME = 'topology_type'
- __SWITCH_TAG_VALUE = 'leaf'
+ __SWITCH_TAG_NAME = "topology_type"
+ __SWITCH_TAG_VALUE = "leaf"
__LOOPBACK_INTF = "Loopback0"
_VLAN = "VLAN"
_VXLAN = "VXLAN"
:param logger (logging.Logger): optional logger object. If none is passed 'ro.sdn.sdnconn' is used.
"""
self.__regex = re.compile(
- r'^(?:http|ftp)s?://' # http:// or https://
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
- r'localhost|' # localhost...
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
- r'(?::\d+)?', re.IGNORECASE) # optional port
+ r"^(?:http|ftp)s?://" # http:// or https://
+ r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
+ r"localhost|" # localhost...
+ r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
+ r"(?::\d+)?",
+ re.IGNORECASE,
+ ) # optional port
self.raiseException = True
self.logger = logger or logging.getLogger(self.__WIM_LOGGER)
super().__init__(wim, wim_account, config, self.logger)
self.__wim = wim
self.__wim_account = wim_account
self.__config = config
+
if self.is_valid_destination(self.__wim.get("wim_url")):
self.__wim_url = self.__wim.get("wim_url")
else:
- raise SdnConnectorError(message='Invalid wim_url value',
- http_code=500)
+ raise SdnConnectorError(message="Invalid wim_url value", http_code=500)
+
self.__user = wim_account.get("user")
self.__passwd = wim_account.get("password")
self.client = None
self.cvp_inventory = None
self.cvp_tags = None
- self.logger.debug("Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".
- format(wim, cvprac_version, self.__user,
- self.delete_keys_from_dict(config, ('passwd',))))
+ self.logger.debug(
+ "Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".format(
+ wim,
+ cvprac_version,
+ self.__user,
+ self.delete_keys_from_dict(config, ("passwd",)),
+ )
+ )
self.allDeviceFacts = []
self.taskC = None
+
try:
self.__load_topology()
self.__load_switches()
except (ConnectTimeout, Timeout) as ct:
- raise SdnConnectorError(message=SdnError.TIMEOUT + " " + str(ct), http_code=408)
+ raise SdnConnectorError(
+ message=SdnError.TIMEOUT + " " + str(ct), http_code=408
+ )
except ConnectionError as ce:
- raise SdnConnectorError(message=SdnError.UNREACHABLE + " " + str(ce), http_code=404)
+ raise SdnConnectorError(
+ message=SdnError.UNREACHABLE + " " + str(ce), http_code=404
+ )
except SdnConnectorError as sc:
raise sc
except CvpLoginError as le:
raise SdnConnectorError(message=le.msg, http_code=500) from le
except Exception as e:
- raise SdnConnectorError(message="Unable to load switches from CVP" + " " + str(e),
- http_code=500) from e
- self.logger.debug("Using topology {} in Arista Leaf switches: {}".format(
- self.topology,
- self.delete_keys_from_dict(self.switches, ('passwd',))))
+ raise SdnConnectorError(
+ message="Unable to load switches from CVP " + str(e), http_code=500
+ ) from e
+
+ self.logger.debug(
+ "Using topology {} in Arista Leaf switches: {}".format(
+ self.topology, self.delete_keys_from_dict(self.switches, ("passwd",))
+ )
+ )
self.clC = AristaSDNConfigLet(self.topology)
def __load_topology(self):
self.topology = self._VXLAN_MLAG
- if self.__config and self.__config.get('topology'):
- topology = self.__config.get('topology')
+
+ if self.__config and self.__config.get("topology"):
+ topology = self.__config.get("topology")
+
if topology == "VLAN":
self.topology = self._VLAN
elif topology == "VXLAN":
self.topology = self._VXLAN_MLAG
def __load_switches(self):
- """ Retrieves the switches to configure in the following order
+ """Retrieves the switches to configure in the following order
1. from incoming configuration:
1.1 using port mapping
using user and password from WIM
for port in self.__config.get(self.__SERVICE_ENDPOINT_MAPPING):
switch_dpid = port.get(self.__SW_ID_PARAM)
if switch_dpid and switch_dpid not in self.switches:
- self.switches[switch_dpid] = {'passwd': self.__passwd,
- 'ip': None,
- 'usr': self.__user,
- 'lo0': None,
- 'AS': None,
- 'serialNumber': None,
- 'mlagPeerDevice': None}
-
- if self.__config and self.__config.get('switches'):
+ self.switches[switch_dpid] = {
+ "passwd": self.__passwd,
+ "ip": None,
+ "usr": self.__user,
+ "lo0": None,
+ "AS": None,
+ "serialNumber": None,
+ "mlagPeerDevice": None,
+ }
+
+ if self.__config and self.__config.get("switches"):
# Not directly from json, complete one by one
- config_switches = self.__config.get('switches')
+ config_switches = self.__config.get("switches")
for cs, cs_content in config_switches.items():
if cs not in self.switches:
- self.switches[cs] = {'passwd': self.__passwd,
- 'ip': None,
- 'usr': self.__user,
- 'lo0': None,
- 'AS': None,
- 'serialNumber': None,
- 'mlagPeerDevice': None}
+ self.switches[cs] = {
+ "passwd": self.__passwd,
+ "ip": None,
+ "usr": self.__user,
+ "lo0": None,
+ "AS": None,
+ "serialNumber": None,
+ "mlagPeerDevice": None,
+ }
+
if cs_content:
self.switches[cs].update(cs_content)
# Load the rest of the data
if self.client is None:
self.client = self.__connect()
+
self.__load_inventory()
+
if not self.switches:
self.__get_tags(self.__SWITCH_TAG_NAME, self.__SWITCH_TAG_VALUE)
+
for device in self.allDeviceFacts:
# get the switches whose topology_tag is 'leaf'
- if device['serialNumber'] in self.cvp_tags:
- if not self.switches.get(device['hostname']):
- switch_data = {'passwd': self.__passwd,
- 'ip': device['ipAddress'],
- 'usr': self.__user,
- 'lo0': None,
- 'AS': None,
- 'serialNumber': None,
- 'mlagPeerDevice': None}
- self.switches[device['hostname']] = switch_data
+ if device["serialNumber"] in self.cvp_tags:
+ if not self.switches.get(device["hostname"]):
+ switch_data = {
+ "passwd": self.__passwd,
+ "ip": device["ipAddress"],
+ "usr": self.__user,
+ "lo0": None,
+ "AS": None,
+ "serialNumber": None,
+ "mlagPeerDevice": None,
+ }
+ self.switches[device["hostname"]] = switch_data
+
if len(self.switches) == 0:
self.logger.error("Unable to load Leaf switches from CVP")
return
# used to make eAPI calls by using switch.py module
for s in self.switches:
for device in self.allDeviceFacts:
- if device['hostname'] == s:
- if not self.switches[s].get('ip'):
- self.switches[s]['ip'] = device['ipAddress']
- self.switches[s]['serialNumber'] = device['serialNumber']
+ if device["hostname"] == s:
+ if not self.switches[s].get("ip"):
+ self.switches[s]["ip"] = device["ipAddress"]
+ self.switches[s]["serialNumber"] = device["serialNumber"]
break
# Each switch has a different loopback address,
# so it's a different configLet
- if not self.switches[s].get('lo0'):
- inf = self.__get_interface_ip(self.switches[s]['serialNumber'], self.__LOOPBACK_INTF)
- self.switches[s]["lo0"] = inf.split('/')[0]
- if not self.switches[s].get('AS'):
- self.switches[s]["AS"] = self.__get_device_ASN(self.switches[s]['serialNumber'])
+ if not self.switches[s].get("lo0"):
+ inf = self.__get_interface_ip(
+ self.switches[s]["serialNumber"], self.__LOOPBACK_INTF
+ )
+ self.switches[s]["lo0"] = inf.split("/")[0]
+
+ if not self.switches[s].get("AS"):
+ self.switches[s]["AS"] = self.__get_device_ASN(
+ self.switches[s]["serialNumber"]
+ )
+
if self.topology in (self._VXLAN_MLAG, self._VLAN_MLAG):
for s in self.switches:
- if not self.switches[s].get('mlagPeerDevice'):
- self.switches[s]['mlagPeerDevice'] = self.__get_peer_MLAG(self.switches[s]['serialNumber'])
-
- def __check_service(self, service_type, connection_points,
- check_vlan=True, check_num_cp=True, kwargs=None):
- """ Reviews the connection points elements looking for semantic errors in the incoming data
- """
+ if not self.switches[s].get("mlagPeerDevice"):
+ self.switches[s]["mlagPeerDevice"] = self.__get_peer_MLAG(
+ self.switches[s]["serialNumber"]
+ )
+
+ def __check_service(
+ self,
+ service_type,
+ connection_points,
+ check_vlan=True,
+ check_num_cp=True,
+ kwargs=None,
+ ):
+        """Reviews the connection point elements looking for semantic errors in the incoming data"""
if service_type not in self.__supported_service_types:
- raise Exception("The service '{}' is not supported. Only '{}' are accepted".format(
- service_type,
- self.__supported_service_types))
+ raise Exception(
+ "The service '{}' is not supported. Only '{}' are accepted".format(
+ service_type, self.__supported_service_types
+ )
+ )
if check_num_cp:
if len(connection_points) < 2:
raise Exception(SdnError.CONNECTION_POINTS_SIZE)
- if (len(connection_points) != self.__ELINE_num_connection_points and
- service_type == self.__service_types_ELINE):
+
+ if (
+ len(connection_points) != self.__ELINE_num_connection_points
+ and service_type == self.__service_types_ELINE
+ ):
raise Exception(SdnError.CONNECTION_POINTS_SIZE)
if check_vlan:
- vlan_id = ''
+ vlan_id = ""
+
for cp in connection_points:
enc_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM)
- if (enc_type and
- enc_type not in self.__supported_encapsulation_types):
+
+ if enc_type and enc_type not in self.__supported_encapsulation_types:
raise Exception(SdnError.ENCAPSULATION_TYPE)
+
encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM)
cp_vlan_id = str(encap_info.get(self.__VLAN_PARAM))
+
if cp_vlan_id:
if not vlan_id:
vlan_id = cp_vlan_id
elif vlan_id != cp_vlan_id:
raise Exception(SdnError.VLAN_INCONSISTENT)
+
if not vlan_id:
raise Exception(SdnError.VLAN_NOT_PROVIDED)
+
if vlan_id in self.__get_srvVLANs():
- raise Exception('VLAN {} already assigned to a connectivity service'.format(vlan_id))
+ raise Exception(
+ "VLAN {} already assigned to a connectivity service".format(vlan_id)
+ )
# Commented out for as long as parameter isn't implemented
# bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
# if not isinstance(bandwidth, int):
- # self.__exception(SdnError.BANDWIDTH, http_code=400)
+ # self.__exception(SdnError.BANDWIDTH, http_code=400)
# Commented out for as long as parameter isn't implemented
# backup = kwargs.get(self.__BACKUP_PARAM)
# if not isinstance(backup, bool):
- # self.__exception(SdnError.BACKUP, http_code=400)
+ # self.__exception(SdnError.BACKUP, http_code=400)
def check_credentials(self):
"""Retrieves the CloudVision version information, as the easiest way
try:
if self.client is None:
self.client = self.__connect()
+
result = self.client.api.get_cvp_info()
self.logger.debug(result)
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except Exception as ex:
self.client = None
self.logger.error(str(ex))
- raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex),
- http_code=500) from ex
+
+ raise SdnConnectorError(
+ message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500
+ ) from ex
def get_connectivity_service_status(self, service_uuid, conn_info=None):
"""Monitor the status of the connectivity service established
new information available for the connectivity service.
"""
try:
- self.logger.debug("invoked get_connectivity_service_status '{}'".format(service_uuid))
+ self.logger.debug(
+ "invoked get_connectivity_service_status '{}'".format(service_uuid)
+ )
+
if not service_uuid:
- raise SdnConnectorError(message='No connection service UUID',
- http_code=500)
+ raise SdnConnectorError(
+ message="No connection service UUID", http_code=500
+ )
self.__get_Connection()
- if conn_info is None:
- raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid),
- http_code=500)
- if 'configLetPerSwitch' in conn_info.keys():
+ if conn_info is None:
+ raise SdnConnectorError(
+ message="No connection information for service UUID {}".format(
+ service_uuid
+ ),
+ http_code=500,
+ )
+
+ if "configLetPerSwitch" in conn_info.keys():
c_info = conn_info
else:
c_info = None
- cls_perSw = self.__get_serviceData(service_uuid,
- conn_info['service_type'],
- conn_info['vlan_id'],
- c_info)
+
+ cls_perSw = self.__get_serviceData(
+ service_uuid, conn_info["service_type"], conn_info["vlan_id"], c_info
+ )
t_isCancelled = False
t_isFailed = False
t_isPending = False
failed_switches = []
+
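+            # Check the CloudVision task referenced in each OSM-managed configLet note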
for s in self.switches:
if len(cls_perSw[s]) > 0:
for cl in cls_perSw[s]:
# Fix 1030 SDN-ARISTA Key error note when deploy a NS
# Added protection to check that 'note' exists and additionally
# verify that it is managed by OSM
- if (not cls_perSw[s][0]['config'] or
- not cl.get('note') or
- self.__MANAGED_BY_OSM not in cl['note']):
+ if (
+ not cls_perSw[s][0]["config"]
+ or not cl.get("note")
+ or self.__MANAGED_BY_OSM not in cl["note"]
+ ):
continue
- note = cl['note']
+
+ note = cl["note"]
t_id = note.split(self.__SEPARATOR)[1]
result = self.client.api.get_task_by_id(t_id)
- if result['workOrderUserDefinedStatus'] == 'Completed':
+
+ if result["workOrderUserDefinedStatus"] == "Completed":
continue
- elif result['workOrderUserDefinedStatus'] == 'Cancelled':
+ elif result["workOrderUserDefinedStatus"] == "Cancelled":
t_isCancelled = True
- elif result['workOrderUserDefinedStatus'] == 'Failed':
+ elif result["workOrderUserDefinedStatus"] == "Failed":
t_isFailed = True
else:
t_isPending = True
+
failed_switches.append(s)
+
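+            # Map the aggregated task results to the sdn_status returned to the caller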
if t_isCancelled:
- error_msg = 'Some works were cancelled in switches: {}'.format(str(failed_switches))
- sdn_status = 'DOWN'
+ error_msg = "Some works were cancelled in switches: {}".format(
+ str(failed_switches)
+ )
+ sdn_status = "DOWN"
elif t_isFailed:
- error_msg = 'Some works failed in switches: {}'.format(str(failed_switches))
- sdn_status = 'ERROR'
+ error_msg = "Some works failed in switches: {}".format(
+ str(failed_switches)
+ )
+ sdn_status = "ERROR"
elif t_isPending:
- error_msg = 'Some works are still under execution in switches: {}'.format(str(failed_switches))
- sdn_status = 'BUILD'
+ error_msg = (
+ "Some works are still under execution in switches: {}".format(
+ str(failed_switches)
+ )
+ )
+ sdn_status = "BUILD"
else:
- error_msg = ''
- sdn_status = 'ACTIVE'
- sdn_info = ''
- return {'sdn_status': sdn_status,
- 'error_msg': error_msg,
- 'sdn_info': sdn_info}
+ error_msg = ""
+ sdn_status = "ACTIVE"
+
+ sdn_info = ""
+
+ return {
+ "sdn_status": sdn_status,
+ "error_msg": error_msg,
+ "sdn_info": sdn_info,
+ }
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except Exception as ex:
self.client = None
self.logger.error(str(ex), exc_info=True)
- raise SdnConnectorError(message=str(ex) + " " + str(ex),
- http_code=500) from ex
- def create_connectivity_service(self, service_type, connection_points,
- **kwargs):
- """Stablish SDN/WAN connectivity between the endpoints
+ raise SdnConnectorError(
+                message=str(ex), http_code=500
+ ) from ex
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """Establish SDN/WAN connectivity between the endpoints
:param service_type:
(str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
:param connection_points: (list): each point corresponds to
Provide the parameter http_code
"""
try:
- self.logger.debug("invoked create_connectivity_service '{}' ports: {}".
- format(service_type, connection_points))
+ self.logger.debug(
+ "invoked create_connectivity_service '{}' ports: {}".format(
+ service_type, connection_points
+ )
+ )
self.__get_Connection()
- self.__check_service(service_type,
- connection_points,
- check_vlan=True,
- kwargs=kwargs)
+ self.__check_service(
+ service_type, connection_points, check_vlan=True, kwargs=kwargs
+ )
service_uuid = str(uuid.uuid4())
- self.logger.info("Service with uuid {} created.".
- format(service_uuid))
+ self.logger.info("Service with uuid {} created.".format(service_uuid))
s_uid, s_connInf = self.__processConnection(
- service_uuid,
- service_type,
- connection_points,
- kwargs)
+ service_uuid, service_type, connection_points, kwargs
+ )
+
try:
- self.__addMetadata(s_uid, service_type, s_connInf['vlan_id'])
+ self.__addMetadata(s_uid, service_type, s_connInf["vlan_id"])
except Exception:
pass
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except SdnConnectorError as sde:
raise sde
except ValueError as err:
self.client = None
self.logger.error(str(err), exc_info=True)
- raise SdnConnectorError(message=str(err),
- http_code=500) from err
+
+ raise SdnConnectorError(message=str(err), http_code=500) from err
except Exception as ex:
self.client = None
self.logger.error(str(ex), exc_info=True)
+
if self.raiseException:
raise ex
- raise SdnConnectorError(message=str(ex),
- http_code=500) from ex
- def __processConnection(self,
- service_uuid,
- service_type,
- connection_points,
- kwargs):
+ raise SdnConnectorError(message=str(ex), http_code=500) from ex
+
+ def __processConnection(
+ self, service_uuid, service_type, connection_points, kwargs
+ ):
"""
Invoked from creation and edit methods
cls_perSw = {}
cls_cp = {}
cl_bgp = {}
+
for s in self.switches:
cls_perSw[s] = []
cls_cp[s] = []
+
vlan_processed = False
- vlan_id = ''
+ vlan_id = ""
i = 0
processed_connection_points = []
+
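+            # Build the VLAN/VNI configLet once and gather per-port snippets per switch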
for cp in connection_points:
i += 1
encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM)
+
if not vlan_processed:
vlan_id = str(encap_info.get(self.__VLAN_PARAM))
+
if not vlan_id:
continue
+
vni_id = encap_info.get(self.__VNI_PARAM)
+
if not vni_id:
vni_id = str(10000 + int(vlan_id))
if service_type == self.__service_types_ELAN:
- cl_vlan = self.clC.getElan_vlan(service_uuid,
- vlan_id,
- vni_id)
+ cl_vlan = self.clC.getElan_vlan(service_uuid, vlan_id, vni_id)
else:
- cl_vlan = self.clC.getEline_vlan(service_uuid,
- vlan_id,
- vni_id)
+ cl_vlan = self.clC.getEline_vlan(service_uuid, vlan_id, vni_id)
+
vlan_processed = True
encap_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM)
switch_id = encap_info.get(self.__SW_ID_PARAM)
interface = encap_info.get(self.__SW_PORT_PARAM)
- switches = [{'name': switch_id, 'interface': interface}]
+ switches = [{"name": switch_id, "interface": interface}]
# remove those connections that are equal. This happens when several sriovs are located in the same
# compute node interface, that is, in the same switch and interface
switches = [x for x in switches if x not in processed_connection_points]
+
if not switches:
continue
+
processed_connection_points += switches
+
for switch in switches:
if not interface:
raise SdnConnectorError(
- message="Connection point switch port empty for switch_dpid {}".format(switch_id),
- http_code=406)
+ message="Connection point switch port empty for switch_dpid {}".format(
+ switch_id
+ ),
+ http_code=406,
+ )
# it should be only one switch where the mac is attached
- if encap_type == 'dot1q':
+ if encap_type == "dot1q":
# SRIOV configLet for Leaf switch mac's attached to
if service_type == self.__service_types_ELAN:
- cl_encap = self.clC.getElan_sriov(service_uuid, interface, vlan_id, i)
+ cl_encap = self.clC.getElan_sriov(
+ service_uuid, interface, vlan_id, i
+ )
else:
- cl_encap = self.clC.getEline_sriov(service_uuid, interface, vlan_id, i)
+ cl_encap = self.clC.getEline_sriov(
+ service_uuid, interface, vlan_id, i
+ )
elif not encap_type:
# PT configLet for Leaf switch attached to the mac
if service_type == self.__service_types_ELAN:
- cl_encap = self.clC.getElan_passthrough(service_uuid,
- interface,
- vlan_id, i)
+ cl_encap = self.clC.getElan_passthrough(
+ service_uuid, interface, vlan_id, i
+ )
else:
- cl_encap = self.clC.getEline_passthrough(service_uuid,
- interface,
- vlan_id, i)
- if cls_cp.get(switch['name']):
- cls_cp[switch['name']] = str(cls_cp[switch['name']]) + cl_encap
+ cl_encap = self.clC.getEline_passthrough(
+ service_uuid, interface, vlan_id, i
+ )
+
+ if cls_cp.get(switch["name"]):
+ cls_cp[switch["name"]] = str(cls_cp[switch["name"]]) + cl_encap
else:
- cls_cp[switch['name']] = cl_encap
+ cls_cp[switch["name"]] = cl_encap
# at least 1 connection point has to be received
if not vlan_processed:
- raise SdnConnectorError(message=SdnError.UNSUPPORTED_FEATURE,
- http_code=406)
+ raise SdnConnectorError(
+ message=SdnError.UNSUPPORTED_FEATURE, http_code=406
+ )
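+            # Compose the final configLet (VLAN, BGP, connection points) for each switch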
for s in self.switches:
# for cl in cp_configLets:
- cl_name = (self.__OSM_PREFIX +
- s +
- self.__SEPARATOR + service_type + str(vlan_id) +
- self.__SEPARATOR + service_uuid)
- cl_config = ''
+ cl_name = (
+ self.__OSM_PREFIX
+ + s
+ + self.__SEPARATOR
+ + service_type
+ + str(vlan_id)
+ + self.__SEPARATOR
+ + service_uuid
+ )
+ cl_config = ""
+
# Apply BGP configuration only for VXLAN topologies
if self.topology in (self._VXLAN_MLAG, self._VXLAN):
if service_type == self.__service_types_ELAN:
- cl_bgp[s] = self.clC.getElan_bgp(service_uuid,
- vlan_id,
- vni_id,
- self.switches[s]['lo0'],
- self.switches[s]['AS'])
+ cl_bgp[s] = self.clC.getElan_bgp(
+ service_uuid,
+ vlan_id,
+ vni_id,
+ self.switches[s]["lo0"],
+ self.switches[s]["AS"],
+ )
else:
- cl_bgp[s] = self.clC.getEline_bgp(service_uuid,
- vlan_id,
- vni_id,
- self.switches[s]['lo0'],
- self.switches[s]['AS'])
+ cl_bgp[s] = self.clC.getEline_bgp(
+ service_uuid,
+ vlan_id,
+ vni_id,
+ self.switches[s]["lo0"],
+ self.switches[s]["AS"],
+ )
else:
- cl_bgp[s] = ''
+ cl_bgp[s] = ""
if not cls_cp.get(s):
# Apply VLAN configuration to peer MLAG switch,
# only necessary when there are no connection points in the switch
if self.topology in (self._VXLAN_MLAG, self._VLAN_MLAG):
for p in self.switches:
- if self.switches[p]['mlagPeerDevice'] == s:
+ if self.switches[p]["mlagPeerDevice"] == s:
if cls_cp.get(p):
if self.topology == self._VXLAN_MLAG:
cl_config = str(cl_vlan) + str(cl_bgp[s])
else:
cl_config = str(cl_vlan) + str(cl_bgp[s]) + str(cls_cp[s])
- cls_perSw[s] = [{'name': cl_name, 'config': cl_config}]
+ cls_perSw[s] = [{"name": cl_name, "config": cl_config}]
allLeafConfigured, allLeafModified = self.__updateConnection(cls_perSw)
"vlan_id": vlan_id,
"connection_points": connection_points,
"configLetPerSwitch": cls_perSw,
- 'allLeafConfigured': allLeafConfigured,
- 'allLeafModified': allLeafModified}
+ "allLeafConfigured": allLeafConfigured,
+ "allLeafModified": allLeafModified,
+ }
return service_uuid, conn_info
except Exception as ex:
- self.logger.debug("Exception processing connection {}: {}".
- format(service_uuid, str(ex)))
+ self.logger.debug(
+ "Exception processing connection {}: {}".format(service_uuid, str(ex))
+ )
raise ex
def __updateConnection(self, cls_perSw):
- """ Invoked in the creation and modification
+ """Invoked in the creation and modification
checks if the new connection points config is:
- already in the Cloud Vision, the configLet is modified, and applied to the switch,
for s in self.switches:
allLeafConfigured[s] = False
allLeafModified[s] = False
+
cl_toDelete = []
+
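+        # Apply or remove each switch configLet and execute the resulting CVP tasks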
for s in self.switches:
toDelete_in_cvp = False
- if not (cls_perSw.get(s) and cls_perSw[s][0].get('config')):
+ if not (cls_perSw.get(s) and cls_perSw[s][0].get("config")):
# when there is no configuration, means that there is no interface
# in the switch to be connected, so the configLet has to be removed from CloudVision
# after removing the ConfigLet from the switch if it was already there
# get config let name and key
cl = cls_perSw[s]
+
try:
- cvp_cl = self.client.api.get_configlet_by_name(cl[0]['name'])
+ cvp_cl = self.client.api.get_configlet_by_name(cl[0]["name"])
# remove configLet
cl_toDelete.append(cvp_cl)
cl[0] = cvp_cl
else:
res = self.__configlet_modify(cls_perSw[s])
allLeafConfigured[s] = res[0]
+
if not allLeafConfigured[s]:
continue
+
cl = cls_perSw[s]
+
res = self.__device_modify(
- device_to_update=s,
- new_configlets=cl,
- delete=toDelete_in_cvp)
+ device_to_update=s, new_configlets=cl, delete=toDelete_in_cvp
+ )
+
if "errorMessage" in str(res):
raise Exception(str(res))
+
self.logger.info("Device {} modify result {}".format(s, res))
- for t_id in res[1]['tasks']:
+
+ for t_id in res[1]["tasks"]:
if not toDelete_in_cvp:
- note_msg = "{}{}{}{}##".format(self.__MANAGED_BY_OSM,
- self.__SEPARATOR,
- t_id,
- self.__SEPARATOR)
+ note_msg = "{}{}{}{}##".format(
+ self.__MANAGED_BY_OSM,
+ self.__SEPARATOR,
+ t_id,
+ self.__SEPARATOR,
+ )
self.client.api.add_note_to_configlet(
- cls_perSw[s][0]['key'],
- note_msg)
- cls_perSw[s][0]['note'] = note_msg
- tasks = {t_id: {'workOrderId': t_id}}
+ cls_perSw[s][0]["key"], note_msg
+ )
+ cls_perSw[s][0]["note"] = note_msg
+
+ tasks = {t_id: {"workOrderId": t_id}}
self.__exec_task(tasks, self.__EXC_TASK_EXEC_WAIT)
+
# with just one configLet assigned to a device,
# delete all if there are errors in next loops
if not toDelete_in_cvp:
allLeafModified[s] = True
+
if len(cl_toDelete) > 0:
self.__configlet_modify(cl_toDelete, delete=True)
return allLeafConfigured, allLeafModified
except Exception as ex:
try:
- self.__rollbackConnection(cls_perSw,
- allLeafConfigured,
- allLeafModified)
+ self.__rollbackConnection(cls_perSw, allLeafConfigured, allLeafModified)
except Exception as e:
- self.logger.error("Exception rolling back in updating connection: {}".
- format(e), exc_info=True)
+ self.logger.error(
+ "Exception rolling back in updating connection: {}".format(e),
+ exc_info=True,
+ )
+
raise ex
- def __rollbackConnection(self,
- cls_perSw,
- allLeafConfigured,
- allLeafModified):
- """ Removes the given configLet from the devices and then remove the configLets
- """
+ def __rollbackConnection(self, cls_perSw, allLeafConfigured, allLeafModified):
+ """Removes the given configLet from the devices and then remove the configLets"""
for s in self.switches:
if allLeafModified[s]:
try:
res = self.__device_modify(
device_to_update=s,
new_configlets=cls_perSw[s],
- delete=True)
+ delete=True,
+ )
+
if "errorMessage" in str(res):
raise Exception(str(res))
+
tasks = dict()
- for t_id in res[1]['tasks']:
- tasks[t_id] = {'workOrderId': t_id}
+
+ for t_id in res[1]["tasks"]:
+ tasks[t_id] = {"workOrderId": t_id}
+
self.__exec_task(tasks)
self.logger.info("Device {} modify result {}".format(s, res))
except Exception as e:
- self.logger.error('Error removing configlets from device {}: {}'.format(s, e))
+ self.logger.error(
+ "Error removing configlets from device {}: {}".format(s, e)
+ )
pass
+
for s in self.switches:
if allLeafConfigured[s]:
self.__configlet_modify(cls_perSw[s], delete=True)
def __exec_task(self, tasks, tout=10):
if self.taskC is None:
self.__connect()
+
data = self.taskC.update_all_tasks(tasks).values()
- self.taskC.task_action(data, tout, 'executed')
+ self.taskC.task_action(data, tout, "executed")
def __device_modify(self, device_to_update, new_configlets, delete):
- """ Updates the devices (switches) adding or removing the configLet,
+ """Updates the devices (switches) adding or removing the configLet,
the tasks Id's associated to the change are returned
"""
- self.logger.info('Enter in __device_modify delete: {}'.format(delete))
+ self.logger.info("Enter in __device_modify delete: {}".format(delete))
updated = []
changed = False
# Task Ids that have been identified during device actions
newTasks = []
- if (len(new_configlets) == 0 or
- device_to_update is None or
- len(device_to_update) == 0):
- data = {'updated': updated, 'tasks': newTasks}
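+        # Nothing to do when there are no configLets or no target device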
+ if (
+ len(new_configlets) == 0
+ or device_to_update is None
+ or len(device_to_update) == 0
+ ):
+ data = {"updated": updated, "tasks": newTasks}
+
return [changed, data]
self.__load_inventory()
for try_device in allDeviceFacts:
# Add Device Specific Configlets
# self.logger.debug(device)
- if try_device['hostname'] not in device_to_update:
+ if try_device["hostname"] not in device_to_update:
continue
+
dev_cvp_configlets = self.client.api.get_configlets_by_device_id(
- try_device['systemMacAddress'])
+ try_device["systemMacAddress"]
+ )
# self.logger.debug(dev_cvp_configlets)
- try_device['deviceSpecificConfiglets'] = []
+ try_device["deviceSpecificConfiglets"] = []
+
for cvp_configlet in dev_cvp_configlets:
- if int(cvp_configlet['containerCount']) == 0:
- try_device['deviceSpecificConfiglets'].append(
- {'name': cvp_configlet['name'],
- 'key': cvp_configlet['key']})
+ if int(cvp_configlet["containerCount"]) == 0:
+ try_device["deviceSpecificConfiglets"].append(
+ {"name": cvp_configlet["name"], "key": cvp_configlet["key"]}
+ )
+
# self.logger.debug(device)
device = try_device
break
update_devices = []
if delete:
- for cvp_configlet in device['deviceSpecificConfiglets']:
+ for cvp_configlet in device["deviceSpecificConfiglets"]:
for cl in new_configlets:
- if cvp_configlet['name'] == cl['name']:
+ if cvp_configlet["name"] == cl["name"]:
remove_configlets.append(cvp_configlet)
device_update = True
else:
for configlet in new_configlets:
- if configlet not in device['deviceSpecificConfiglets']:
+ if configlet not in device["deviceSpecificConfiglets"]:
add_configlets.append(configlet)
device_update = True
+
if device_update:
- update_devices.append({'hostname': device['hostname'],
- 'configlets': [add_configlets,
- remove_configlets],
- 'device': device})
+ update_devices.append(
+ {
+ "hostname": device["hostname"],
+ "configlets": [add_configlets, remove_configlets],
+ "device": device,
+ }
+ )
+
self.logger.info("Device to modify: {}".format(update_devices))
up_device = update_devices[0]
- cl_toAdd = up_device['configlets'][0]
- cl_toDel = up_device['configlets'][1]
+ cl_toAdd = up_device["configlets"][0]
+ cl_toDel = up_device["configlets"][1]
+
# Update Configlets
try:
if delete and len(cl_toDel) > 0:
r = self.client.api.remove_configlets_from_device(
- 'OSM',
- up_device['device'],
- cl_toDel,
- create_task=True)
+ "OSM", up_device["device"], cl_toDel, create_task=True
+ )
dev_action = r
- self.logger.debug("remove_configlets_from_device {} {}".format(dev_action, cl_toDel))
+ self.logger.debug(
+ "remove_configlets_from_device {} {}".format(dev_action, cl_toDel)
+ )
elif len(cl_toAdd) > 0:
r = self.client.api.apply_configlets_to_device(
- 'OSM',
- up_device['device'],
- cl_toAdd,
- create_task=True)
+ "OSM", up_device["device"], cl_toAdd, create_task=True
+ )
dev_action = r
- self.logger.debug("apply_configlets_to_device {} {}".format(dev_action, cl_toAdd))
-
+ self.logger.debug(
+ "apply_configlets_to_device {} {}".format(dev_action, cl_toAdd)
+ )
except Exception as error:
errorMessage = str(error)
- msg = "errorMessage: Device {} Configlets couldnot be updated: {}".format(
- up_device['hostname'], errorMessage)
+ msg = "errorMessage: Device {} Configlets could not be updated: {}".format(
+ up_device["hostname"], errorMessage
+ )
raise SdnConnectorError(msg) from error
else:
if "errorMessage" in str(dev_action):
m = "Device {} Configlets update fail: {}".format(
- up_device['name'], dev_action['errorMessage'])
+ up_device["name"], dev_action["errorMessage"]
+ )
raise SdnConnectorError(m)
else:
changed = True
- if 'taskIds' in str(dev_action):
+ if "taskIds" in str(dev_action):
# Fix 1030 SDN-ARISTA Key error note when deploy a NS
- if not dev_action['data']['taskIds']:
- raise SdnConnectorError("No taskIds found: Device {} Configlets could not be updated".format(
- up_device['hostname']))
- for taskId in dev_action['data']['taskIds']:
- updated.append({
- up_device['hostname']: "Configlets-{}".format(taskId)})
+ if not dev_action["data"]["taskIds"]:
+ raise SdnConnectorError(
+ "No taskIds found: Device {} Configlets could not be updated".format(
+ up_device["hostname"]
+ )
+ )
+
+ for taskId in dev_action["data"]["taskIds"]:
+ updated.append(
+ {up_device["hostname"]: "Configlets-{}".format(taskId)}
+ )
newTasks.append(taskId)
else:
- updated.append({up_device['hostname']:
- "Configlets-No_Specific_Tasks"})
- data = {'updated': updated, 'tasks': newTasks}
+ updated.append(
+ {up_device["hostname"]: "Configlets-No_Specific_Tasks"}
+ )
+
+ data = {"updated": updated, "tasks": newTasks}
+
return [changed, data]
def __configlet_modify(self, configletsToApply, delete=False):
- ''' adds/update or delete the provided configLets
+ """Adds/update or delete the provided configLets
:param configletsToApply: list of configLets to apply
:param delete: flag to indicate if the configLets have to be deleted
from Cloud Vision Portal
:return: data: dict of module actions and taskIDs
- '''
- self.logger.info('Enter in __configlet_modify delete:{}'.format(
- delete))
+ """
+ self.logger.info("Enter in __configlet_modify delete:{}".format(delete))
# Compare configlets against cvp_facts-configlets
changed = False
to_update = False
to_create = False
to_check = False
+
try:
- cvp_cl = self.client.api.get_configlet_by_name(cl['name'])
- cl['key'] = cvp_cl['key']
- cl['note'] = cvp_cl['note']
+ cvp_cl = self.client.api.get_configlet_by_name(cl["name"])
+ cl["key"] = cvp_cl["key"]
+ cl["note"] = cvp_cl["note"]
found_in_cvp = True
except CvpApiError as error:
if "Entity does not exist" in error.msg:
if delete:
if found_in_cvp:
to_delete = True
- configlet = {'name': cvp_cl['name'],
- 'data': cvp_cl}
+ configlet = {"name": cvp_cl["name"], "data": cvp_cl}
else:
if found_in_cvp:
- cl_compare = self.__compare(cl['config'],
- cvp_cl['config'])
+ cl_compare = self.__compare(cl["config"], cvp_cl["config"])
+
# compare function returns a floating point number
if cl_compare[0] != 100.0:
to_update = True
- configlet = {'name': cl['name'],
- 'data': cvp_cl,
- 'config': cl['config']}
+ configlet = {
+ "name": cl["name"],
+ "data": cvp_cl,
+ "config": cl["config"],
+ }
else:
to_check = True
- configlet = {'name': cl['name'],
- 'key': cvp_cl['key'],
- 'data': cvp_cl,
- 'config': cl['config']}
+ configlet = {
+ "name": cl["name"],
+ "key": cvp_cl["key"],
+ "data": cvp_cl,
+ "config": cl["config"],
+ }
else:
to_create = True
- configlet = {'name': cl['name'],
- 'config': cl['config']}
+ configlet = {"name": cl["name"], "config": cl["config"]}
try:
if to_delete:
- operation = 'delete'
+ operation = "delete"
resp = self.client.api.delete_configlet(
- configlet['data']['name'],
- configlet['data']['key'])
+ configlet["data"]["name"], configlet["data"]["key"]
+ )
elif to_update:
- operation = 'update'
+ operation = "update"
resp = self.client.api.update_configlet(
- configlet['config'],
- configlet['data']['key'],
- configlet['data']['name'],
- wait_task_ids=True)
+ configlet["config"],
+ configlet["data"]["key"],
+ configlet["data"]["name"],
+ wait_task_ids=True,
+ )
elif to_create:
- operation = 'create'
+ operation = "create"
resp = self.client.api.add_configlet(
- configlet['name'],
- configlet['config'])
+ configlet["name"], configlet["config"]
+ )
else:
- operation = 'checked'
- resp = 'checked'
+ operation = "checked"
+ resp = "checked"
except Exception as error:
- errorMessage = str(error).split(':')[-1]
+ errorMessage = str(error).split(":")[-1]
message = "Configlet {} cannot be {}: {}".format(
- cl['name'], operation, errorMessage)
+ cl["name"], operation, errorMessage
+ )
+
if to_delete:
- deleted.append({configlet['name']: message})
+ deleted.append({configlet["name"]: message})
elif to_update:
- updated.append({configlet['name']: message})
+ updated.append({configlet["name"]: message})
elif to_create:
- new.append({configlet['name']: message})
+ new.append({configlet["name"]: message})
elif to_check:
- checked.append({configlet['name']: message})
-
+ checked.append({configlet["name"]: message})
else:
if "error" in str(resp).lower():
message = "Configlet {} cannot be deleted: {}".format(
- cl['name'], resp['errorMessage'])
+ cl["name"], resp["errorMessage"]
+ )
+
if to_delete:
- deleted.append({configlet['name']: message})
+ deleted.append({configlet["name"]: message})
elif to_update:
- updated.append({configlet['name']: message})
+ updated.append({configlet["name"]: message})
elif to_create:
- new.append({configlet['name']: message})
+ new.append({configlet["name"]: message})
elif to_check:
- checked.append({configlet['name']: message})
+ checked.append({configlet["name"]: message})
else:
if to_delete:
changed = True
- deleted.append({configlet['name']: "success"})
+ deleted.append({configlet["name"]: "success"})
elif to_update:
changed = True
- updated.append({configlet['name']: "success"})
+ updated.append({configlet["name"]: "success"})
elif to_create:
changed = True
- cl['key'] = resp # This key is used in API call deviceApplyConfigLet FGA
- new.append({configlet['name']: "success"})
+ # This key is used in API call deviceApplyConfigLet FGA
+ cl["key"] = resp
+ new.append({configlet["name"]: "success"})
elif to_check:
changed = False
- checked.append({configlet['name']: "success"})
+ checked.append({configlet["name"]: "success"})
+
+ data = {"new": new, "updated": updated, "deleted": deleted, "checked": checked}
- data = {'new': new, 'updated': updated, 'deleted': deleted, 'checked': checked}
return [changed, data]
def __get_configletsDevices(self, configlets):
for s in self.switches:
configlet = configlets[s]
+
# Add applied Devices
if len(configlet) > 0:
- configlet['devices'] = []
- applied_devices = self.client.api.get_applied_devices(
- configlet['name'])
- for device in applied_devices['data']:
- configlet['devices'].append(device['hostName'])
+ configlet["devices"] = []
+ applied_devices = self.client.api.get_applied_devices(configlet["name"])
+
+ for device in applied_devices["data"]:
+ configlet["devices"].append(device["hostName"])
def __get_serviceData(self, service_uuid, service_type, vlan_id, conn_info=None):
cls_perSw = {}
+
for s in self.switches:
cls_perSw[s] = []
+
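+        # Use cached configLets from conn_info if present, otherwise query CloudVision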
if not conn_info:
- srv_cls = self.__get_serviceConfigLets(service_uuid,
- service_type,
- vlan_id)
+ srv_cls = self.__get_serviceConfigLets(service_uuid, service_type, vlan_id)
self.__get_configletsDevices(srv_cls)
+
for s in self.switches:
cl = srv_cls[s]
if len(cl) > 0:
- for dev in cl['devices']:
+ for dev in cl["devices"]:
cls_perSw[dev].append(cl)
else:
- cls_perSw = conn_info['configLetPerSwitch']
+ cls_perSw = conn_info["configLetPerSwitch"]
+
return cls_perSw
def delete_connectivity_service(self, service_uuid, conn_info=None):
:raises: SdnConnectorException: In case of error. The parameter http_code must be filled
"""
try:
- self.logger.debug('invoked delete_connectivity_service {}'.
- format(service_uuid))
+ self.logger.debug(
+ "invoked delete_connectivity_service {}".format(service_uuid)
+ )
+
if not service_uuid:
- raise SdnConnectorError(message='No connection service UUID',
- http_code=500)
+ raise SdnConnectorError(
+ message="No connection service UUID", http_code=500
+ )
self.__get_Connection()
+
if conn_info is None:
- raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid),
- http_code=500)
+ raise SdnConnectorError(
+ message="No connection information for service UUID {}".format(
+ service_uuid
+ ),
+ http_code=500,
+ )
+
c_info = None
- cls_perSw = self.__get_serviceData(service_uuid,
- conn_info['service_type'],
- conn_info['vlan_id'],
- c_info)
+ cls_perSw = self.__get_serviceData(
+ service_uuid, conn_info["service_type"], conn_info["vlan_id"], c_info
+ )
allLeafConfigured = {}
allLeafModified = {}
+
for s in self.switches:
allLeafConfigured[s] = True
allLeafModified[s] = True
+
found_in_cvp = False
+
for s in self.switches:
if cls_perSw[s]:
found_in_cvp = True
+
if found_in_cvp:
- self.__rollbackConnection(cls_perSw,
- allLeafConfigured,
- allLeafModified)
+ self.__rollbackConnection(cls_perSw, allLeafConfigured, allLeafModified)
else:
# if the service is not defined in Cloud Vision, return a 404 - NotFound error
- raise SdnConnectorError(message='Service {} was not found in Arista Cloud Vision {}'.
- format(service_uuid, self.__wim_url),
- http_code=404)
+ raise SdnConnectorError(
+ message="Service {} was not found in Arista Cloud Vision {}".format(
+ service_uuid, self.__wim_url
+ ),
+ http_code=404,
+ )
+
self.__removeMetadata(service_uuid)
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except SdnConnectorError as sde:
raise sde
except Exception as ex:
self.client = None
self.logger.error(ex)
+
if self.raiseException:
raise ex
- raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex),
- http_code=500) from ex
+
+ raise SdnConnectorError(
+ message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500
+ ) from ex
def __addMetadata(self, service_uuid, service_type, vlan_id):
- """ Adds the connectivity service from 'OSM_metadata' configLet
- """
+ """Adds the connectivity service from 'OSM_metadata' configLet"""
found_in_cvp = False
+
try:
cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
found_in_cvp = True
pass
else:
raise error
+
try:
- new_serv = '{} {} {} {}\n'.format(self.__METADATA_PREFIX, service_type, vlan_id, service_uuid)
+ new_serv = "{} {} {} {}\n".format(
+ self.__METADATA_PREFIX, service_type, vlan_id, service_uuid
+ )
if found_in_cvp:
- cl_config = cvp_cl['config'] + new_serv
+ cl_config = cvp_cl["config"] + new_serv
else:
cl_config = new_serv
- cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}]
+
+ cl_meta = [{"name": self.__OSM_METADATA, "config": cl_config}]
self.__configlet_modify(cl_meta)
except Exception as e:
- self.logger.error('Error in setting metadata in CloudVision from OSM for service {}: {}'.
- format(service_uuid, str(e)))
+ self.logger.error(
+ "Error in setting metadata in CloudVision from OSM for service {}: {}".format(
+ service_uuid, str(e)
+ )
+ )
pass
def __removeMetadata(self, service_uuid):
- """ Removes the connectivity service from 'OSM_metadata' configLet
- """
+ """Removes the connectivity service from 'OSM_metadata' configLet"""
found_in_cvp = False
+
try:
cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
found_in_cvp = True
pass
else:
raise error
+
try:
if found_in_cvp:
- if service_uuid in cvp_cl['config']:
- cl_config = ''
- for line in cvp_cl['config'].split('\n'):
+ if service_uuid in cvp_cl["config"]:
+ cl_config = ""
+
+ for line in cvp_cl["config"].split("\n"):
if service_uuid in line:
continue
else:
cl_config = cl_config + line
- cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}]
+
+ cl_meta = [{"name": self.__OSM_METADATA, "config": cl_config}]
self.__configlet_modify(cl_meta)
except Exception as e:
- self.logger.error('Error in removing metadata in CloudVision from OSM for service {}: {}'.
- format(service_uuid, str(e)))
+ self.logger.error(
+ "Error in removing metadata in CloudVision from OSM for service {}: {}".format(
+ service_uuid, str(e)
+ )
+ )
pass
- def edit_connectivity_service(self,
- service_uuid,
- conn_info=None,
- connection_points=None,
- **kwargs):
- """ Change an existing connectivity service.
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service.
This method's arguments and return value follow the same convention as
:meth:`~.create_connectivity_service`.
SdnConnectorError: In case of error.
"""
try:
- self.logger.debug('invoked edit_connectivity_service for service {}. ports: {}'.format(service_uuid,
- connection_points))
+ self.logger.debug(
+ "invoked edit_connectivity_service for service {}. ports: {}".format(
+ service_uuid, connection_points
+ )
+ )
if not service_uuid:
- raise SdnConnectorError(message='Unable to perform operation, missing or empty uuid',
- http_code=500)
+ raise SdnConnectorError(
+ message="Unable to perform operation, missing or empty uuid",
+ http_code=500,
+ )
+
if not conn_info:
- raise SdnConnectorError(message='Unable to perform operation, missing or empty connection information',
- http_code=500)
+ raise SdnConnectorError(
+ message="Unable to perform operation, missing or empty connection information",
+ http_code=500,
+ )
if connection_points is None:
return None
self.__get_Connection()
- cls_currentPerSw = conn_info['configLetPerSwitch']
- service_type = conn_info['service_type']
-
- self.__check_service(service_type,
- connection_points,
- check_vlan=False,
- check_num_cp=False,
- kwargs=kwargs)
+ cls_currentPerSw = conn_info["configLetPerSwitch"]
+ service_type = conn_info["service_type"]
- s_uid, s_connInf = self.__processConnection(
- service_uuid,
+ self.__check_service(
service_type,
connection_points,
- kwargs)
- self.logger.info("Service with uuid {} configuration updated".
- format(s_uid))
+ check_vlan=False,
+ check_num_cp=False,
+ kwargs=kwargs,
+ )
+
+ s_uid, s_connInf = self.__processConnection(
+ service_uuid, service_type, connection_points, kwargs
+ )
+ self.logger.info("Service with uuid {} configuration updated".format(s_uid))
+
return s_connInf
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except SdnConnectorError as sde:
raise sde
except Exception as ex:
# TODO check if there are pending task, and cancel them before restoring
self.__updateConnection(cls_currentPerSw)
except Exception as e:
- self.logger.error("Unable to restore configuration in service {} after an error in the configuration"
- " updated: {}".format(service_uuid, str(e)))
+ self.logger.error(
+ "Unable to restore configuration in service {} after an error in the configuration"
+ " updated: {}".format(service_uuid, str(e))
+ )
+
if self.raiseException:
raise ex
- raise SdnConnectorError(message=str(ex),
- http_code=500) from ex
+
+ raise SdnConnectorError(message=str(ex), http_code=500) from ex
def clear_all_connectivity_services(self):
- """ Removes all connectivity services from Arista CloudVision with two steps:
- - retrives all the services from Arista CloudVision
+ """Removes all connectivity services from Arista CloudVision with two steps:
+ - retrieves all the services from Arista CloudVision
- removes each service
"""
try:
- self.logger.debug('invoked AristaImpl ' +
- 'clear_all_connectivity_services')
+ self.logger.debug("invoked AristaImpl clear_all_connectivity_services")
self.__get_Connection()
s_list = self.__get_srvUUIDs()
+
for serv in s_list:
conn_info = {}
- conn_info['service_type'] = serv['type']
- conn_info['vlan_id'] = serv['vlan']
-
- self.delete_connectivity_service(serv['uuid'], conn_info)
+ conn_info["service_type"] = serv["type"]
+ conn_info["vlan_id"] = serv["vlan"]
+ self.delete_connectivity_service(serv["uuid"], conn_info)
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except SdnConnectorError as sde:
raise sde
except Exception as ex:
self.client = None
self.logger.error(ex)
+
if self.raiseException:
raise ex
- raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex),
- http_code=500) from ex
+
+ raise SdnConnectorError(
+ message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500
+ ) from ex
def get_all_active_connectivity_services(self):
- """ Return the uuid of all the active connectivity services with two steps:
+ """Return the uuid of all the active connectivity services with two steps:
- retrives all the services from Arista CloudVision
- retrives the status of each server
"""
try:
- self.logger.debug('invoked AristaImpl {}'.format(
- 'get_all_active_connectivity_services'))
+ self.logger.debug(
+ "invoked AristaImpl {}".format("get_all_active_connectivity_services")
+ )
self.__get_Connection()
s_list = self.__get_srvUUIDs()
result = []
+
for serv in s_list:
conn_info = {}
- conn_info['service_type'] = serv['type']
- conn_info['vlan_id'] = serv['vlan']
+ conn_info["service_type"] = serv["type"]
+ conn_info["vlan_id"] = serv["vlan"]
+ status = self.get_connectivity_service_status(serv["uuid"], conn_info)
+
+ if status["sdn_status"] == "ACTIVE":
+ result.append(serv["uuid"])
- status = self.get_connectivity_service_status(serv['uuid'], conn_info)
- if status['sdn_status'] == 'ACTIVE':
- result.append(serv['uuid'])
return result
except CvpLoginError as e:
self.logger.info(str(e))
self.client = None
- raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e),
- http_code=401) from e
+ raise SdnConnectorError(
+ message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401
+ ) from e
except SdnConnectorError as sde:
raise sde
except Exception as ex:
self.client = None
self.logger.error(ex)
+
if self.raiseException:
raise ex
- raise SdnConnectorError(message=SdnError.INTERNAL_ERROR,
- http_code=500) from ex
+
+ raise SdnConnectorError(
+ message=SdnError.INTERNAL_ERROR, http_code=500
+ ) from ex
def __get_serviceConfigLets(self, service_uuid, service_type, vlan_id):
- """ Return the configLet's associated with a connectivity service,
+ """Return the configLet's associated with a connectivity service,
There should be one, as maximum, per device (switch) for a given
connectivity service
"""
srv_cls = {}
+
for s in self.switches:
srv_cls[s] = []
found_in_cvp = False
- name = (self.__OSM_PREFIX +
- s +
- self.__SEPARATOR + service_type + str(vlan_id) +
- self.__SEPARATOR + service_uuid)
+ name = (
+ self.__OSM_PREFIX
+ + s
+ + self.__SEPARATOR
+ + service_type
+ + str(vlan_id)
+ + self.__SEPARATOR
+ + service_uuid
+ )
+
try:
cvp_cl = self.client.api.get_configlet_by_name(name)
found_in_cvp = True
pass
else:
raise error
+
if found_in_cvp:
srv_cls[s] = cvp_cl
+
return srv_cls
def __get_srvVLANs(self):
- """ Returns a list with all the VLAN id's used in the connectivity services managed
+ """Returns a list with all the VLAN id's used in the connectivity services managed
in tha Arista CloudVision by checking the 'OSM_metadata' configLet where this
information is stored
"""
found_in_cvp = False
+
try:
cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
found_in_cvp = True
pass
else:
raise error
+
s_vlan_list = []
if found_in_cvp:
- lines = cvp_cl['config'].split('\n')
+ lines = cvp_cl["config"].split("\n")
+
for line in lines:
if self.__METADATA_PREFIX in line:
- s_vlan = line.split(' ')[3]
+ s_vlan = line.split(" ")[3]
else:
continue
- if (s_vlan is not None and
- len(s_vlan) > 0 and
- s_vlan not in s_vlan_list):
+
+ if s_vlan is not None and len(s_vlan) > 0 and s_vlan not in s_vlan_list:
s_vlan_list.append(s_vlan)
return s_vlan_list
def __get_srvUUIDs(self):
- """ Retrieves all the connectivity services, managed in tha Arista CloudVision
+ """Retrieves all the connectivity services, managed in tha Arista CloudVision
by checking the 'OSM_metadata' configLet where this information is stored
"""
found_in_cvp = False
+
try:
cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA)
found_in_cvp = True
pass
else:
raise error
+
serv_list = []
if found_in_cvp:
- lines = cvp_cl['config'].split('\n')
+ lines = cvp_cl["config"].split("\n")
+
for line in lines:
if self.__METADATA_PREFIX in line:
- line = line.split(' ')
- serv = {'uuid': line[4], 'type': line[2], 'vlan': line[3]}
+ line = line.split(" ")
+ serv = {"uuid": line[4], "type": line[2], "vlan": line[3]}
else:
continue
- if (serv is not None and
- len(serv) > 0 and
- serv not in serv_list):
+
+ if serv is not None and len(serv) > 0 and serv not in serv_list:
serv_list.append(serv)
return serv_list
def __get_Connection(self):
- """ Open a connection with Arista CloudVision,
- invoking the version retrival as test
+ """Open a connection with Arista CloudVision,
+        invoking the version retrieval as a test
"""
try:
if self.client is None:
self.client = self.__connect()
+
self.client.api.get_cvp_info()
except (CvpSessionLogOutError, RequestException) as e:
self.logger.debug("Connection error '{}'. Reconnecting".format(e))
self.client.api.get_cvp_info()
def __connect(self):
- ''' Connects to CVP device using user provided credentials from initialization.
+ """Connects to CVP device using user provided credentials from initialization.
:return: CvpClient object with connection instantiated.
- '''
+ """
client = CvpClient()
protocol, _, rest_url = self.__wim_url.rpartition("://")
host, _, port = rest_url.partition(":")
+
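+        # Split the WIM URL into protocol, host and port, defaulting to https/443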
if port and port.endswith("/"):
port = int(port[:-1])
elif port:
else:
port = 443
- client.connect([host],
- self.__user,
- self.__passwd,
- protocol=protocol or "https",
- port=port,
- connect_timeout=2)
+ client.connect(
+ [host],
+ self.__user,
+ self.__passwd,
+ protocol=protocol or "https",
+ port=port,
+ connect_timeout=2,
+ )
client.api = CvpApi(client, request_timeout=self.__API_REQUEST_TOUT)
self.taskC = AristaCVPTask(client.api)
+
return client
def __compare(self, fromText, toText, lines=10):
- """ Compare text string in 'fromText' with 'toText' and produce
+ """Compare text string in 'fromText' with 'toText' and produce
diffRatio - a score as a float in the range [0, 1] 2.0*M / T
T is the total number of elements in both sequences,
M is the number of matches.
tolines = toText.splitlines(1)
diff = list(difflib.unified_diff(fromlines, tolines, n=lines))
textComp = difflib.SequenceMatcher(None, fromText, toText)
- diffRatio = round(textComp.quick_ratio()*100, 2)
+ diffRatio = round(textComp.quick_ratio() * 100, 2)
+
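+        # diffRatio is a percentage: 100.0 means both configLet texts are identical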
return [diffRatio, diff]
def __load_inventory(self):
- """ Get Inventory Data for All Devices (aka switches) from the Arista CloudVision
- """
+ """Get Inventory Data for All Devices (aka switches) from the Arista CloudVision"""
if not self.cvp_inventory:
self.cvp_inventory = self.client.api.get_inventory()
+
self.allDeviceFacts = []
+
for device in self.cvp_inventory:
self.allDeviceFacts.append(device)
def __get_tags(self, name, value):
if not self.cvp_tags:
self.cvp_tags = []
- url = '/api/v1/rest/analytics/tags/labels/devices/{}/value/{}/elements'.format(name, value)
- self.logger.debug('get_tags: URL {}'.format(url))
+ url = "/api/v1/rest/analytics/tags/labels/devices/{}/value/{}/elements".format(
+ name, value
+ )
+ self.logger.debug("get_tags: URL {}".format(url))
data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
- for dev in data['notifications']:
- for elem in dev['updates']:
+
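+        # Keep every device element labelled with the requested tag name and value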
+ for dev in data["notifications"]:
+ for elem in dev["updates"]:
self.cvp_tags.append(elem)
- self.logger.debug('Available devices with tag_name {} - value {}: {} '.format(name, value, self.cvp_tags))
+
+ self.logger.debug(
+ "Available devices with tag_name {} - value {}: {}".format(
+ name, value, self.cvp_tags
+ )
+ )
def __get_interface_ip(self, device_id, interface):
- url = '/api/v1/rest/{}/Sysdb/ip/config/ipIntfConfig/{}/'.format(device_id, interface)
- self.logger.debug('get_interface_ip: URL {}'.format(url))
+ url = "/api/v1/rest/{}/Sysdb/ip/config/ipIntfConfig/{}/".format(
+ device_id, interface
+ )
+ self.logger.debug("get_interface_ip: URL {}".format(url))
data = None
+
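+        # The interface address is exposed in Sysdb under the 'addrWithMask' update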
try:
data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
- if data['notifications']:
- for notification in data['notifications']:
- for update in notification['updates']:
- if update == 'addrWithMask':
- return notification['updates'][update]['value']
+
+ if data["notifications"]:
+ for notification in data["notifications"]:
+ for update in notification["updates"]:
+ if update == "addrWithMask":
+ return notification["updates"][update]["value"]
except Exception as e:
- raise SdnConnectorError("Invalid response from url {}: data {} - {}".format(url, data, str(e)))
- raise SdnConnectorError("Unable to get ip for interface {} in device {}, data {}".
- format(interface, device_id, data))
+ raise SdnConnectorError(
+ "Invalid response from url {}: data {} - {}".format(url, data, str(e))
+ )
+
+ raise SdnConnectorError(
+ "Unable to get ip for interface {} in device {}, data {}".format(
+ interface, device_id, data
+ )
+ )
def __get_device_ASN(self, device_id):
- url = '/api/v1/rest/{}/Sysdb/routing/bgp/config/'.format(device_id)
- self.logger.debug('get_device_ASN: URL {}'.format(url))
+ url = "/api/v1/rest/{}/Sysdb/routing/bgp/config/".format(device_id)
+ self.logger.debug("get_device_ASN: URL {}".format(url))
data = None
+
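+        # The device AS number is exposed in the BGP Sysdb config as 'asNumber'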
try:
data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
- if data['notifications']:
- for notification in data['notifications']:
- for update in notification['updates']:
- if update == 'asNumber':
- return notification['updates'][update]['value']['value']['int']
+ if data["notifications"]:
+ for notification in data["notifications"]:
+ for update in notification["updates"]:
+ if update == "asNumber":
+ return notification["updates"][update]["value"]["value"][
+ "int"
+ ]
except Exception as e:
- raise SdnConnectorError("Invalid response from url {}: data {} - {}".format(url, data, str(e)))
- raise SdnConnectorError("Unable to get AS in device {}, data {}".format(device_id, data))
+ raise SdnConnectorError(
+ "Invalid response from url {}: data {} - {}".format(url, data, str(e))
+ )
+
+ raise SdnConnectorError(
+ "Unable to get AS in device {}, data {}".format(device_id, data)
+ )
def __get_peer_MLAG(self, device_id):
peer = None
- url = '/api/v1/rest/{}/Sysdb/mlag/status/'.format(device_id)
- self.logger.debug('get_MLAG_status: URL {}'.format(url))
+ url = "/api/v1/rest/{}/Sysdb/mlag/status/".format(device_id)
+ self.logger.debug("get_MLAG_status: URL {}".format(url))
+
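+        # Read the MLAG system id of this device, then find the switch sharing that id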
try:
data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
- if data['notifications']:
+
+ if data["notifications"]:
found = False
- for notification in data['notifications']:
- for update in notification['updates']:
- if update == 'systemId':
- mlagSystemId = notification['updates'][update]['value']
+
+ for notification in data["notifications"]:
+ for update in notification["updates"]:
+ if update == "systemId":
+ mlagSystemId = notification["updates"][update]["value"]
found = True
break
+
if found:
break
+
# search the MLAG System Id
if found:
for s in self.switches:
- if self.switches[s]['serialNumber'] == device_id:
+ if self.switches[s]["serialNumber"] == device_id:
continue
- url = '/api/v1/rest/{}/Sysdb/mlag/status/'.format(self.switches[s]['serialNumber'])
- self.logger.debug('Searching for MLAG system id {} in switch {}'.format(mlagSystemId, s))
+
+ url = "/api/v1/rest/{}/Sysdb/mlag/status/".format(
+ self.switches[s]["serialNumber"]
+ )
+ self.logger.debug(
+ "Searching for MLAG system id {} in switch {}".format(
+ mlagSystemId, s
+ )
+ )
data = self.client.get(url, timeout=self.__API_REQUEST_TOUT)
found = False
- for notification in data['notifications']:
- for update in notification['updates']:
- if update == 'systemId':
- if mlagSystemId == notification['updates'][update]['value']:
+
+ for notification in data["notifications"]:
+ for update in notification["updates"]:
+ if update == "systemId":
+ if (
+ mlagSystemId
+ == notification["updates"][update]["value"]
+ ):
peer = s
found = True
break
+
if found:
break
+
if found:
break
+
if peer is None:
- self.logger.error('No Peer device found for device {} with MLAG address {}'.format(device_id,
- mlagSystemId))
+ self.logger.error(
+ "No Peer device found for device {} with MLAG address {}".format(
+ device_id, mlagSystemId
+ )
+ )
else:
- self.logger.debug('Peer MLAG for device {} - value {}'.format(device_id, peer))
+ self.logger.debug(
+ "Peer MLAG for device {} - value {}".format(device_id, peer)
+ )
+
return peer
except Exception:
- raise SdnConnectorError("Invalid response from url {}: data {}".format(url, data))
+ raise SdnConnectorError(
+ "Invalid response from url {}: data {}".format(url, data)
+ )
def is_valid_destination(self, url):
- """ Check that the provided WIM URL is correct
- """
+ """Check that the provided WIM URL is correct"""
if re.match(self.__regex, url):
return True
elif self.is_valid_ipv4_address(url):
return self.is_valid_ipv6_address(url)
def is_valid_ipv4_address(self, address):
- """ Checks that the given IP is IPv4 valid
- """
+ """Checks that the given IP is IPv4 valid"""
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
socket.inet_aton(address)
except socket.error:
return False
- return address.count('.') == 3
+
+ return address.count(".") == 3
except socket.error: # not a valid address
return False
+
return True
def is_valid_ipv6_address(self, address):
- """ Checks that the given IP is IPv6 valid
- """
+ """Checks that the given IP is IPv6 valid"""
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
+
return True
def delete_keys_from_dict(self, dict_del, lst_keys):
if dict_del is None:
return dict_del
+
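+        # Recursively drop the listed keys from nested dictionaries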
dict_copy = {k: v for k, v in dict_del.items() if k not in lst_keys}
+
for k, v in dict_copy.items():
if isinstance(v, dict):
dict_copy[k] = self.delete_keys_from_dict(v, lst_keys)
+
return dict_copy
setup(
name=_name,
- description='OSM ro sdn plugin for arista with CloudVision',
+ description="OSM ro sdn plugin for arista with CloudVision",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='Oscar Luis Peral',
- maintainer_email='oscarluis.peral@atos.net',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="Oscar Luis Peral",
+ maintainer_email="oscarluis.peral@atos.net",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
"uuid",
"cvprac",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_arista_cloudvision = osm_rosdn_arista_cloudvision.'
- 'wimconn_arista:AristaSdnConnector']
+ "osm_rosdn.plugins": [
+ "rosdn_arista_cloudvision = osm_rosdn_arista_cloudvision.wimconn_arista:AristaSdnConnector"
+ ]
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_arista_cloudvision --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import paramiko
import requests
import struct
+
# import sys
from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
-class DpbSshInterface():
+class DpbSshInterface:
""" Communicate with the DPB via SSH """
__LOGGER_NAME_EXT = ".ssh"
__FUNCTION_MAP_POS = 1
- def __init__(self, username, password, wim_url, wim_port, network, auth_data, logger_name):
+ def __init__(
+ self, username, password, wim_url, wim_port, network, auth_data, logger_name
+ ):
self.logger = logging.getLogger(logger_name + self.__LOGGER_NAME_EXT)
self.__username = username
self.__password = password
"""post request to dpb via ssh
notes:
- - session_id need only be unique per ssh session, thus is currently safe if
+        - session_id need only be unique per ssh session, thus it is currently safe if
ro is restarted
"""
self._check_connection()
+
if data is None:
data = {}
- url_ext_info = url_params.split('/')
+
+ url_ext_info = url_params.split("/")
+
for i in range(0, len(url_ext_info)):
if url_ext_info[i] == "service":
- data["service-id"] = int(url_ext_info[i+1])
+ data["service-id"] = int(url_ext_info[i + 1])
+
data["type"] = function[self.__FUNCTION_MAP_POS]
data = {
"session": self.__session_id,
- "content": data
+ "content": data,
}
self.__session_id += 1
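+        # Frame the message as a 4-byte big-endian length followed by the JSON payload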
try:
data = json.dumps(data).encode("utf-8")
- data_packed = struct.pack(
- ">I" + str(len(data)) + "s", len(data), data)
+ data_packed = struct.pack(">I" + str(len(data)) + "s", len(data), data)
self.__stdin.write(data_packed)
self.logger.debug("Data sent to DPB via SSH")
except Exception as e:
- raise SdnConnectorError(
- "Failed to write via SSH | text: {}".format(e), 500)
+ raise SdnConnectorError("Failed to write via SSH | text: {}".format(e), 500)
try:
data_len = struct.unpack(">I", self.__stdout.read(4))[0]
- data = struct.unpack(str(data_len) + "s",
- self.__stdout.read(data_len))[0]
+ data = struct.unpack(str(data_len) + "s", self.__stdout.read(data_len))[0]
+
return json.loads(data).get("content", {})
except Exception as e:
raise SdnConnectorError(
- "Could not get response from WIM | text: {}".format(e), 500)
+ "Could not get response from WIM | text: {}".format(e), 500
+ )
def get(self, function, url_params=""):
raise SdnConnectorError("SSH Get not implemented", 500)
def __create_client(self):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
return ssh_client
def __connect(self):
private_key = None
password = None
+
if self.__auth_data.get("auth_type", "PASS") == "KEY":
private_key = self.__build_private_key_obj()
+
if self.__auth_data.get("auth_type", "PASS") == "PASS":
password = self.__password
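+        # Authenticate with password or private key and launch the DPB network command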
try:
- self.__ssh_client.connect(hostname=self.__url,
- port=self.__port,
- username=self.__username,
- password=password,
- pkey=private_key,
- look_for_keys=False,
- compress=False)
+ self.__ssh_client.connect(
+ hostname=self.__url,
+ port=self.__port,
+ username=self.__username,
+ password=password,
+ pkey=private_key,
+ look_for_keys=False,
+ compress=False,
+ )
stdin, stdout, stderr = self.__ssh_client.exec_command(
- command=self.__network)
+ command=self.__network
+ )
except paramiko.BadHostKeyException as e:
raise SdnConnectorError(
- "Could not add SSH host key | text: {}".format(e), 500)
+ "Could not add SSH host key | text: {}".format(e), 500
+ )
except paramiko.AuthenticationException as e:
raise SdnConnectorError(
- "Could not authorize SSH connection | text: {}".format(e), 400)
+ "Could not authorize SSH connection | text: {}".format(e), 400
+ )
except paramiko.SSHException as e:
raise SdnConnectorError(
- "Could not establish the SSH connection | text: {}".format(e), 500)
+ "Could not establish the SSH connection | text: {}".format(e), 500
+ )
except Exception as e:
raise SdnConnectorError(
- "Unknown error occurred when connecting via SSH | text: {}".format(e), 500)
+ "Unknown error occurred when connecting via SSH | text: {}".format(e),
+ 500,
+ )
try:
data_len = struct.unpack(">I", stdout.read(4))[0]
- data = json.loads(struct.unpack(
- str(data_len) + "s", stdout.read(data_len))[0])
+ data = json.loads(
+ struct.unpack(str(data_len) + "s", stdout.read(data_len))[0]
+ )
except Exception as e:
raise SdnConnectorError(
- "Failed to get response from DPB | text: {}".format(e), 500)
+ "Failed to get response from DPB | text: {}".format(e), 500
+ )
+
if "error" in data:
- raise SdnConnectorError(
- data.get("msg", data.get("error", "ERROR")), 500)
+ raise SdnConnectorError(data.get("msg", data.get("error", "ERROR")), 500)
+
self.logger.info("SSH connection to DPB established OK")
+
return stdin, stdout
def __build_private_key_obj(self):
try:
- with open(self.__auth_data.get("key_file"), 'r') as key_file:
+ with open(self.__auth_data.get("key_file"), "r") as key_file:
if self.__auth_data.get("key_type") == "RSA":
- return paramiko.RSAKey.from_private_key(key_file,
- password=self.__auth_data.get("key_pass", None))
+ return paramiko.RSAKey.from_private_key(
+ key_file, password=self.__auth_data.get("key_pass", None)
+ )
elif self.__auth_data.get("key_type") == "ECDSA":
- return paramiko.ECDSAKey.from_private_key(key_file,
- password=self.__auth_data.get("key_pass", None))
+ return paramiko.ECDSAKey.from_private_key(
+ key_file, password=self.__auth_data.get("key_pass", None)
+ )
else:
raise SdnConnectorError("Key type not supported", 400)
except Exception as e:
raise SdnConnectorError(
- "Could not load private SSH key | text: {}".format(e), 500)
+ "Could not load private SSH key | text: {}".format(e), 500
+ )
-class DpbRestInterface():
+class DpbRestInterface:
""" Communicate with the DPB via the REST API """
__LOGGER_NAME_EXT = ".rest"
def __init__(self, wim_url, wim_port, network, logger_name):
self.logger = logging.getLogger(logger_name + self.__LOGGER_NAME_EXT)
self.__base_url = "http://{}:{}/network/{}".format(
- wim_url, str(wim_port), network)
+ wim_url, str(wim_port), network
+ )
self.logger.info("REST defined OK")
def post(self, function, url_params="", data=None, get_response=True):
- url = self.__base_url + url_params + \
- "/" + function[self.__FUNCTION_MAP_POS]
+ url = self.__base_url + url_params + "/" + function[self.__FUNCTION_MAP_POS]
+
try:
self.logger.info(data)
response = requests.post(url, json=data)
+
if response.status_code != 200:
raise SdnConnectorError(
- "REST request failed (status code: {})".format(response.status_code))
+ "REST request failed (status code: {})".format(response.status_code)
+ )
+
if get_response:
return response.json()
except Exception as e:
- raise SdnConnectorError(
- "REST request failed | text: {}".format(e), 500)
+ raise SdnConnectorError("REST request failed | text: {}".format(e), 500)
def get(self, function, url_params=""):
url = self.__base_url + url_params + function[self.__FUNCTION_MAP_POS]
+
try:
return requests.get(url)
except Exception as e:
- raise SdnConnectorError(
- "REST request failed | text: {}".format(e), 500)
+ raise SdnConnectorError("REST request failed | text: {}".format(e), 500)
class DpbConnector(SdnConnectorBase):
__SUPPORTED_CONNECTION_TYPES = ["REST", "SSH"]
__SUPPORTED_SSH_AUTH_TYPES = ["KEY", "PASS"]
__SUPPORTED_SSH_KEY_TYPES = ["ECDSA", "RSA"]
- __STATUS_MAP = {
- "ACTIVE": "ACTIVE",
- "ACTIVATING": "BUILD",
- "FAILED": "ERROR"}
+ __STATUS_MAP = {"ACTIVE": "ACTIVE", "ACTIVATING": "BUILD", "FAILED": "ERROR"}
__ACTIONS_MAP = {
"CREATE": ("create-service", "new-service"),
"DEFINE": ("define", "define-service"),
"DEACTIVATE": ("deactivate", "deactivate-service"),
"CHECK": ("await-status", "await-service-status"),
"GET": ("services", "NOT IMPLEMENTED"),
- "RESET": ("reset", "NOT IMPLEMENTED")
+ "RESET": ("reset", "NOT IMPLEMENTED"),
}
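Each __ACTIONS_MAP value above is a pair of backend-specific operation names; DpbSshInterface picks the second element through __FUNCTION_MAP_POS = 1 (e.g. "new-service" for CREATE), while the REST interface selects its own position, which is not visible in this diff. Note also that the connector later looks up "ACTIVATE" and "RELEASE", which are not keys of the map as shown here. Illustrative only:

ACTIONS_MAP = {"CREATE": ("create-service", "new-service")}
SSH_FUNCTION_MAP_POS = 1  # the SSH interface sends the second name
assert ACTIONS_MAP["CREATE"][SSH_FUNCTION_MAP_POS] == "new-service"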
def __init__(self, wim, wim_account, config):
self.__password = self.__account.get("passwd", "")
self.__username = self.__account.get("user", "")
self.__network = self.__cli_config.get("network", "")
- self.__connection_type = self.__cli_config.get(
- "connection_type", "REST")
+ self.__connection_type = self.__cli_config.get("connection_type", "REST")
self.__port = self.__cli_config.get(
- "port", (80 if self.__connection_type == "REST" else 22))
+ "port", (80 if self.__connection_type == "REST" else 22)
+ )
self.__ssh_auth = self.__cli_config.get("ssh_auth", None)
if self.__connection_type == "SSH":
- interface = DpbSshInterface(self.__username,
- self.__password,
- self.__url,
- self.__port,
- self.__network,
- self.__ssh_auth,
- self.__LOGGER_NAME)
+ interface = DpbSshInterface(
+ self.__username,
+ self.__password,
+ self.__url,
+ self.__port,
+ self.__network,
+ self.__ssh_auth,
+ self.__LOGGER_NAME,
+ )
elif self.__connection_type == "REST":
- interface = DpbRestInterface(self.__url,
- self.__port,
- self.__network,
- self.__LOGGER_NAME)
+ interface = DpbRestInterface(
+ self.__url, self.__port, self.__network, self.__LOGGER_NAME
+ )
else:
raise SdnConnectorError(
- "Connection type not supported (must be SSH or REST)", 400)
+ "Connection type not supported (must be SSH or REST)", 400
+ )
+
self.__post = interface.post
self.__get = interface.get
self.logger.info("DPB WimConn Init OK")
def create_connectivity_service(self, service_type, connection_points, **kwargs):
self.logger.info("Creating a connectivity service")
+
try:
response = self.__post(self.__ACTIONS_MAP.get("CREATE"))
+
if "service-id" in response:
service_id = int(response.get("service-id"))
self.logger.debug("created service id {}".format(service_id))
else:
raise SdnConnectorError(
- "Invalid create service response (could be an issue with the DPB)", 500)
+ "Invalid create service response (could be an issue with the DPB)",
+ 500,
+ )
+
data = {"segment": []}
+
for point in connection_points:
- data["segment"].append({
- "terminal-name": point.get("service_endpoint_id"),
- "label": int((point.get("service_endpoint_encapsulation_info")).get("vlan")),
- "ingress-bw": 10.0,
- "egress-bw": 10.0})
+ data["segment"].append(
+ {
+ "terminal-name": point.get("service_endpoint_id"),
+ "label": int(
+ (point.get("service_endpoint_encapsulation_info")).get(
+ "vlan"
+ )
+ ),
+ "ingress-bw": 10.0,
+ "egress-bw": 10.0,
+ }
+ )
# "ingress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("ingress"),
# "egress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("egress")}
- self.__post(self.__ACTIONS_MAP.get("DEFINE"),
- "/service/"+str(service_id), data, get_response=False)
- self.__post(self.__ACTIONS_MAP.get("ACTIVATE"),
- "/service/"+str(service_id), get_response=False)
- self.logger.debug(
- "Created connectivity service id:{}".format(service_id))
+ self.__post(
+ self.__ACTIONS_MAP.get("DEFINE"),
+ "/service/" + str(service_id),
+ data,
+ get_response=False,
+ )
+ self.__post(
+ self.__ACTIONS_MAP.get("ACTIVATE"),
+ "/service/" + str(service_id),
+ get_response=False,
+ )
+ self.logger.debug("Created connectivity service id:{}".format(service_id))
+
return (str(service_id), None)
except Exception as e:
raise SdnConnectorError(
- "Connectivity service could not be made | text: {}".format(e), 500)
+ "Connectivity service could not be made | text: {}".format(e), 500
+ )
def get_connectivity_service_status(self, service_uuid, conn_info=None):
self.logger.info(
- "Checking connectivity service status id:{}".format(service_uuid))
- data = {
- "timeout-millis": 10000,
- "acceptable": ["ACTIVE", "FAILED"]
- }
+ "Checking connectivity service status id:{}".format(service_uuid)
+ )
+ data = {"timeout-millis": 10000, "acceptable": ["ACTIVE", "FAILED"]}
+
try:
- response = self.__post(self.__ACTIONS_MAP.get(
- "CHECK"), "/service/"+service_uuid, data)
+ response = self.__post(
+ self.__ACTIONS_MAP.get("CHECK"),
+ "/service/" + service_uuid,
+ data,
+ )
+
if "status" in response:
status = response.get("status", None)
self.logger.info("CHECKED CONNECTIVITY SERVICE STATUS")
+
return {"wim_status": self.__STATUS_MAP.get(status)}
else:
raise SdnConnectorError(
- "Invalid status check response (could be an issue with the DPB)", 500)
+ "Invalid status check response (could be an issue with the DPB)",
+ 500,
+ )
except Exception as e:
raise SdnConnectorError(
- "Failed to check service status | text: {}".format(e), 500)
+ "Failed to check service status | text: {}".format(e), 500
+ )
def delete_connectivity_service(self, service_uuid, conn_info=None):
- self.logger.info(
- "Deleting connectivity service id: {}".format(service_uuid))
+ self.logger.info("Deleting connectivity service id: {}".format(service_uuid))
+
try:
- self.__post(self.__ACTIONS_MAP.get("RELEASE"),
- "/service/"+service_uuid, get_response=False)
+ self.__post(
+ self.__ACTIONS_MAP.get("RELEASE"),
+ "/service/" + service_uuid,
+ get_response=False,
+ )
except Exception as e:
raise SdnConnectorError(
- "Could not delete service id:{} (could be an issue with the DPB): {}".format(service_uuid, e), 500)
- self.logger.debug(
- "Deleted connectivity service id:{}".format(service_uuid))
+ "Could not delete service id:{} (could be an issue with the DPB): {}".format(
+ service_uuid, e
+ ),
+ 500,
+ )
+
+ self.logger.debug("Deleted connectivity service id:{}".format(service_uuid))
+
return None
- def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
- self.logger.info(
- "Editing connectivity service id: {}".format(service_uuid))
- data = {
- "timeout-millis": 10000,
- "acceptable": ["DORMANT"]
- }
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ self.logger.info("Editing connectivity service id: {}".format(service_uuid))
+ data = {"timeout-millis": 10000, "acceptable": ["DORMANT"]}
+
try:
- self.__post(self.__ACTIONS_MAP.get("RESET"),
- "/service/"+service_uuid, get_response=False)
- response = self.__post(self.__ACTIONS_MAP.get(
- "CHECK"), "/service/"+service_uuid, data)
+ self.__post(
+ self.__ACTIONS_MAP.get("RESET"),
+ "/service/" + service_uuid,
+ get_response=False,
+ )
+ response = self.__post(
+ self.__ACTIONS_MAP.get("CHECK"),
+ "/service/" + service_uuid,
+ data,
+ )
+
if "status" in response:
- self.logger.debug(
- "Connectivity service {} reset".format(service_uuid))
+ self.logger.debug("Connectivity service {} reset".format(service_uuid))
else:
raise SdnConnectorError(
- "Invalid status check response (could be an issue with the DPB)", 500)
+ "Invalid status check response (could be an issue with the DPB)",
+ 500,
+ )
except Exception as e:
- raise SdnConnectorError(
- "Failed to reset service | text: {}".format(e), 500)
+ raise SdnConnectorError("Failed to reset service | text: {}".format(e), 500)
+
try:
data = {"segment": []}
+
for point in connection_points:
- data["segment"].append({
- "terminal-name": point.get("service_endpoint_id"),
- "label": int((point.get("service_endpoint_encapsulation_info")).get("vlan")),
- "ingress-bw": 10.0,
- "egress-bw": 10.0})
+ data["segment"].append(
+ {
+ "terminal-name": point.get("service_endpoint_id"),
+ "label": int(
+ (point.get("service_endpoint_encapsulation_info")).get(
+ "vlan"
+ )
+ ),
+ "ingress-bw": 10.0,
+ "egress-bw": 10.0,
+ }
+ )
# "ingress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("ingress"),
# "egress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("egress")}
- self.__post(self.__ACTIONS_MAP.get("DEFINE"), "/service/" +
- str(service_uuid), data, get_response=False)
- self.__post(self.__ACTIONS_MAP.get("ACTIVATE"),
- "/service/"+str(service_uuid), get_response=False)
+
+ self.__post(
+ self.__ACTIONS_MAP.get("DEFINE"),
+ "/service/" + str(service_uuid),
+ data,
+ get_response=False,
+ )
+ self.__post(
+ self.__ACTIONS_MAP.get("ACTIVATE"),
+ "/service/" + str(service_uuid),
+ get_response=False,
+ )
except Exception as e:
raise SdnConnectorError(
- "Failed to edit connectivity service | text: {}".format(e), 500)
- self.logger.debug(
- "Edited connectivity service {}".format(service_uuid))
+ "Failed to edit connectivity service | text: {}".format(e), 500
+ )
+
+ self.logger.debug("Edited connectivity service {}".format(service_uuid))
+
return conn_info
def __check_service(self, serv_type, points, kwargs):
setup(
name=_name,
- description='OSM ro sdn plugin for dpb',
+ description="OSM ro sdn plugin for dpb",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG'",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
"osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_dpb = osm_rosdn_dpb.wimconn_dpb:DpbConnector'],
+ "osm_rosdn.plugins": ["rosdn_dpb = osm_rosdn_dpb.wimconn_dpb:DpbConnector"],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_dpb --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
class SdnError(Enum):
- UNREACHABLE = 'Unable to reach the WIM.',
- SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.',
- CONNECTION_POINTS_SIZE = \
- 'Unexpected number of connection points: 2 expected.',
- ENCAPSULATION_TYPE = \
- 'Unexpected service_endpoint_encapsulation_type. \
- Only "dotq1" is accepted.',
- BANDWIDTH = 'Unable to get the bandwidth.',
- STATUS = 'Unable to get the status for the service.',
- DELETE = 'Unable to delete service.',
- CLEAR_ALL = 'Unable to clear all the services',
- UNKNOWN_ACTION = 'Unknown action invoked.',
- BACKUP = 'Unable to get the backup parameter.',
- UNSUPPORTED_FEATURE = "Unsupported feature",
+ UNREACHABLE = "Unable to reach the WIM."
+ SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.'
+ CONNECTION_POINTS_SIZE = "Unexpected number of connection points: 2 expected."
+ ENCAPSULATION_TYPE = (
+ 'Unexpected service_endpoint_encapsulation_type. Only "dotq1" is accepted.'
+ )
+ BANDWIDTH = "Unable to get the bandwidth."
+ STATUS = "Unable to get the status for the service."
+ DELETE = "Unable to delete service."
+ CLEAR_ALL = "Unable to clear all the services"
+ UNKNOWN_ACTION = "Unknown action invoked."
+ BACKUP = "Unable to get the backup parameter."
+ UNSUPPORTED_FEATURE = "Unsupported feature"
UNAUTHORIZED = "Failed while authenticating"
class SdnAPIActions(Enum):
- CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY",
- CREATE_SERVICE = "CREATE_SERVICE",
- DELETE_SERVICE = "DELETE_SERVICE",
- CLEAR_ALL = "CLEAR_ALL",
- SERVICE_STATUS = "SERVICE_STATUS",
+ CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY"
+ CREATE_SERVICE = "CREATE_SERVICE"
+ DELETE_SERVICE = "DELETE_SERVICE"
+ CLEAR_ALL = "CLEAR_ALL"
+ SERVICE_STATUS = "SERVICE_STATUS"
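The trailing commas removed in the two enums above were not purely cosmetic: with a trailing comma an Enum member's value is a one-element tuple rather than a string. Minimal illustration:

from enum import Enum


class Old(Enum):
    UNREACHABLE = "Unable to reach the WIM.",  # value is a tuple


class New(Enum):
    UNREACHABLE = "Unable to reach the WIM."  # value is a plain string


assert Old.UNREACHABLE.value == ("Unable to reach the WIM.",)
assert New.UNREACHABLE.value == "Unable to reach the WIM."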
class DynpacConnector(SdnConnectorBase):
__supported_service_types = ["ELINE (L2)", "ELINE"]
__supported_encapsulation_types = ["dot1q"]
- __WIM_LOGGER = 'ro.sdn.dynpac'
+ __WIM_LOGGER = "ro.sdn.dynpac"
__ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
__ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
__BACKUP_PARAM = "backup"
body = self.__get_body(service_type, connection_points, kwargs)
- headers = {'Content-type': 'application/x-www-form-urlencoded'}
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
endpoint = "{}/service/create".format(self.__wim_url)
try:
description = "Description: {}.".format(error.get("description"))
exception = reason + description
self.__exception(exception, http_code=response.status_code)
+
uuid = response.content
self.logger.info("Service with uuid {} created.".format(uuid))
+
return (uuid, None)
- def edit_connectivity_service(self, service_uuid,
- conn_info, connection_points,
- **kwargs):
+ def edit_connectivity_service(
+ self, service_uuid, conn_info, connection_points, **kwargs
+ ):
self.__exception(SdnError.UNSUPPORTED_FEATURE, http_code=501)
def get_connectivity_service_status(self, service_uuid):
endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid)
+
try:
response = requests.get(endpoint)
except requests.exceptions.RequestException as e:
if response.status_code != 200:
self.__exception(SdnError.STATUS, http_code=response.status_code)
- self.logger.info("Status for service with uuid {}: {}"
- .format(service_uuid, response.content))
+
+ self.logger.info(
+ "Status for service with uuid {}: {}".format(service_uuid, response.content)
+ )
+
return response.content
def delete_connectivity_service(self, service_uuid, conn_info):
endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid)
+
try:
response = requests.delete(endpoint)
except requests.exceptions.RequestException as e:
self.__exception(e.message, http_code=503)
+
if response.status_code != 200:
self.__exception(SdnError.DELETE, http_code=response.status_code)
def clear_all_connectivity_services(self):
endpoint = "{}/service/clearAll".format(self.__wim_url)
+
try:
response = requests.delete(endpoint)
http_code = response.status_code
except requests.exceptions.RequestException as e:
self.__exception(e.message, http_code=503)
+
if http_code != 200:
self.__exception(SdnError.CLEAR_ALL, http_code=http_code)
self.logger.info("{} services deleted".format(response.content))
+
return "{} services deleted".format(response.content)
def check_connectivity(self):
if http_code != 200:
self.__exception(SdnError.UNREACHABLE, http_code=http_code)
+
self.logger.info("Connectivity checked")
def check_credentials(self):
if http_code != 200:
self.__exception(SdnError.UNAUTHORIZED, http_code=http_code)
+
self.logger.info("Credentials checked")
# Private functions
def __exception(self, x, **kwargs):
http_code = kwargs.get("http_code")
+
if hasattr(x, "value"):
error = x.value
else:
error = x
+
self.logger.error(error)
+
raise SdnConnectorError(error, http_code=http_code)
def __check_service(self, service_type, connection_points, kwargs):
for connection_point in connection_points:
enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM)
+
if enc_type not in self.__supported_encapsulation_types:
self.__exception(SdnError.ENCAPSULATION_TYPE, http_code=400)
# Commented out for as long as parameter isn't implemented
# bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
# if not isinstance(bandwidth, int):
- # self.__exception(SdnError.BANDWIDTH, http_code=400)
+ # self.__exception(SdnError.BANDWIDTH, http_code=400)
# Commented out for as long as parameter isn't implemented
# backup = kwargs.get(self.__BACKUP_PARAM)
# if not isinstance(backup, bool):
- # self.__exception(SdnError.BACKUP, http_code=400)
+ # self.__exception(SdnError.BACKUP, http_code=400)
def __get_body(self, service_type, connection_points, kwargs):
port_mapping = self.__config.get("service_endpoint_mapping")
selected_ports = []
+
for connection_point in connection_points:
endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
- port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0]
+ port = next(
+ filter(
+ lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id,
+ port_mapping,
+ )
+ ) # note: filter() returns an iterator in Python 3, so it is not subscriptable
port_info = port.get(self.__WAN_MAPPING_INFO_PARAM)
selected_ports.append(port_info)
+
if service_type == "ELINE (L2)" or service_type == "ELINE":
service_type = "L2"
+
body = {
- "connection_points": [{
- "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
- "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
- "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
- }, {
- "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
- "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
- "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
- }],
+ "connection_points": [
+ {
+ "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
+ "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
+ "wan_vlan": connection_points[0]
+ .get(self.__ENCAPSULATION_INFO_PARAM)
+ .get(self.__VLAN_PARAM),
+ },
+ {
+ "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
+ "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
+ "wan_vlan": connection_points[1]
+ .get(self.__ENCAPSULATION_INFO_PARAM)
+ .get(self.__VLAN_PARAM),
+ },
+ ],
"bandwidth": 100, # Hardcoded for as long as parameter isn't implemented
"service_type": service_type,
- "backup": False # Hardcoded for as long as parameter isn't implemented
+ "backup": False, # Hardcoded for as long as parameter isn't implemented
}
+
return "body={}".format(json.dumps(body))
setup(
name=_name,
- description='OSM ro sdn plugin for dynpac',
+ description="OSM ro sdn plugin for dynpac",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector'],
+ "osm_rosdn.plugins": [
+ "rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_dynpac --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import json
import requests
import logging
-from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnUnexpectedResponse, OpenflowConnConnectionException
+from osm_ro_plugin.openflow_conn import (
+ OpenflowConn,
+ OpenflowConnUnexpectedResponse,
+ OpenflowConnConnectionException,
+)
class OfConnFloodLight(OpenflowConn):
"""
# check params
url = params.get("of_url")
+
if not url:
raise ValueError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
+
self.url = url
OpenflowConn.__init__(self, params)
self.pp2ofi = {} # From Physical Port to OpenFlow Index
self.ofi2pp = {} # From OpenFlow Index to Physical Port
- self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+ self.headers = {
+ "content-type": "application/json",
+ "Accept": "application/json",
+ }
self.version = None
- self.logger = logging.getLogger('ro.sdn.floodlightof')
+ self.logger = logging.getLogger("ro.sdn.floodlightof")
self.logger.setLevel(params.get("of_debug", "ERROR"))
self._set_version(params.get("of_version"))
parameter is missing or wrong
"""
try:
- of_response = requests.get(self.url + "wm/core/controller/switches/json", headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.get(
+ self.url + "wm/core/controller/switches/json", headers=self.headers
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("get_of_switches " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("get_of_switches " + error_text)
info = of_response.json()
+
if not isinstance(info, (list, tuple)):
- self.logger.error("get_of_switches. Unexpected response not a list %s", str(type(info)))
- raise OpenflowConnUnexpectedResponse("Unexpected response, not a list. Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response not a list %s",
+ str(type(info)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, not a list. Wrong version?"
+ )
+
if len(info) == 0:
return info
+
# autodiscover version
if self.version is None:
- if 'dpid' in info[0] and 'inetAddress' in info[0]:
+ if "dpid" in info[0] and "inetAddress" in info[0]:
self._set_version("0.9")
# elif 'switchDPID' in info[0] and 'inetAddress' in info[0]:
# self._set_version("1.X")
else:
- self.logger.error("get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' "
- "field: %s", str(info[0]))
- raise OpenflowConnUnexpectedResponse("Unexpected response, not found 'dpid' or "
- "'switchDPID' field. Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' "
+ "field: %s",
+ str(info[0]),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, not found 'dpid' or "
+ "'switchDPID' field. Wrong version?"
+ )
switch_list = []
for switch in info:
- switch_list.append((switch[self.ver_names["dpid"]], switch['inetAddress']))
+ switch_list.append(
+ (switch[self.ver_names["dpid"]], switch["inetAddress"])
+ )
+
return switch_list
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except Exception as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def get_of_rules(self, translate_of_ports=True):
switch: DPID, all
Raise an openflowconnUnexpectedResponse exception if fails with text_error
"""
-
try:
# get translation, autodiscover version
+
if len(self.ofi2pp) == 0:
self.obtain_port_correspondence()
- of_response = requests.get(self.url + "wm/{}/list/{}/json".format(self.ver_names["URLmodifier"], self.dpid),
- headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.get(
+ self.url
+ + "wm/{}/list/{}/json".format(self.ver_names["URLmodifier"], self.dpid),
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("get_of_rules " + error_text)
info = of_response.json()
+
if type(info) != dict:
- self.logger.error("get_of_rules. Unexpected response not a dict %s", str(type(info)))
- raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+ self.logger.error(
+ "get_of_rules. Unexpected response not a dict %s", str(type(info))
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, not a dict. Wrong version?"
+ )
+
rule_list = []
for switch, switch_info in info.items():
if switch_info is None:
continue
+
if str(switch) != self.dpid:
continue
+
for name, details in switch_info.items():
- rule = {
- "name": name,
- "switch": str(switch)
- }
+ rule = {"name": name, "switch": str(switch)}
# rule["active"] = "true"
rule["priority"] = int(details["priority"])
+
if self.version[0] == "0":
if translate_of_ports:
- rule["ingress_port"] = self.ofi2pp[details["match"]["inputPort"]]
+ rule["ingress_port"] = self.ofi2pp[
+ details["match"]["inputPort"]
+ ]
else:
rule["ingress_port"] = str(details["match"]["inputPort"])
+
dst_mac = details["match"]["dataLayerDestination"]
+
if dst_mac != "00:00:00:00:00:00":
rule["dst_mac"] = dst_mac
+
vlan = details["match"]["dataLayerVirtualLan"]
+
if vlan != -1:
rule["vlan_id"] = vlan
+
actionlist = []
+
for action in details["actions"]:
if action["type"] == "OUTPUT":
if translate_of_ports:
elif action["type"] == "STRIP_VLAN":
actionlist.append(("vlan", None))
elif action["type"] == "SET_VLAN_ID":
- actionlist.append(("vlan", action["virtualLanIdentifier"]))
+ actionlist.append(
+ ("vlan", action["virtualLanIdentifier"])
+ )
else:
actionlist.append((action["type"], str(action)))
- self.logger.warning("get_of_rules() Unknown action in rule %s: %s", rule["name"],
- str(action))
+ self.logger.warning(
+ "get_of_rules() Unknown action in rule %s: %s",
+ rule["name"],
+ str(action),
+ )
+
rule["actions"] = actionlist
elif self.version[0] == "1":
if translate_of_ports:
- rule["ingress_port"] = self.ofi2pp[details["match"]["in_port"]]
+ rule["ingress_port"] = self.ofi2pp[
+ details["match"]["in_port"]
+ ]
else:
rule["ingress_port"] = details["match"]["in_port"]
+
if "eth_dst" in details["match"]:
dst_mac = details["match"]["eth_dst"]
if dst_mac != "00:00:00:00:00:00":
rule["dst_mac"] = dst_mac
+
if "eth_vlan_vid" in details["match"]:
vlan = int(details["match"]["eth_vlan_vid"], 16) & 0xFFF
rule["vlan_id"] = str(vlan)
+
actionlist = []
- for action in details["instructions"]["instruction_apply_actions"]:
+ for action in details["instructions"][
+ "instruction_apply_actions"
+ ]:
if action == "output":
if translate_of_ports:
- port = self.ofi2pp[details["instructions"]["instruction_apply_actions"]["output"]]
+ port = self.ofi2pp[
+ details["instructions"][
+ "instruction_apply_actions"
+ ]["output"]
+ ]
else:
- port = details["instructions"]["instruction_apply_actions"]["output"]
+ port = details["instructions"][
+ "instruction_apply_actions"
+ ]["output"]
actionlist.append(("out", port))
elif action == "strip_vlan":
actionlist.append(("vlan", None))
elif action == "set_vlan_vid":
actionlist.append(
- ("vlan", details["instructions"]["instruction_apply_actions"]["set_vlan_vid"]))
+ (
+ "vlan",
+ details["instructions"][
+ "instruction_apply_actions"
+ ]["set_vlan_vid"],
+ )
+ )
else:
- self.logger.error("get_of_rules Unknown action in rule %s: %s", rule["name"],
- str(action))
+ self.logger.error(
+ "get_of_rules Unknown action in rule %s: %s",
+ rule["name"],
+ str(action),
+ )
# actionlist.append((action, str(details["instructions"]["instruction_apply_actions"])))
+
rule_list.append(rule)
return rule_list
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except Exception as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
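For orientation, one element of the rule_list returned by get_of_rules() has the generic shape assembled above (hypothetical values; ports shown already translated via ofi2pp, i.e. translate_of_ports=True):

example_rule = {
    "name": "flow-1",  # flow entry name as reported by the controller
    "switch": "00:00:00:00:00:00:00:01",  # switch DPID
    "priority": 1000,
    "ingress_port": "eth1",  # physical port name
    "dst_mac": "fa:16:3e:00:00:01",  # present only when the match is not 00:00:00:00:00:00
    "vlan_id": 100,  # present only when a VLAN match exists
    "actions": [("vlan", 200), ("out", "eth2")],
}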
def obtain_port_correspondence(self):
Raise an openflowconnUnexpectedResponse exception if fails with text_error
"""
try:
- of_response = requests.get(self.url + "wm/core/controller/switches/json", headers=self.headers)
+ of_response = requests.get(
+ self.url + "wm/core/controller/switches/json", headers=self.headers
+ )
# print vim_response.status_code
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("obtain_port_correspondence " + error_text)
info = of_response.json()
if not isinstance(info, (list, tuple)):
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, not a list. Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow response, not a list. Wrong version?"
+ )
index = -1
if len(info) > 0:
# autodiscover version
if self.version is None:
- if 'dpid' in info[0] and 'ports' in info[0]:
+ if "dpid" in info[0] and "ports" in info[0]:
self._set_version("0.9")
- elif 'switchDPID' in info[0]:
+ elif "switchDPID" in info[0]:
self._set_version("1.X")
else:
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow response, Wrong version?"
+ )
for i, info_item in enumerate(info):
if info_item[self.ver_names["dpid"]] == self.dpid:
index = i
break
+
if index == -1:
- text = "DPID '{}' not present in controller {}".format(self.dpid, self.url)
+ text = "DPID '{}' not present in controller {}".format(
+ self.dpid, self.url
+ )
# print self.name, ": get_of_controller_info ERROR", text
+
raise OpenflowConnUnexpectedResponse(text)
else:
if self.version[0] == "0":
ports = info[index]["ports"]
else: # version 1.X
- of_response = requests.get(self.url + "wm/core/switch/{}/port-desc/json".format(self.dpid),
- headers=self.headers)
+ of_response = requests.get(
+ self.url + "wm/core/switch/{}/port-desc/json".format(self.dpid),
+ headers=self.headers,
+ )
# print vim_response.status_code
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("obtain_port_correspondence " + error_text)
info = of_response.json()
+
if type(info) != dict:
- raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response, "
- "not a dict. Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow port-desc response, "
+ "not a dict. Wrong version?"
+ )
+
if "portDesc" not in info:
- raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response, "
- "'portDesc' not found. Wrong version?")
- if type(info["portDesc"]) != list and type(info["portDesc"]) != tuple:
- raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response at "
- "'portDesc', not a list. Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow port-desc response, "
+ "'portDesc' not found. Wrong version?"
+ )
+
+ if (
+ type(info["portDesc"]) != list
+ and type(info["portDesc"]) != tuple
+ ):
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow port-desc response at "
+ "'portDesc', not a list. Wrong version?"
+ )
+
ports = info["portDesc"]
+
for port in ports:
self.pp2ofi[str(port["name"])] = str(port["portNumber"])
self.ofi2pp[port["portNumber"]] = str(port["name"])
# print self.name, ": get_of_controller_info ports:", self.pp2ofi
+
return self.pp2ofi
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except Exception as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def del_flow(self, flow_name):
if self.version is None:
self.get_of_switches()
- of_response = requests.delete(self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]),
- headers=self.headers,
- data='{{"switch":"{}","name":"{}"}}'.format(self.dpid, flow_name))
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.delete(
+ self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]),
+ headers=self.headers,
+ data='{{"switch":"{}","name":"{}"}}'.format(self.dpid, flow_name),
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("del_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("del_flow OK " + error_text)
+
return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("del_flow " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except Exception as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("del_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def new_flow(self, data):
try:
# We have to build the data for the floodlight call from the generic data
- sdata = {'active': "true", "name": data["name"]}
+ sdata = {"active": "true", "name": data["name"]}
+
if data.get("priority"):
sdata["priority"] = str(data["priority"])
+
if data.get("vlan_id"):
sdata[self.ver_names["vlanid"]] = data["vlan_id"]
+
if data.get("dst_mac"):
sdata[self.ver_names["destmac"]] = data["dst_mac"]
- sdata['switch'] = self.dpid
- if not data['ingress_port'] in self.pp2ofi:
- error_text = 'Error. Port {} is not present in the switch'.format(data['ingress_port'])
+
+ sdata["switch"] = self.dpid
+ if not data["ingress_port"] in self.pp2ofi:
+ error_text = "Error. Port {} is not present in the switch".format(
+ data["ingress_port"]
+ )
self.logger.warning("new_flow " + error_text)
raise OpenflowConnUnexpectedResponse(error_text)
- sdata[self.ver_names["inport"]] = self.pp2ofi[data['ingress_port']]
- sdata['actions'] = ""
+ sdata[self.ver_names["inport"]] = self.pp2ofi[data["ingress_port"]]
+ sdata["actions"] = ""
+
+ for action in data["actions"]:
+ if len(sdata["actions"]) > 0:
+ sdata["actions"] += ","
- for action in data['actions']:
- if len(sdata['actions']) > 0:
- sdata['actions'] += ','
if action[0] == "vlan":
if action[1] is None:
- sdata['actions'] += self.ver_names["stripvlan"]
+ sdata["actions"] += self.ver_names["stripvlan"]
else:
- sdata['actions'] += self.ver_names["setvlan"] + "=" + str(action[1])
- elif action[0] == 'out':
- sdata['actions'] += "output=" + self.pp2ofi[action[1]]
+ sdata["actions"] += (
+ self.ver_names["setvlan"] + "=" + str(action[1])
+ )
+ elif action[0] == "out":
+ sdata["actions"] += "output=" + self.pp2ofi[action[1]]
+
+ of_response = requests.post(
+ self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]),
+ headers=self.headers,
+ data=json.dumps(sdata),
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
- of_response = requests.post(self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]),
- headers=self.headers, data=json.dumps(sdata))
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
if of_response.status_code != 200:
self.logger.warning("new_flow " + error_text)
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("new_flow OK" + error_text)
+
return None
except requests.exceptions.RequestException as e:
if len(sw_list) == 0: # empty
return None
- url = self.url + "wm/{}/clear/{}/json".format(self.ver_names["URLmodifier"], self.dpid)
+ url = self.url + "wm/{}/clear/{}/json".format(
+ self.ver_names["URLmodifier"], self.dpid
+ )
of_response = requests.get(url)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code < 200 or of_response.status_code >= 300:
self.logger.warning("clear_all_flows " + error_text)
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("clear_all_flows OK " + error_text)
+
return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("clear_all_flows " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except Exception as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("clear_all_flows " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
class SdnConnectorFloodLightOf(SdnConnectorOpenFlow):
-
def __init__(self, wim, wim_account, config=None, logger=None):
- """Creates a connectivity based on pro-active openflow rules
- """
- self.logger = logging.getLogger('ro.sdn.floodlightof')
+ """Creates a connectivity based on pro-active openflow rules"""
+ self.logger = logging.getLogger("ro.sdn.floodlightof")
super().__init__(wim, wim_account, config, logger)
of_params = {
"of_url": wim["wim_url"],
setup(
name=_name,
- description='OSM RO plugin for SDN with floodlight openflow rules',
+ description="OSM RO plugin for SDN with floodlight openflow rules",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_floodlightof = osm_rosdn_floodlightof.sdnconn_floodlightof:'
- 'SdnConnectorFloodLightOf'],
+ "osm_rosdn.plugins": [
+ "rosdn_floodlightof = osm_rosdn_floodlightof.sdnconn_floodlightof:SdnConnectorFloodLightOf"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_floodlightof --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import uuid
import logging
from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
-"""CHeck layer where we move it"""
+"""Check layer where we move it"""
-class WimconnectorIETFL2VPN(SdnConnectorBase):
+class WimconnectorIETFL2VPN(SdnConnectorBase):
def __init__(self, wim, wim_account, config=None, logger=None):
- """IETF L2VPM WIM connector
+ """IETF L2VPN WIM connector
Arguments: (To be completed)
wim (dict): WIM record, as stored in the database
wim_account (dict): WIM account record, as stored in the database
"""
- self.logger = logging.getLogger('ro.sdn.ietfl2vpn')
+ self.logger = logging.getLogger("ro.sdn.ietfl2vpn")
super().__init__(wim, wim_account, config, logger)
- self.headers = {'Content-Type': 'application/json'}
- self.mappings = {m['service_endpoint_id']: m
- for m in self.service_endpoint_mapping}
+ self.headers = {"Content-Type": "application/json"}
+ self.mappings = {
+ m["service_endpoint_id"]: m for m in self.service_endpoint_mapping
+ }
self.user = wim_account.get("user")
self.passwd = wim_account.get("passwordd")
+
if self.user and self.passwd is not None:
self.auth = (self.user, self.passwd)
else:
self.auth = None
+
self.logger.info("IETFL2VPN Connector Initialized.")
def check_credentials(self):
- endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+
try:
- response = requests.get(endpoint, auth=self.auth)
+ response = requests.get(endpoint, auth=self.auth)
http_code = response.status_code
except requests.exceptions.RequestException as e:
raise SdnConnectorError(e.message, http_code=503)
if http_code != 200:
raise SdnConnectorError("Failed while authenticating", http_code=http_code)
+
self.logger.info("Credentials checked")
def get_connectivity_service_status(self, service_uuid, conn_info=None):
try:
self.logger.info("Sending get connectivity service stuatus")
servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
- self.wim["wim_url"], service_uuid)
+ self.wim["wim_url"], service_uuid
+ )
response = requests.get(servicepoint, auth=self.auth)
+
if response.status_code != requests.codes.ok:
- raise SdnConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code)
- service_status = {'sdn_status': 'ACTIVE'}
+ raise SdnConnectorError(
+ "Unable to obtain connectivity servcice status",
+ http_code=response.status_code,
+ )
+
+ service_status = {"sdn_status": "ACTIVE"}
+
return service_status
except requests.exceptions.ConnectionError:
raise SdnConnectorError("Request Timeout", http_code=408)
-
+
def search_mapp(self, connection_point):
- id = connection_point['service_endpoint_id']
- if id not in self.mappings:
+ id = connection_point["service_endpoint_id"]
+ if id not in self.mappings:
raise SdnConnectorError("Endpoint {} not located".format(str(id)))
else:
return self.mappings[id]
"""
if service_type == "ELINE":
if len(connection_points) > 2:
- raise SdnConnectorError('Connections between more than 2 endpoints are not supported')
+ raise SdnConnectorError(
+ "Connections between more than 2 endpoints are not supported"
+ )
+
if len(connection_points) < 2:
- raise SdnConnectorError('Connections must be of at least 2 endpoints')
- """ First step, create the vpn service """
+ raise SdnConnectorError("Connections must be of at least 2 endpoints")
+
+ """ First step, create the vpn service """
uuid_l2vpn = str(uuid.uuid4())
vpn_service = {}
vpn_service["vpn-id"] = uuid_l2vpn
response_service_creation = None
conn_info = []
self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+
try:
- endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
- self.wim["wim_url"])
- response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
- json=vpn_service_l, auth=self.auth)
+ endpoint_service_creation = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
+ response_service_creation = requests.post(
+ endpoint_service_creation,
+ headers=self.headers,
+ json=vpn_service_l,
+ auth=self.auth,
+ )
except requests.exceptions.ConnectionError:
- raise SdnConnectorError("Request to create service Timeout", http_code=408)
+ raise SdnConnectorError(
+ "Request to create service Timeout", http_code=408
+ )
+
if response_service_creation.status_code == 409:
- raise SdnConnectorError("Service already exists", http_code=response_service_creation.status_code)
+ raise SdnConnectorError(
+ "Service already exists",
+ http_code=response_service_creation.status_code,
+ )
elif response_service_creation.status_code != requests.codes.created:
- raise SdnConnectorError("Request to create service not accepted",
- http_code=response_service_creation.status_code)
- """ Second step, create the connections and vpn attachments """
+ raise SdnConnectorError(
+ "Request to create service not accepted",
+ http_code=response_service_creation.status_code,
+ )
+
+ """ Second step, create the connections and vpn attachments """
for connection_point in connection_points:
connection_point_wan_info = self.search_mapp(connection_point)
site_network_access = {}
connection = {}
+
if connection_point["service_endpoint_encapsulation_type"] != "none":
- if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+ if (
+ connection_point["service_endpoint_encapsulation_type"]
+ == "dot1q"
+ ):
""" The connection is a VLAN """
connection["encapsulation-type"] = "dot1q-vlan-tagged"
tagged = {}
tagged_interf = {}
- service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ service_endpoint_encapsulation_info = connection_point[
+ "service_endpoint_encapsulation_info"
+ ]
+
if service_endpoint_encapsulation_info["vlan"] is None:
raise SdnConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+ "vlan"
+ ]
tagged["dot1q-vlan-tagged"] = tagged_interf
connection["tagged-interface"] = tagged
else:
raise NotImplementedError("Encapsulation type not implemented")
+
site_network_access["connection"] = connection
self.logger.info("Sending connection:{}".format(connection))
vpn_attach = {}
vpn_attach["vpn-id"] = uuid_l2vpn
- vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
site_network_access["vpn-attachment"] = vpn_attach
self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
uuid_sna = str(uuid.uuid4())
site_network_access["network-access-id"] = uuid_sna
- site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"]
+ site_network_access["bearer"] = connection_point_wan_info[
+ "service_mapping_info"
+ ]["bearer"]
site_network_accesses = {}
site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+ site_network_accesses[
+ "ietf-l2vpn-svc:site-network-access"
+ ] = site_network_access_list
conn_info_d = {}
- conn_info_d["site"] = connection_point_wan_info["service_mapping_info"]["site-id"]
- conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
+ conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
+ conn_info_d["site-network-access-id"] = site_network_access[
+ "network-access-id"
+ ]
conn_info_d["mapping"] = None
conn_info.append(conn_info_d)
+
try:
- endpoint_site_network_access_creation = \
- "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
- self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"])
+ endpoint_site_network_access_creation = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+ "sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"],
+ connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ],
+ )
+ )
response_endpoint_site_network_access_creation = requests.post(
endpoint_site_network_access_creation,
headers=self.headers,
json=site_network_accesses,
- auth=self.auth)
-
- if response_endpoint_site_network_access_creation.status_code == 409:
+ auth=self.auth,
+ )
+
+ if (
+ response_endpoint_site_network_access_creation.status_code
+ == 409
+ ):
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise SdnConnectorError("Site_Network_Access with ID '{}' already exists".format(
- site_network_access["network-access-id"]),
- http_code=response_endpoint_site_network_access_creation.status_code)
-
- elif response_endpoint_site_network_access_creation.status_code == 400:
+
+ raise SdnConnectorError(
+ "Site_Network_Access with ID '{}' already exists".format(
+ site_network_access["network-access-id"]
+ ),
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code
+ == 400
+ ):
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise SdnConnectorError("Site {} does not exist".format(
- connection_point_wan_info["service_mapping_info"]["site-id"]),
- http_code=response_endpoint_site_network_access_creation.status_code)
-
- elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
- response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
+
+ raise SdnConnectorError(
+ "Site {} does not exist".format(
+ connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
+ ),
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code
+ != requests.codes.created
+ and response_endpoint_site_network_access_creation.status_code
+ != requests.codes.no_content
+ ):
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise SdnConnectorError("Request no accepted",
- http_code=response_endpoint_site_network_access_creation.status_code)
-
+
+ raise SdnConnectorError(
+ "Request no accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
except requests.exceptions.ConnectionError:
self.delete_connectivity_service(vpn_service["vpn-id"])
+
raise SdnConnectorError("Request Timeout", http_code=408)
+
return uuid_l2vpn, conn_info
-
else:
raise NotImplementedError
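For orientation, the per-connection-point document posted above to .../sites/site={site-id}/site-network-accesses/ has this shape (hypothetical identifiers; field names taken from the surrounding code):

site_network_accesses = {
    "ietf-l2vpn-svc:site-network-access": [
        {
            "network-access-id": "4c9b2f6e-...",  # uuid_sna (hypothetical)
            "bearer": "bearer-ref-1",  # from the service_endpoint_mapping entry
            "connection": {
                "encapsulation-type": "dot1q-vlan-tagged",
                "tagged-interface": {"dot1q-vlan-tagged": {"cvlan-id": 100}},
            },
            "vpn-attachment": {
                "vpn-id": "9f1a7d2c-...",  # uuid_l2vpn (hypothetical)
                "site-role": "any-to-any-role",
            },
        }
    ]
}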
try:
self.logger.info("Sending delete")
servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
- self.wim["wim_url"], service_uuid)
+ self.wim["wim_url"], service_uuid
+ )
response = requests.delete(servicepoint, auth=self.auth)
+
if response.status_code != requests.codes.no_content:
- raise SdnConnectorError("Error in the request", http_code=response.status_code)
+ raise SdnConnectorError(
+ "Error in the request", http_code=response.status_code
+ )
except requests.exceptions.ConnectionError:
raise SdnConnectorError("Request Timeout", http_code=408)
- def edit_connectivity_service(self, service_uuid, conn_info=None,
- connection_points=None, **kwargs):
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
"""Change an existing connectivity service, see
``create_connectivity_service``"""
-
# sites = {"sites": {}}
# site_list = []
vpn_service = {}
vpn_service["svc-topo"] = "any-to-any"
counter = 0
+
for connection_point in connection_points:
site_network_access = {}
connection_point_wan_info = self.search_mapp(connection_point)
params_site = {}
- params_site["site-id"] = connection_point_wan_info["service_mapping_info"]["site-id"]
+ params_site["site-id"] = connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
device_site = {}
device_site["device-id"] = connection_point_wan_info["device-id"]
params_site["devices"] = device_site
# network_access = {}
connection = {}
+
if connection_point["service_endpoint_encapsulation_type"] != "none":
if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
""" The connection is a VLAN """
connection["encapsulation-type"] = "dot1q-vlan-tagged"
tagged = {}
tagged_interf = {}
- service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ service_endpoint_encapsulation_info = connection_point[
+ "service_endpoint_encapsulation_info"
+ ]
+
if service_endpoint_encapsulation_info["vlan"] is None:
raise SdnConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+ "vlan"
+ ]
tagged["dot1q-vlan-tagged"] = tagged_interf
connection["tagged-interface"] = tagged
else:
raise NotImplementedError("Encapsulation type not implemented")
+
site_network_access["connection"] = connection
vpn_attach = {}
vpn_attach["vpn-id"] = service_uuid
- vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
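+            # with "any-to-any" svc-topo this resolves to the ietf-l2vpn-svc "any-to-any-role" identity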
site_network_access["vpn-attachment"] = vpn_attach
uuid_sna = conn_info[counter]["site-network-access-id"]
site_network_access["network-access-id"] = uuid_sna
- site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"]
+ site_network_access["bearer"] = connection_point_wan_info[
+ "service_mapping_info"
+ ]["bearer"]
site_network_accesses = {}
site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+ site_network_accesses[
+ "ietf-l2vpn-svc:site-network-access"
+ ] = site_network_access_list
+
try:
- endpoint_site_network_access_edit = \
- "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
- self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"])
- response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
- headers=self.headers,
- json=site_network_accesses,
- auth=self.auth)
+ endpoint_site_network_access_edit = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+ "sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"],
+ connection_point_wan_info["service_mapping_info"]["site-id"],
+ )
+ )
+ response_endpoint_site_network_access_creation = requests.put(
+ endpoint_site_network_access_edit,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth,
+ )
+
if response_endpoint_site_network_access_creation.status_code == 400:
- raise SdnConnectorError("Service does not exist",
- http_code=response_endpoint_site_network_access_creation.status_code)
- elif response_endpoint_site_network_access_creation.status_code != 201 and \
- response_endpoint_site_network_access_creation.status_code != 204:
- raise SdnConnectorError("Request no accepted",
- http_code=response_endpoint_site_network_access_creation.status_code)
+ raise SdnConnectorError(
+ "Service does not exist",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code != 201
+ and response_endpoint_site_network_access_creation.status_code
+ != 204
+ ):
+ raise SdnConnectorError(
+                    "Request not accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
except requests.exceptions.ConnectionError:
raise SdnConnectorError("Request Timeout", http_code=408)
+
counter += 1
+
return None
def clear_all_connectivity_services(self):
"""Delete all WAN Links corresponding to a WIM"""
try:
self.logger.info("Sending clear all connectivity services")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ servicepoint = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
response = requests.delete(servicepoint, auth=self.auth)
+
if response.status_code != requests.codes.no_content:
- raise SdnConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
+ raise SdnConnectorError(
+ "Unable to clear all connectivity services",
+ http_code=response.status_code,
+ )
except requests.exceptions.ConnectionError:
raise SdnConnectorError("Request Timeout", http_code=408)
"""
try:
self.logger.info("Sending get all connectivity services")
- servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ servicepoint = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
response = requests.get(servicepoint, auth=self.auth)
+
if response.status_code != requests.codes.ok:
- raise SdnConnectorError("Unable to get all connectivity services", http_code=response.status_code)
+ raise SdnConnectorError(
+ "Unable to get all connectivity services",
+ http_code=response.status_code,
+ )
+
return response
except requests.exceptions.ConnectionError:
raise SdnConnectorError("Request Timeout", http_code=408)
setup(
name=_name,
- description='OSM ro sdn plugin for ietfl2vpn',
+ description="OSM ro sdn plugin for ietfl2vpn",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_ietfl2vpn = osm_rosdn_ietfl2vpn.wimconn_ietfl2vpn:WimconnectorIETFL2VPN'],
+ "osm_rosdn.plugins": [
+ "rosdn_ietfl2vpn = osm_rosdn_ietfl2vpn.wimconn_ietfl2vpn:WimconnectorIETFL2VPN"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_ietfl2vpn --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
class ContrailHttp(object):
-
def __init__(self, auth_info, logger):
self._logger = logger
# default don't verify client cert
def get_cmd(self, url, headers):
self._logger.debug("")
resp = self._request("GET", url, headers)
+
return resp.json()
def post_headers_cmd(self, url, headers, post_fields_dict=None):
self._logger.debug("")
+
# obfuscate password before logging dict
- if post_fields_dict.get('auth', {}).get('identity', {}).get('password', {}).get('user', {}).get('password'):
+ if (
+ post_fields_dict.get("auth", {})
+ .get("identity", {})
+ .get("password", {})
+ .get("user", {})
+ .get("password")
+ ):
post_fields_dict_copy = copy.deepcopy(post_fields_dict)
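+            # mask the password only in the copy that is logged; the payload actually sent is left unchanged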
- post_fields_dict['auth']['identity']['password']['user']['password'] = '******'
+            post_fields_dict_copy["auth"]["identity"]["password"]["user"][
+ "password"
+ ] = "******"
json_data_log = post_fields_dict_copy
else:
json_data_log = post_fields_dict
+
self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log)))
resp = self._request("POST_HEADERS", url, headers, data=post_fields_dict)
+
return resp.text
def post_cmd(self, url, headers, post_fields_dict=None):
self._logger.debug("")
+
# obfuscate password before logging dict
- if post_fields_dict.get('auth', {}).get('identity', {}).get('password', {}).get('user', {}).get('password'):
+ if (
+ post_fields_dict.get("auth", {})
+ .get("identity", {})
+ .get("password", {})
+ .get("user", {})
+ .get("password")
+ ):
post_fields_dict_copy = copy.deepcopy(post_fields_dict)
- post_fields_dict['auth']['identity']['password']['user']['password'] = '******'
+            post_fields_dict_copy["auth"]["identity"]["password"]["user"][
+ "password"
+ ] = "******"
json_data_log = post_fields_dict_copy
else:
json_data_log = post_fields_dict
+
self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log)))
resp = self._request("POST", url, headers, data=post_fields_dict)
+
return resp.text
def delete_cmd(self, url, headers):
self._logger.debug("")
resp = self._request("DELETE", url, headers)
+
return resp.text
def _get_token(self, headers):
if self.auth_url:
- self._logger.debug('Current Token: {}'.format(self.token))
- auth_url = self.auth_url + 'auth/tokens'
+ self._logger.debug("Current Token: {}".format(self.token))
+ auth_url = self.auth_url + "auth/tokens"
+
if self.token is None or self._token_expired():
if not self.auth_url:
self.token = ""
- resp = self._request_noauth(url=auth_url, op="POST", headers=headers,
- data=self.auth_dict)
- self.token = resp.headers.get('x-subject-token')
+
+ resp = self._request_noauth(
+ url=auth_url, op="POST", headers=headers, data=self.auth_dict
+ )
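+            # the issued token is returned in the "X-Subject-Token" response header (Keystone v3 behaviour)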
+ self.token = resp.headers.get("x-subject-token")
self.last_token_time = time.time()
- self._logger.debug('Obtained token: {}'.format(self.token))
+ self._logger.debug("Obtained token: {}".format(self.token))
return self.token
def _token_expired(self):
current_time = time.time()
- if self.last_token_time and (current_time - self.last_token_time < self.token_timeout):
+
+ if self.last_token_time and (
+ current_time - self.last_token_time < self.token_timeout
+ ):
return False
else:
return True
# TODO add again token
# token = self._get_token(headers)
token = None
+
if token:
- headers['X-Auth-Token'] = token
+ headers["X-Auth-Token"] = token
+
try:
return self._request_noauth(op, url, headers, data)
except AuthError:
# If there is an auth error retry just once
if retry_auth_error:
- return self._request(self, op, url, headers, data, retry_auth_error=False)
+ return self._request(
+                    op, url, headers, data, retry_auth_error=False
+ )
def _request_noauth(self, op, url, headers, data=None):
# Method to execute http requests with error control
resp = self._http_delete(url, headers, json_data=data)
else:
raise HttpException("Unsupported operation: {}".format(op))
+
self._logger.info("Response HTTPCODE: {}".format(resp.status_code))
# Check http return code
if status_code == 401:
# Auth Error - set token to None to reload it and raise AuthError
self.token = None
+
raise AuthError("Auth error executing operation")
elif status_code == 409:
- raise DuplicateFound("Duplicate resource url: {}, response: {}".format(url, resp.text))
+ raise DuplicateFound(
+ "Duplicate resource url: {}, response: {}".format(
+ url, resp.text
+ )
+ )
elif status_code == 404:
- raise NotFound("Not found resource url: {}, response: {}".format(url, resp.text))
+ raise NotFound(
+ "Not found resource url: {}, response: {}".format(
+ url, resp.text
+ )
+ )
elif resp.status_code in [502, 503]:
if not self.max_retries or retry >= self.max_retries:
- raise ServiceUnavailableException("Service unavailable error url: {}".format(url))
+ raise ServiceUnavailableException(
+ "Service unavailable error url: {}".format(url)
+ )
continue
else:
- raise HttpException("Error status_code: {}, error_text: {}".format(resp.status_code, resp.text))
+ raise HttpException(
+ "Error status_code: {}, error_text: {}".format(
+ resp.status_code, resp.text
+ )
+ )
except ConnectionError as e:
- self._logger.error("Connection error executing request: {}".format(repr(e)))
+ self._logger.error(
+ "Connection error executing request: {}".format(repr(e))
+ )
+
if not self.max_retries or retry >= self.max_retries:
raise ConnectionError
+
continue
except Exception as e:
self._logger.error("Error executing request: {}".format(repr(e)))
from osm_ro_plugin.sdnconn import SdnConnectorError
from osm_rosdn_juniper_contrail.rest_lib import ContrailHttp
from osm_rosdn_juniper_contrail.rest_lib import NotFound
+
# from osm_rosdn_juniper_contrail.rest_lib import DuplicateFound
# from osm_rosdn_juniper_contrail.rest_lib import HttpException
""" Class with CRUD operations for the underlay API """
def __init__(self, url, config=None, user=None, password=None, logger=None):
-
self.logger = logger or logging.getLogger("ro.sdn.junipercontrail.sdnapi")
self.controller_url = url
if not url:
raise SdnConnectorError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
- self.url = url
+ self.url = url
self.auth_url = None
self.project = None
self.domain = None
self.asn = None
self.fabric = None
+
if config:
self.auth_url = config.get("auth_url")
self.project = config.get("project")
self.fabric = config.get("fabric")
# Init http headers for all requests
- self.http_header = {'Content-Type': 'application/json'}
+ self.http_header = {"Content-Type": "application/json"}
if user:
self.user = user
+
if password:
self.password = password
- self.logger.debug("Config parameters for the underlay controller: auth_url: {}, project: {},"
- " domain: {}, user: {}, password: {}".format(self.auth_url, self.project,
- self.domain, self.user, self.password))
+ self.logger.debug(
+ "Config parameters for the underlay controller: auth_url: {}, project: {},"
+ " domain: {}, user: {}, password: {}".format(
+ self.auth_url, self.project, self.domain, self.user, self.password
+ )
+ )
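+        # token request body in OpenStack Keystone v3 password-authentication format, scoped to
+        # the configured project/domain; used as the payload when requesting a token (see ContrailHttp._get_token)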
auth_dict = {}
- auth_dict['auth'] = {}
- auth_dict['auth']['scope'] = {}
- auth_dict['auth']['scope']['project'] = {}
- auth_dict['auth']['scope']['project']['domain'] = {}
- auth_dict['auth']['scope']['project']['domain']["id"] = self.domain
- auth_dict['auth']['scope']['project']['name'] = self.project
- auth_dict['auth']['identity'] = {}
- auth_dict['auth']['identity']['methods'] = ['password']
- auth_dict['auth']['identity']['password'] = {}
- auth_dict['auth']['identity']['password']['user'] = {}
- auth_dict['auth']['identity']['password']['user']['name'] = self.user
- auth_dict['auth']['identity']['password']['user']['password'] = self.password
- auth_dict['auth']['identity']['password']['user']['domain'] = {}
- auth_dict['auth']['identity']['password']['user']['domain']['id'] = self.domain
+ auth_dict["auth"] = {}
+ auth_dict["auth"]["scope"] = {}
+ auth_dict["auth"]["scope"]["project"] = {}
+ auth_dict["auth"]["scope"]["project"]["domain"] = {}
+ auth_dict["auth"]["scope"]["project"]["domain"]["id"] = self.domain
+ auth_dict["auth"]["scope"]["project"]["name"] = self.project
+ auth_dict["auth"]["identity"] = {}
+ auth_dict["auth"]["identity"]["methods"] = ["password"]
+ auth_dict["auth"]["identity"]["password"] = {}
+ auth_dict["auth"]["identity"]["password"]["user"] = {}
+ auth_dict["auth"]["identity"]["password"]["user"]["name"] = self.user
+ auth_dict["auth"]["identity"]["password"]["user"]["password"] = self.password
+ auth_dict["auth"]["identity"]["password"]["user"]["domain"] = {}
+ auth_dict["auth"]["identity"]["password"]["user"]["domain"]["id"] = self.domain
self.auth_dict = auth_dict
# Init http lib
def check_auth(self):
response = self.http.get_cmd(url=self.auth_url, headers=self.http_header)
+
return response
# Helper methods for CRUD operations
def get_all_by_type(self, controller_url, type):
endpoint = controller_url + type
response = self.http.get_cmd(url=endpoint, headers=self.http_header)
+
return response.get(type)
def get_by_uuid(self, type, uuid):
try:
endpoint = self.controller_url + type + "/{}".format(uuid)
response = self.http.get_cmd(url=endpoint, headers=self.http_header)
+
return response.get(type)
except NotFound:
return None
Returns: If resource not found returns None
In case of error raises an Exception
"""
- payload = {
- "type": type,
- "fq_name": fq_name
- }
+ payload = {"type": type, "fq_name": fq_name}
+
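+        # Contrail's "fqname-to-id" endpoint resolves a fully-qualified name into the object's UUID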
try:
endpoint = self.controller_url + "fqname-to-id"
- resp = self.http.post_cmd(url=endpoint,
- headers=self.http_header,
- post_fields_dict=payload)
+ resp = self.http.post_cmd(
+ url=endpoint, headers=self.http_header, post_fields_dict=payload
+ )
+
return json.loads(resp).get("uuid")
except NotFound:
return None
def get_by_fq_name(self, type, fq_name):
# Obtain uuid by fqdn and then get data by uuid
uuid = self.get_uuid_from_fqname(type, fq_name)
+
if uuid:
return self.get_by_uuid(type, uuid)
else:
"uuid": uuid,
"ref-type": ref_type,
"ref-fq-name": ref_fq_name,
- "operation": "DELETE"
+ "operation": "DELETE",
}
endpoint = self.controller_url + "ref-update"
- resp = self.http.post_cmd(url=endpoint, headers=self.http_header, post_fields_dict=payload)
+ resp = self.http.post_cmd(
+ url=endpoint, headers=self.http_header, post_fields_dict=payload
+ )
+
return resp
# Aux methods to avoid code duplication of name conventions
def create_virtual_network(self, name, vni):
self.logger.debug("create vname, name: {}, vni: {}".format(name, vni))
- routetarget = '{}:{}'.format(self.asn, vni)
+ routetarget = "{}:{}".format(self.asn, vni)
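+        # the route target is built as <asn>:<vni>, e.g. "65000:1000001" (illustrative values only)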
vnet_dict = {
"virtual-network": {
"virtual_network_properties": {
"vxlan_network_identifier": vni,
},
"parent_type": "project",
- "fq_name": [
- self.domain,
- self.project,
- name
- ],
- "route_target_list": {
- "route_target": [
- "target:" + routetarget
- ]
- }
+ "fq_name": [self.domain, self.project, name],
+ "route_target_list": {"route_target": ["target:" + routetarget]},
}
}
- endpoint = self.controller_url + 'virtual-networks'
- resp = self.http.post_cmd(url=endpoint,
- headers=self.http_header,
- post_fields_dict=vnet_dict)
+ endpoint = self.controller_url + "virtual-networks"
+ resp = self.http.post_cmd(
+ url=endpoint, headers=self.http_header, post_fields_dict=vnet_dict
+ )
+
if not resp:
- raise SdnConnectorError('Error creating virtual network: empty response')
+ raise SdnConnectorError("Error creating virtual network: empty response")
+
vnet_info = json.loads(resp)
self.logger.debug("created vnet, vnet_info: {}".format(vnet_info))
- return vnet_info.get("virtual-network").get('uuid'), vnet_info.get("virtual-network")
+
+ return vnet_info.get("virtual-network").get("uuid"), vnet_info.get(
+ "virtual-network"
+ )
def get_virtual_networks(self):
- return self.get_all_by_type('virtual-networks')
+        return self.get_all_by_type(self.controller_url, "virtual-networks")
def get_virtual_network(self, network_id):
- return self.get_by_uuid('virtual-network', network_id)
+ return self.get_by_uuid("virtual-network", network_id)
def delete_virtual_network(self, network_id):
self.logger.debug("delete vnet uuid: {}".format(network_id))
- self.delete_by_uuid(self.controller_url, 'virtual-network', network_id)
+ self.delete_by_uuid(self.controller_url, "virtual-network", network_id)
self.logger.debug("deleted vnet uuid: {}".format(network_id))
# Vpg operations
def create_vpg(self, switch_id, switch_port):
- self.logger.debug("create vpg, switch_id: {}, switch_port: {}".format(switch_id, switch_port))
+ self.logger.debug(
+ "create vpg, switch_id: {}, switch_port: {}".format(switch_id, switch_port)
+ )
vpg_name = self.get_vpg_name(switch_id, switch_port)
vpg_dict = {
"virtual-port-group": {
"parent_type": "fabric",
- "fq_name": [
- "default-global-system-config",
- self.fabric,
- vpg_name
- ]
+ "fq_name": ["default-global-system-config", self.fabric, vpg_name],
}
}
- endpoint = self.controller_url + 'virtual-port-groups'
- resp = self.http.post_cmd(url=endpoint,
- headers=self.http_header,
- post_fields_dict=vpg_dict)
+ endpoint = self.controller_url + "virtual-port-groups"
+ resp = self.http.post_cmd(
+ url=endpoint, headers=self.http_header, post_fields_dict=vpg_dict
+ )
+
if not resp:
- raise SdnConnectorError('Error creating virtual port group: empty response')
+ raise SdnConnectorError("Error creating virtual port group: empty response")
+
vpg_info = json.loads(resp)
self.logger.debug("created vpg, vpg_info: {}".format(vpg_info))
- return vpg_info.get("virtual-port-group").get('uuid'), vpg_info.get("virtual-port-group")
+
+ return vpg_info.get("virtual-port-group").get("uuid"), vpg_info.get(
+ "virtual-port-group"
+ )
def get_vpgs(self):
- return self.get_all_by_type(self.controller_url, 'virtual-port-groups')
+ return self.get_all_by_type(self.controller_url, "virtual-port-groups")
def get_vpg(self, vpg_id):
-        return self.get_by_uuid(self.controller_url, "virtual-port-group", vpg_id)
+        return self.get_by_uuid("virtual-port-group", vpg_id)
def get_vpg_by_name(self, vpg_name):
- fq_name = ["default-global-system-config",
- self.fabric,
- vpg_name
- ]
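+        # VPGs hang from the fabric, so the fq_name is [default-global-system-config, <fabric>, <vpg name>]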
+ fq_name = ["default-global-system-config", self.fabric, vpg_name]
+
return self.get_by_fq_name("virtual-port-group", fq_name)
def delete_vpg(self, vpg_id):
self.logger.debug("delete vpg, uuid: {}".format(vpg_id))
- self.delete_by_uuid(self.controller_url, 'virtual-port-group', vpg_id)
+ self.delete_by_uuid(self.controller_url, "virtual-port-group", vpg_id)
self.logger.debug("deleted vpg, uuid: {}".format(vpg_id))
def create_vmi(self, switch_id, switch_port, network, vlan):
- self.logger.debug("create vmi, switch_id: {}, switch_port: {}, network: {}, vlan: {}".format(
- switch_id, switch_port, network, vlan))
+ self.logger.debug(
+ "create vmi, switch_id: {}, switch_port: {}, network: {}, vlan: {}".format(
+ switch_id, switch_port, network, vlan
+ )
+ )
vmi_name = self.get_vmi_name(switch_id, switch_port, vlan)
vpg_name = self.get_vpg_name(switch_id, switch_port)
profile_dict = {
"port_id": switch_port.replace(":", "_"),
"switch_id": switch_port.replace(":", "_"),
"switch_info": switch_id,
- "fabric": self.fabric
+ "fabric": self.fabric,
}
]
-
}
vmi_dict = {
"virtual-machine-interface": {
"parent_type": "project",
- "fq_name": [
- self.domain,
- self.project,
- vmi_name
- ],
- "virtual_network_refs": [
- {
- "to": [
- self.domain,
- self.project,
- network
- ]
- }
- ],
+ "fq_name": [self.domain, self.project, vmi_name],
+ "virtual_network_refs": [{"to": [self.domain, self.project, network]}],
"virtual_machine_interface_properties": {
"sub_interface_vlan_tag": vlan
},
"virtual_machine_interface_bindings": {
"key_value_pair": [
- {
- "key": "vnic_type",
- "value": "baremetal"
- },
- {
- "key": "vif_type",
- "value": "vrouter"
- },
- {
- "key": "vpg",
- "value": vpg_name
- },
- {
- "key": "profile",
- "value": json.dumps(profile_dict)
- }
+ {"key": "vnic_type", "value": "baremetal"},
+ {"key": "vif_type", "value": "vrouter"},
+ {"key": "vpg", "value": vpg_name},
+ {"key": "profile", "value": json.dumps(profile_dict)},
]
- }
+ },
}
}
- endpoint = self.controller_url + 'virtual-machine-interfaces'
+ endpoint = self.controller_url + "virtual-machine-interfaces"
self.logger.debug("vmi_dict: {}".format(vmi_dict))
- resp = self.http.post_cmd(url=endpoint,
- headers=self.http_header,
- post_fields_dict=vmi_dict)
+ resp = self.http.post_cmd(
+ url=endpoint,
+ headers=self.http_header,
+ post_fields_dict=vmi_dict,
+ )
+
if not resp:
- raise SdnConnectorError('Error creating vmi: empty response')
+ raise SdnConnectorError("Error creating vmi: empty response")
+
vmi_info = json.loads(resp)
self.logger.debug("created vmi, info: {}".format(vmi_info))
- return vmi_info.get("virtual-machine-interface").get('uuid'), vmi_info.get("virtual-machine-interface")
+
+ return vmi_info.get("virtual-machine-interface").get("uuid"), vmi_info.get(
+ "virtual-machine-interface"
+ )
def get_vmi(self, vmi_uuid):
- return self.get_by_uuid(self.controller_url, 'virtual-machine-interface', vmi_uuid)
+        return self.get_by_uuid("virtual-machine-interface", vmi_uuid)
def delete_vmi(self, uuid):
self.logger.debug("delete vmi uuid: {}".format(uuid))
- self.delete_by_uuid(self.controller_url, 'virtual-machine-interface', uuid)
+ self.delete_by_uuid(self.controller_url, "virtual-machine-interface", uuid)
self.logger.debug("deleted vmi: {}".format(uuid))
def unref_vmi_vpg(self, vpg_id, vmi_id, vmi_fq_name):
- self.delete_ref("virtual-port-group", vpg_id, "virtual-machine-interface", vmi_id, vmi_fq_name)
+ self.delete_ref(
+ "virtual-port-group",
+ vpg_id,
+ "virtual-machine-interface",
+ vmi_id,
+ vmi_fq_name,
+ )
import random
from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
+
# from osm_rosdn_juniper_contrail.rest_lib import ContrailHttp
# from osm_rosdn_juniper_contrail.rest_lib import NotFound
from osm_rosdn_juniper_contrail.rest_lib import DuplicateFound
tutorial_with_rest.html
- https://github.com/tonyliu0592/contrail-toolbox/blob/master/sriov/sriov
"""
+
_WIM_LOGGER = "ro.sdn.junipercontrail"
def __init__(self, wim, wim_account, config=None, logger=None):
:param logger (logging.Logger): optional logger object. If none is passed 'ro.sdn.sdnconn' is used.
"""
self.logger = logger or logging.getLogger(self._WIM_LOGGER)
- self.logger.debug('wim: {}, wim_account: {}, config: {}'.format(wim, wim_account, config))
+ self.logger.debug(
+ "wim: {}, wim_account: {}, config: {}".format(wim, wim_account, config)
+ )
super().__init__(wim, wim_account, config, logger)
self.user = wim_account.get("user")
self.fabric = None
overlay_url = None
self.vni_range = None
+
if config:
auth_url = config.get("auth_url")
self.project = config.get("project")
if not url:
raise SdnConnectorError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
+
self.url = url
if not self.vni_range:
- self.vni_range = ['1000001-2000000']
+ self.vni_range = ["1000001-2000000"]
self.logger.info("No vni_range was provided. Using ['1000001-2000000']")
+
self.used_vni = set()
if auth_url:
if not auth_url.startswith("http"):
auth_url = "http://" + auth_url
+
if not auth_url.endswith("/"):
auth_url = auth_url + "/"
+
self.auth_url = auth_url
if overlay_url:
if not overlay_url.startswith("http"):
overlay_url = "http://" + overlay_url
+
if not overlay_url.endswith("/"):
overlay_url = overlay_url + "/"
+
self.overlay_url = overlay_url
if not self.project:
raise SdnConnectorError("'project' must be provided")
+
if not self.asn:
# TODO: Get ASN from controller config; otherwise raise ERROR for the moment
- raise SdnConnectorError("'asn' was not provided and it was not possible to obtain it")
+ raise SdnConnectorError(
+ "'asn' was not provided and it was not possible to obtain it"
+ )
+
if not self.fabric:
# TODO: Get FABRIC from controller config; otherwise raise ERROR for the moment
- raise SdnConnectorError("'fabric' was not provided and was not possible to obtain it")
+ raise SdnConnectorError(
+                "'fabric' was not provided and it was not possible to obtain it"
+ )
+
if not self.domain:
- self.domain = 'default-domain'
+ self.domain = "default-domain"
self.logger.info("No domain was provided. Using 'default-domain'")
underlay_api_config = {
"project": self.project,
"domain": self.domain,
"asn": self.asn,
- "fabric": self.fabric
+ "fabric": self.fabric,
}
- self.underlay_api = UnderlayApi(url, underlay_api_config, user=self.user, password=self.password, logger=logger)
+ self.underlay_api = UnderlayApi(
+ url,
+ underlay_api_config,
+ user=self.user,
+ password=self.password,
+ logger=logger,
+ )
self._max_duplicate_retry = 2
self.logger.info("Juniper Contrail Connector Initialized.")
def _generate_vni(self):
"""
- Method to get unused VxLAN Network Identifier (VNI)
+ Method to get unused VxLAN Network Identifier (VNI)
Args:
None
Returns:
for vlanID_range in self.vni_range:
try:
start_vni, end_vni = map(int, vlanID_range.replace(" ", "").split("-"))
+
for i in range(start_vni, end_vni + 1):
vni = random.randrange(start_vni, end_vni, 1)
+
if vni not in self.used_vni:
return vni
except Exception as exp:
- raise SdnConnectorError("Exception {} occurred while searching a free VNI.".format(exp))
+ raise SdnConnectorError(
+ "Exception {} occurred while searching a free VNI.".format(exp)
+ )
else:
- raise SdnConnectorError("Unable to create the virtual network."
- " All VNI in VNI range {} are in use.".format(self.vni_range))
+ raise SdnConnectorError(
+ "Unable to create the virtual network."
+ " All VNI in VNI range {} are in use.".format(self.vni_range)
+ )
# Aux functions for testing
def get_url(self):
        2 - If the virtual port group does not exist, create it
3 - Create virtual machine interface for the indicated network and vlan
"""
- self.logger.debug("create_port: switch_id: {}, switch_port: {}, network: {}, vlan: {}".format(
- switch_id, switch_port, network, vlan))
+ self.logger.debug(
+ "create_port: switch_id: {}, switch_port: {}, network: {}, vlan: {}".format(
+ switch_id, switch_port, network, vlan
+ )
+ )
# 1 - Check if the vpg exists
vpg_name = self.underlay_api.get_vpg_name(switch_id, switch_port)
vpg = self.underlay_api.get_vpg_by_name(vpg_name)
+
if not vpg:
# 2 - If it does not exist create it
vpg_id, _ = self.underlay_api.create_vpg(switch_id, switch_port)
return vpg_id, vmi_id
def _delete_port(self, switch_id, switch_port, vlan):
- self.logger.debug("delete port, switch_id: {}, switch_port: {}, vlan: {}".format(switch_id, switch_port, vlan))
+ self.logger.debug(
+ "delete port, switch_id: {}, switch_port: {}, vlan: {}".format(
+ switch_id, switch_port, vlan
+ )
+ )
vpg_name = self.underlay_api.get_vpg_name(switch_id, switch_port)
vmi_name = self.underlay_api.get_vmi_name(switch_id, switch_port, vlan)
        # 1 - Obtain vpg by id (if there is no vpg_id there was an error creating it, nothing to be done)
vpg_fqdn = ["default-global-system-config", self.fabric, vpg_name]
vpg = self.underlay_api.get_by_fq_name("virtual-port-group", vpg_fqdn)
+
if not vpg:
self.logger.warning("vpg: {} to be deleted not found".format(vpg_name))
else:
# 2 - Get vmi interfaces from vpg
vmi_list = vpg.get("virtual_machine_interface_refs")
+
if not vmi_list:
# must have been an error during port creation when vmi is created
# may happen if there has been an error during creation
- self.logger.warning("vpg: {} has not vmi, will delete nothing".format(vpg))
+ self.logger.warning(
+                    "vpg: {} has no vmi, will delete nothing".format(vpg)
+ )
else:
num_vmis = len(vmi_list)
+
for vmi in vmi_list:
fqdn = vmi.get("to")
# check by name
+
if fqdn[2] == vmi_name:
- self.underlay_api.unref_vmi_vpg(vpg.get("uuid"), vmi.get("uuid"), fqdn)
+ self.underlay_api.unref_vmi_vpg(
+ vpg.get("uuid"), vmi.get("uuid"), fqdn
+ )
self.underlay_api.delete_vmi(vmi.get("uuid"))
num_vmis = num_vmis - 1
external URLs, etc are detected.
"""
self.logger.debug("")
+
try:
resp = self.underlay_api.check_auth()
if not resp:
- raise SdnConnectorError('Empty response')
+ raise SdnConnectorError("Empty response")
except Exception as e:
- self.logger.error('Error checking credentials')
- raise SdnConnectorError('Error checking credentials: {}'.format(str(e)))
+ self.logger.error("Error checking credentials")
+
+ raise SdnConnectorError("Error checking credentials: {}".format(str(e)))
def get_connectivity_service_status(self, service_uuid, conn_info=None):
"""Monitor the status of the connectivity service established
new information available for the connectivity service.
"""
self.logger.debug("")
+
try:
resp = self.underlay_api.get_virtual_network(service_uuid)
if not resp:
- raise SdnConnectorError('Empty response')
+ raise SdnConnectorError("Empty response")
+
if resp:
vnet_info = resp
# Check if conn_info reports error
if conn_info.get("sdn_status") == "ERROR":
- return {'sdn_status': 'ERROR', 'sdn_info': conn_info}
+ return {"sdn_status": "ERROR", "sdn_info": conn_info}
else:
- return {'sdn_status': 'ACTIVE', 'sdn_info': vnet_info}
+ return {"sdn_status": "ACTIVE", "sdn_info": vnet_info}
else:
- return {'sdn_status': 'ERROR', 'sdn_info': 'not found'}
+ return {"sdn_status": "ERROR", "sdn_info": "not found"}
except SdnConnectorError:
raise
except HttpException as e:
self.logger.error("Error getting connectivity service: {}".format(e))
- raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e)))
+
+ raise SdnConnectorError(
+                "Exception getting connectivity service: {}".format(str(e))
+ )
except Exception as e:
- self.logger.error('Exception getting connectivity service info: %s', e, exc_info=True)
- return {'sdn_status': 'ERROR', 'error_msg': str(e)}
+ self.logger.error(
+ "Exception getting connectivity service info: %s", e, exc_info=True
+ )
+
+ return {"sdn_status": "ERROR", "error_msg": str(e)}
def create_connectivity_service(self, service_type, connection_points, **kwargs):
"""
# name = 'osm-plugin-' + overlay_name
# Else:
# name = 'osm-plugin-' + VNI
- self.logger.info("create_connectivity_service, service_type: {}, connection_points: {}".
- format(service_type, connection_points))
- if service_type.lower() != 'elan':
- raise SdnConnectorError('Only ELAN network type is supported by Juniper Contrail.')
+ self.logger.info(
+ "create_connectivity_service, service_type: {}, connection_points: {}".format(
+ service_type, connection_points
+ )
+ )
+
+ if service_type.lower() != "elan":
+ raise SdnConnectorError(
+ "Only ELAN network type is supported by Juniper Contrail."
+ )
try:
# Initialize data
# This data will be returned even if no cp can be created if something is created
work_cps = {}
for cp in connection_points:
- switch_id = cp.get("service_endpoint_encapsulation_info").get("switch_dpid")
- switch_port = cp.get("service_endpoint_encapsulation_info").get("switch_port")
+ switch_id = cp.get("service_endpoint_encapsulation_info").get(
+ "switch_dpid"
+ )
+ switch_port = cp.get("service_endpoint_encapsulation_info").get(
+ "switch_port"
+ )
service_endpoint_id = cp.get("service_endpoint_id")
cp_name = self.underlay_api.get_vpg_name(switch_id, switch_port)
add_cp = work_cps.get(cp_name)
+
if not add_cp:
# check cp has vlan
vlan = cp.get("service_endpoint_encapsulation_info").get("vlan")
+
if vlan:
# add cp to dict
service_endpoint_ids = []
service_endpoint_ids.append(service_endpoint_id)
- add_cp = {"service_endpoint_ids": service_endpoint_ids,
- "switch_dpid": switch_id,
- "switch_port": switch_port,
- "vlan": vlan}
+ add_cp = {
+ "service_endpoint_ids": service_endpoint_ids,
+ "switch_dpid": switch_id,
+ "switch_port": switch_port,
+ "vlan": vlan,
+ }
work_cps[cp_name] = add_cp
else:
- self.logger.warning("cp service_endpoint_id : {} has no vlan, ignore".format(
- service_endpoint_id))
+ self.logger.warning(
+ "cp service_endpoint_id : {} has no vlan, ignore".format(
+ service_endpoint_id
+ )
+ )
else:
# add service_endpoint_id to list
service_endpoint_ids = add_cp["service_endpoint_ids"]
retry = 0
while retry < self._max_duplicate_retry:
try:
- vnet_name = 'osm-plugin-' + str(vni)
- vnet_id, _ = self.underlay_api.create_virtual_network(vnet_name, vni)
+ vnet_name = "osm-plugin-" + str(vni)
+ vnet_id, _ = self.underlay_api.create_virtual_network(
+ vnet_name, vni
+ )
self.used_vni.add(vni)
break
except DuplicateFound as e:
- self.logger.debug("Duplicate error for vnet_name: {}".format(vnet_name))
+ self.logger.debug(
+ "Duplicate error for vnet_name: {}".format(vnet_name)
+ )
self.used_vni.add(vni)
retry += 1
+
if retry >= self._max_duplicate_retry:
raise e
else:
# Try to obtain a new vni
vni = self._generate_vni()
continue
+
conn_info = {
"vnet": {
"uuid": vnet_id,
- "name": vnet_name
+ "name": vnet_name,
},
- "connection_points": work_cps # dict with port_name as key
+ "connection_points": work_cps, # dict with port_name as key
}
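+            # conn_info is returned to the caller and passed back later to edit/delete_connectivity_service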
# 4 - Create a port for each endpoint
switch_id = cp.get("switch_dpid")
switch_port = cp.get("switch_port")
vlan = cp.get("vlan")
- vpg_id, vmi_id = self._create_port(switch_id, switch_port, vnet_name, vlan)
+ vpg_id, vmi_id = self._create_port(
+ switch_id, switch_port, vnet_name, vlan
+ )
cp["vpg_id"] = vpg_id
cp["vmi_id"] = vmi_id
- self.logger.info("created connectivity service, uuid: {}, name: {}".format(vnet_id, vnet_name))
- return vnet_id, conn_info
+ self.logger.info(
+ "created connectivity service, uuid: {}, name: {}".format(
+ vnet_id, vnet_name
+ )
+ )
+ return vnet_id, conn_info
except Exception as e:
# Log error
if isinstance(e, SdnConnectorError) or isinstance(e, HttpException):
self.logger.error("Error creating connectivity service: {}".format(e))
else:
- self.logger.error("Error creating connectivity service: {}".format(e), exc_info=True)
+ self.logger.error(
+ "Error creating connectivity service: {}".format(e), exc_info=True
+ )
# If nothing is created raise error else return what has been created and mask as error
if not conn_info:
- raise SdnConnectorError("Exception create connectivity service: {}".format(str(e)))
+ raise SdnConnectorError(
+ "Exception create connectivity service: {}".format(str(e))
+ )
else:
conn_info["sdn_status"] = "ERROR"
conn_info["sdn_info"] = repr(e)
for cp in work_cps.values():
if not cp.get("vmi_id") or not cp.get("vpg_id"):
cp["sdn_status"] = "ERROR"
+
return vnet_id, conn_info
def delete_connectivity_service(self, service_uuid, conn_info=None):
:return: None
:raises: SdnConnectorException: In case of error. The parameter http_code must be filled
"""
- self.logger.info("delete_connectivity_service vnet_name: {}, connection_points: {}".
- format(service_uuid, conn_info))
+ self.logger.info(
+ "delete_connectivity_service vnet_name: {}, connection_points: {}".format(
+ service_uuid, conn_info
+ )
+ )
try:
vnet_uuid = service_uuid
- # vnet_name = conn_info["vnet"]["name"] # always should exist as the network is the first thing created
+ # vnet_name = conn_info["vnet"]["name"]
+ # always should exist as the network is the first thing created
work_cps = conn_info["connection_points"]
            # 1: For each connection point delete the vlan from the vpg and, if it is the
            # last one, delete the vpg
for cp in work_cps.values():
- self._delete_port(cp.get("switch_dpid"), cp.get("switch_port"), cp.get("vlan"))
+ self._delete_port(
+ cp.get("switch_dpid"), cp.get("switch_port"), cp.get("vlan")
+ )
# 2: Delete vnet
self.underlay_api.delete_virtual_network(vnet_uuid)
- self.logger.info("deleted connectivity_service vnet_uuid: {}, connection_points: {}".
- format(service_uuid, conn_info))
+ self.logger.info(
+ "deleted connectivity_service vnet_uuid: {}, connection_points: {}".format(
+ service_uuid, conn_info
+ )
+ )
except SdnConnectorError:
raise
except HttpException as e:
self.logger.error("Error deleting connectivity service: {}".format(e))
- raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e)))
+
+ raise SdnConnectorError(
+ "Exception deleting connectivity service: {}".format(str(e))
+ )
except Exception as e:
- self.logger.error("Error deleting connectivity service: {}".format(e), exc_info=True)
- raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e)))
+ self.logger.error(
+ "Error deleting connectivity service: {}".format(e),
+ exc_info=True,
+ )
- def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
- """ Change an existing connectivity service.
+ raise SdnConnectorError(
+ "Exception deleting connectivity service: {}".format(str(e))
+ )
+
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service.
This method's arguments and return value follow the same convention as
:meth:`~.create_connectivity_service`.
# 2 - Obtain network: Check vnet exists and obtain name
        # 3 - Delete unnecessary ports
# 4 - Add new ports
- self.logger.info("edit connectivity service, service_uuid: {}, conn_info: {}, "
- "connection points: {} ".format(service_uuid, conn_info, connection_points))
+ self.logger.info(
+ "edit connectivity service, service_uuid: {}, conn_info: {}, "
+ "connection points: {} ".format(service_uuid, conn_info, connection_points)
+ )
# conn_info should always exist and have connection_points and vnet elements
old_cp = conn_info.get("connection_points", {})
switch_port = cp.get("switch_port")
old_vlan = cp.get("vlan")
self._delete_port(switch_id, switch_port, old_vlan)
- deleted_ports.append(self.underlay_api.get_vpg_name(switch_id, switch_port))
+ deleted_ports.append(
+ self.underlay_api.get_vpg_name(switch_id, switch_port)
+ )
for port in deleted_ports:
del old_cp[port]
if conn_info.get("vnet", {}).get("sdn_status"):
del conn_info["vnet"]["sdn_status"]
except HttpException as e:
- self.logger.error("Error trying to delete old ports marked as error: {}".format(e))
+ self.logger.error(
+ "Error trying to delete old ports marked as error: {}".format(e)
+ )
+
raise SdnConnectorError(e)
except SdnConnectorError as e:
- self.logger.error("Error trying to delete old ports marked as error: {}".format(e))
+ self.logger.error(
+ "Error trying to delete old ports marked as error: {}".format(e)
+ )
+
raise
except Exception as e:
- self.logger.error("Error trying to delete old ports marked as error: {}".format(e), exc_info=True)
- raise SdnConnectorError("Error trying to delete old ports marked as error: {}".format(e))
+ self.logger.error(
+ "Error trying to delete old ports marked as error: {}".format(e),
+ exc_info=True,
+ )
- if connection_points:
+ raise SdnConnectorError(
+ "Error trying to delete old ports marked as error: {}".format(e)
+ )
+ if connection_points:
# Check and obtain what should be added and deleted, if there is an error here raise an exception
try:
work_cps = {}
for cp in connection_points:
- switch_id = cp.get("service_endpoint_encapsulation_info").get("switch_dpid")
- switch_port = cp.get("service_endpoint_encapsulation_info").get("switch_port")
+ switch_id = cp.get("service_endpoint_encapsulation_info").get(
+ "switch_dpid"
+ )
+ switch_port = cp.get("service_endpoint_encapsulation_info").get(
+ "switch_port"
+ )
service_endpoint_id = cp.get("service_endpoint_id")
cp_name = self.underlay_api.get_vpg_name(switch_id, switch_port)
add_cp = work_cps.get(cp_name)
+
if not add_cp:
# add cp to dict
# check cp has vlan
vlan = cp.get("service_endpoint_encapsulation_info").get("vlan")
+
if vlan:
service_endpoint_ids = []
service_endpoint_ids.append(service_endpoint_id)
- add_cp = {"service_endpoint_ids": service_endpoint_ids,
- "switch_dpid": switch_id,
- "switch_port": switch_port,
- "vlan": vlan}
+ add_cp = {
+ "service_endpoint_ids": service_endpoint_ids,
+ "switch_dpid": switch_id,
+ "switch_port": switch_port,
+ "vlan": vlan,
+ }
work_cps[cp_name] = add_cp
else:
- self.logger.warning("cp service_endpoint_id : {} has no vlan, ignore".
- format(service_endpoint_id))
+ self.logger.warning(
+ "cp service_endpoint_id : {} has no vlan, ignore".format(
+ service_endpoint_id
+ )
+ )
else:
# add service_endpoint_id to list
service_endpoint_ids = add_cp["service_endpoint_ids"]
if vnet:
vnet_name = vnet["name"]
else:
- raise SdnConnectorError("vnet uuid: {} not found".format(service_uuid))
-
+ raise SdnConnectorError(
+ "vnet uuid: {} not found".format(service_uuid)
+ )
except SdnConnectorError:
raise
except Exception as e:
- self.logger.error("Error edit connectivity service: {}".format(e), exc_info=True)
- raise SdnConnectorError("Exception edit connectivity service: {}".format(str(e)))
+ self.logger.error(
+ "Error edit connectivity service: {}".format(e), exc_info=True
+ )
+
+ raise SdnConnectorError(
+ "Exception edit connectivity service: {}".format(str(e))
+ )
# Delete unneeded ports and add new ones: if there is an error return conn_info
try:
cp = conn_info_cp[port_name]
switch_id = cp.get("switch_dpid")
switch_port = cp.get("switch_port")
- self.logger.debug("delete port switch_id={}, switch_port={}".format(switch_id, switch_port))
+ self.logger.debug(
+ "delete port switch_id={}, switch_port={}".format(
+ switch_id, switch_port
+ )
+ )
self._delete_port(switch_id, switch_port, vlan)
deleted_ports.append(port_name)
switch_id = cp.get("switch_dpid")
switch_port = cp.get("switch_port")
vlan = cp.get("vlan")
- self.logger.debug("add port switch_id={}, switch_port={}".format(switch_id, switch_port))
- vpg_id, vmi_id = self._create_port(switch_id, switch_port, vnet_name, vlan)
+ self.logger.debug(
+ "add port switch_id={}, switch_port={}".format(
+ switch_id, switch_port
+ )
+ )
+ vpg_id, vmi_id = self._create_port(
+ switch_id, switch_port, vnet_name, vlan
+ )
cp_added = cp.copy()
cp_added["vpg_id"] = vpg_id
cp_added["vmi_id"] = vmi_id
conn_info_cp[port_name] = cp_added
+
# replace endpoints in case they have changed
- conn_info_cp[port_name]["service_endpoint_ids"] = cp["service_endpoint_ids"]
+ conn_info_cp[port_name]["service_endpoint_ids"] = cp[
+ "service_endpoint_ids"
+ ]
conn_info["connection_points"] = conn_info_cp
return conn_info
except Exception as e:
# Log error
if isinstance(e, SdnConnectorError) or isinstance(e, HttpException):
- self.logger.error("Error edit connectivity service: {}".format(e), exc_info=True)
+ self.logger.error(
+ "Error edit connectivity service: {}".format(e), exc_info=True
+ )
else:
self.logger.error("Error edit connectivity service: {}".format(e))
for port_name, cp in work_cps.items():
curr_cp = conn_info_cp.get(port_name)
+
if not curr_cp:
cp_error = work_cps.get(port_name).copy()
cp_error["sdn_status"] = "ERROR"
conn_info_cp[port_name] = cp_error
- conn_info_cp[port_name]["service_endpoint_ids"] = cp["service_endpoint_ids"]
+
+ conn_info_cp[port_name]["service_endpoint_ids"] = cp[
+ "service_endpoint_ids"
+ ]
conn_info["sdn_status"] = "ERROR"
conn_info["sdn_info"] = repr(e)
conn_info["connection_points"] = conn_info_cp
- return conn_info
+ return conn_info
else:
# Connection points have not changed, so do nothing
self.logger.info("no new connection_points provided, nothing to be done")
+
return
-if __name__ == '__main__':
+if __name__ == "__main__":
# Init logger
log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"
- log_formatter = logging.Formatter(log_format, datefmt='%Y-%m-%dT%H:%M:%S')
+ log_formatter = logging.Formatter(log_format, datefmt="%Y-%m-%dT%H:%M:%S")
handler = logging.StreamHandler()
handler.setFormatter(log_formatter)
- logger = logging.getLogger('ro.sdn.junipercontrail')
+ logger = logging.getLogger("ro.sdn.junipercontrail")
# logger.setLevel(level=logging.ERROR)
# logger.setLevel(level=logging.INFO)
logger.setLevel(level=logging.DEBUG)
logger.addHandler(handler)
# Read config
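+    # test.yaml is expected to provide at least wim_url, user and password; any remaining keys
+    # (e.g. auth_url, project, domain, fabric, asn) are passed to the connector as its config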
- with open('test.yaml') as f:
+ with open("test.yaml") as f:
config = yaml.safe_load(f.read())
- wim = {'wim_url': config.pop('wim_url')}
- wim_account = {'user': config.pop('user'), 'password': config.pop('password')}
- logger.info('wim: {}, wim_account: {}, config: {}'.format(wim, wim_account, config))
+
+ wim = {"wim_url": config.pop("wim_url")}
+ wim_account = {"user": config.pop("user"), "password": config.pop("password")}
+ logger.info("wim: {}, wim_account: {}, config: {}".format(wim, wim_account, config))
# Init controller
- juniper_contrail = JuniperContrail(wim=wim, wim_account=wim_account, config=config, logger=logger)
+ juniper_contrail = JuniperContrail(
+ wim=wim, wim_account=wim_account, config=config, logger=logger
+ )
# Tests
# Generate VNI
for i in range(5):
vni = juniper_contrail._generate_vni()
juniper_contrail.used_vni.add(vni)
+
print(juniper_contrail.used_vni)
# juniper_contrail.used_vni.remove(1000003)
print(juniper_contrail.used_vni)
+
for i in range(2):
vni = juniper_contrail._generate_vni()
juniper_contrail.used_vni.add(vni)
+
print(juniper_contrail.used_vni)
# 0. Check credentials
- print('0. Check credentials')
+ print("0. Check credentials")
# juniper_contrail.check_credentials()
# 1 - Create and delete connectivity service
"service_endpoint_encapsulation_info": {
"switch_dpid": "LEAF-1",
"switch_port": "xe-0/0/17",
- "vlan": "501"
- }
+ "vlan": "501",
+ },
}
conn_point_1 = {
"service_endpoint_id": "0000:81:10.3",
"service_endpoint_encapsulation_info": {
"switch_dpid": "LEAF-2",
"switch_port": "xe-0/0/16",
- "vlan": "501"
- }
+ "vlan": "501",
+ },
}
conn_point_2 = {
"service_endpoint_id": "0000:08:11.7",
"service_endpoint_encapsulation_info": {
"switch_dpid": "LEAF-2",
"switch_port": "xe-0/0/16",
- "vlan": "502"
- }
+ "vlan": "502",
+ },
}
conn_point_3 = {
"service_endpoint_id": "0000:83:10.4",
"service_endpoint_encapsulation_info": {
"switch_dpid": "LEAF-1",
"switch_port": "xe-0/0/17",
- "vlan": "502"
- }
+ "vlan": "502",
+ },
}
# 1 - Define connection points
logger.debug("create first connection service")
print("Create connectivity service")
connection_points = [conn_point_0, conn_point_1]
- service_id, conn_info = juniper_contrail.create_connectivity_service("ELAN", connection_points)
+ service_id, conn_info = juniper_contrail.create_connectivity_service(
+ "ELAN", connection_points
+ )
logger.info("Created connectivity service 1")
logger.info(service_id)
logger.info(yaml.safe_dump(conn_info, indent=4, default_flow_style=False))
logger.debug("create second connection service")
print("Create connectivity service")
connection_points = [conn_point_2, conn_point_3]
- service_id2, conn_info2 = juniper_contrail.create_connectivity_service("ELAN", connection_points)
+ service_id2, conn_info2 = juniper_contrail.create_connectivity_service(
+ "ELAN", connection_points
+ )
logger.info("Created connectivity service 2")
logger.info(service_id2)
logger.info(yaml.safe_dump(conn_info2, indent=4, default_flow_style=False))
setup(
name=_name,
- description='OSM RO SDN plugin for Juniper Contrail',
+ description="OSM RO SDN plugin for Juniper Contrail",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@list.etsi.org',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@list.etsi.org',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@list.etsi.org",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@list.etsi.org",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
- #dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+ # dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_juniper_contrail = osm_rosdn_juniper_contrail.sdn_assist_juniper_contrail:JuniperContrail'],
+ "osm_rosdn.plugins": [
+ "rosdn_juniper_contrail = osm_rosdn_juniper_contrail.sdn_assist_juniper_contrail:JuniperContrail"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_juniper_contrail --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import requests
import base64
import logging
-from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse
+from osm_ro_plugin.openflow_conn import (
+ OpenflowConn,
+ OpenflowConnConnectionException,
+ OpenflowConnUnexpectedResponse,
+)
+
# OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException,
# OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented
"""OpenDayLight connector. No MAC learning is used"""
def __init__(self, params):
- """ Constructor.
- Params: dictionary with the following keys:
- of_dpid: DPID to use for this controller
- of_url: must be [http://HOST:PORT/]
- of_user: user credentials, can be missing or None
- of_password: password credentials
- of_debug: debug level for logging. Default to ERROR
- other keys are ignored
- Raise an exception if same parameter is missing or wrong
+ """Constructor.
+ Params: dictionary with the following keys:
+ of_dpid: DPID to use for this controller
+ of_url: must be [http://HOST:PORT/]
+ of_user: user credentials, can be missing or None
+ of_password: password credentials
+ of_debug: debug level for logging. Default to ERROR
+ other keys are ignored
+            Raise an exception if some parameter is missing or wrong
"""
-
OpenflowConn.__init__(self, params)
# check params
url = params.get("of_url")
+
if not url:
raise ValueError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
+
self.url = url
# internal variables
self.name = "OpenDayLight"
- self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+ self.headers = {
+ "content-type": "application/json",
+ "Accept": "application/json",
+ }
self.auth = None
self.pp2ofi = {} # From Physical Port to OpenFlow Index
self.ofi2pp = {} # From OpenFlow Index to Physical Port
self.dpid = str(params["of_dpid"])
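+        # OpenDaylight names switches "openflow:<datapath id in decimal>", hence the base-16 conversion below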
- self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16))
+ self.id = "openflow:" + str(int(self.dpid.replace(":", ""), 16))
+
if params and params.get("of_user"):
of_password = params.get("of_password", "")
- self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8"))
+ self.auth = base64.b64encode(
+ bytes(params["of_user"] + ":" + of_password, "utf-8")
+ )
self.auth = self.auth.decode()
- self.headers['authorization'] = 'Basic ' + self.auth
+ self.headers["authorization"] = "Basic " + self.auth
- self.logger = logging.getLogger('ro.sdn.onosof')
+        self.logger = logging.getLogger("ro.sdn.odlof")
# self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
self.logger.debug("odlof plugin initialized")
Raise an OpenflowConnConnectionException exception if fails with text_error
"""
try:
- of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes",
- headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.get(
+ self.url + "restconf/operational/opendaylight-inventory:nodes",
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("get_of_switches " + error_text)
- raise OpenflowConnUnexpectedResponse("Error get_of_switches " + error_text)
+
+ raise OpenflowConnUnexpectedResponse(
+ "Error get_of_switches " + error_text
+ )
self.logger.debug("get_of_switches " + error_text)
info = of_response.json()
if not isinstance(info, dict):
- self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
- raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response, not a dict: %s",
+ str(info),
+ )
- nodes = info.get('nodes')
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, not a dict. Wrong version?"
+ )
+
+ nodes = info.get("nodes")
if type(nodes) is not dict:
- self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s",
- str(type(info)))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes', not found or not a dict."
- " Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s",
+ str(type(info)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes', not found or not a dict."
+ " Wrong version?"
+ )
- node_list = nodes.get('node')
+ node_list = nodes.get("node")
if type(node_list) is not list:
- self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', "
- "not found or not a list: %s", str(type(node_list)))
- raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found "
- "or not a list. Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response, at 'nodes':'node', "
+ "not found or not a list: %s",
+ str(type(node_list)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, at 'nodes':'node', not found "
+ "or not a list. Wrong version?"
+ )
switch_list = []
for node in node_list:
- node_id = node.get('id')
+ node_id = node.get("id")
if node_id is None:
- self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s",
- str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. "
- "Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s",
+ str(node),
+ )
- if node_id == 'controller-config':
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes':'node'[]:'id', not found. "
+ "Wrong version?"
+ )
+
+ if node_id == "controller-config":
continue
- node_ip_address = node.get('flow-node-inventory:ip-address')
+ node_ip_address = node.get("flow-node-inventory:ip-address")
if node_ip_address is None:
- self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:"
- "ip-address', not found: %s", str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
- "'flow-node-inventory:ip-address', not found. Wrong version?")
-
- node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
- switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])),
- node_ip_address))
- return switch_list
+ self.logger.error(
+ "get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:"
+ "ip-address', not found: %s",
+ str(node),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found. Wrong version?"
+ )
+
+ node_id_hex = hex(int(node_id.split(":")[1])).split("x")[1].zfill(16)
+ switch_list.append(
+ (
+ ":".join(
+ a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])
+ ),
+ node_ip_address,
+ )
+ )
+ return switch_list
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
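
A minimal sketch, assuming an invented node id, of the conversion used by get_of_switches() above: the decimal part of an OpenDaylight node id "openflow:<n>" is rendered as a 16-digit zero-padded hexadecimal string and regrouped into the colon-separated DPID that is returned.

def node_id_to_dpid(node_id):
    # "openflow:281474976710655" -> "281474976710655" -> 16-digit zero-padded hex
    node_id_hex = hex(int(node_id.split(":")[1])).split("x")[1].zfill(16)
    # regroup the hex digits into colon-separated byte pairs
    return ":".join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2]))

print(node_id_to_dpid("openflow:281474976710655"))  # prints 00:00:ff:ff:ff:ff:ff:ff
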
def obtain_port_correspondence(self):
        Raise an OpenflowConnConnectionException exception in case of failure
"""
try:
- of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes",
- headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.get(
+ self.url + "restconf/operational/opendaylight-inventory:nodes",
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("obtain_port_correspondence " + error_text)
info = of_response.json()
if not isinstance(info, dict):
- self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info))
- raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?")
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response not a dict: %s",
+ str(info),
+ )
- nodes = info.get('nodes')
- if not isinstance(nodes, dict):
- self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', "
- "not found or not a dict: %s", str(type(nodes)))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. "
- "Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected openflow response, not a dict. Wrong version?"
+ )
- node_list = nodes.get('node')
+ nodes = info.get("nodes")
+ if not isinstance(nodes, dict):
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at 'nodes', "
+ "not found or not a dict: %s",
+ str(type(nodes)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes',not found or not a dict. "
+ "Wrong version?"
+ )
+
+ node_list = nodes.get("node")
if not isinstance(node_list, list):
- self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
- "not found or not a list: %s", str(type(node_list)))
- raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found or not a list."
- " Wrong version?")
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
+ "not found or not a list: %s",
+ str(type(node_list)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, at 'nodes':'node', not found or not a list."
+ " Wrong version?"
+ )
for node in node_list:
- node_id = node.get('id')
+ node_id = node.get("id")
if node_id is None:
- self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
- "not found: %s", str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. "
- "Wrong version?")
-
- if node_id == 'controller-config':
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
+ "not found: %s",
+ str(node),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes':'node'[]:'id', not found. "
+ "Wrong version?"
+ )
+
+ if node_id == "controller-config":
continue
# Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value
if self.id != node_id:
continue
- node_connector_list = node.get('node-connector')
+ node_connector_list = node.get("node-connector")
if not isinstance(node_connector_list, list):
- self.logger.error("obtain_port_correspondence. Unexpected response at "
- "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'node-connector', "
- "not found or not a list. Wrong version?")
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at "
+ "'nodes':'node'[]:'node-connector', not found or not a list: %s",
+ str(node),
+ )
- for node_connector in node_connector_list:
- self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id'])
- self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name'])
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes':'node'[]:'node-connector', "
+ "not found or not a list. Wrong version?"
+ )
- node_ip_address = node.get('flow-node-inventory:ip-address')
+ for node_connector in node_connector_list:
+ self.pp2ofi[str(node_connector["flow-node-inventory:name"])] = str(
+ node_connector["id"]
+ )
+ self.ofi2pp[node_connector["id"]] = str(
+ node_connector["flow-node-inventory:name"]
+ )
+
+ node_ip_address = node.get("flow-node-inventory:ip-address")
if node_ip_address is None:
- self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:"
- "'flow-node-inventory:ip-address', not found: %s", str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
- "'flow-node-inventory:ip-address', not found. Wrong version?")
+ self.logger.error(
+ "obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found: %s",
+ str(node),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'nodes':'node'[]:"
+ "'flow-node-inventory:ip-address', not found. Wrong version?"
+ )
# If we found the appropriate dpid no need to continue in the for loop
break
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
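
A minimal sketch, with an invented port name and node-connector id, of the two translation tables that obtain_port_correspondence() above fills from the OpenDaylight inventory.

# physical port name -> OpenFlow node-connector id, and the reverse mapping
pp2ofi = {"eth1/5": "openflow:55930464251853:5"}
ofi2pp = {"openflow:55930464251853:5": "eth1/5"}
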
def get_of_rules(self, translate_of_ports=True):
Raise a OpenflowConnConnectionException exception in case of failure
"""
-
try:
# get rules
if len(self.ofi2pp) == 0:
self.obtain_port_correspondence()
- of_response = requests.get(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
- "/table/0", headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.get(
+ self.url
+ + "restconf/config/opendaylight-inventory:nodes/node/"
+ + self.id
+ + "/table/0",
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
            # The configured page does not exist if there are no rules installed. In that case we return an empty list
if of_response.status_code == 404:
return []
-
elif of_response.status_code != 200:
self.logger.warning("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
self.logger.debug("get_of_rules " + error_text)
info = of_response.json()
if not isinstance(info, dict):
- self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info))
- raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?")
+ self.logger.error(
+ "get_of_rules. Unexpected response not a dict: %s", str(info)
+ )
- table = info.get('flow-node-inventory:table')
- if not isinstance(table, list):
- self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', "
- "not a list: %s", str(type(table)))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table', not a list. "
- "Wrong version?")
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected openflow response, not a dict. Wrong version?"
+ )
- flow_list = table[0].get('flow')
+ table = info.get("flow-node-inventory:table")
+ if not isinstance(table, list):
+ self.logger.error(
+ "get_of_rules. Unexpected response at 'flow-node-inventory:table', "
+ "not a list: %s",
+ str(type(table)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'flow-node-inventory:table', not a list. "
+ "Wrong version?"
+ )
+
+ flow_list = table[0].get("flow")
if flow_list is None:
return []
if not isinstance(flow_list, list):
- self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a "
- "list: %s", str(type(flow_list)))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table'[0]:'flow', "
- "not a list. Wrong version?")
+ self.logger.error(
+ "get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a "
+ "list: %s",
+ str(type(flow_list)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'flow-node-inventory:table'[0]:'flow', "
+ "not a list. Wrong version?"
+ )
# TODO translate ports according to translate_of_ports parameter
rules = [] # Response list
for flow in flow_list:
- if not ('id' in flow and 'match' in flow and 'instructions' in flow and
- 'instruction' in flow['instructions'] and
- 'apply-actions' in flow['instructions']['instruction'][0] and
- 'action' in flow['instructions']['instruction'][0]['apply-actions']):
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements are "
- "missing. Wrong version?")
-
- flow['instructions']['instruction'][0]['apply-actions']['action']
+ if not (
+ "id" in flow
+ and "match" in flow
+ and "instructions" in flow
+ and "instruction" in flow["instructions"]
+ and "apply-actions" in flow["instructions"]["instruction"][0]
+ and "action"
+ in flow["instructions"]["instruction"][0]["apply-actions"]
+ ):
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow response, one or more elements are "
+ "missing. Wrong version?"
+ )
+
+ flow["instructions"]["instruction"][0]["apply-actions"]["action"]
rule = dict()
- rule['switch'] = self.dpid
- rule['priority'] = flow.get('priority')
+ rule["switch"] = self.dpid
+ rule["priority"] = flow.get("priority")
# rule['name'] = flow['id']
# rule['cookie'] = flow['cookie']
- if 'in-port' in flow['match']:
- in_port = flow['match']['in-port']
+ if "in-port" in flow["match"]:
+ in_port = flow["match"]["in-port"]
if in_port not in self.ofi2pp:
- raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not in switch port list".
- format(in_port))
+ raise OpenflowConnUnexpectedResponse(
+ "Error: Ingress port {} is not in switch port list".format(
+ in_port
+ )
+ )
if translate_of_ports:
in_port = self.ofi2pp[in_port]
- rule['ingress_port'] = in_port
-
- if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \
- 'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \
- 'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \
- flow['match']['vlan-match']['vlan-id']['vlan-id-present'] is True:
- rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']
-
- if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] \
- and 'address' in flow['match']['ethernet-match']['ethernet-destination']:
- rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']
-
- instructions = flow['instructions']['instruction'][0]['apply-actions']['action']
+ rule["ingress_port"] = in_port
+
+ if (
+ "vlan-match" in flow["match"]
+ and "vlan-id" in flow["match"]["vlan-match"]
+ and "vlan-id" in flow["match"]["vlan-match"]["vlan-id"]
+ and "vlan-id-present" in flow["match"]["vlan-match"]["vlan-id"]
+ and flow["match"]["vlan-match"]["vlan-id"]["vlan-id-present"]
+ is True
+ ):
+ rule["vlan_id"] = flow["match"]["vlan-match"]["vlan-id"][
+ "vlan-id"
+ ]
+
+ if (
+ "ethernet-match" in flow["match"]
+ and "ethernet-destination" in flow["match"]["ethernet-match"]
+ and "address"
+ in flow["match"]["ethernet-match"]["ethernet-destination"]
+ ):
+ rule["dst_mac"] = flow["match"]["ethernet-match"][
+ "ethernet-destination"
+ ]["address"]
+
+ instructions = flow["instructions"]["instruction"][0]["apply-actions"][
+ "action"
+ ]
max_index = 0
for instruction in instructions:
- if instruction['order'] > max_index:
- max_index = instruction['order']
+ if instruction["order"] > max_index:
+ max_index = instruction["order"]
- actions = [None]*(max_index+1)
+ actions = [None] * (max_index + 1)
for instruction in instructions:
- if 'output-action' in instruction:
- if 'output-node-connector' not in instruction['output-action']:
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elementa "
- "are missing. Wrong version?")
+ if "output-action" in instruction:
+ if "output-node-connector" not in instruction["output-action"]:
+ raise OpenflowConnUnexpectedResponse(
+                                "unexpected openflow response, one or more elements "
+ "are missing. Wrong version?"
+ )
+
+ out_port = instruction["output-action"]["output-node-connector"]
- out_port = instruction['output-action']['output-node-connector']
if out_port not in self.ofi2pp:
- raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in switch port list".
- format(out_port))
+ raise OpenflowConnUnexpectedResponse(
+ "Error: Output port {} is not in switch port list".format(
+ out_port
+ )
+ )
if translate_of_ports:
out_port = self.ofi2pp[out_port]
- actions[instruction['order']] = ('out', out_port)
-
- elif 'strip-vlan-action' in instruction:
- actions[instruction['order']] = ('vlan', None)
-
- elif 'set-field' in instruction:
- if not ('vlan-match' in instruction['set-field'] and
- 'vlan-id' in instruction['set-field']['vlan-match'] and
- 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements "
- "are missing. Wrong version?")
-
- actions[instruction['order']] = ('vlan',
- instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
+ actions[instruction["order"]] = ("out", out_port)
+ elif "strip-vlan-action" in instruction:
+ actions[instruction["order"]] = ("vlan", None)
+ elif "set-field" in instruction:
+ if not (
+ "vlan-match" in instruction["set-field"]
+ and "vlan-id" in instruction["set-field"]["vlan-match"]
+ and "vlan-id"
+ in instruction["set-field"]["vlan-match"]["vlan-id"]
+ ):
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow response, one or more elements "
+ "are missing. Wrong version?"
+ )
+
+ actions[instruction["order"]] = (
+ "vlan",
+ instruction["set-field"]["vlan-match"]["vlan-id"][
+ "vlan-id"
+ ],
+ )
actions = [x for x in actions if x is not None]
- rule['actions'] = list(actions)
+ rule["actions"] = list(actions)
rules.append(rule)
return rules
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
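
For reference, a sketch of one entry of the list returned by get_of_rules() above; all values are invented, and the optional keys (vlan_id, dst_mac) appear only when the flow matches them.

example_rule = {
    "switch": "00:00:02:0e:0f:10:11:12",  # self.dpid
    "priority": 1000,
    "ingress_port": "eth1/5",  # physical name when translate_of_ports is True
    "vlan_id": 2001,
    "dst_mac": "fa:16:3e:12:34:56",
    "actions": [("vlan", 300), ("out", "eth1/7")],  # ordered (type, value) tuples
}
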
def del_flow(self, flow_name):
:param flow_name: flow_name, this is the rule name
        :return: Raise an OpenflowConnConnectionException exception in case of failure
"""
-
try:
- of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
- "/table/0/flow/" + flow_name, headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.delete(
+ self.url
+ + "restconf/config/opendaylight-inventory:nodes/node/"
+ + self.id
+ + "/table/0/flow/"
+ + flow_name,
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("del_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("del_flow OK " + error_text)
+
return None
except requests.exceptions.RequestException as e:
            # raise an exception in case of connection error
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("del_flow " + error_text)
+
raise OpenflowConnConnectionException(error_text)
def new_flow(self, data):
('out', port): send to this port
:return: Raise a OpenflowConnConnectionException exception in case of failure
"""
-
try:
self.logger.debug("new_flow data: {}".format(data))
+
if len(self.pp2ofi) == 0:
self.obtain_port_correspondence()
# We have to build the data for the opendaylight call from the generic data
flow = {
- 'id': data['name'],
- 'flow-name': data['name'],
- 'idle-timeout': 0,
- 'hard-timeout': 0,
- 'table_id': 0,
- 'priority': data.get('priority'),
- 'match': {}
+ "id": data["name"],
+ "flow-name": data["name"],
+ "idle-timeout": 0,
+ "hard-timeout": 0,
+ "table_id": 0,
+ "priority": data.get("priority"),
+ "match": {},
}
- sdata = {'flow-node-inventory:flow': [flow]}
- if not data['ingress_port'] in self.pp2ofi:
- error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+ sdata = {"flow-node-inventory:flow": [flow]}
+
+ if not data["ingress_port"] in self.pp2ofi:
+ error_text = (
+ "Error. Port "
+ + data["ingress_port"]
+ + " is not present in the switch"
+ )
self.logger.warning("new_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
- flow['match']['in-port'] = self.pp2ofi[data['ingress_port']]
- if data.get('dst_mac'):
- flow['match']['ethernet-match'] = {
- 'ethernet-destination': {'address': data['dst_mac']}
+
+ flow["match"]["in-port"] = self.pp2ofi[data["ingress_port"]]
+
+ if data.get("dst_mac"):
+ flow["match"]["ethernet-match"] = {
+ "ethernet-destination": {"address": data["dst_mac"]}
}
- if data.get('vlan_id'):
- flow['match']['vlan-match'] = {
- 'vlan-id': {
- 'vlan-id-present': True,
- 'vlan-id': int(data['vlan_id'])
+
+ if data.get("vlan_id"):
+ flow["match"]["vlan-match"] = {
+ "vlan-id": {
+ "vlan-id-present": True,
+ "vlan-id": int(data["vlan_id"]),
}
}
+
actions = []
- flow['instructions'] = {
- 'instruction': [{
- 'order': 1,
- 'apply-actions': {'action': actions}
- }]
+ flow["instructions"] = {
+ "instruction": [{"order": 1, "apply-actions": {"action": actions}}]
}
order = 0
- for action in data['actions']:
- new_action = {'order': order}
+ for action in data["actions"]:
+ new_action = {"order": order}
if action[0] == "vlan":
if action[1] is None:
# strip vlan
- new_action['strip-vlan-action'] = {}
+ new_action["strip-vlan-action"] = {}
else:
- new_action['set-field'] = {
- 'vlan-match': {
- 'vlan-id': {
- 'vlan-id-present': True,
- 'vlan-id': int(action[1])
+ new_action["set-field"] = {
+ "vlan-match": {
+ "vlan-id": {
+ "vlan-id-present": True,
+ "vlan-id": int(action[1]),
}
}
}
- elif action[0] == 'out':
- new_action['output-action'] = {}
+ elif action[0] == "out":
+ new_action["output-action"] = {}
+
if not action[1] in self.pp2ofi:
- error_msg = 'Port ' + action[1] + ' is not present in the switch'
+ error_msg = (
+ "Port " + action[1] + " is not present in the switch"
+ )
+
raise OpenflowConnUnexpectedResponse(error_msg)
- new_action['output-action']['output-node-connector'] = self.pp2ofi[action[1]]
+ new_action["output-action"]["output-node-connector"] = self.pp2ofi[
+ action[1]
+ ]
else:
error_msg = "Unknown item '{}' in action list".format(action[0])
self.logger.error("new_flow " + error_msg)
+
raise OpenflowConnUnexpectedResponse(error_msg)
actions.append(new_action)
order += 1
# print json.dumps(sdata)
- of_response = requests.put(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
- "/table/0/flow/" + data['name'], headers=self.headers, data=json.dumps(sdata))
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ of_response = requests.put(
+ self.url
+ + "restconf/config/opendaylight-inventory:nodes/node/"
+ + self.id
+ + "/table/0/flow/"
+ + data["name"],
+ headers=self.headers,
+ data=json.dumps(sdata),
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("new_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("new_flow OK " + error_text)
- return None
+ return None
except requests.exceptions.RequestException as e:
            # raise an exception in case of connection error
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("new_flow " + error_text)
+
raise OpenflowConnConnectionException(error_text)
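
A sketch, with invented names and ports, of the RESTCONF body that new_flow() above assembles and PUTs to restconf/config/opendaylight-inventory:nodes/node/<id>/table/0/flow/<name> for a rule that retags VLAN 100 to VLAN 200 and forwards it to another port.

example_sdata = {
    "flow-node-inventory:flow": [
        {
            "id": "vnet1-rule1",
            "flow-name": "vnet1-rule1",
            "idle-timeout": 0,
            "hard-timeout": 0,
            "table_id": 0,
            "priority": 1000,
            "match": {
                "in-port": "openflow:55930464251853:5",
                "vlan-match": {
                    "vlan-id": {"vlan-id-present": True, "vlan-id": 100}
                },
            },
            "instructions": {
                "instruction": [
                    {
                        "order": 1,
                        "apply-actions": {
                            "action": [
                                {
                                    "order": 0,
                                    "set-field": {
                                        "vlan-match": {
                                            "vlan-id": {
                                                "vlan-id-present": True,
                                                "vlan-id": 200,
                                            }
                                        }
                                    },
                                },
                                {
                                    "order": 1,
                                    "output-action": {
                                        "output-node-connector": "openflow:55930464251853:7"
                                    },
                                },
                            ]
                        },
                    }
                ]
            },
        }
    ]
}
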
def clear_all_flows(self):
        :return: Raise an OpenflowConnConnectionException exception in case of failure
"""
try:
- of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id +
- "/table/0", headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
- if of_response.status_code != 200 and of_response.status_code != 404: # HTTP_Not_Found
+ of_response = requests.delete(
+ self.url
+ + "restconf/config/opendaylight-inventory:nodes/node/"
+ + self.id
+ + "/table/0",
+ headers=self.headers,
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
+ if of_response.status_code != 200 and of_response.status_code != 404:
self.logger.warning("clear_all_flows " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("clear_all_flows OK " + error_text)
+
return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("clear_all_flows " + error_text)
+
raise OpenflowConnConnectionException(error_text)
class SdnConnectorOdlOf(SdnConnectorOpenFlow):
-
def __init__(self, wim, wim_account, config=None, logger=None):
- """Creates a connectivity based on pro-active openflow rules
- """
- self.logger = logging.getLogger('ro.sdn.odlof')
+        """Creates connectivity based on pro-active openflow rules"""
+ self.logger = logging.getLogger("ro.sdn.odlof")
super().__init__(wim, wim_account, config, logger)
of_params = {
"of_url": wim["wim_url"],
setup(
name=_name,
- description='OSM RO plugin for SDN with odl openflow rules',
+ description="OSM RO plugin for SDN with odl openflow rules",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_odlof = osm_rosdn_odlof.sdnconn_odlof:SdnConnectorOdlOf'],
+ "osm_rosdn.plugins": [
+ "rosdn_odlof = osm_rosdn_odlof.sdnconn_odlof:SdnConnectorOdlOf"
+ ],
},
)
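
The entry_points group above is what makes the plugin discoverable once installed. A minimal sketch, assuming the package is installed, of how the connector class could be looked up through pkg_resources; the RO engine's actual loading code may differ.

from pkg_resources import iter_entry_points

for entry_point in iter_entry_points(group="osm_rosdn.plugins", name="rosdn_odlof"):
    connector_class = entry_point.load()  # resolves to SdnConnectorOdlOf
    print(entry_point.name, connector_class)
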
-r{toxinidir}/requirements.txt
install_command = python3 -m pip install -U {opts} {packages}
commands = flake8 osm_rosdn_odlof --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import requests
import base64
import logging
-from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse
+from osm_ro_plugin.openflow_conn import (
+ OpenflowConn,
+ OpenflowConnConnectionException,
+ OpenflowConnUnexpectedResponse,
+)
+
# OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException, \
# OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented
"""
ONOS connector. No MAC learning is used
"""
+
def __init__(self, params):
- """ Constructor.
- :param params: dictionary with the following keys:
- of_dpid: DPID to use for this controller ?? Does a controller have a dpid?
- of_url: must be [http://HOST:PORT/]
- of_user: user credentials, can be missing or None
- of_password: password credentials
- of_debug: debug level for logging. Default to ERROR
- other keys are ignored
- Raise an exception if same parameter is missing or wrong
+ """Constructor.
+ :param params: dictionary with the following keys:
+ of_dpid: DPID to use for this controller ?? Does a controller have a dpid?
+ of_url: must be [http://HOST:PORT/]
+ of_user: user credentials, can be missing or None
+ of_password: password credentials
+ of_debug: debug level for logging. Default to ERROR
+ other keys are ignored
+            Raise an exception if some parameter is missing or wrong
"""
-
OpenflowConn.__init__(self, params)
# check params
url = params.get("of_url")
+
if not url:
raise ValueError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
+
self.url = url + "onos/v1/"
# internal variables
self.name = "onosof"
- self.headers = {'content-type': 'application/json', 'accept': 'application/json'}
+ self.headers = {
+ "content-type": "application/json",
+ "accept": "application/json",
+ }
self.auth = "None"
self.pp2ofi = {} # From Physical Port to OpenFlow Index
self.ofi2pp = {} # From OpenFlow Index to Physical Port
self.dpid = str(params["of_dpid"])
- self.id = 'of:'+str(self.dpid.replace(':', ''))
+ self.id = "of:" + str(self.dpid.replace(":", ""))
# TODO This may not be straightforward
if params.get("of_user"):
of_password = params.get("of_password", "")
- self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8"))
+ self.auth = base64.b64encode(
+ bytes(params["of_user"] + ":" + of_password, "utf-8")
+ )
self.auth = self.auth.decode()
- self.headers['authorization'] = 'Basic ' + self.auth
+ self.headers["authorization"] = "Basic " + self.auth
- self.logger = logging.getLogger('ro.sdn.onosof')
+ self.logger = logging.getLogger("ro.sdn.onosof")
# self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
self.logger.debug("onosof plugin initialized")
self.ip_address = None
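
A minimal sketch, with an invented DPID and placeholder credentials, of the two values the constructor above derives: the ONOS REST device id and the Basic authorization header.

import base64

dpid = "00:00:02:0e:0f:10:11:12"
device_id = "of:" + dpid.replace(":", "")  # -> "of:0000020e0f101112"
auth = base64.b64encode(b"user:password").decode()  # placeholder credentials
headers = {"authorization": "Basic " + auth}
print(device_id, headers)
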
        Raise an OpenflowConnUnexpectedResponse exception in case of failure
"""
try:
- self.headers['content-type'] = 'text/plain'
+ self.headers["content-type"] = "text/plain"
of_response = requests.get(self.url + "devices", headers=self.headers)
- error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ error_text = "Openflow response %d: %s" % (
+ of_response.status_code,
+ of_response.text,
+ )
+
if of_response.status_code != 200:
self.logger.warning("get_of_switches " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
self.logger.debug("get_of_switches " + error_text)
info = of_response.json()
if type(info) != dict:
- self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
- raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response, not a dict: %s", str(info)
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, not a dict. Wrong version?"
+ )
- node_list = info.get('devices')
+ node_list = info.get("devices")
if type(node_list) is not list:
self.logger.error(
"get_of_switches. Unexpected response, at 'devices', not found or not a list: %s",
- str(type(node_list)))
- raise OpenflowConnUnexpectedResponse("Unexpected response, at 'devices', not found "
- "or not a list. Wrong version?")
+ str(type(node_list)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response, at 'devices', not found "
+ "or not a list. Wrong version?"
+ )
switch_list = []
for node in node_list:
- node_id = node.get('id')
+ node_id = node.get("id")
if node_id is None:
- self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s",
- str(node))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'device':'id', "
- "not found . Wrong version?")
+ self.logger.error(
+ "get_of_switches. Unexpected response at 'device':'id', not found: %s",
+ str(node),
+ )
- node_ip_address = node.get('annotations').get('managementAddress')
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'device':'id', "
+                    "not found. Wrong version?"
+ )
+
+ node_ip_address = node.get("annotations").get("managementAddress")
if node_ip_address is None:
self.logger.error(
"get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s",
- str(node))
- raise OpenflowConnUnexpectedResponse(
- "Unexpected response at 'device':'managementAddress', not found. Wrong version?")
+ str(node),
+ )
- node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'device':'managementAddress', not found. Wrong version?"
+ )
+ node_id_hex = hex(int(node_id.split(":")[1])).split("x")[1].zfill(16)
switch_list.append(
- (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
- return switch_list
+ (
+ ":".join(
+ a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])
+ ),
+ node_ip_address,
+ )
+ )
+ return switch_list
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_switches " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def obtain_port_correspondence(self):
        Raise an OpenflowConnUnexpectedResponse exception in case of failure
"""
try:
- self.headers['content-type'] = 'text/plain'
- of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ self.headers["content-type"] = "text/plain"
+ of_response = requests.get(
+ self.url + "devices/" + self.id + "/ports", headers=self.headers
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
+
if of_response.status_code != 200:
self.logger.warning("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
self.logger.debug("obtain_port_correspondence " + error_text)
info = of_response.json()
- node_connector_list = info.get('ports')
+ node_connector_list = info.get("ports")
if type(node_connector_list) is not list:
self.logger.error(
"obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s",
- str(node_connector_list))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'ports', not found or not "
- "a list. Wrong version?")
+ str(node_connector_list),
+ )
- for node_connector in node_connector_list:
- if node_connector['port'] != "local":
- self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port'])
- self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName'])
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'ports', not found or not "
+ "a list. Wrong version?"
+ )
- node_ip_address = info['annotations']['managementAddress']
+ for node_connector in node_connector_list:
+ if node_connector["port"] != "local":
+ self.pp2ofi[str(node_connector["annotations"]["portName"])] = str(
+ node_connector["port"]
+ )
+ self.ofi2pp[str(node_connector["port"])] = str(
+ node_connector["annotations"]["portName"]
+ )
+
+ node_ip_address = info["annotations"]["managementAddress"]
if node_ip_address is None:
self.logger.error(
"obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s",
- str(self.id))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'managementAddress', "
- "not found. Wrong version?")
+ str(self.id),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'managementAddress', "
+ "not found. Wrong version?"
+ )
+
self.ip_address = node_ip_address
# print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("obtain_port_correspondence " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def get_of_rules(self, translate_of_ports=True):
switch: DPID, all
Raise a openflowconnUnexpectedResponse exception in case of failure
"""
-
try:
-
if len(self.ofi2pp) == 0:
self.obtain_port_correspondence()
# get rules
- self.headers['content-type'] = 'text/plain'
- of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers)
- error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+ self.headers["content-type"] = "text/plain"
+ of_response = requests.get(
+ self.url + "flows/" + self.id, headers=self.headers
+ )
+ error_text = "Openflow response %d: %s" % (
+ of_response.status_code,
+ of_response.text,
+ )
            # The configured page does not exist if there are no rules installed. In that case we return an empty list
if of_response.status_code == 404:
return []
-
elif of_response.status_code != 200:
self.logger.warning("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
+
self.logger.debug("get_of_rules " + error_text)
info = of_response.json()
if type(info) != dict:
- self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info))
- raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. "
- "Wrong version?")
+ self.logger.error(
+ "get_of_rules. Unexpected response, not a dict: %s",
+ str(info),
+ )
- flow_list = info.get('flows')
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected openflow response, not a dict. Wrong version?"
+ )
+
+ flow_list = info.get("flows")
if flow_list is None:
return []
+
if type(flow_list) is not list:
self.logger.error(
"get_of_rules. Unexpected response at 'flows', not a list: %s",
- str(type(flow_list)))
- raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. "
- "Wrong version?")
+ str(type(flow_list)),
+ )
+
+ raise OpenflowConnUnexpectedResponse(
+ "Unexpected response at 'flows', not a list. Wrong version?"
+ )
rules = [] # Response list
for flow in flow_list:
- if not ('id' in flow and 'selector' in flow and 'treatment' in flow and
- 'instructions' in flow['treatment'] and 'criteria' in
- flow['selector']):
- raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more "
- "elements are missing. Wrong version?")
+ if not (
+ "id" in flow
+ and "selector" in flow
+ and "treatment" in flow
+ and "instructions" in flow["treatment"]
+ and "criteria" in flow["selector"]
+ ):
+ raise OpenflowConnUnexpectedResponse(
+ "unexpected openflow response, one or more "
+ "elements are missing. Wrong version?"
+ )
rule = dict()
- rule['switch'] = self.dpid
- rule['priority'] = flow.get('priority')
- rule['name'] = flow['id']
+ rule["switch"] = self.dpid
+ rule["priority"] = flow.get("priority")
+ rule["name"] = flow["id"]
- for criteria in flow['selector']['criteria']:
- if criteria['type'] == 'IN_PORT':
- in_port = str(criteria['port'])
+ for criteria in flow["selector"]["criteria"]:
+ if criteria["type"] == "IN_PORT":
+ in_port = str(criteria["port"])
if in_port != "CONTROLLER":
if in_port not in self.ofi2pp:
- raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not "
- "in switch port list".format(in_port))
+ raise OpenflowConnUnexpectedResponse(
+ "Error: Ingress port {} is not "
+ "in switch port list".format(in_port)
+ )
+
if translate_of_ports:
in_port = self.ofi2pp[in_port]
- rule['ingress_port'] = in_port
-
- elif criteria['type'] == 'VLAN_VID':
- rule['vlan_id'] = criteria['vlanId']
- elif criteria['type'] == 'ETH_DST':
- rule['dst_mac'] = str(criteria['mac']).lower()
+ rule["ingress_port"] = in_port
+ elif criteria["type"] == "VLAN_VID":
+ rule["vlan_id"] = criteria["vlanId"]
+ elif criteria["type"] == "ETH_DST":
+ rule["dst_mac"] = str(criteria["mac"]).lower()
actions = []
- for instruction in flow['treatment']['instructions']:
- if instruction['type'] == "OUTPUT":
- out_port = str(instruction['port'])
+ for instruction in flow["treatment"]["instructions"]:
+ if instruction["type"] == "OUTPUT":
+ out_port = str(instruction["port"])
if out_port != "CONTROLLER":
if out_port not in self.ofi2pp:
- raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in "
- "switch port list".format(out_port))
+ raise OpenflowConnUnexpectedResponse(
+ "Error: Output port {} is not in "
+ "switch port list".format(out_port)
+ )
if translate_of_ports:
out_port = self.ofi2pp[out_port]
- actions.append(('out', out_port))
+ actions.append(("out", out_port))
- if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP":
- actions.append(('vlan', 'None'))
- if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID":
- actions.append(('vlan', instruction['vlanId']))
+ if (
+ instruction["type"] == "L2MODIFICATION"
+ and instruction["subtype"] == "VLAN_POP"
+ ):
+ actions.append(("vlan", "None"))
- rule['actions'] = actions
+ if (
+ instruction["type"] == "L2MODIFICATION"
+ and instruction["subtype"] == "VLAN_ID"
+ ):
+ actions.append(("vlan", instruction["vlanId"]))
+
+ rule["actions"] = actions
rules.append(rule)
- return rules
+ return rules
except requests.exceptions.RequestException as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnConnectionException(error_text)
except ValueError as e:
# ValueError in the case that JSON can not be decoded
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("get_of_rules " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
def del_flow(self, flow_name):
:param flow_name:
        :return: Raise an OpenflowConnUnexpectedResponse exception in case of failure
"""
-
try:
self.logger.debug("del_flow: delete flow name {}".format(flow_name))
- self.headers['content-type'] = None
- of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers)
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ self.headers["content-type"] = None
+ of_response = requests.delete(
+ self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers
+ )
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
if of_response.status_code != 204:
self.logger.warning("del_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
self.logger.debug("del_flow: {} OK,: {} ".format(flow_name, error_text))
- return None
+ return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("del_flow " + error_text)
+
raise OpenflowConnConnectionException(error_text)
def new_flow(self, data):
# Build the dictionary with the flow rule information for ONOS
flow = dict()
- # flow['id'] = data['name']
- flow['tableId'] = 0
- flow['priority'] = data.get('priority')
- flow['timeout'] = 0
- flow['isPermanent'] = "true"
- flow['appId'] = 10 # FIXME We should create an appId for OSM
- flow['selector'] = dict()
- flow['selector']['criteria'] = list()
+ # flow["id"] = data["name"]
+ flow["tableId"] = 0
+ flow["priority"] = data.get("priority")
+ flow["timeout"] = 0
+ flow["isPermanent"] = "true"
+ flow["appId"] = 10 # FIXME We should create an appId for OSM
+ flow["selector"] = dict()
+ flow["selector"]["criteria"] = list()
# Flow rule matching criteria
- if not data['ingress_port'] in self.pp2ofi:
- error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+ if not data["ingress_port"] in self.pp2ofi:
+ error_text = (
+ "Error. Port "
+ + data["ingress_port"]
+ + " is not present in the switch"
+ )
self.logger.warning("new_flow " + error_text)
+
raise OpenflowConnUnexpectedResponse(error_text)
ingress_port_criteria = dict()
- ingress_port_criteria['type'] = "IN_PORT"
- ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']]
- flow['selector']['criteria'].append(ingress_port_criteria)
+ ingress_port_criteria["type"] = "IN_PORT"
+ ingress_port_criteria["port"] = self.pp2ofi[data["ingress_port"]]
+ flow["selector"]["criteria"].append(ingress_port_criteria)
- if 'dst_mac' in data:
+ if "dst_mac" in data:
dst_mac_criteria = dict()
dst_mac_criteria["type"] = "ETH_DST"
- dst_mac_criteria["mac"] = data['dst_mac']
- flow['selector']['criteria'].append(dst_mac_criteria)
+ dst_mac_criteria["mac"] = data["dst_mac"]
+ flow["selector"]["criteria"].append(dst_mac_criteria)
- if data.get('vlan_id'):
+ if data.get("vlan_id"):
vlan_criteria = dict()
vlan_criteria["type"] = "VLAN_VID"
- vlan_criteria["vlanId"] = int(data['vlan_id'])
- flow['selector']['criteria'].append(vlan_criteria)
+ vlan_criteria["vlanId"] = int(data["vlan_id"])
+ flow["selector"]["criteria"].append(vlan_criteria)
# Flow rule treatment
- flow['treatment'] = dict()
- flow['treatment']['instructions'] = list()
- flow['treatment']['deferred'] = list()
+ flow["treatment"] = dict()
+ flow["treatment"]["instructions"] = list()
+ flow["treatment"]["deferred"] = list()
- for action in data['actions']:
+ for action in data["actions"]:
new_action = dict()
if action[0] == "vlan":
- new_action['type'] = "L2MODIFICATION"
+ new_action["type"] = "L2MODIFICATION"
+
if action[1] is None:
- new_action['subtype'] = "VLAN_POP"
+ new_action["subtype"] = "VLAN_POP"
else:
- new_action['subtype'] = "VLAN_ID"
- new_action['vlanId'] = int(action[1])
- elif action[0] == 'out':
- new_action['type'] = "OUTPUT"
+ new_action["subtype"] = "VLAN_ID"
+ new_action["vlanId"] = int(action[1])
+ elif action[0] == "out":
+ new_action["type"] = "OUTPUT"
+
if not action[1] in self.pp2ofi:
- error_msj = 'Port ' + action[1] + ' is not present in the switch'
+ error_msj = (
+ "Port " + action[1] + " is not present in the switch"
+ )
+
raise OpenflowConnUnexpectedResponse(error_msj)
- new_action['port'] = self.pp2ofi[action[1]]
+
+ new_action["port"] = self.pp2ofi[action[1]]
else:
error_msj = "Unknown item '%s' in action list" % action[0]
self.logger.error("new_flow " + error_msj)
+
raise OpenflowConnUnexpectedResponse(error_msj)
- flow['treatment']['instructions'].append(new_action)
+ flow["treatment"]["instructions"].append(new_action)
- self.headers['content-type'] = 'application/json'
+ self.headers["content-type"] = "application/json"
path = self.url + "flows/" + self.id
self.logger.debug("new_flow post: {}".format(flow))
- of_response = requests.post(path, headers=self.headers, data=json.dumps(flow))
+ of_response = requests.post(
+ path, headers=self.headers, data=json.dumps(flow)
+ )
- error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text)
+ error_text = "Openflow response {}: {}".format(
+ of_response.status_code, of_response.text
+ )
if of_response.status_code != 201:
self.logger.warning("new_flow " + error_text)
- raise OpenflowConnUnexpectedResponse(error_text)
- flowId = of_response.headers['location'][path.__len__() + 1:]
+ raise OpenflowConnUnexpectedResponse(error_text)
- data['name'] = flowId
+            flowId = of_response.headers["location"][len(path) + 1 :]
+ data["name"] = flowId
self.logger.debug("new_flow id: {},: {} ".format(flowId, error_text))
- return None
+ return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("new_flow " + error_text)
+
raise OpenflowConnConnectionException(error_text)
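
A sketch, with invented port numbers, of the JSON document that new_flow() above posts to onos/v1/flows/<device_id> for a rule that matches VLAN 100 on one port and sends the traffic out of another port retagged as VLAN 200.

example_flow = {
    "tableId": 0,
    "priority": 1000,
    "timeout": 0,
    "isPermanent": "true",
    "appId": 10,
    "selector": {
        "criteria": [
            {"type": "IN_PORT", "port": "5"},
            {"type": "VLAN_VID", "vlanId": 100},
        ]
    },
    "treatment": {
        "instructions": [
            {"type": "L2MODIFICATION", "subtype": "VLAN_ID", "vlanId": 200},
            {"type": "OUTPUT", "port": "7"},
        ],
        "deferred": [],
    },
}
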
def clear_all_flows(self):
self.del_flow(rule)
self.logger.debug("clear_all_flows OK ")
- return None
+ return None
except requests.exceptions.RequestException as e:
error_text = type(e).__name__ + ": " + str(e)
self.logger.error("clear_all_flows " + error_text)
+
raise OpenflowConnConnectionException(error_text)
class SdnConnectorOnosOf(SdnConnectorOpenFlow):
-
def __init__(self, wim, wim_account, config=None, logger=None):
- """Creates a connectivity based on pro-active openflow rules
- """
- self.logger = logging.getLogger('ro.sdn.onosof')
+        """Creates connectivity based on pro-active openflow rules"""
+ self.logger = logging.getLogger("ro.sdn.onosof")
super().__init__(wim, wim_account, config, logger)
of_params = {
"of_url": wim["wim_url"],
}
self.openflow_conn = OfConnOnos(of_params)
super().__init__(wim, wim_account, config, logger, self.openflow_conn)
- self.logger.debug("Init sdn plugin '{}' dpid={} user={}".format(of_params["of_url"], of_params["of_dpid"],
- of_params["of_user"]))
+ self.logger.debug(
+ "Init sdn plugin '{}' dpid={} user={}".format(
+ of_params["of_url"], of_params["of_dpid"], of_params["of_user"]
+ )
+ )
setup(
name=_name,
- description='OSM RO plugin for SDN with onos openflow rules',
+ description="OSM RO plugin for SDN with onos openflow rules",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf'],
+ "osm_rosdn.plugins": [
+ "rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf"
+ ],
},
)
-r{toxinidir}/requirements.txt
install_command = python3 -m pip install -U {opts} {packages}
commands = flake8 osm_rosdn_onosof --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
"""
https://wiki.onosproject.org/display/ONOS/VPLS+User+Guide
"""
+
_WIM_LOGGER = "ro.sdn.onosvpls"
def __init__(self, wim, wim_account, config=None, logger=None):
self.user = wim_account.get("user")
self.password = wim_account.get("password")
url = wim.get("wim_url")
+
if not url:
raise SdnConnectorError("'url' must be provided")
+
if not url.startswith("http"):
url = "http://" + url
+
if not url.endswith("/"):
url = url + "/"
+
self.url = url + "onos/v1/network/configuration"
self.logger.info("ONOS VPLS Connector Initialized.")
def check_credentials(self):
status_code = 503
onos_config_req = None
+
try:
- onos_config_req = requests.get(self.url, auth=HTTPBasicAuth(self.user, self.password))
+ onos_config_req = requests.get(
+ self.url, auth=HTTPBasicAuth(self.user, self.password)
+ )
onos_config_req.raise_for_status()
except Exception as e:
if onos_config_req:
status_code = onos_config_req.status_code
- self.logger.exception('Error checking credentials: {}'.format(e))
- raise SdnConnectorError('Error checking credentials: {}'.format(e), http_code=status_code)
+
+ self.logger.exception("Error checking credentials: {}".format(e))
+
+ raise SdnConnectorError(
+ "Error checking credentials: {}".format(e), http_code=status_code
+ )
def get_connectivity_service_status(self, service_uuid, conn_info=None):
try:
onos_config = self._get_onos_netconfig()
- vpls_config = onos_config.get('apps', {}).get('org.onosproject.vpls')
+ vpls_config = onos_config.get("apps", {}).get("org.onosproject.vpls")
if vpls_config:
- for vpls in vpls_config.get('vpls', {}).get('vplsList'):
- if vpls.get('name') == service_uuid:
- return {'sdn_status': 'ACTIVE', 'sdn_info': vpls}
-
- return {'sdn_status': 'ERROR', 'sdn_info': 'not found'}
+ for vpls in vpls_config.get("vpls", {}).get("vplsList"):
+ if vpls.get("name") == service_uuid:
+ return {"sdn_status": "ACTIVE", "sdn_info": vpls}
+ return {"sdn_status": "ERROR", "sdn_info": "not found"}
except Exception as e:
- self.logger.error('Exception getting connectivity service info: %s', e)
- return {'sdn_status': 'ERROR', 'error_msg': str(e)}
+ self.logger.error("Exception getting connectivity service info: %s", e)
+
+ return {"sdn_status": "ERROR", "error_msg": str(e)}
def _get_onos_netconfig(self):
try:
- onos_config_req = requests.get(self.url, auth=HTTPBasicAuth(self.user, self.password))
+ onos_config_req = requests.get(
+ self.url, auth=HTTPBasicAuth(self.user, self.password)
+ )
status_code = onos_config_req.status_code
+
if status_code == requests.codes.ok:
return onos_config_req.json()
else:
- self.logger.info("Error obtaining network config, status code: {}".format(status_code))
- raise SdnConnectorError("Error obtaining network config status code: {}".format(status_code),
- http_code=status_code)
+ self.logger.info(
+ "Error obtaining network config, status code: {}".format(
+ status_code
+ )
+ )
+
+ raise SdnConnectorError(
+ "Error obtaining network config status code: {}".format(
+ status_code
+ ),
+ http_code=status_code,
+ )
except requests.exceptions.ConnectionError as e:
- self.logger.info('Exception connecting to onos: %s', e)
+ self.logger.info("Exception connecting to onos: %s", e)
+
raise SdnConnectorError("Error connecting to onos: {}".format(e))
except Exception as e:
- self.logger.error('Exception getting onos network config: %s', e)
- raise SdnConnectorError("Exception getting onos network config: {}".format(e))
+ self.logger.error("Exception getting onos network config: %s", e)
+
+ raise SdnConnectorError(
+ "Exception getting onos network config: {}".format(e)
+ )
def _post_onos_netconfig(self, onos_config):
try:
- onos_config_resp = requests.post(self.url, json=onos_config, auth=HTTPBasicAuth(self.user, self.password))
+ onos_config_resp = requests.post(
+ self.url, json=onos_config, auth=HTTPBasicAuth(self.user, self.password)
+ )
status_code = onos_config_resp.status_code
+
if status_code != requests.codes.ok:
- self.logger.info("Error updating network config, status code: {}".format(status_code))
- raise SdnConnectorError("Error obtaining network config status code: {}".format(status_code),
- http_code=status_code)
+ self.logger.info(
+ "Error updating network config, status code: {}".format(status_code)
+ )
+
+ raise SdnConnectorError(
+ "Error obtaining network config status code: {}".format(
+ status_code
+ ),
+ http_code=status_code,
+ )
except requests.exceptions.ConnectionError as e:
- self.logger.info('Exception connecting to onos: %s', e)
+ self.logger.info("Exception connecting to onos: %s", e)
+
raise SdnConnectorError("Error connecting to onos: {}".format(e))
except Exception as e:
- self.logger.info('Exception posting onos network config: %s', e)
- raise SdnConnectorError("Exception posting onos network config: {}".format(e))
+ self.logger.info("Exception posting onos network config: %s", e)
+
+ raise SdnConnectorError(
+ "Exception posting onos network config: {}".format(e)
+ )
def create_connectivity_service(self, service_type, connection_points, **kwargs):
- self.logger.debug("create_connectivity_service, service_type: {}, connection_points: {}".
- format(service_type, connection_points))
- if service_type.lower() == 'etree':
- raise SdnConnectorError('Only ELINE/ELAN network type is supported by ONOS VPLS.')
+ self.logger.debug(
+ "create_connectivity_service, service_type: {}, connection_points: {}".format(
+ service_type, connection_points
+ )
+ )
+
+ if service_type.lower() == "etree":
+ raise SdnConnectorError(
+ "Only ELINE/ELAN network type is supported by ONOS VPLS."
+ )
        # FIXME must we check the number of connection_points?
service_uuid = str(uuid.uuid4())
# Create missing interfaces, append to created_items if returned, append_port_to_onos_config
# returns null if it was already created
created_items = []
+
for port in connection_points:
created_ifz = self._append_port_to_onos_config(port, onos_config)
if created_ifz:
created_items.append(created_ifz[1])
+
self._post_onos_netconfig(onos_config)
# Add vpls service to config
encapsulation = self._get_encapsulation(connection_points)
interfaces = [port.get("service_endpoint_id") for port in connection_points]
- if 'org.onosproject.vpls' in onos_config['apps']:
- if 'vpls' not in onos_config['apps']['org.onosproject.vpls']:
- onos_config['apps']['org.onosproject.vpls']['vpls'] = {
- 'vplsList': []
+
+ if "org.onosproject.vpls" in onos_config["apps"]:
+ if "vpls" not in onos_config["apps"]["org.onosproject.vpls"]:
+ onos_config["apps"]["org.onosproject.vpls"]["vpls"] = {
+ "vplsList": []
}
- for vpls in onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList']:
- if vpls['name'] == service_uuid:
- raise SdnConnectorError('Network {} already exists.'.format(service_uuid))
- onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList'].append({
- 'name': service_uuid,
- 'interfaces': interfaces,
- 'encapsulation': encapsulation
- })
+
+ for vpls in onos_config["apps"]["org.onosproject.vpls"]["vpls"][
+ "vplsList"
+ ]:
+ if vpls["name"] == service_uuid:
+ raise SdnConnectorError(
+ "Network {} already exists.".format(service_uuid)
+ )
+
+ onos_config["apps"]["org.onosproject.vpls"]["vpls"]["vplsList"].append(
+ {
+ "name": service_uuid,
+ "interfaces": interfaces,
+ "encapsulation": encapsulation,
+ }
+ )
self._pop_last_update_time(onos_config)
else:
- onos_config['apps'] = {
- 'org.onosproject.vpls': {
- 'vpls': {
+ onos_config["apps"] = {
+ "org.onosproject.vpls": {
+ "vpls": {
"vplsList": [
{
- 'name': service_uuid,
- 'interfaces': interfaces,
- 'encapsulation': encapsulation
+ "name": service_uuid,
+ "interfaces": interfaces,
+ "encapsulation": encapsulation,
}
]
}
# self.logger.debug("original config: %s", onos_config)
self._post_onos_netconfig(onos_config)
- self.logger.debug("created connectivity_service, service_uuid: {}, created_items: {}".
- format(service_uuid, created_items))
+ self.logger.debug(
+ "created connectivity_service, service_uuid: {}, created_items: {}".format(
+ service_uuid, created_items
+ )
+ )
+
return service_uuid, {"interfaces": created_items}
except Exception as e:
- self.logger.error('Exception add connection_service: %s', e)
+ self.logger.error("Exception add connection_service: %s", e)
+
# try to rollback push original config
try:
self._post_onos_netconfig(onos_config_orig)
- except Exception as e:
- self.logger.error('Exception rolling back to original config: %s', e)
+ except Exception as e2:
+ self.logger.error("Exception rolling back to original config: %s", e2)
+
# raise exception
if isinstance(e, SdnConnectorError):
raise
else:
- raise SdnConnectorError("Exception create_connectivity_service: {}".format(e))
+ raise SdnConnectorError(
+ "Exception create_connectivity_service: {}".format(e)
+ )
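For orientation, a minimal sketch (not part of the patch) of the netcfg payload shape that create_connectivity_service assembles before posting it to ONOS; the service name and interface values are illustrative, borrowed from the test block at the bottom of this module.

# Illustrative only: keys mirror the org.onosproject.vpls structure built above.
example_onos_config = {
    "ports": {
        "of:0000000000000011/1": {
            "interfaces": [{"name": "switch1:ifz1", "vlan": "600"}]
        },
        "of:0000000000000031/3": {
            "interfaces": [{"name": "switch3:ifz1", "vlan": "600"}]
        },
    },
    "apps": {
        "org.onosproject.vpls": {
            "vpls": {
                "vplsList": [
                    {
                        "name": "4e1f4c8a-a874-425d-a9b5-955cb77178f8",  # service_uuid
                        "interfaces": ["switch1:ifz1", "switch3:ifz1"],
                        "encapsulation": "VLAN",
                    }
                ]
            }
        }
    },
}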
def _get_encapsulation(self, connection_points):
"""
if connection_point.get("service_endpoint_encapsulation_type") == "dot1q":
encapsulation = "VLAN"
break
+
return encapsulation
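As a standalone illustration of the loop above (the initialisation of encapsulation is elided by this hunk, so the default below is an assumption):

def encapsulation_for(connection_points, default="NONE"):  # default value is assumed
    # Mirrors the check above: any dot1q endpoint switches the service to VLAN.
    for cp in connection_points:
        if cp.get("service_endpoint_encapsulation_type") == "dot1q":
            return "VLAN"
    return default

print(encapsulation_for([{"service_endpoint_encapsulation_type": "dot1q"}]))  # VLAN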
- def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
- self.logger.debug("edit connectivity service, service_uuid: {}, conn_info: {}, "
- "connection points: {} ".format(service_uuid, conn_info, connection_points))
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ self.logger.debug(
+ "edit connectivity service, service_uuid: {}, conn_info: {}, "
+ "connection points: {} ".format(service_uuid, conn_info, connection_points)
+ )
conn_info = conn_info or {}
created_ifs = conn_info.get("interfaces", [])
onos_config = copy.deepcopy(onos_config_orig)
# get current service data and check if it does not exists
- for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}):
- if vpls['name'] == service_uuid:
+ for vpls in (
+ onos_config.get("apps", {})
+ .get("org.onosproject.vpls", {})
+ .get("vpls", {})
+ .get("vplsList", {})
+ ):
+ if vpls["name"] == service_uuid:
self.logger.debug("service exists")
curr_interfaces = vpls.get("interfaces", [])
curr_encapsulation = vpls.get("encapsulation")
break
else:
- raise SdnConnectorError("service uuid: {} does not exist".format(service_uuid))
-
+ raise SdnConnectorError(
+ "service uuid: {} does not exist".format(service_uuid)
+ )
+
self.logger.debug("current interfaces: {}".format(curr_interfaces))
self.logger.debug("current encapsulation: {}".format(curr_encapsulation))
# new interfaces names
- new_interfaces = [port['service_endpoint_id'] for port in connection_points]
+ new_interfaces = [port["service_endpoint_id"] for port in connection_points]
# obtain interfaces to delete, list will contain port
ifs_delete = list(set(curr_interfaces) - set(new_interfaces))
# in that case delete it and add it again
ifs_remain = list(set(new_interfaces) & set(curr_interfaces))
for port in connection_points:
- if port['service_endpoint_id'] in ifs_remain:
+ if port["service_endpoint_id"] in ifs_remain:
# check if there are some changes
- curr_port_name, curr_vlan = self._get_current_port_data(onos_config, port['service_endpoint_id'])
- new_port_name = 'of:{}/{}'.format(port['service_endpoint_encapsulation_info']['switch_dpid'],
- port['service_endpoint_encapsulation_info']['switch_port'])
- new_vlan = port['service_endpoint_encapsulation_info']['vlan']
- if (curr_port_name != new_port_name or curr_vlan != new_vlan):
- self.logger.debug("TODO: must update data interface: {}".format(port['service_endpoint_id']))
- ifs_delete.append(port['service_endpoint_id'])
- ifs_add.append(port['service_endpoint_id'])
+ curr_port_name, curr_vlan = self._get_current_port_data(
+ onos_config, port["service_endpoint_id"]
+ )
+ new_port_name = "of:{}/{}".format(
+ port["service_endpoint_encapsulation_info"]["switch_dpid"],
+ port["service_endpoint_encapsulation_info"]["switch_port"],
+ )
+ new_vlan = port["service_endpoint_encapsulation_info"]["vlan"]
+
+ if curr_port_name != new_port_name or curr_vlan != new_vlan:
+ self.logger.debug(
+ "TODO: must update data interface: {}".format(
+ port["service_endpoint_id"]
+ )
+ )
+ ifs_delete.append(port["service_endpoint_id"])
+ ifs_add.append(port["service_endpoint_id"])
new_encapsulation = self._get_encapsulation(connection_points)
# Delete interfaces, only will delete interfaces that are in provided conn_info
# because these are the ones that have been created for this service
if ifs_delete:
- for port in onos_config['ports'].values():
- for port_interface in port['interfaces']:
- interface_name = port_interface['name']
- self.logger.debug("interface name: {}".format(port_interface['name']))
- if interface_name in ifs_delete and interface_name in created_ifs:
- self.logger.debug("delete interface name: {}".format(interface_name))
- port['interfaces'].remove(port_interface)
+ for port in onos_config["ports"].values():
+ for port_interface in port["interfaces"]:
+ interface_name = port_interface["name"]
+ self.logger.debug(
+ "interface name: {}".format(port_interface["name"])
+ )
+
+ if (
+ interface_name in ifs_delete
+ and interface_name in created_ifs
+ ):
+ self.logger.debug(
+ "delete interface name: {}".format(interface_name)
+ )
+ port["interfaces"].remove(port_interface)
created_ifs.remove(interface_name)
# Add new interfaces
for port in connection_points:
- if port['service_endpoint_id'] in ifs_add:
+ if port["service_endpoint_id"] in ifs_add:
created_ifz = self._append_port_to_onos_config(port, onos_config)
if created_ifz:
created_ifs.append(created_ifz[1])
+
self._pop_last_update_time(onos_config)
self._post_onos_netconfig(onos_config)
- self.logger.debug("onos config after updating interfaces: {}".format(onos_config))
- self.logger.debug("created_ifs after updating interfaces: {}".format(created_ifs))
+ self.logger.debug(
+ "onos config after updating interfaces: {}".format(onos_config)
+ )
+ self.logger.debug(
+ "created_ifs after updating interfaces: {}".format(created_ifs)
+ )
# Update interfaces list in vpls service
- for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}):
- if vpls['name'] == service_uuid:
- vpls['interfaces'] = new_interfaces
- vpls['encapsulation'] = new_encapsulation
+ for vpls in (
+ onos_config.get("apps", {})
+ .get("org.onosproject.vpls", {})
+ .get("vpls", {})
+ .get("vplsList", {})
+ ):
+ if vpls["name"] == service_uuid:
+ vpls["interfaces"] = new_interfaces
+ vpls["encapsulation"] = new_encapsulation
self._pop_last_update_time(onos_config)
self._post_onos_netconfig(onos_config)
+
return {"interfaces": created_ifs}
except Exception as e:
- self.logger.error('Exception add connection_service: %s', e)
+ self.logger.error("Exception add connection_service: %s", e)
# try to rollback push original config
try:
self._post_onos_netconfig(onos_config_orig)
except Exception as e2:
- self.logger.error('Exception rolling back to original config: %s', e2)
+ self.logger.error("Exception rolling back to original config: %s", e2)
# raise exception
if isinstance(e, SdnConnectorError):
raise
else:
- raise SdnConnectorError("Exception create_connectivity_service: {}".format(e))
+ raise SdnConnectorError(
+ "Exception create_connectivity_service: {}".format(e)
+ )
def delete_connectivity_service(self, service_uuid, conn_info=None):
self.logger.debug("delete_connectivity_service uuid: {}".format(service_uuid))
try:
# Removes ports used by network from onos config
- for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}):
- if vpls['name'] == service_uuid:
+ for vpls in (
+ onos_config.get("apps", {})
+ .get("org.onosproject.vpls", {})
+ .get("vpls", {})
+ .get("vplsList", {})
+ ):
+ if vpls["name"] == service_uuid:
# iterate interfaces to check if must delete them
- for interface in vpls['interfaces']:
- for port in onos_config['ports'].values():
- for port_interface in port['interfaces']:
- if port_interface['name'] == interface:
+ for interface in vpls["interfaces"]:
+ for port in onos_config["ports"].values():
+ for port_interface in port["interfaces"]:
+ if port_interface["name"] == interface:
# Delete only created ifzs
- if port_interface['name'] in created_ifs:
- self.logger.debug("Delete ifz: {}".format(port_interface['name']))
- port['interfaces'].remove(port_interface)
- onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList'].remove(vpls)
+ if port_interface["name"] in created_ifs:
+ self.logger.debug(
+ "Delete ifz: {}".format(
+ port_interface["name"]
+ )
+ )
+ port["interfaces"].remove(port_interface)
+ onos_config["apps"]["org.onosproject.vpls"]["vpls"][
+ "vplsList"
+ ].remove(vpls)
break
else:
- raise SdnConnectorError("service uuid: {} does not exist".format(service_uuid))
+ raise SdnConnectorError(
+ "service uuid: {} does not exist".format(service_uuid)
+ )
self._pop_last_update_time(onos_config)
self._post_onos_netconfig(onos_config)
- self.logger.debug("deleted connectivity service uuid: {}".format(service_uuid))
+ self.logger.debug(
+ "deleted connectivity service uuid: {}".format(service_uuid)
+ )
except SdnConnectorError:
raise
except Exception as e:
- self.logger.error('Exception delete connection_service: %s', e, exc_info=True)
- raise SdnConnectorError("Exception delete connectivity service: {}".format(str(e)))
+ self.logger.error(
+ "Exception delete connection_service: %s", e, exc_info=True
+ )
+
+ raise SdnConnectorError(
+ "Exception delete connectivity service: {}".format(str(e))
+ )
def _pop_last_update_time(self, onos_config):
"""
Needed before post when there are already configured vpls services to apply changes
"""
- onos_config['apps']['org.onosproject.vpls']['vpls'].pop('lastUpdateTime', None)
+ onos_config["apps"]["org.onosproject.vpls"]["vpls"].pop("lastUpdateTime", None)
def _get_current_port_data(self, onos_config, interface_name):
- for port_name, port in onos_config['ports'].items():
- for port_interface in port['interfaces']:
- if port_interface['name'] == interface_name:
- return port_name, port_interface['vlan']
+ for port_name, port in onos_config["ports"].items():
+ for port_interface in port["interfaces"]:
+ if port_interface["name"] == interface_name:
+ return port_name, port_interface["vlan"]
def _append_port_to_onos_config(self, port, onos_config):
created_item = None
- port_name = 'of:{}/{}'.format(port['service_endpoint_encapsulation_info']['switch_dpid'],
- port['service_endpoint_encapsulation_info']['switch_port'])
- interface_config = {'name': port['service_endpoint_id']}
- if 'vlan' in port['service_endpoint_encapsulation_info'] \
- and port['service_endpoint_encapsulation_info']['vlan']:
- interface_config['vlan'] = port['service_endpoint_encapsulation_info']['vlan']
- if port_name in onos_config['ports'] and 'interfaces' in onos_config['ports'][port_name]:
- for interface in onos_config['ports'][port_name]['interfaces']:
- if interface['name'] == port['service_endpoint_id']:
+ port_name = "of:{}/{}".format(
+ port["service_endpoint_encapsulation_info"]["switch_dpid"],
+ port["service_endpoint_encapsulation_info"]["switch_port"],
+ )
+ interface_config = {"name": port["service_endpoint_id"]}
+
+ if (
+ "vlan" in port["service_endpoint_encapsulation_info"]
+ and port["service_endpoint_encapsulation_info"]["vlan"]
+ ):
+ interface_config["vlan"] = port["service_endpoint_encapsulation_info"][
+ "vlan"
+ ]
+
+ if (
+ port_name in onos_config["ports"]
+ and "interfaces" in onos_config["ports"][port_name]
+ ):
+ for interface in onos_config["ports"][port_name]["interfaces"]:
+ if interface["name"] == port["service_endpoint_id"]:
# self.logger.debug("interface with same name and port exits")
# interface already exists TODO ¿check vlan? ¿delete and recreate?
# by the moment use and do not touch
break
else:
# self.logger.debug("port with same name exits but not interface")
- onos_config['ports'][port_name]['interfaces'].append(interface_config)
- created_item = (port_name, port['service_endpoint_id'])
+ onos_config["ports"][port_name]["interfaces"].append(interface_config)
+ created_item = (port_name, port["service_endpoint_id"])
else:
# self.logger.debug("create port and interface")
- onos_config['ports'][port_name] = {
- 'interfaces': [interface_config]
- }
- created_item = (port_name, port['service_endpoint_id'])
+ onos_config["ports"][port_name] = {"interfaces": [interface_config]}
+ created_item = (port_name, port["service_endpoint_id"])
+
return created_item
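A small standalone sketch of the OpenFlow port naming used by _append_port_to_onos_config, reusing values from the example connection points further down:

info = {"switch_dpid": "0000000000000011", "switch_port": "1", "vlan": "600"}
port_name = "of:{}/{}".format(info["switch_dpid"], info["switch_port"])
interface_config = {"name": "switch1:ifz1", "vlan": info["vlan"]}
assert port_name == "of:0000000000000011/1"
# onos_config["ports"][port_name]["interfaces"] would then contain interface_config.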
-if __name__ == '__main__':
- logger = logging.getLogger('ro.sdn.onos_vpls')
+if __name__ == "__main__":
+ logger = logging.getLogger("ro.sdn.onos_vpls")
logging.basicConfig()
logger.setLevel(getattr(logging, "DEBUG"))
# wim_url = "http://10.95.172.251:8181"
wim_url = "http://192.168.56.106:8181"
user = "karaf"
password = "karaf"
- wim = {'wim_url': wim_url}
- wim_account = {'user': user, 'password': password}
+ wim = {"wim_url": wim_url}
+ wim_account = {"user": user, "password": password}
onos_vpls = OnosVpls(wim=wim, wim_account=wim_account, logger=logger)
# conn_service = onos_vpls.get_connectivity_service_status("4e1f4c8a-a874-425d-a9b5-955cb77178f8")
# print(conn_service)
- service_type = 'ELAN'
+ service_type = "ELAN"
conn_point_0 = {
"service_endpoint_id": "switch1:ifz1",
"service_endpoint_encapsulation_type": "dot1q",
"service_endpoint_encapsulation_info": {
"switch_dpid": "0000000000000011",
"switch_port": "1",
- "vlan": "600"
- }
+ "vlan": "600",
+ },
}
conn_point_1 = {
"service_endpoint_id": "switch3:ifz1",
"service_endpoint_encapsulation_info": {
"switch_dpid": "0000000000000031",
"switch_port": "3",
- "vlan": "600"
- }
+ "vlan": "600",
+ },
}
connection_points = [conn_point_0, conn_point_1]
# service_uuid, conn_info = onos_vpls.create_connectivity_service(service_type, connection_points)
# print(conn_info)
# conn_info = None
- conn_info = {"interfaces": ['switch1:ifz1', 'switch3:ifz1']}
+ conn_info = {"interfaces": ["switch1:ifz1", "switch3:ifz1"]}
# onos_vpls.delete_connectivity_service("70248a41-11cb-44f3-9039-c41387394a30", conn_info)
conn_point_0 = {
"service_endpoint_encapsulation_info": {
"switch_dpid": "0000000000000011",
"switch_port": "1",
- "vlan": "500"
- }
+ "vlan": "500",
+ },
}
conn_point_2 = {
"service_endpoint_id": "switch1:ifz3",
"service_endpoint_encapsulation_info": {
"switch_dpid": "0000000000000011",
"switch_port": "3",
- "vlan": "500"
- }
+ "vlan": "500",
+ },
}
conn_point_3 = {
"service_endpoint_id": "switch2:ifz2",
"service_endpoint_encapsulation_info": {
"switch_dpid": "0000000000000022",
"switch_port": "2",
- "vlan": "500"
- }
+ "vlan": "500",
+ },
}
connection_points_2 = [conn_point_0, conn_point_3]
# conn_info = onos_vpls.edit_connectivity_service("c65d88be-73aa-4933-927d-57ec6bee6b41",
# conn_info, connection_points_2)
# print(conn_info)
- service_status = onos_vpls.get_connectivity_service_status("c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info)
+ service_status = onos_vpls.get_connectivity_service_status(
+ "c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info
+ )
print("service status")
print(service_status)
setup(
name=_name,
- description='OSM ro sdn plugin for ONOS VPLS',
+ description="OSM ro sdn plugin for ONOS VPLS",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@list.etsi.org',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@list.etsi.org',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@list.etsi.org",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@list.etsi.org",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rosdn.plugins': ['rosdn_onos_vpls = osm_rosdn_onos_vpls.sdn_assist_onos_vpls:OnosVpls'],
+ "osm_rosdn.plugins": [
+ "rosdn_onos_vpls = osm_rosdn_onos_vpls.sdn_assist_onos_vpls:OnosVpls"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rosdn_onos_vpls --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
# contact with: saboor.ahmad@xflowresearch.com
##
-'''
+"""
AWS-connector implements all the methods to interact with AWS using the BOTO client
-'''
+"""
__author__ = "Saboor Ahmad"
__date__ = "10-Apr-2017"
class vimconnector(vimconn.VimConnector):
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
- config={}, persistent_info={}):
- """ Params: uuid - id asigned to this VIM
- name - name assigned to this VIM, can be used for logging
- tenant_id - ID to be used for tenant
- tenant_name - name of tenant to be used VIM tenant to be used
- url_admin - optional, url used for administrative tasks
- user - credentials of the VIM user
- passwd - credentials of the VIM user
- log_level - if must use a different log_level than the general one
- config - dictionary with misc VIM information
- region_name - name of region to deploy the instances
- vpc_cidr_block - default CIDR block for VPC
- security_groups - default security group to specify this instance
- persistent_info - dict where the class can store information that will be available among class
- destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
- empty dict. Useful to store login/tokens information for speed up communication
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
+ """Params:
+ uuid - id assigned to this VIM
+ name - name assigned to this VIM, can be used for logging
+ tenant_id - ID to be used for tenant
+ tenant_name - name of the VIM tenant to be used
+ url_admin - optional, url used for administrative tasks
+ user - credentials of the VIM user
+ passwd - credentials of the VIM user
+ log_level - if a log_level different from the general one must be used
+ config - dictionary with misc VIM information
+ region_name - name of region to deploy the instances
+ vpc_cidr_block - default CIDR block for VPC
+ security_groups - default security group to specify this instance
+ persistent_info - dict where the class can store information that will be available among class
+ destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+ empty dict. Useful to store login/token information to speed up communication
"""
-
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config, persistent_info)
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ persistent_info,
+ )
self.persistent_info = persistent_info
self.a_creds = {}
+
if user:
- self.a_creds['aws_access_key_id'] = user
+ self.a_creds["aws_access_key_id"] = user
else:
raise vimconn.VimConnAuthException("Username is not specified")
+
if passwd:
- self.a_creds['aws_secret_access_key'] = passwd
+ self.a_creds["aws_secret_access_key"] = passwd
else:
raise vimconn.VimConnAuthException("Password is not specified")
- if 'region_name' in config:
- self.region = config.get('region_name')
+
+ if "region_name" in config:
+ self.region = config.get("region_name")
else:
raise vimconn.VimConnException("AWS region_name is not specified at config")
self.conn_vpc = None
self.account_id = None
- self.vpc_id = self.get_tenant_list()[0]['id']
+ self.vpc_id = self.get_tenant_list()[0]["id"]
# we take VPC CIDR block if specified, otherwise we use the default CIDR
# block suggested by AWS while creating instance
- self.vpc_cidr_block = '10.0.0.0/24'
+ self.vpc_cidr_block = "10.0.0.0/24"
if tenant_id:
self.vpc_id = tenant_id
- if 'vpc_cidr_block' in config:
- self.vpc_cidr_block = config['vpc_cidr_block']
+
+ if "vpc_cidr_block" in config:
+ self.vpc_cidr_block = config["vpc_cidr_block"]
self.security_groups = None
- if 'security_groups' in config:
- self.security_groups = config['security_groups']
+ if "security_groups" in config:
+ self.security_groups = config["security_groups"]
self.key_pair = None
- if 'key_pair' in config:
- self.key_pair = config['key_pair']
+ if "key_pair" in config:
+ self.key_pair = config["key_pair"]
self.flavor_info = None
- if 'flavor_info' in config:
- flavor_data = config.get('flavor_info')
+ if "flavor_info" in config:
+ flavor_data = config.get("flavor_info")
if isinstance(flavor_data, str):
try:
if flavor_data[0] == "@": # read from a file
- with open(flavor_data[1:], 'r') as stream:
+ with open(flavor_data[1:], "r") as stream:
self.flavor_info = yaml.load(stream, Loader=yaml.Loader)
else:
self.flavor_info = yaml.load(flavor_data, Loader=yaml.Loader)
except yaml.YAMLError as e:
self.flavor_info = None
- raise vimconn.VimConnException("Bad format at file '{}': {}".format(flavor_data[1:], e))
+
+ raise vimconn.VimConnException(
+ "Bad format at file '{}': {}".format(flavor_data[1:], e)
+ )
except IOError as e:
- raise vimconn.VimConnException("Error reading file '{}': {}".format(flavor_data[1:], e))
+ raise vimconn.VimConnException(
+ "Error reading file '{}': {}".format(flavor_data[1:], e)
+ )
elif isinstance(flavor_data, dict):
self.flavor_info = flavor_data
- self.logger = logging.getLogger('ro.vim.aws')
+ self.logger = logging.getLogger("ro.vim.aws")
+
if log_level:
self.logger.setLevel(getattr(logging, log_level))
def __setitem__(self, index, value):
- """Params: index - name of value of set
- value - value to set
+ """Params:
+ index - name of the value to set
+ value - value to set
"""
- if index == 'user':
- self.a_creds['aws_access_key_id'] = value
- elif index == 'passwd':
- self.a_creds['aws_secret_access_key'] = value
- elif index == 'region':
+ if index == "user":
+ self.a_creds["aws_access_key_id"] = value
+ elif index == "passwd":
+ self.a_creds["aws_secret_access_key"] = value
+ elif index == "region":
self.region = value
else:
vimconn.VimConnector.__setitem__(self, index, value)
def _reload_connection(self):
- """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services
- """
-
+ """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services"""
try:
- self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
- aws_secret_access_key=self.a_creds['aws_secret_access_key'])
- self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
- aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+ self.conn = boto.ec2.connect_to_region(
+ self.region,
+ aws_access_key_id=self.a_creds["aws_access_key_id"],
+ aws_secret_access_key=self.a_creds["aws_secret_access_key"],
+ )
+ self.conn_vpc = boto.vpc.connect_to_region(
+ self.region,
+ aws_access_key_id=self.a_creds["aws_access_key_id"],
+ aws_secret_access_key=self.a_creds["aws_secret_access_key"],
+ )
# client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'],
# aws_secret_access_key=self.a_creds['aws_secret_access_key'])
# self.account_id = client.get_caller_identity()["Account"]
"""Params: an Exception object
Returns: Raises the exception 'e' passed in the method parameters
"""
-
self.conn = None
self.conn_vpc = None
+
raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e))
def get_availability_zones_list(self):
- """Obtain AvailabilityZones from AWS
- """
-
+ """Obtain AvailabilityZones from AWS"""
try:
self._reload_connection()
az_list = []
+
for az in self.conn.get_all_zones():
az_list.append(az.name)
+
return az_list
except Exception as e:
self.format_vimconn_exception(e)
Returns the tenant list of dictionaries, and an empty list if no tenant matches all the filters:
[{'name':'<name>, 'id':'<id>, ...}, ...]
"""
-
try:
self._reload_connection()
vpc_ids = []
tfilters = {}
+
if filter_dict != {}:
- if 'id' in filter_dict:
- vpc_ids.append(filter_dict['id'])
- tfilters['name'] = filter_dict['id']
+ if "id" in filter_dict:
+ vpc_ids.append(filter_dict["id"])
+ tfilters["name"] = filter_dict["id"]
+
tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters)
tenant_list = []
+
for tenant in tenants:
- tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state),
- 'cidr_block': str(tenant.cidr_block)})
+ tenant_list.append(
+ {
+ "id": str(tenant.id),
+ "name": str(tenant.id),
+ "status": str(tenant.state),
+ "cidr_block": str(tenant.cidr_block),
+ }
+ )
+
return tenant_list
except Exception as e:
self.format_vimconn_exception(e)
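For reference, a sketch of the shape of one get_tenant_list entry; the keys come from the code above, the values are made up:

example_tenant = {
    "id": "vpc-0abc1234",      # boto VPC id (illustrative)
    "name": "vpc-0abc1234",    # the connector reuses the id as the name
    "status": "available",
    "cidr_block": "10.0.0.0/24",
}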
"tenant_description": string max length 256
returns the tenant identifier or raise exception
"""
-
self.logger.debug("Adding a new VPC")
+
try:
self._reload_connection()
vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block)
gateway = self.conn_vpc.create_internet_gateway()
self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id)
route_table = self.conn_vpc.create_route_table(vpc.id)
- self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id)
+ self.conn_vpc.create_route(route_table.id, "0.0.0.0/0", gateway.id)
+
+ self.vpc_data[vpc.id] = {
+ "gateway": gateway.id,
+ "route_table": route_table.id,
+ "subnets": self.subnet_sizes(
+ len(self.get_availability_zones_list()), self.vpc_cidr_block
+ ),
+ }
- self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id,
- 'subnets': self.subnet_sizes(len(self.get_availability_zones_list()),
- self.vpc_cidr_block)}
return vpc.id
except Exception as e:
self.format_vimconn_exception(e)
tenant_id: returned VIM tenant_id on "new_tenant"
Returns None on success. Raises an exception on failure. If the tenant is not found, raises vimconnNotFoundException
"""
-
self.logger.debug("Deleting specified VPC")
+
try:
self._reload_connection()
vpc = self.vpc_data.get(tenant_id)
- if 'gateway' in vpc and 'route_table' in vpc:
- gateway_id, route_table_id = vpc['gateway'], vpc['route_table']
+
+ if "gateway" in vpc and "route_table" in vpc:
+ gateway_id, route_table_id = vpc["gateway"], vpc["route_table"]
self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id)
self.conn_vpc.delete_vpc(tenant_id)
- self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0')
+ self.conn_vpc.delete_route(route_table_id, "0.0.0.0/0")
else:
self.conn_vpc.delete_vpc(tenant_id)
except Exception as e:
self.format_vimconn_exception(e)
def subnet_sizes(self, availability_zones, cidr):
- """Calcualtes possible subnets given CIDR value of VPC
- """
-
+ """Calculates possible subnets given CIDR value of VPC"""
if availability_zones != 2 and availability_zones != 3:
self.logger.debug("Number of AZs should be 2 or 3")
+
raise vimconn.VimConnNotSupportedException("Number of AZs should be 2 or 3")
- netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128')
+ netmasks = (
+ "255.255.252.0",
+ "255.255.254.0",
+ "255.255.255.0",
+ "255.255.255.128",
+ )
ip = netaddr.IPNetwork(cidr)
mask = ip.netmask
if str(mask) not in netmasks:
self.logger.debug("Netmask " + str(mask) + " not found")
- raise vimconn.VimConnNotFoundException("Netmask " + str(mask) + " not found")
+
+ raise vimconn.VimConnNotFoundException(
+ "Netmask " + str(mask) + " not found"
+ )
if availability_zones == 2:
for n, netmask in enumerate(netmasks):
pub_net = list(ip.subnet(n + 24))
pri_subs = pub_net[1:]
pub_mask = pub_net[0].netmask
- pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27))
+
+ pub_split = (
+ list(ip.subnet(26))
+ if (str(pub_mask) == "255.255.255.0")
+ else list(ip.subnet(27))
+ )
pub_subs = pub_split[:3]
subnets = pub_subs + pri_subs
return map(str, subnets)
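A minimal netaddr sketch of the splitting done above for the 2-AZ case, following only the first loop iteration (n == 0); the exact AWS subnet layout is the connector's concern:

import netaddr

ip = netaddr.IPNetwork("10.0.0.0/24")
pub_net = list(ip.subnet(24))                 # n + 24 with n == 0
pri_subs = pub_net[1:]                        # empty for a /24 block
pub_mask = pub_net[0].netmask
pub_split = (
    list(ip.subnet(26)) if str(pub_mask) == "255.255.255.0" else list(ip.subnet(27))
)
pub_subs = pub_split[:3]
print([str(s) for s in pub_subs + pri_subs])  # three /26 public subnets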
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
-
self.logger.debug("Adding a subnet to VPC")
+
try:
created_items = {}
self._reload_connection()
subnet = None
vpc_id = self.vpc_id
+
if self.vpc_data.get(vpc_id, None):
- cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) -
- set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0]
+ cidr_block = list(
+ set(self.vpc_data[vpc_id]["subnets"])
+ - set(
+ self.get_network_details(
+ {"tenant_id": vpc_id}, detail="cidr_block"
+ )
+ )
+ )[0]
else:
- vpc = self.get_tenant_list({'id': vpc_id})[0]
- subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block'])
- cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']},
- detail='cidr_block')))[0]
+ vpc = self.get_tenant_list({"id": vpc_id})[0]
+ subnet_list = self.subnet_sizes(
+ len(self.get_availability_zones_list()), vpc["cidr_block"]
+ )
+ cidr_block = list(
+ set(subnet_list)
+ - set(
+ self.get_network_details(
+ {"tenant_id": vpc["id"]}, detail="cidr_block"
+ )
+ )
+ )[0]
+
subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block)
+
return subnet.id, created_items
except Exception as e:
self.format_vimconn_exception(e)
def get_network_details(self, filters, detail):
- """Get specified details related to a subnet
- """
+ """Get specified details related to a subnet"""
detail_list = []
subnet_list = self.get_network_list(filters)
+
for net in subnet_list:
detail_list.append(net[detail])
+
return detail_list
def get_network_list(self, filter_dict={}):
List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
authorization, or some other unspecific error
"""
-
self.logger.debug("Getting all subnets from VIM")
+
try:
self._reload_connection()
tfilters = {}
+
if filter_dict != {}:
- if 'tenant_id' in filter_dict:
- tfilters['vpcId'] = filter_dict['tenant_id']
- subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters)
+ if "tenant_id" in filter_dict:
+ tfilters["vpcId"] = filter_dict["tenant_id"]
+
+ subnets = self.conn_vpc.get_all_subnets(
+ subnet_ids=filter_dict.get("name", None), filters=tfilters
+ )
net_list = []
+
for net in subnets:
net_list.append(
- {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id),
- 'cidr_block': str(net.cidr_block), 'type': 'bridge'})
+ {
+ "id": str(net.id),
+ "name": str(net.id),
+ "status": str(net.state),
+ "vpc_id": str(net.vpc_id),
+ "cidr_block": str(net.cidr_block),
+ "type": "bridge",
+ }
+ )
+
return net_list
except Exception as e:
self.format_vimconn_exception(e)
other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
Raises an exception upon error or when network is not found
"""
-
self.logger.debug("Getting Subnet from VIM")
+
try:
self._reload_connection()
subnet = self.conn_vpc.get_all_subnets(net_id)[0]
- return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state),
- 'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)}
+
+ return {
+ "id": str(subnet.id),
+ "name": str(subnet.id),
+ "status": str(subnet.state),
+ "vpc_id": str(subnet.vpc_id),
+ "cidr_block": str(subnet.cidr_block),
+ }
except Exception as e:
self.format_vimconn_exception(e)
:param created_items: dictionary with extra items to be deleted. provided by method new_network
Returns the network identifier or raises an exception upon error or when network is not found
"""
-
self.logger.debug("Deleting subnet from VIM")
+
try:
self._reload_connection()
self.logger.debug("DELETING NET_ID: " + str(net_id))
self.conn_vpc.delete_subnet(net_id)
+
return net_id
except Exception as e:
self.format_vimconn_exception(e)
vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
'net_id2': ...
"""
-
self._reload_connection()
+
try:
dict_entry = {}
+
for net_id in net_list:
subnet_dict = {}
subnet = None
+
try:
subnet = self.conn_vpc.get_all_subnets(net_id)[0]
+
if subnet.state == "pending":
- subnet_dict['status'] = "BUILD"
+ subnet_dict["status"] = "BUILD"
elif subnet.state == "available":
- subnet_dict['status'] = 'ACTIVE'
+ subnet_dict["status"] = "ACTIVE"
else:
- subnet_dict['status'] = 'ERROR'
- subnet_dict['error_msg'] = ''
+ subnet_dict["status"] = "ERROR"
+ subnet_dict["error_msg"] = ""
except Exception:
- subnet_dict['status'] = 'DELETED'
- subnet_dict['error_msg'] = 'Network not found'
+ subnet_dict["status"] = "DELETED"
+ subnet_dict["error_msg"] = "Network not found"
finally:
try:
- subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256)
+ subnet_dict["vim_info"] = yaml.safe_dump(
+ subnet, default_flow_style=True, width=256
+ )
except yaml.YAMLError:
- subnet_dict['vim_info'] = str(subnet)
+ subnet_dict["vim_info"] = str(subnet)
+
dict_entry[net_id] = subnet_dict
+
return dict_entry
except Exception as e:
self.format_vimconn_exception(e)
Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
Raises an exception upon error or if not found
"""
-
self.logger.debug("Getting instance type")
+
try:
if flavor_id in self.flavor_info:
return self.flavor_info[flavor_id]
else:
- raise vimconn.VimConnNotFoundException("Cannot find flavor with this flavor ID/Name")
+ raise vimconn.VimConnNotFoundException(
+ "Cannot find flavor with this flavor ID/Name"
+ )
except Exception as e:
self.format_vimconn_exception(e)
#todo: complete parameters for EPA
Returns the flavor_id or raises a vimconnNotFoundException
"""
-
self.logger.debug("Getting flavor id from data")
+
try:
flavor = None
for key, values in self.flavor_info.items():
if (values["ram"], values["cpus"], values["disk"]) == (
- flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+ flavor_dict["ram"],
+ flavor_dict["vcpus"],
+ flavor_dict["disk"],
+ ):
flavor = (key, values)
break
elif (values["ram"], values["cpus"], values["disk"]) >= (
- flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+ flavor_dict["ram"],
+ flavor_dict["vcpus"],
+ flavor_dict["disk"],
+ ):
if not flavor:
flavor = (key, values)
else:
if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= (
- values["ram"], values["cpus"], values["disk"]):
+ values["ram"],
+ values["cpus"],
+ values["disk"],
+ ):
flavor = (key, values)
+
if flavor:
return flavor[0]
- raise vimconn.VimConnNotFoundException("Cannot find flavor with this flavor ID/Name")
+
+ raise vimconn.VimConnNotFoundException(
+ "Cannot find flavor with this flavor ID/Name"
+ )
except Exception as e:
self.format_vimconn_exception(e)
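A standalone sketch of the tuple-based selection above: the flavor whose (ram, cpus, disk) triple exactly matches wins, otherwise the smallest triple that still covers the request is kept. The flavor_info contents are illustrative; the real map comes from the "flavor_info" config option.

flavor_info = {  # illustrative sizes, not real connector data
    "t2.micro": {"ram": 1024, "cpus": 1, "disk": 8},
    "t2.small": {"ram": 2048, "cpus": 1, "disk": 20},
    "m5.large": {"ram": 8192, "cpus": 2, "disk": 50},
}
request = {"ram": 2048, "vcpus": 1, "disk": 10}
wanted = (request["ram"], request["vcpus"], request["disk"])

best = None
for key, values in flavor_info.items():
    candidate = (values["ram"], values["cpus"], values["disk"])
    if candidate == wanted:
        best = key
        break
    if candidate >= wanted:
        if best is None or candidate <= (
            flavor_info[best]["ram"],
            flavor_info[best]["cpus"],
            flavor_info[best]["disk"],
        ):
            best = key
print(best)  # "t2.small" with these numbers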
def new_image(self, image_dict):
- """ Adds a tenant image to VIM
+ """Adds a tenant image to VIM
Params: image_dict
name (string) - The name of the AMI. Valid only for EBS-based images.
description (string) - The description of the AMI.
volumes behind after instance termination is not free
Returns: image_id - image ID of the newly created image
"""
-
try:
self._reload_connection()
- image_location = image_dict.get('image_location', None)
+ image_location = image_dict.get("image_location", None)
+
if image_location:
image_location = str(self.account_id) + str(image_location)
- image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None),
- image_location, image_dict.get('architecture', None),
- image_dict.get('kernel_id', None),
- image_dict.get('root_device_name', None),
- image_dict.get('block_device_map', None),
- image_dict.get('virtualization_type', None),
- image_dict.get('sriov_net_support', None),
- image_dict.get('snapshot_id', None),
- image_dict.get('delete_root_volume_on_termination', None))
+ image_id = self.conn.register_image(
+ image_dict.get("name", None),
+ image_dict.get("description", None),
+ image_location,
+ image_dict.get("architecture", None),
+ image_dict.get("kernel_id", None),
+ image_dict.get("root_device_name", None),
+ image_dict.get("block_device_map", None),
+ image_dict.get("virtualization_type", None),
+ image_dict.get("sriov_net_support", None),
+ image_dict.get("snapshot_id", None),
+ image_dict.get("delete_root_volume_on_termination", None),
+ )
+
return image_id
except Exception as e:
self.format_vimconn_exception(e)
try:
self._reload_connection()
self.conn.deregister_image(image_id)
+
return image_id
except Exception as e:
self.format_vimconn_exception(e)
def get_image_id_from_path(self, path):
- '''
+ """
Params: path - location of the image
Returns: image_id - ID of the matching image
- '''
+ """
self._reload_connection()
try:
filters = {}
+
if path:
- tokens = path.split('/')
- filters['owner_id'] = tokens[0]
- filters['name'] = '/'.join(tokens[1:])
+ tokens = path.split("/")
+ filters["owner_id"] = tokens[0]
+ filters["name"] = "/".join(tokens[1:])
+
image = self.conn.get_all_images(filters=filters)[0]
+
return image.id
except Exception as e:
self.format_vimconn_exception(e)
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
"""
-
self.logger.debug("Getting image list from VIM")
+
try:
self._reload_connection()
image_id = None
filters = {}
- if 'id' in filter_dict:
- image_id = filter_dict['id']
- if 'name' in filter_dict:
- filters['name'] = filter_dict['name']
- if 'location' in filter_dict:
- filters['location'] = filter_dict['location']
+
+ if "id" in filter_dict:
+ image_id = filter_dict["id"]
+
+ if "name" in filter_dict:
+ filters["name"] = filter_dict["name"]
+
+ if "location" in filter_dict:
+ filters["location"] = filter_dict["location"]
+
# filters['image_type'] = 'machine'
# filter_dict['owner_id'] = self.account_id
images = self.conn.get_all_images(image_id, filters=filters)
image_list = []
+
for image in images:
- image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state),
- 'owner': str(image.owner_id), 'location': str(image.location),
- 'is_public': str(image.is_public), 'architecture': str(image.architecture),
- 'platform': str(image.platform)})
+ image_list.append(
+ {
+ "id": str(image.id),
+ "name": str(image.name),
+ "status": str(image.state),
+ "owner": str(image.owner_id),
+ "location": str(image.location),
+ "is_public": str(image.is_public),
+ "architecture": str(image.architecture),
+ "platform": str(image.platform),
+ }
+ )
+
return image_list
except Exception as e:
self.format_vimconn_exception(e)
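For reference, the shape of one entry appended to image_list above (all values illustrative):

example_image = {
    "id": "ami-0123456789abcdef0",
    "name": "my-ubuntu-image",
    "status": "available",
    "owner": "123456789012",
    "location": "123456789012/my-ubuntu-image",
    "is_public": "False",
    "architecture": "x86_64",
    "platform": "None",
}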
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
- disk_list=None, availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Create a new VM/instance in AWS
Params: name
description
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
-
self.logger.debug("Creating a new VM instance")
+
try:
self._reload_connection()
instance = None
key_name=self.key_pair,
instance_type=flavor_id,
security_groups=self.security_groups,
- user_data=userdata
+ user_data=userdata,
)
else:
for index, subnet in enumerate(net_list):
- net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'),
- groups=None,
- associate_public_ip_address=True)
+ net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=subnet.get("net_id"),
+ groups=None,
+ associate_public_ip_address=True,
+ )
- if subnet.get('elastic_ip'):
+ if subnet.get("elastic_ip"):
eip = self.conn.allocate_address()
- self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id)
+ self.conn.associate_address(
+ allocation_id=eip.allocation_id,
+ network_interface_id=net_intr.id,
+ )
if index == 0:
reservation = self.conn.run_instances(
key_name=self.key_pair,
instance_type=flavor_id,
security_groups=self.security_groups,
- network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
- user_data=userdata
+ network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(
+ net_intr
+ ),
+ user_data=userdata,
)
else:
while True:
try:
self.conn.attach_network_interface(
- network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
- instance_id=instance.id, device_index=0)
+ network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(
+ net_intr
+ ),
+ instance_id=instance.id,
+ device_index=0,
+ )
break
except Exception:
time.sleep(10)
- net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id
+
+ net_list[index]["vim_id"] = (
+ reservation.instances[0].interfaces[index].id
+ )
instance = reservation.instances[0]
+
return instance.id, None
except Exception as e:
self.format_vimconn_exception(e)
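A sketch of the net_list entries consumed by new_vminstance above; "net_id" is an AWS subnet id, "elastic_ip" is optional, and the method writes the created interface id back into "vim_id" (the ids below are made up):

net_list = [
    {"net_id": "subnet-0123456789abcdef0", "elastic_ip": False},
    {"net_id": "subnet-0fedcba9876543210", "elastic_ip": True},
]
# After creation, each entry also carries:
#   net_list[i]["vim_id"] = reservation.instances[0].interfaces[i].id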
def get_vminstance(self, vm_id):
"""Returns the VM instance information from VIM"""
-
try:
self._reload_connection()
reservation = self.conn.get_all_instances(vm_id)
+
return reservation[0].instances[0].__dict__
except Exception as e:
self.format_vimconn_exception(e)
def delete_vminstance(self, vm_id, created_items=None):
"""Removes a VM instance from VIM
Returns the instance identifier"""
-
try:
self._reload_connection()
self.logger.debug("DELETING VM_ID: " + str(vm_id))
self.conn.terminate_instances(vm_id)
+
return vm_id
except Exception as e:
self.format_vimconn_exception(e)
def refresh_vms_status(self, vm_list):
- """ Get the status of the virtual machines and their interfaces/ports
+ """Get the status of the virtual machines and their interfaces/ports
Params: the list of VM identifiers
Returns a dictionary with:
vm_id: #VIM id of this Virtual Machine
ip_address - The IP address of the interface within the subnet.
"""
self.logger.debug("Getting VM instance information from VIM")
+
try:
self._reload_connection()
reservation = self.conn.get_all_instances(vm_list)[0]
instances = {}
instance_dict = {}
+
for instance in reservation.instances:
try:
if instance.state in ("pending",):
- instance_dict['status'] = "BUILD"
+ instance_dict["status"] = "BUILD"
elif instance.state in ("available", "running", "up"):
- instance_dict['status'] = 'ACTIVE'
+ instance_dict["status"] = "ACTIVE"
else:
- instance_dict['status'] = 'ERROR'
- instance_dict['error_msg'] = ""
- instance_dict['interfaces'] = []
+ instance_dict["status"] = "ERROR"
+
+ instance_dict["error_msg"] = ""
+ instance_dict["interfaces"] = []
interface_dict = {}
+
for interface in instance.interfaces:
- interface_dict['vim_interface_id'] = interface.id
- interface_dict['vim_net_id'] = interface.subnet_id
- interface_dict['mac_address'] = interface.mac_address
- if hasattr(interface, 'publicIp') and interface.publicIp is not None:
- interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address
+ interface_dict["vim_interface_id"] = interface.id
+ interface_dict["vim_net_id"] = interface.subnet_id
+ interface_dict["mac_address"] = interface.mac_address
+
+ if (
+ hasattr(interface, "publicIp")
+ and interface.publicIp is not None
+ ):
+ interface_dict["ip_address"] = (
+ interface.publicIp + ";" + interface.private_ip_address
+ )
else:
- interface_dict['ip_address'] = interface.private_ip_address
- instance_dict['interfaces'].append(interface_dict)
+ interface_dict["ip_address"] = interface.private_ip_address
+
+ instance_dict["interfaces"].append(interface_dict)
except Exception as e:
- self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
- instance_dict['status'] = "DELETED"
- instance_dict['error_msg'] = str(e)
+ self.logger.error(
+ "Exception getting vm status: %s", str(e), exc_info=True
+ )
+ instance_dict["status"] = "DELETED"
+ instance_dict["error_msg"] = str(e)
finally:
try:
- instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256)
+ instance_dict["vim_info"] = yaml.safe_dump(
+ instance, default_flow_style=True, width=256
+ )
except yaml.YAMLError:
# self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
- instance_dict['vim_info'] = str(instance)
+ instance_dict["vim_info"] = str(instance)
+
instances[instance.id] = instance_dict
+
return instances
except Exception as e:
self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
self.conn.terminate_instances(vm_id)
elif "reboot" in action_dict:
self.conn.reboot_instances(vm_id)
+
return None
except Exception as e:
self.format_vimconn_exception(e)
setup(
name=_name,
- description='OSM ro vim plugin for aws',
+ description="OSM ro vim plugin for aws",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
- "requests", "netaddr", "PyYAML", "boto",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "boto",
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_aws = osm_rovim_aws.vimconn_aws:vimconnector'],
+ "osm_rovim.plugins": ["rovim_aws = osm_rovim_aws.vimconn_aws:vimconnector"],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rovim_aws --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import msrestazure.tools as azure_tools
from requests.exceptions import ConnectionError
-__author__ = 'Isabel Lloret, Sergio Gonzalez, Alfonso Tierno'
-__date__ = '$18-apr-2019 23:59:59$'
+__author__ = "Isabel Lloret, Sergio Gonzalez, Alfonso Tierno"
+__date__ = "$18-apr-2019 23:59:59$"
-if getenv('OSMRO_PDB_DEBUG'):
+if getenv("OSMRO_PDB_DEBUG"):
import sys
+
print(sys.path)
import pdb
+
pdb.set_trace()
"Updating": "BUILD",
"Deleting": "INACTIVE",
"Succeeded": "ACTIVE",
- "Failed": "ERROR"
+ "Failed": "ERROR",
}
# Translate azure power state to OSM provision state
"stopped": "INACTIVE",
"unknown": "OTHER",
"deallocated": "BUILD",
- "deallocating": "BUILD"
+ "deallocating": "BUILD",
}
AZURE_ZONES = ["1", "2", "3"]
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
- config={}, persistent_info={}):
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
"""
Constructor of VIM. Raise an exception if some needed parameter is missing, but it must not do any connectivity
checking against the VIM
"^((?!Standard_B).)*$" will filter out Standard_B range that is cheap but is very overused
"^Standard_B" will select a serie B maybe for test environment
"""
-
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config, persistent_info)
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ persistent_info,
+ )
# Variable that indicates if client must be reloaded or initialized
self.reload_client = True
self.vnet_address_space = None
# LOGGER
- self.logger = logging.getLogger('ro.vim.azure')
+ self.logger = logging.getLogger("ro.vim.azure")
+
if log_level:
logging.basicConfig()
self.logger.setLevel(getattr(logging, log_level))
- self.tenant = (tenant_id or tenant_name)
+ self.tenant = tenant_id or tenant_name
# Store config to create azure subscription later
self._config = {
"user": user,
"passwd": passwd,
- "tenant": tenant_id or tenant_name
+ "tenant": tenant_id or tenant_name,
}
# SUBSCRIPTION
- if 'subscription_id' in config:
- self._config["subscription_id"] = config.get('subscription_id')
- # self.logger.debug('Setting subscription to: %s', self.config["subscription_id"])
+ if "subscription_id" in config:
+ self._config["subscription_id"] = config.get("subscription_id")
+ # self.logger.debug("Setting subscription to: %s", self.config["subscription_id"])
else:
- raise vimconn.VimConnException('Subscription not specified')
+ raise vimconn.VimConnException("Subscription not specified")
# REGION
- if 'region_name' in config:
- self.region = config.get('region_name')
+ if "region_name" in config:
+ self.region = config.get("region_name")
else:
- raise vimconn.VimConnException('Azure region_name is not specified at config')
+ raise vimconn.VimConnException(
+ "Azure region_name is not specified at config"
+ )
# RESOURCE_GROUP
- if 'resource_group' in config:
- self.resource_group = config.get('resource_group')
+ if "resource_group" in config:
+ self.resource_group = config.get("resource_group")
else:
- raise vimconn.VimConnException('Azure resource_group is not specified at config')
+ raise vimconn.VimConnException(
+ "Azure resource_group is not specified at config"
+ )
# VNET_NAME
- if 'vnet_name' in config:
+ if "vnet_name" in config:
self.vnet_name = config["vnet_name"]
-
+
# public ssh key
- self.pub_key = config.get('pub_key')
+ self.pub_key = config.get("pub_key")
# flavor pattern regex
- if 'flavors_pattern' in config:
- self._config['flavors_pattern'] = config['flavors_pattern']
-
+ if "flavors_pattern" in config:
+ self._config["flavors_pattern"] = config["flavors_pattern"]
+
def _reload_connection(self):
"""
Called before any operation, checks python azure clients
"""
if self.reload_client:
- self.logger.debug('reloading azure client')
+ self.logger.debug("reloading azure client")
+
try:
self.credentials = ServicePrincipalCredentials(
client_id=self._config["user"],
secret=self._config["passwd"],
- tenant=self._config["tenant"]
+ tenant=self._config["tenant"],
+ )
+ self.conn = ResourceManagementClient(
+ self.credentials, self._config["subscription_id"]
+ )
+ self.conn_compute = ComputeManagementClient(
+ self.credentials, self._config["subscription_id"]
+ )
+ self.conn_vnet = NetworkManagementClient(
+ self.credentials, self._config["subscription_id"]
)
- self.conn = ResourceManagementClient(self.credentials, self._config["subscription_id"])
- self.conn_compute = ComputeManagementClient(self.credentials, self._config["subscription_id"])
- self.conn_vnet = NetworkManagementClient(self.credentials, self._config["subscription_id"])
self._check_or_create_resource_group()
self._check_or_create_vnet()
Obtains resource_name from the complete Azure identifier: resource_name will always be the last item
"""
try:
- resource = str(resource_id.split('/')[-1])
+ resource = str(resource_id.split("/")[-1])
+
return resource
except Exception as e:
- raise vimconn.VimConnException("Unable to get resource name from resource_id '{}' Error: '{}'".
- format(resource_id, e))
+ raise vimconn.VimConnException(
+ "Unable to get resource name from resource_id '{}' Error: '{}'".format(
+ resource_id, e
+ )
+ )
def _get_location_from_resource_group(self, resource_group_name):
try:
location = self.conn.resource_groups.get(resource_group_name).location
+
return location
except Exception:
- raise vimconn.VimConnNotFoundException("Location '{}' not found".format(resource_group_name))
+ raise vimconn.VimConnNotFoundException(
+ "Location '{}' not found".format(resource_group_name)
+ )
def _get_resource_group_name_from_resource_id(self, resource_id):
-
try:
- rg = str(resource_id.split('/')[4])
+ rg = str(resource_id.split("/")[4])
+
return rg
except Exception:
- raise vimconn.VimConnException("Unable to get resource group from invalid resource_id format '{}'".
- format(resource_id))
+ raise vimconn.VimConnException(
+ "Unable to get resource group from invalid resource_id format '{}'".format(
+ resource_id
+ )
+ )
def _get_net_name_from_resource_id(self, resource_id):
-
try:
- net_name = str(resource_id.split('/')[8])
+ net_name = str(resource_id.split("/")[8])
+
return net_name
except Exception:
- raise vimconn.VimConnException("Unable to get azure net_name from invalid resource_id format '{}'".
- format(resource_id))
+ raise vimconn.VimConnException(
+ "Unable to get azure net_name from invalid resource_id format '{}'".format(
+ resource_id
+ )
+ )
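A worked, standalone example of the resource-id slicing used by the three helpers above (the id below is illustrative but follows the Azure subnet id layout):

resource_id = (
    "/subscriptions/0000-1111/resourceGroups/myRG/providers/Microsoft.Network"
    "/virtualNetworks/myVnet/subnets/mySubnet"
)
parts = resource_id.split("/")
print(parts[-1])  # mySubnet -> _get_resource_name_from_resource_id
print(parts[4])   # myRG     -> _get_resource_group_name_from_resource_id
print(parts[8])   # myVnet   -> _get_net_name_from_resource_id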
def _check_subnets_for_vm(self, net_list):
# All subnets must belong to the same resource group and vnet
- rg_vnet = set(self._get_resource_group_name_from_resource_id(net['net_id']) +
- self._get_net_name_from_resource_id(net['net_id']) for net in net_list)
+ rg_vnet = set(
+ self._get_resource_group_name_from_resource_id(net["net_id"])
+ + self._get_net_name_from_resource_id(net["net_id"])
+ for net in net_list
+ )
if len(rg_vnet) != 1:
- raise self._format_vimconn_exception('Azure VMs can only attach to subnets in same VNET')
+ raise self._format_vimconn_exception(
+ "Azure VMs can only attach to subnets in same VNET"
+ )
def _format_vimconn_exception(self, e):
"""
if isinstance(e, vimconn.VimConnException):
raise
elif isinstance(e, AuthenticationError):
- raise vimconn.VimConnAuthException(type(e).__name__ + ': ' + str(e))
+ raise vimconn.VimConnAuthException(type(e).__name__ + ": " + str(e))
elif isinstance(e, ConnectionError):
- raise vimconn.VimConnConnectionException(type(e).__name__ + ': ' + str(e))
+ raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e))
else:
# In case of generic error recreate client
self.reload_client = True
- raise vimconn.VimConnException(type(e).__name__ + ': ' + str(e))
+
+ raise vimconn.VimConnException(type(e).__name__ + ": " + str(e))
def _check_or_create_resource_group(self):
"""
"""
try:
rg_exists = self.conn.resource_groups.check_existence(self.resource_group)
+
if not rg_exists:
self.logger.debug("create base rgroup: %s", self.resource_group)
- self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region})
+ self.conn.resource_groups.create_or_update(
+ self.resource_group, {"location": self.region}
+ )
except Exception as e:
self._format_vimconn_exception(e)
Try to get the existing base vnet; if it does not exist, create it
"""
try:
- vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
+ vnet = self.conn_vnet.virtual_networks.get(
+ self.resource_group, self.vnet_name
+ )
self.vnet_address_space = vnet.address_space.address_prefixes[0]
self.vnet_id = vnet.id
+
return
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
# if it does not exist, create it
try:
vnet_params = {
- 'location': self.region,
- 'address_space': {
- 'address_prefixes': ["10.0.0.0/8"]
- },
+ "location": self.region,
+ "address_space": {"address_prefixes": ["10.0.0.0/8"]},
}
self.vnet_address_space = "10.0.0.0/8"
self.logger.debug("create base vnet: %s", self.vnet_name)
- self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
- vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
+ self.conn_vnet.virtual_networks.create_or_update(
+ self.resource_group, self.vnet_name, vnet_params
+ )
+ vnet = self.conn_vnet.virtual_networks.get(
+ self.resource_group, self.vnet_name
+ )
self.vnet_id = vnet.id
except Exception as e:
self._format_vimconn_exception(e)
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""
Adds a tenant network to VIM
:param net_name: name of the network
otherwise it creates a subnet in the indicated address
:return: a tuple with the network identifier and created_items, or raises an exception on error
"""
- self.logger.debug('create subnet name %s, ip_profile %s', net_name, ip_profile)
+ self.logger.debug("create subnet name %s, ip_profile %s", net_name, ip_profile)
self._reload_connection()
if ip_profile is None:
for ip_range in netaddr.IPNetwork(self.vnet_address_space).subnet(24):
for used_subnet in used_subnets:
subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"])
+
if subnet_range in ip_range or ip_range in subnet_range:
# this range overlaps with an existing subnet ip range; break and look for another
break
else:
ip_profile = {"subnet_address": str(ip_range)}
- self.logger.debug('dinamically obtained ip_profile: %s', ip_range)
+ self.logger.debug("dinamically obtained ip_profile: %s", ip_range)
break
else:
- raise vimconn.VimConnException("Cannot find a non-used subnet range in {}".
- format(self.vnet_address_space))
+ raise vimconn.VimConnException(
+ "Cannot find a non-used subnet range in {}".format(
+ self.vnet_address_space
+ )
+ )
else:
- ip_profile = {"subnet_address": ip_profile['subnet_address']}
+ ip_profile = {"subnet_address": ip_profile["subnet_address"]}
try:
# subnet_name = "{}-{}".format(net_name[:24], uuid4())
- subnet_params = {
- 'address_prefix': ip_profile['subnet_address']
- }
+ subnet_params = {"address_prefix": ip_profile["subnet_address"]}
# Assign a not duplicated net name
subnet_name = self._get_unused_subnet_name(net_name)
- self.logger.debug('creating subnet_name: {}'.format(subnet_name))
- async_creation = self.conn_vnet.subnets.create_or_update(self.resource_group, self.vnet_name,
- subnet_name, subnet_params)
+ self.logger.debug("creating subnet_name: {}".format(subnet_name))
+ async_creation = self.conn_vnet.subnets.create_or_update(
+ self.resource_group, self.vnet_name, subnet_name, subnet_params
+ )
async_creation.wait()
- self.logger.debug('created subnet_name: {}'.format(subnet_name))
+ self.logger.debug("created subnet_name: {}".format(subnet_name))
return "{}/subnets/{}".format(self.vnet_id, subnet_name), None
except Exception as e:
"""
all_subnets = self.conn_vnet.subnets.list(self.resource_group, self.vnet_name)
# Filter to subnets starting with the indicated name
- subnets = list(filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets))
+ subnets = list(
+ filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets)
+ )
net_names = [str(subnet.name) for subnet in subnets]
# get the name with the first not used suffix
while name in net_names:
name_suffix += 1
name = subnet_name + "-" + str(name_suffix)
+
return name
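# A self-contained illustration of the "-N suffix" de-duplication performed by
# _get_unused_subnet_name() (and by _get_unused_vm_name() further down); the helper
# name and the sample set of existing names are invented for the example.
def unused_name(base, existing):
    candidate, suffix = base, 0
    while candidate in existing:
        suffix += 1
        candidate = "{}-{}".format(base, suffix)
    return candidate

# unused_name("mgmt", {"mgmt", "mgmt-1"}) -> "mgmt-2"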
def _create_nic(self, net, nic_name, static_ip=None, created_items={}):
-
- self.logger.debug('create nic name %s, net_name %s', nic_name, net)
+ self.logger.debug("create nic name %s, net_name %s", nic_name, net)
self._reload_connection()
- subnet_id = net['net_id']
+ subnet_id = net["net_id"]
location = self._get_location_from_resource_group(self.resource_group)
try:
- net_ifz = {'location': location}
- net_ip_config = {'name': nic_name + '-ipconfiguration', 'subnet': {'id': subnet_id}}
+ net_ifz = {"location": location}
+ net_ip_config = {
+ "name": nic_name + "-ipconfiguration",
+ "subnet": {"id": subnet_id},
+ }
+
if static_ip:
- net_ip_config['privateIPAddress'] = static_ip
- net_ip_config['privateIPAllocationMethod'] = 'Static'
- net_ifz['ip_configurations'] = [net_ip_config]
- mac_address = net.get('mac_address')
+ net_ip_config["privateIPAddress"] = static_ip
+ net_ip_config["privateIPAllocationMethod"] = "Static"
+
+ net_ifz["ip_configurations"] = [net_ip_config]
+ mac_address = net.get("mac_address")
+
if mac_address:
- net_ifz['mac_address'] = mac_address
+ net_ifz["mac_address"] = mac_address
- async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name,
- net_ifz)
+ async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
+ self.resource_group, nic_name, net_ifz
+ )
nic_data = async_nic_creation.result()
created_items[nic_data.id] = True
- self.logger.debug('created nic name %s', nic_name)
+ self.logger.debug("created nic name %s", nic_name)
- public_ip = net.get('floating_ip')
+ public_ip = net.get("floating_ip")
if public_ip:
public_ip_address_params = {
- 'location': location,
- 'public_ip_allocation_method': 'Dynamic'
+ "location": location,
+ "public_ip_allocation_method": "Dynamic",
}
- public_ip_name = nic_name + '-public-ip'
+ public_ip_name = nic_name + "-public-ip"
async_public_ip = self.conn_vnet.public_ip_addresses.create_or_update(
- self.resource_group,
- public_ip_name,
- public_ip_address_params
+ self.resource_group, public_ip_name, public_ip_address_params
)
public_ip = async_public_ip.result()
- self.logger.debug('created public IP: {}'.format(public_ip))
+ self.logger.debug("created public IP: {}".format(public_ip))
# Associate NIC to Public IP
nic_data = self.conn_vnet.network_interfaces.get(
- self.resource_group,
- nic_name)
+ self.resource_group, nic_name
+ )
nic_data.ip_configurations[0].public_ip_address = public_ip
created_items[public_ip.id] = True
self.conn_vnet.network_interfaces.create_or_update(
- self.resource_group,
- nic_name,
- nic_data)
+ self.resource_group, nic_name, nic_data
+ )
except Exception as e:
self._format_vimconn_exception(e)
"""
It is not allowed to create new flavors in Azure, must always use an existing one
"""
- raise vimconn.VimConnAuthException("It is not possible to create new flavors in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to create new flavors in AZURE"
+ )
def new_tenant(self, tenant_name, tenant_description):
"""
It is not allowed to create new tenants in azure
"""
- raise vimconn.VimConnAuthException("It is not possible to create a TENANT in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to create a TENANT in AZURE"
+ )
def new_image(self, image_dict):
"""
It is not allowed to create new images in Azure, must always use an existing one
"""
- raise vimconn.VimConnAuthException("It is not possible to create new images in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to create new images in AZURE"
+ )
def get_image_id_from_path(self, path):
"""Get the image id from image path in the VIM database.
- Returns the image_id or raises a vimconnNotFoundException
+ Returns the image_id or raises a vimconnNotFoundException
"""
- raise vimconn.VimConnAuthException("It is not possible to obtain image from path in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to obtain image from path in AZURE"
+ )
def get_image_list(self, filter_dict={}):
"""Obtain tenant images from VIM
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
"""
-
self.logger.debug("get_image_list filter {}".format(filter_dict))
self._reload_connection()
try:
image_list = []
if filter_dict.get("name"):
- # name will have the format 'publisher:offer:sku:version'
+ # name will have the format "publisher:offer:sku:version"
# publisher is required, offer sku and version will be searched if not provided
params = filter_dict["name"].split(":")
publisher = params[0]
if publisher:
# obtain offer list
offer_list = self._get_offer_list(params, publisher)
+
for offer in offer_list:
# obtain skus
sku_list = self._get_sku_list(params, publisher, offer)
+
for sku in sku_list:
# if version is defined get directly version, else list images
if len(params) == 4 and params[3]:
version = params[3]
- image_list = self._get_version_image_list(publisher, offer, sku, version)
+ image_list = self._get_version_image_list(
+ publisher, offer, sku, version
+ )
else:
- image_list = self._get_sku_image_list(publisher, offer, sku)
+ image_list = self._get_sku_image_list(
+ publisher, offer, sku
+ )
else:
raise vimconn.VimConnAuthException(
- "List images in Azure must include name param with at least publisher")
+ "List images in Azure must include name param with at least publisher"
+ )
else:
- raise vimconn.VimConnAuthException("List images in Azure must include name param with at"
- " least publisher")
+ raise vimconn.VimConnAuthException(
+ "List images in Azure must include name param with at"
+ " least publisher"
+ )
return image_list
except Exception as e:
else:
try:
# get list of offers from azure
- result_offers = self.conn_compute.virtual_machine_images.list_offers(self.region, publisher)
+ result_offers = self.conn_compute.virtual_machine_images.list_offers(
+ self.region, publisher
+ )
+
return [offer.name for offer in result_offers]
except CloudError as e:
# azure raises CloudError when not found
- self.logger.info("error listing offers for publisher {}, Error: {}".format(publisher, e))
+ self.logger.info(
+ "error listing offers for publisher {}, Error: {}".format(
+ publisher, e
+ )
+ )
+
return []
def _get_sku_list(self, params, publisher, offer):
else:
try:
# get list of skus from azure
- result_skus = self.conn_compute.virtual_machine_images.list_skus(self.region, publisher, offer)
+ result_skus = self.conn_compute.virtual_machine_images.list_skus(
+ self.region, publisher, offer
+ )
+
return [sku.name for sku in result_skus]
except CloudError as e:
# azure raises CloudError when not found
- self.logger.info("error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e))
+ self.logger.info(
+ "error listing skus for publisher {}, offer {}, Error: {}".format(
+ publisher, offer, e
+ )
+ )
+
return []
def _get_sku_image_list(self, publisher, offer, sku):
"""
image_list = []
try:
- result_images = self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku)
+ result_images = self.conn_compute.virtual_machine_images.list(
+ self.region, publisher, offer, sku
+ )
for result_image in result_images:
- image_list.append({
- 'id': str(result_image.id),
- 'name': ":".join([publisher, offer, sku, result_image.name])
- })
+ image_list.append(
+ {
+ "id": str(result_image.id),
+ "name": ":".join([publisher, offer, sku, result_image.name]),
+ }
+ )
except CloudError as e:
self.logger.info(
- "error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e))
+ "error listing skus for publisher {}, offer {}, Error: {}".format(
+ publisher, offer, e
+ )
+ )
image_list = []
+
return image_list
def _get_version_image_list(self, publisher, offer, sku, version):
image_list = []
try:
- result_image = self.conn_compute.virtual_machine_images.get(self.region, publisher, offer, sku, version)
+ result_image = self.conn_compute.virtual_machine_images.get(
+ self.region, publisher, offer, sku, version
+ )
+
if result_image:
- image_list.append({
- 'id': str(result_image.id),
- 'name': ":".join([publisher, offer, sku, version])
- })
+ image_list.append(
+ {
+ "id": str(result_image.id),
+ "name": ":".join([publisher, offer, sku, version]),
+ }
+ )
except CloudError as e:
# azure gives CloudError when not found
- self.logger.info("error listing images for publisher {}, offer {}, sku {}, version {} Error: {}".
- format(publisher, offer, sku, version, e))
+ self.logger.info(
+ "error listing images for publisher {}, offer {}, sku {}, version {} Error: {}".format(
+ publisher, offer, sku, version, e
+ )
+ )
image_list = []
+
return image_list
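# A small sketch, not connector code, of how the "publisher:offer:sku:version" name
# accepted by get_image_list() splits into its parts; fields left out simply trigger
# the offer/sku/version listing paths above.
def split_image_name(name):
    parts = name.split(":")
    keys = ("publisher", "offer", "sku", "version")
    return {key: value for key, value in zip(keys, parts) if value}

# split_image_name("Canonical:UbuntuServer:18.04-LTS")
# -> {"publisher": "Canonical", "offer": "UbuntuServer", "sku": "18.04-LTS"}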
def get_network_list(self, filter_dict={}):
status: 'ACTIVE', not implemented in Azure #
Returns the network list of dictionaries
"""
- # self.logger.debug('getting network list for vim, filter %s', filter_dict)
+ # self.logger.debug("getting network list for vim, filter %s", filter_dict)
try:
self._reload_connection()
- vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
+ vnet = self.conn_vnet.virtual_networks.get(
+ self.resource_group, self.vnet_name
+ )
subnet_list = []
for subnet in vnet.subnets:
if filter_dict:
if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]:
continue
- if filter_dict.get("name") and \
- str(subnet.name) != filter_dict["name"]:
+
+ if (
+ filter_dict.get("name")
+ and str(subnet.name) != filter_dict["name"]
+ ):
continue
name = self._get_resource_name_from_resource_id(subnet.id)
- subnet_list.append({
- 'id': str(subnet.id),
- 'name': name,
- 'status': self.provision_state2osm[subnet.provisioning_state],
- 'cidr_block': str(subnet.address_prefix),
- 'type': 'bridge',
- 'shared': False
- })
+ subnet_list.append(
+ {
+ "id": str(subnet.id),
+ "name": name,
+ "status": self.provision_state2osm[subnet.provisioning_state],
+ "cidr_block": str(subnet.address_prefix),
+ "type": "bridge",
+ "shared": False,
+ }
+ )
return subnet_list
except Exception as e:
self._format_vimconn_exception(e)
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
- disk_list=None, availability_zone_index=None, availability_zone_list=None):
-
- self.logger.debug("new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, "
- "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s",
- name, image_id, flavor_id, net_list, cloud_config, disk_list,
- availability_zone_index, availability_zone_list)
-
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
+ self.logger.debug(
+ "new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, "
+ "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s",
+ name,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config,
+ disk_list,
+ availability_zone_index,
+ availability_zone_list,
+ )
self._reload_connection()
# Validate input data is valid
# At least one network must be provided
if not net_list:
- raise vimconn.VimConnException("At least one net must be provided to create a new VM")
+ raise vimconn.VimConnException(
+ "At least one net must be provided to create a new VM"
+ )
# image_id are several fields of the image_id
image_reference = self._get_image_reference(image_id)
# Create nics for each subnet
self._check_subnets_for_vm(net_list)
vm_nics = []
+
for idx, net in enumerate(net_list):
# Fault with subnet_id
- # subnet_id=net['subnet_id']
- # subnet_id=net['net_id']
- nic_name = vm_name + '-nic-' + str(idx)
- vm_nic, nic_items = self._create_nic(net, nic_name, net.get('ip_address'), created_items)
- vm_nics.append({'id': str(vm_nic.id)})
- net['vim_id'] = vm_nic.id
+ # subnet_id=net["subnet_id"]
+ # subnet_id=net["net_id"]
+ nic_name = vm_name + "-nic-" + str(idx)
+ vm_nic, nic_items = self._create_nic(
+ net, nic_name, net.get("ip_address"), created_items
+ )
+ vm_nics.append({"id": str(vm_nic.id)})
+ net["vim_id"] = vm_nic.id
# cloud-init configuration
# cloud config
if cloud_config:
config_drive, userdata = self._create_user_data(cloud_config)
- custom_data = base64.b64encode(userdata.encode('utf-8')).decode('latin-1')
+ custom_data = base64.b64encode(userdata.encode("utf-8")).decode(
+ "latin-1"
+ )
key_data = None
key_pairs = cloud_config.get("key-pairs")
if key_pairs:
user_name = "osm" # DEFAULT USER IS OSM
os_profile = {
- 'computer_name': vm_name,
- 'admin_username': user_name,
- 'linux_configuration': {
+ "computer_name": vm_name,
+ "admin_username": user_name,
+ "linux_configuration": {
"disable_password_authentication": True,
"ssh": {
- "public_keys": [{
- "path": "/home/{}/.ssh/authorized_keys".format(user_name),
- "key_data": key_data
- }]
- }
+ "public_keys": [
+ {
+ "path": "/home/{}/.ssh/authorized_keys".format(
+ user_name
+ ),
+ "key_data": key_data,
+ }
+ ]
+ },
},
- 'custom_data': custom_data
+ "custom_data": custom_data,
}
else:
os_profile = {
- 'computer_name': vm_name,
- 'admin_username': 'osm',
- 'admin_password': 'Osm4u!',
+ "computer_name": vm_name,
+ "admin_username": "osm",
+ "admin_password": "Osm4u!",
}
vm_parameters = {
- 'location': self.region,
- 'os_profile': os_profile,
- 'hardware_profile': {
- 'vm_size': flavor_id
- },
- 'storage_profile': {
- 'image_reference': image_reference
- }
+ "location": self.region,
+ "os_profile": os_profile,
+ "hardware_profile": {"vm_size": flavor_id},
+ "storage_profile": {"image_reference": image_reference},
}
# If the machine has several networks one must be marked as primary
if len(vm_nics) > 1:
for idx, vm_nic in enumerate(vm_nics):
if idx == 0:
- vm_nics[0]['Primary'] = True
+ vm_nics[0]["Primary"] = True
else:
- vm_nics[idx]['Primary'] = False
+ vm_nics[idx]["Primary"] = False
- vm_parameters['network_profile'] = {'network_interfaces': vm_nics}
+ vm_parameters["network_profile"] = {"network_interfaces": vm_nics}
# Obtain zone information
vm_zone = self._get_vm_zone(availability_zone_index, availability_zone_list)
if vm_zone:
- vm_parameters['zones'] = [vm_zone]
+ vm_parameters["zones"] = [vm_zone]
self.logger.debug("create vm name: %s", vm_name)
creation_result = self.conn_compute.virtual_machines.create_or_update(
- self.resource_group,
- vm_name,
- vm_parameters
+ self.resource_group, vm_name, vm_parameters
)
virtual_machine = creation_result.result()
self.logger.debug("created vm name: %s", vm_name)
# Add disks if they are provided
if disk_list:
for disk_index, disk in enumerate(disk_list):
- self.logger.debug("add disk size: %s, image: %s", disk.get("size"), disk.get("image"))
- self._add_newvm_disk(virtual_machine, vm_name, disk_index, disk, created_items)
+ self.logger.debug(
+ "add disk size: %s, image: %s",
+ disk.get("size"),
+ disk.get("image"),
+ )
+ self._add_newvm_disk(
+ virtual_machine, vm_name, disk_index, disk, created_items
+ )
if start:
- self.conn_compute.virtual_machines.start(
- self.resource_group,
- vm_name)
+ self.conn_compute.virtual_machines.start(self.resource_group, vm_name)
# start_result.wait()
return virtual_machine.id, created_items
-
+
# run_command_parameters = {
- # 'command_id': 'RunShellScript', # For linux, don't change it
- # 'script': [
- # 'date > /tmp/test.txt'
+ # "command_id": "RunShellScript", # For linux, don't change it
+ # "script": [
+ # "date > /tmp/test.txt"
# ]
# }
except Exception as e:
# Rollback vm creation
vm_id = None
+
if virtual_machine:
vm_id = virtual_machine.id
+
try:
self.logger.debug("exception creating vm try to rollback")
self.delete_vminstance(vm_id, created_items)
except Exception as e2:
self.logger.error("new_vminstance rollback fail {}".format(e2))
- self.logger.debug('Exception creating new vminstance: %s', e, exc_info=True)
+ self.logger.debug("Exception creating new vminstance: %s", e, exc_info=True)
self._format_vimconn_exception(e)
def _get_unused_vm_name(self, vm_name):
name_suffix = 0
# name = subnet_name + "-" + str(name_suffix)
name = vm_name # first subnet created will have no prefix
+
while name in vm_names:
name_suffix += 1
name = vm_name + "-" + str(name_suffix)
+
return name
def _get_vm_zone(self, availability_zone_index, availability_zone_list):
-
if availability_zone_index is None:
return None
vim_availability_zones = self._get_azure_availability_zones()
# check if VIM offer enough availability zones describe in the VNFD
- if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+ if vim_availability_zones and len(availability_zone_list) <= len(
+ vim_availability_zones
+ ):
# check if all the names of NFV AV match VIM AV names
match_by_index = False
+
if not availability_zone_list:
match_by_index = True
else:
if av not in vim_availability_zones:
match_by_index = True
break
+
if match_by_index:
return vim_availability_zones[availability_zone_index]
else:
return availability_zone_list[availability_zone_index]
else:
- raise vimconn.VimConnConflictException("No enough availability zones at VIM for this deployment")
+ raise vimconn.VimConnConflictException(
+ "No enough availability zones at VIM for this deployment"
+ )
def _get_azure_availability_zones(self):
return self.AZURE_ZONES
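# Illustrative-only restatement of the zone-selection rule in _get_vm_zone(): when
# every requested zone name exists at the VIM the requested name is used, otherwise
# the zone is chosen by index. Function and argument names are assumptions.
def pick_zone(az_index, az_list, vim_zones):
    if az_index is None:
        return None
    if not vim_zones or len(az_list or []) > len(vim_zones):
        raise ValueError("not enough availability zones at VIM")
    if az_list and all(az in vim_zones for az in az_list):
        return az_list[az_index]
    return vim_zones[az_index]

# pick_zone(1, ["zone-a", "zone-b"], ["1", "2", "3"]) -> "2"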
- def _add_newvm_disk(self, virtual_machine, vm_name, disk_index, disk, created_items={}):
-
+ def _add_newvm_disk(
+ self, virtual_machine, vm_name, disk_index, disk, created_items={}
+ ):
disk_name = None
data_disk = None
# Check if must create empty disk or from image
- if disk.get('vim_id'):
+ if disk.get("vim_id"):
# disk already exists, just get
- parsed_id = azure_tools.parse_resource_id(disk.get('vim_id'))
+ parsed_id = azure_tools.parse_resource_id(disk.get("vim_id"))
disk_name = parsed_id.get("name")
data_disk = self.conn_compute.disks.get(self.resource_group, disk_name)
else:
self.resource_group,
disk_name,
{
- 'location': self.region,
- 'disk_size_gb': disk.get("size"),
- 'creation_data': {
- 'create_option': DiskCreateOption.empty
- }
- }
+ "location": self.region,
+ "disk_size_gb": disk.get("size"),
+ "creation_data": {"create_option": DiskCreateOption.empty},
+ },
)
data_disk = async_disk_creation.result()
created_items[data_disk.id] = True
else:
image_id = disk.get("image_id")
+
if azure_tools.is_valid_resource_id(image_id):
parsed_id = azure_tools.parse_resource_id(image_id)
# Check if image is snapshot or disk
image_name = parsed_id.get("name")
type = parsed_id.get("resource_type")
- if type == 'snapshots' or type == 'disks':
+ if type == "snapshots" or type == "disks":
self.logger.debug("create disk from copy name: %s", image_name)
# Should we check that the snapshot exists?
async_disk_creation = self.conn_compute.disks.create_or_update(
self.resource_group,
disk_name,
{
- 'location': self.region,
- 'creation_data': {
- 'create_option': 'Copy',
- 'source_uri': image_id
- }
- }
+ "location": self.region,
+ "creation_data": {
+ "create_option": "Copy",
+ "source_uri": image_id,
+ },
+ },
)
data_disk = async_disk_creation.result()
created_items[data_disk.id] = True
-
else:
- raise vimconn.VimConnNotFoundException("Invalid image_id: %s ", image_id)
+ raise vimconn.VimConnNotFoundException(
+ "Invalid image_id: %s ", image_id
+ )
else:
- raise vimconn.VimConnNotFoundException("Invalid image_id: %s ", image_id)
+ raise vimconn.VimConnNotFoundException(
+ "Invalid image_id: %s ", image_id
+ )
# Attach the disk created
- virtual_machine.storage_profile.data_disks.append({
- 'lun': disk_index,
- 'name': disk_name,
- 'create_option': DiskCreateOption.attach,
- 'managed_disk': {
- 'id': data_disk.id
- },
- 'disk_size_gb': disk.get('size')
- })
+ virtual_machine.storage_profile.data_disks.append(
+ {
+ "lun": disk_index,
+ "name": disk_name,
+ "create_option": DiskCreateOption.attach,
+ "managed_disk": {"id": data_disk.id},
+ "disk_size_gb": disk.get("size"),
+ }
+ )
self.logger.debug("attach disk name: %s", disk_name)
self.conn_compute.virtual_machines.create_or_update(
- self.resource_group,
- virtual_machine.name,
- virtual_machine
+ self.resource_group, virtual_machine.name, virtual_machine
)
# It is necessary to extract data from image_id to create the VM with this format
- # 'image_reference': {
- # 'publisher': vm_reference['publisher'],
- # 'offer': vm_reference['offer'],
- # 'sku': vm_reference['sku'],
- # 'version': vm_reference['version']
+ # "image_reference": {
+ # "publisher": vm_reference["publisher"],
+ # "offer": vm_reference["offer"],
+ # "sku": vm_reference["sku"],
+ # "version": vm_reference["version"]
# },
def _get_image_reference(self, image_id):
-
try:
# The data input format example:
# /Subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/Providers/Microsoft.Compute/Locations/westeurope/
# Offers/UbuntuServer/
# Skus/18.04-LTS/
# Versions/18.04.201809110
- publisher = str(image_id.split('/')[8])
- offer = str(image_id.split('/')[12])
- sku = str(image_id.split('/')[14])
- version = str(image_id.split('/')[16])
+ publisher = str(image_id.split("/")[8])
+ offer = str(image_id.split("/")[12])
+ sku = str(image_id.split("/")[14])
+ version = str(image_id.split("/")[16])
return {
- 'publisher': publisher,
- 'offer': offer,
- 'sku': sku,
- 'version': version
+ "publisher": publisher,
+ "offer": offer,
+ "sku": sku,
+ "version": version,
}
except Exception:
raise vimconn.VimConnException(
- "Unable to get image_reference from invalid image_id format: '{}'".format(image_id))
+ "Unable to get image_reference from invalid image_id format: '{}'".format(
+ image_id
+ )
+ )
# Azure VM names can not have some special characters
def _check_vm_name(self, vm_name):
"""
Checks vm name, in case the vm has not allowed characters they are removed, not error raised
"""
-
chars_not_allowed_list = "~!@#$%^&*()=+_[]{}|;:<>/?."
# First: the VM name max length is 64 characters
for elem in chars_not_allowed_list:
# Check if string is in the main string
if elem in vm_name_aux:
- # self.logger.debug('Dentro del IF')
+ # self.logger.debug("Dentro del IF")
# Replace the string
- vm_name_aux = vm_name_aux.replace(elem, '-')
+ vm_name_aux = vm_name_aux.replace(elem, "-")
return vm_name_aux
def get_flavor_id_from_data(self, flavor_dict):
-
self.logger.debug("getting flavor id from data, flavor_dict: %s", flavor_dict)
filter_dict = flavor_dict or {}
+
try:
self._reload_connection()
- vm_sizes_list = [vm_size.serialize() for vm_size in
- self.conn_compute.virtual_machine_sizes.list(self.region)]
+ vm_sizes_list = [
+ vm_size.serialize()
+ for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)
+ ]
- cpus = filter_dict.get('vcpus') or 0
- memMB = filter_dict.get('ram') or 0
+ cpus = filter_dict.get("vcpus") or 0
+ memMB = filter_dict.get("ram") or 0
# Filter
if self._config.get("flavors_pattern"):
- filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and
- size['memoryInMB'] >= memMB and
- re.search(self._config.get("flavors_pattern"), size["name"])]
+ filtered_sizes = [
+ size
+ for size in vm_sizes_list
+ if size["numberOfCores"] >= cpus
+ and size["memoryInMB"] >= memMB
+ and re.search(self._config.get("flavors_pattern"), size["name"])
+ ]
else:
- filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and
- size['memoryInMB'] >= memMB]
+ filtered_sizes = [
+ size
+ for size in vm_sizes_list
+ if size["numberOfCores"] >= cpus and size["memoryInMB"] >= memMB
+ ]
# Sort
- listedFilteredSizes = sorted(filtered_sizes, key=lambda k: (k['numberOfCores'], k['memoryInMB'],
- k['resourceDiskSizeInMB']))
+ listedFilteredSizes = sorted(
+ filtered_sizes,
+ key=lambda k: (
+ k["numberOfCores"],
+ k["memoryInMB"],
+ k["resourceDiskSizeInMB"],
+ ),
+ )
if listedFilteredSizes:
- return listedFilteredSizes[0]['name']
- raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+ return listedFilteredSizes[0]["name"]
+ raise vimconn.VimConnNotFoundException(
+ "Cannot find any flavor matching '{}'".format(str(flavor_dict))
+ )
except Exception as e:
self._format_vimconn_exception(e)
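# A minimal sketch of the flavor-matching strategy in get_flavor_id_from_data():
# keep the VM sizes that satisfy the requested vcpus/ram and return the smallest by
# (cores, memory, disk). The sample size list is made up for illustration.
def smallest_matching_size(sizes, vcpus=0, ram_mb=0):
    candidates = [
        size for size in sizes
        if size["numberOfCores"] >= vcpus and size["memoryInMB"] >= ram_mb
    ]
    candidates.sort(
        key=lambda s: (s["numberOfCores"], s["memoryInMB"], s["resourceDiskSizeInMB"])
    )
    return candidates[0]["name"] if candidates else None

sample_sizes = [
    {"name": "Standard_B1s", "numberOfCores": 1, "memoryInMB": 1024, "resourceDiskSizeInMB": 4096},
    {"name": "Standard_DS1_v2", "numberOfCores": 1, "memoryInMB": 3584, "resourceDiskSizeInMB": 7168},
]
# smallest_matching_size(sample_sizes, vcpus=1, ram_mb=2048) -> "Standard_DS1_v2"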
def _get_flavor_id_from_flavor_name(self, flavor_name):
-
# self.logger.debug("getting flavor id from flavor name {}".format(flavor_name))
try:
self._reload_connection()
- vm_sizes_list = [vm_size.serialize() for vm_size in
- self.conn_compute.virtual_machine_sizes.list(self.region)]
+ vm_sizes_list = [
+ vm_size.serialize()
+ for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)
+ ]
output_flavor = None
for size in vm_sizes_list:
- if size['name'] == flavor_name:
+ if size["name"] == flavor_name:
output_flavor = size
# None is returned if not found anything
return output_flavor
-
except Exception as e:
self._format_vimconn_exception(e)
self._reload_connection()
return True
except Exception as e:
- raise vimconn.VimConnException("Connectivity issue with Azure API: {}".format(e))
+ raise vimconn.VimConnException(
+ "Connectivity issue with Azure API: {}".format(e)
+ )
def get_network(self, net_id):
-
- # self.logger.debug('get network id: {}'.format(net_id))
+ # self.logger.debug("get network id: {}".format(net_id))
# res_name = self._get_resource_name_from_resource_id(net_id)
self._reload_connection()
- filter_dict = {'name': net_id}
+ filter_dict = {"name": net_id}
network_list = self.get_network_list(filter_dict)
if not network_list:
- raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "network '{}' not found".format(net_id)
+ )
else:
return network_list[0]
def delete_network(self, net_id, created_items=None):
-
- self.logger.debug('deleting network {} - {}'.format(self.resource_group, net_id))
+ self.logger.debug(
+ "deleting network {} - {}".format(self.resource_group, net_id)
+ )
self._reload_connection()
res_name = self._get_resource_name_from_resource_id(net_id)
- filter_dict = {'name': res_name}
+ filter_dict = {"name": res_name}
network_list = self.get_network_list(filter_dict)
if not network_list:
- raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "network '{}' not found".format(net_id)
+ )
try:
# Subnet API fails (CloudError: Azure Error: ResourceNotFound)
# Put the initial virtual_network API
- async_delete = self.conn_vnet.subnets.delete(self.resource_group, self.vnet_name, res_name)
+ async_delete = self.conn_vnet.subnets.delete(
+ self.resource_group, self.vnet_name, res_name
+ )
async_delete.wait()
return net_id
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
- raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "network '{}' not found".format(net_id)
+ )
else:
self._format_vimconn_exception(e)
except Exception as e:
self._format_vimconn_exception(e)
def delete_vminstance(self, vm_id, created_items=None):
- """ Deletes a vm instance from the vim.
- """
- self.logger.debug('deleting VM instance {} - {}'.format(self.resource_group, vm_id))
+ """Deletes a vm instance from the vim."""
+ self.logger.debug(
+ "deleting VM instance {} - {}".format(self.resource_group, vm_id)
+ )
self._reload_connection()
created_items = created_items or {}
# Check vm exists, we can call delete_vm to clean created_items
if vm_id:
res_name = self._get_resource_name_from_resource_id(vm_id)
- vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name)
+ vm = self.conn_compute.virtual_machines.get(
+ self.resource_group, res_name
+ )
# Shuts down the virtual machine and releases the compute resources
# vm_stop = self.conn_compute.virtual_machines.power_off(self.resource_group, resName)
# vm_stop.wait()
- vm_delete = self.conn_compute.virtual_machines.delete(self.resource_group, res_name)
+ vm_delete = self.conn_compute.virtual_machines.delete(
+ self.resource_group, res_name
+ )
vm_delete.wait()
- self.logger.debug('deleted VM name: %s', res_name)
+ self.logger.debug("deleted VM name: %s", res_name)
# Delete OS Disk
os_disk_name = vm.storage_profile.os_disk.name
- self.logger.debug('delete OS DISK: %s', os_disk_name)
- async_disk_delete = self.conn_compute.disks.delete(self.resource_group, os_disk_name)
+ self.logger.debug("delete OS DISK: %s", os_disk_name)
+ async_disk_delete = self.conn_compute.disks.delete(
+ self.resource_group, os_disk_name
+ )
async_disk_delete.wait()
# os disks are created always with the machine
- self.logger.debug('deleted OS DISK name: %s', os_disk_name)
+ self.logger.debug("deleted OS DISK name: %s", os_disk_name)
for data_disk in vm.storage_profile.data_disks:
- self.logger.debug('delete data_disk: %s', data_disk.name)
- async_disk_delete = self.conn_compute.disks.delete(self.resource_group, data_disk.name)
+ self.logger.debug("delete data_disk: %s", data_disk.name)
+ async_disk_delete = self.conn_compute.disks.delete(
+ self.resource_group, data_disk.name
+ )
async_disk_delete.wait()
self._markdel_created_item(data_disk.managed_disk.id, created_items)
- self.logger.debug('deleted OS DISK name: %s', data_disk.name)
+ self.logger.debug("deleted OS DISK name: %s", data_disk.name)
# After deleting the VM it is necessary to delete the NIC, because if it is not deleted, delete_network
# does not work: Azure reports that the subnet is still in use
network_interfaces = vm.network_profile.network_interfaces
for network_interface in network_interfaces:
-
- nic_name = self._get_resource_name_from_resource_id(network_interface.id)
+ nic_name = self._get_resource_name_from_resource_id(
+ network_interface.id
+ )
nic_data = self.conn_vnet.network_interfaces.get(
- self.resource_group,
- nic_name)
+ self.resource_group, nic_name
+ )
public_ip_name = None
exist_public_ip = nic_data.ip_configurations[0].public_ip_address
if exist_public_ip:
- public_ip_id = nic_data.ip_configurations[0].public_ip_address.id
+ public_ip_id = nic_data.ip_configurations[
+ 0
+ ].public_ip_address.id
# Delete public_ip
- public_ip_name = self._get_resource_name_from_resource_id(public_ip_id)
+ public_ip_name = self._get_resource_name_from_resource_id(
+ public_ip_id
+ )
# The public ip must be deleted after the nic it is attached to
- self.logger.debug('delete NIC name: %s', nic_name)
- nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, nic_name)
+ self.logger.debug("delete NIC name: %s", nic_name)
+ nic_delete = self.conn_vnet.network_interfaces.delete(
+ self.resource_group, nic_name
+ )
nic_delete.wait()
self._markdel_created_item(network_interface.id, created_items)
- self.logger.debug('deleted NIC name: %s', nic_name)
+ self.logger.debug("deleted NIC name: %s", nic_name)
# Delete list of public ips
if public_ip_name:
- self.logger.debug('delete PUBLIC IP - ' + public_ip_name)
- ip_delete = self.conn_vnet.public_ip_addresses.delete(self.resource_group, public_ip_name)
+ self.logger.debug("delete PUBLIC IP - " + public_ip_name)
+ ip_delete = self.conn_vnet.public_ip_addresses.delete(
+ self.resource_group, public_ip_name
+ )
ip_delete.wait()
self._markdel_created_item(public_ip_id, created_items)
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
- raise vimconn.VimConnNotFoundException("No vm instance found '{}'".format(vm_id))
+ raise vimconn.VimConnNotFoundException(
+ "No vm instance found '{}'".format(vm_id)
+ )
else:
self._format_vimconn_exception(e)
except Exception as e:
created_items[item_id] = False
def _delete_created_items(self, created_items):
- """ Delete created_items elements that have not been deleted with the virtual machine
- Created_items may not be deleted correctly with the created machine if the
- virtual machine fails creating or in other cases of error
+ """Delete created_items elements that have not been deleted with the virtual machine
+ Created_items may not be deleted correctly with the created machine if the
+ virtual machine fails creating or in other cases of error
"""
self.logger.debug("Created items: %s", created_items)
# Must delete in order first nics, then public_ips
continue
# self.logger.debug("Must delete item id: %s", item_id)
-
# Obtain type, supported nic, disk or public ip
parsed_id = azure_tools.parse_resource_id(item_id)
resource_type = parsed_id.get("resource_type")
for item_name in nics_to_delete:
try:
self.logger.debug("deleting nic name %s:", item_name)
- nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, item_name)
+ nic_delete = self.conn_vnet.network_interfaces.delete(
+ self.resource_group, item_name
+ )
nic_delete.wait()
self.logger.debug("deleted nic name %s:", item_name)
except Exception as e:
- self.logger.error("Error deleting item: {}: {}".format(type(e).__name__, e))
+ self.logger.error(
+ "Error deleting item: {}: {}".format(type(e).__name__, e)
+ )
for item_name in publics_ip_to_delete:
try:
self.logger.debug("deleting public ip name %s:", item_name)
- ip_delete = self.conn_vnet.public_ip_addresses.delete(self.resource_group, name)
+ ip_delete = self.conn_vnet.public_ip_addresses.delete(
+ self.resource_group, item_name
+ )
ip_delete.wait()
self.logger.debug("deleted public ip name %s:", item_name)
except Exception as e:
- self.logger.error("Error deleting item: {}: {}".format(type(e).__name__, e))
+ self.logger.error(
+ "Error deleting item: {}: {}".format(type(e).__name__, e)
+ )
for item_name in disks_to_delete:
try:
self.logger.debug("deleting data disk name %s:", name)
- async_disk_delete = self.conn_compute.disks.delete(self.resource_group, item_name)
+ async_disk_delete = self.conn_compute.disks.delete(
+ self.resource_group, item_name
+ )
async_disk_delete.wait()
self.logger.debug("deleted data disk name %s:", name)
except Exception as e:
- self.logger.error("Error deleting item: {}: {}".format(type(e).__name__, e))
+ self.logger.error(
+ "Error deleting item: {}: {}".format(type(e).__name__, e)
+ )
def action_vminstance(self, vm_id, action_dict, created_items={}):
"""Send and action over a VM instance from VIM
Returns the vm_id if the action was successfully sent to the VIM
"""
-
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+
try:
self._reload_connection()
resName = self._get_resource_name_from_resource_id(vm_id)
+
if "start" in action_dict:
self.conn_compute.virtual_machines.start(self.resource_group, resName)
- elif "stop" in action_dict or "shutdown" in action_dict or "shutoff" in action_dict:
- self.conn_compute.virtual_machines.power_off(self.resource_group, resName)
+ elif (
+ "stop" in action_dict
+ or "shutdown" in action_dict
+ or "shutoff" in action_dict
+ ):
+ self.conn_compute.virtual_machines.power_off(
+ self.resource_group, resName
+ )
elif "terminate" in action_dict:
self.conn_compute.virtual_machines.delete(self.resource_group, resName)
elif "reboot" in action_dict:
self.conn_compute.virtual_machines.restart(self.resource_group, resName)
+
return None
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
self._format_vimconn_exception(e)
def delete_flavor(self, flavor_id):
- raise vimconn.VimConnAuthException("It is not possible to delete a FLAVOR in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to delete a FLAVOR in AZURE"
+ )
- def delete_tenant(self, tenant_id,):
- raise vimconn.VimConnAuthException("It is not possible to delete a TENANT in AZURE")
+ def delete_tenant(self, tenant_id):
+ raise vimconn.VimConnAuthException(
+ "It is not possible to delete a TENANT in AZURE"
+ )
def delete_image(self, image_id):
- raise vimconn.VimConnAuthException("It is not possible to delete a IMAGE in AZURE")
+ raise vimconn.VimConnAuthException(
+ "It is not possible to delete a IMAGE in AZURE"
+ )
def get_vminstance(self, vm_id):
"""
vm = self.conn_compute.virtual_machines.get(self.resource_group, resName)
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
- raise vimconn.VimConnNotFoundException("No vminstance found '{}'".format(vm_id))
+ raise vimconn.VimConnNotFoundException(
+ "No vminstance found '{}'".format(vm_id)
+ )
else:
self._format_vimconn_exception(e)
except Exception as e:
self._reload_connection()
self.logger.debug("get flavor from id: %s", flavor_id)
flavor_data = self._get_flavor_id_from_flavor_name(flavor_id)
+
if flavor_data:
flavor = {
- 'id': flavor_id,
- 'name': flavor_id,
- 'ram': flavor_data['memoryInMB'],
- 'vcpus': flavor_data['numberOfCores'],
- 'disk': flavor_data['resourceDiskSizeInMB']/1024
+ "id": flavor_id,
+ "name": flavor_id,
+ "ram": flavor_data["memoryInMB"],
+ "vcpus": flavor_data["numberOfCores"],
+ "disk": flavor_data["resourceDiskSizeInMB"] / 1024,
}
+
return flavor
else:
- raise vimconn.VimConnNotFoundException("flavor '{}' not found".format(flavor_id))
+ raise vimconn.VimConnNotFoundException(
+ "flavor '{}' not found".format(flavor_id)
+ )
def get_tenant_list(self, filter_dict={}):
- """ Obtains the list of tenants
- For the azure connector only the azure tenant will be returned if it is compatible
- with filter_dict
+ """Obtains the list of tenants
+ For the azure connector only the azure tenant will be returned if it is compatible
+ with filter_dict
"""
- tenants_azure = [{'name': self.tenant, 'id': self.tenant}]
+ tenants_azure = [{"name": self.tenant, "id": self.tenant}]
tenant_list = []
self.logger.debug("get tenant list: %s", filter_dict)
for tenant_azure in tenants_azure:
if filter_dict:
- if filter_dict.get("id") and str(tenant_azure.get("id")) != filter_dict["id"]:
+ if (
+ filter_dict.get("id")
+ and str(tenant_azure.get("id")) != filter_dict["id"]
+ ):
continue
- if filter_dict.get("name") and str(tenant_azure.get("name")) != filter_dict["name"]:
+
+ if (
+ filter_dict.get("name")
+ and str(tenant_azure.get("name")) != filter_dict["name"]
+ ):
continue
tenant_list.append(tenant_azure)
def refresh_nets_status(self, net_list):
"""Get the status of the networks
- Params: the list of network identifiers
- Returns a dictionary with:
- net_id: #VIM id of this network
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, INACTIVE, DOWN (admin down),
- # BUILD (on building process)
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
-
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
-
out_nets = {}
self._reload_connection()
out_nets[net_id] = {
"status": self.provision_state2osm[net.provisioning_state],
- "vim_info": str(net)
+ "vim_info": str(net),
}
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
- self.logger.info("Not found subnet net_name: %s, subnet_name: %s", netName, resName)
- out_nets[net_id] = {
- "status": "DELETED",
- "error_msg": str(e)
- }
+ self.logger.info(
+ "Not found subnet net_name: %s, subnet_name: %s",
+ netName,
+ resName,
+ )
+ out_nets[net_id] = {"status": "DELETED", "error_msg": str(e)}
else:
- self.logger.error("CloudError Exception %s when searching subnet", e)
+ self.logger.error(
+ "CloudError Exception %s when searching subnet", e
+ )
out_nets[net_id] = {
"status": "VIM_ERROR",
- "error_msg": str(e)
+ "error_msg": str(e),
}
except vimconn.VimConnNotFoundException as e:
- self.logger.error("VimConnNotFoundException %s when searching subnet", e)
+ self.logger.error(
+ "VimConnNotFoundException %s when searching subnet", e
+ )
out_nets[net_id] = {
"status": "DELETED",
- "error_msg": str(e)
+ "error_msg": str(e),
}
except Exception as e:
- self.logger.error("Exception %s when searching subnet", e, exc_info=True)
+ self.logger.error(
+ "Exception %s when searching subnet", e, exc_info=True
+ )
out_nets[net_id] = {
"status": "VIM_ERROR",
- "error_msg": str(e)
+ "error_msg": str(e),
}
+
return out_nets
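# Purely illustrative example of the dictionary shape refresh_nets_status() returns,
# following the docstring above; the subnet ids and messages are made up.
example_out_nets = {
    "/subscriptions/xxxx/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mgmt": {
        "status": "ACTIVE",
        "vim_info": "<subnet object dumped as text>",
    },
    "/subscriptions/xxxx/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/gone": {
        "status": "DELETED",
        "error_msg": "Azure Error: NotFound",
    },
}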
def refresh_vms_status(self, vm_list):
- """ Get the status of the virtual machines and their interfaces/ports
+ """Get the status of the virtual machines and their interfaces/ports
Params: the list of VM identifiers
Returns a dictionary with:
vm_id: # VIM id of this Virtual Machine
mac_address - The MAC address of the interface.
ip_address - The IP address of the interface within the subnet.
"""
-
out_vms = {}
self._reload_connection()
try:
res_name = self._get_resource_name_from_resource_id(vm_id)
- vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name)
- out_vm['vim_info'] = str(vm)
- out_vm['status'] = self.provision_state2osm.get(vm.provisioning_state, 'OTHER')
- if vm.provisioning_state == 'Succeeded':
+ vm = self.conn_compute.virtual_machines.get(
+ self.resource_group, res_name
+ )
+ out_vm["vim_info"] = str(vm)
+ out_vm["status"] = self.provision_state2osm.get(
+ vm.provisioning_state, "OTHER"
+ )
+
+ if vm.provisioning_state == "Succeeded":
# check if machine is running or stopped
- instance_view = self.conn_compute.virtual_machines.instance_view(self.resource_group,
- res_name)
+ instance_view = self.conn_compute.virtual_machines.instance_view(
+ self.resource_group, res_name
+ )
+
for status in instance_view.statuses:
splitted_status = status.code.split("/")
- if len(splitted_status) == 2 and splitted_status[0] == 'PowerState':
- out_vm['status'] = self.power_state2osm.get(splitted_status[1], 'OTHER')
+ if (
+ len(splitted_status) == 2
+ and splitted_status[0] == "PowerState"
+ ):
+ out_vm["status"] = self.power_state2osm.get(
+ splitted_status[1], "OTHER"
+ )
network_interfaces = vm.network_profile.network_interfaces
- out_vm['interfaces'] = self._get_vm_interfaces_status(vm_id, network_interfaces)
+ out_vm["interfaces"] = self._get_vm_interfaces_status(
+ vm_id, network_interfaces
+ )
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
self.logger.debug("Not found vm id: %s", vm_id)
- out_vm['status'] = "DELETED"
- out_vm['error_msg'] = str(e)
- out_vm['vim_info'] = None
+ out_vm["status"] = "DELETED"
+ out_vm["error_msg"] = str(e)
+ out_vm["vim_info"] = None
else:
# maybe connection error or another type of error, return vim error
self.logger.error("Exception %s refreshing vm_status", e)
- out_vm['status'] = "VIM_ERROR"
- out_vm['error_msg'] = str(e)
- out_vm['vim_info'] = None
+ out_vm["status"] = "VIM_ERROR"
+ out_vm["error_msg"] = str(e)
+ out_vm["vim_info"] = None
except Exception as e:
self.logger.error("Exception %s refreshing vm_status", e, exc_info=True)
- out_vm['status'] = "VIM_ERROR"
- out_vm['error_msg'] = str(e)
- out_vm['vim_info'] = None
+ out_vm["status"] = "VIM_ERROR"
+ out_vm["error_msg"] = str(e)
+ out_vm["vim_info"] = None
out_vms[vm_id] = out_vm
interface_list = []
for network_interface in interfaces:
interface_dict = {}
- nic_name = self._get_resource_name_from_resource_id(network_interface.id)
- interface_dict['vim_interface_id'] = network_interface.id
+ nic_name = self._get_resource_name_from_resource_id(
+ network_interface.id
+ )
+ interface_dict["vim_interface_id"] = network_interface.id
nic_data = self.conn_vnet.network_interfaces.get(
self.resource_group,
- nic_name)
+ nic_name,
+ )
ips = []
if nic_data.ip_configurations[0].public_ip_address:
self.logger.debug("Obtain public ip address")
public_ip_name = self._get_resource_name_from_resource_id(
- nic_data.ip_configurations[0].public_ip_address.id)
- public_ip = self.conn_vnet.public_ip_addresses.get(self.resource_group, public_ip_name)
+ nic_data.ip_configurations[0].public_ip_address.id
+ )
+ public_ip = self.conn_vnet.public_ip_addresses.get(
+ self.resource_group, public_ip_name
+ )
self.logger.debug("Public ip address is: %s", public_ip.ip_address)
ips.append(public_ip.ip_address)
private_ip = nic_data.ip_configurations[0].private_ip_address
ips.append(private_ip)
- interface_dict['mac_address'] = nic_data.mac_address
- interface_dict['ip_address'] = ";".join(ips)
+ interface_dict["mac_address"] = nic_data.mac_address
+ interface_dict["ip_address"] = ";".join(ips)
interface_list.append(interface_dict)
return interface_list
except Exception as e:
- self.logger.error("Exception %s obtaining interface data for vm: %s, error: %s", vm_id, e, exc_info=True)
+ self.logger.error(
+ "Exception %s obtaining interface data for vm: %s, error: %s",
+ vm_id,
+ e,
+ exc_info=True,
+ )
self._format_vimconn_exception(e)
if __name__ == "__main__":
-
# Making some basic test
- vim_id = 'azure'
- vim_name = 'azure'
+ vim_id = "azure"
+ vim_name = "azure"
needed_test_params = {
"client_id": "AZURE_CLIENT_ID",
"secret": "AZURE_SECRET",
for param, env_var in needed_test_params.items():
value = getenv(env_var)
+
if not value:
raise Exception("Provide a valid value for env '{}'".format(env_var))
+
test_params[param] = value
config = {
- 'region_name': getenv("AZURE_REGION_NAME", 'westeurope'),
- 'resource_group': getenv("AZURE_RESOURCE_GROUP"),
- 'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"),
- 'pub_key': getenv("AZURE_PUB_KEY", None),
- 'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'),
+ "region_name": getenv("AZURE_REGION_NAME", "westeurope"),
+ "resource_group": getenv("AZURE_RESOURCE_GROUP"),
+ "subscription_id": getenv("AZURE_SUBSCRIPTION_ID"),
+ "pub_key": getenv("AZURE_PUB_KEY", None),
+ "vnet_name": getenv("AZURE_VNET_NAME", "myNetwork"),
}
virtualMachine = {
- 'name': 'sergio',
- 'description': 'new VM',
- 'status': 'running',
- 'image': {
- 'publisher': 'Canonical',
- 'offer': 'UbuntuServer',
- 'sku': '16.04.0-LTS',
- 'version': 'latest'
- },
- 'hardware_profile': {
- 'vm_size': 'Standard_DS1_v2'
+ "name": "sergio",
+ "description": "new VM",
+ "status": "running",
+ "image": {
+ "publisher": "Canonical",
+ "offer": "UbuntuServer",
+ "sku": "16.04.0-LTS",
+ "version": "latest",
},
- 'networks': [
- 'sergio'
- ]
+ "hardware_profile": {"vm_size": "Standard_DS1_v2"},
+ "networks": ["sergio"],
}
vnet_config = {
- 'subnet_address': '10.1.2.0/24',
- # 'subnet_name': 'subnet-oam'
+ "subnet_address": "10.1.2.0/24",
+ # "subnet_name": "subnet-oam"
}
###########################
- azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None,
- user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config)
+ azure = vimconnector(
+ vim_id,
+ vim_name,
+ tenant_id=test_params["tenant"],
+ tenant_name=None,
+ url=None,
+ url_admin=None,
+ user=test_params["client_id"],
+ passwd=test_params["secret"],
+ log_level=None,
+ config=config,
+ )
# azure.get_flavor_id_from_data("here")
# subnets=azure.get_network_list()
- # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'],
- # virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets)
+ # azure.new_vminstance(virtualMachine["name"], virtualMachine["description"], virtualMachine["status"],
+ # virtualMachine["image"], virtualMachine["hardware_profile"]["vm_size"], subnets)
azure.new_network("mynet", None)
- net_id = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."\
- "Network/virtualNetworks/test"
- net_id_not_found = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"\
- "Microsoft.Network/virtualNetworks/testALF"
+ net_id = (
+ "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."
+ "Network/virtualNetworks/test"
+ )
+ net_id_not_found = (
+ "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"
+ "Microsoft.Network/virtualNetworks/testALF"
+ )
azure.refresh_nets_status([net_id, net_id_not_found])
setup(
name=_name,
- description='OSM ro vim plugin for azure',
+ description="OSM ro vim plugin for azure",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"netaddr",
"PyYAML",
"azure==4.0.0",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_azure = osm_rovim_azure.vimconn_azure:vimconnector'],
+ "osm_rovim.plugins": [
+ "rovim_azure = osm_rovim_azure.vimconn_azure:vimconnector"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rovim_azure --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
import socket
import struct
from osm_ro_plugin import vimconn
+
# import json
from functools import partial
from fog05 import FIMAPI
class vimconnector(vimconn.VimConnector):
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
- config={}, persistent_info={}):
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
"""Constructor of VIM
Params:
'uuid': id asigned to this VIM
Returns: Raises an exception if some needed parameter is missing, but it must not do any connectivity
check against the VIM
"""
-
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config, persistent_info)
-
- self.logger = logging.getLogger('ro.vim.fos')
- self.logger.debug('vimconn_fos init with config: {}'.format(config))
- self.arch = config.get('arch', 'x86_64')
- self.hv = config.get('hypervisor', 'LXD')
- self.nodes = config.get('nodes', [])
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ persistent_info,
+ )
+
+ self.logger = logging.getLogger("ro.vim.fos")
+ self.logger.debug("vimconn_fos init with config: {}".format(config))
+ self.arch = config.get("arch", "x86_64")
+ self.hv = config.get("hypervisor", "LXD")
+ self.nodes = config.get("nodes", [])
self.fdu_node_map = {}
self.fos_api = FIMAPI(locator=self.url)
def __get_ip_range(self, first, count):
- int_first = struct.unpack('!L', socket.inet_aton(first))[0]
+ int_first = struct.unpack("!L", socket.inet_aton(first))[0]
int_last = int_first + count
- last = socket.inet_ntoa(struct.pack('!L', int_last))
+ last = socket.inet_ntoa(struct.pack("!L", int_last))
+
return (first, last)
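# A standalone sketch of the DHCP-range arithmetic in __get_ip_range(): the dotted
# start address is packed to an integer, the count added, and the result unpacked
# again. The sample addresses are illustrative only.
import socket
import struct

def dhcp_range(first, count):
    start = struct.unpack("!L", socket.inet_aton(first))[0]
    return first, socket.inet_ntoa(struct.pack("!L", start + count))

# dhcp_range("10.1.2.10", 50) -> ("10.1.2.10", "10.1.2.60")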
def __name_filter(self, desc, filter_name=None):
if filter_name is None:
return True
- return desc.get('name') == filter_name
+
+ return desc.get("name") == filter_name
def __id_filter(self, desc, filter_id=None):
if filter_id is None:
return True
- return desc.get('uuid') == filter_id
+
+ return desc.get("uuid") == filter_id
def __checksum_filter(self, desc, filter_checksum=None):
if filter_checksum is None:
return True
- return desc.get('checksum') == filter_checksum
+
+ return desc.get("checksum") == filter_checksum
def check_vim_connectivity(self):
"""Checks VIM can be reached and user credentials are ok.
"""
try:
self.fos_api.node.list()
+
return None
except fimapi.FIMAuthExcetpion as fae:
- raise vimconn.VimConnAuthException("Unable to authenticate to the VIM. Error {}".format(fae))
+ raise vimconn.VimConnAuthException(
+ "Unable to authenticate to the VIM. Error {}".format(fae)
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
-
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
'shared': if this network can be seen/use by other tenants/organization
Returns the network identifier on success or raises an exception on failure
"""
- self.logger.debug('new_network: {}'.format(locals()))
- if net_type in ['data', 'ptp']:
- raise vimconn.VimConnNotImplemented('{} type of network not supported'.format(net_type))
+ self.logger.debug("new_network: {}".format(locals()))
+
+ if net_type in ["data", "ptp"]:
+ raise vimconn.VimConnNotImplemented(
+ "{} type of network not supported".format(net_type)
+ )
- net_uuid = '{}'.format(uuid.uuid4())
+ net_uuid = "{}".format(uuid.uuid4())
desc = {
- 'uuid': net_uuid,
- 'name': net_name,
- 'net_type': 'ELAN',
- 'is_mgmt': False
+ "uuid": net_uuid,
+ "name": net_name,
+ "net_type": "ELAN",
+ "is_mgmt": False,
}
if ip_profile is not None:
ip = {}
- if ip_profile.get('ip_version') == 'IPv4':
+ if ip_profile.get("ip_version") == "IPv4":
ip_info = {}
- ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count'))
- dhcp_range = '{},{}'.format(ip_range[0], ip_range[1])
- ip['subnet'] = ip_profile.get('subnet_address')
- ip['dns'] = ip_profile.get('dns', None)
- ip['dhcp_enable'] = ip_profile.get('dhcp_enabled', False)
- ip['dhcp_range'] = dhcp_range
- ip['gateway'] = ip_profile.get('gateway_address', None)
- desc['ip_configuration'] = ip_info
+ ip_range = self.__get_ip_range(
+ ip_profile.get("dhcp_start_address"), ip_profile.get("dhcp_count")
+ )
+ dhcp_range = "{},{}".format(ip_range[0], ip_range[1])
+ ip["subnet"] = ip_profile.get("subnet_address")
+ ip["dns"] = ip_profile.get("dns", None)
+ ip["dhcp_enable"] = ip_profile.get("dhcp_enabled", False)
+ ip["dhcp_range"] = dhcp_range
+ ip["gateway"] = ip_profile.get("gateway_address", None)
+ desc["ip_configuration"] = ip_info
else:
- raise vimconn.VimConnNotImplemented('IPV6 network is not implemented at VIM')
- desc['ip_configuration'] = ip
- self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc))
+ raise vimconn.VimConnNotImplemented(
+ "IPV6 network is not implemented at VIM"
+ )
+
+ desc["ip_configuration"] = ip
+
+ self.logger.debug(
+ "VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}".format(
+ locals(), desc
+ )
+ )
+
try:
self.fos_api.network.add_network(desc)
except fimapi.FIMAResouceExistingException as free:
- raise vimconn.VimConnConflictException("Network already exists at VIM. Error {}".format(free))
+ raise vimconn.VimConnConflictException(
+ "Network already exists at VIM. Error {}".format(free)
+ )
except Exception as e:
- raise vimconn.VimConnException("Unable to create network {}. Error {}".format(net_name, e))
+ raise vimconn.VimConnException(
+ "Unable to create network {}. Error {}".format(net_name, e)
+ )
# No way from the current rest service to get the actual error, most likely it will be an already
# existing error
+
return net_uuid, {}
def get_network_list(self, filter_dict={}):
        List can be empty if no network maps the filter_dict. Raise an exception only upon VIM connectivity,
authorization, or some other unspecific error
"""
- self.logger.debug('get_network_list: {}'.format(filter_dict))
+ self.logger.debug("get_network_list: {}".format(filter_dict))
res = []
+
try:
nets = self.fos_api.network.list()
except Exception as e:
raise vimconn.VimConnConnectionException(
- "Cannot get network list from VIM, connection error. Error {}".format(e))
+ "Cannot get network list from VIM, connection error. Error {}".format(e)
+ )
filters = [
- partial(self.__name_filter, filter_name=filter_dict.get('name')),
- partial(self.__id_filter, filter_id=filter_dict.get('id'))
+ partial(self.__name_filter, filter_name=filter_dict.get("name")),
+ partial(self.__id_filter, filter_id=filter_dict.get("id")),
]
r1 = []
for n in nets:
match = True
+
for f in filters:
match = match and f(n)
+
if match:
r1.append(n)
for n in r1:
- osm_net = {
- 'id': n.get('uuid'),
- 'name': n.get('name'),
- 'status': 'ACTIVE'
- }
+ osm_net = {"id": n.get("uuid"), "name": n.get("name"), "status": "ACTIVE"}
res.append(osm_net)
+
return res
def get_network(self, net_id):
other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
Raises an exception upon error or when network is not found
"""
- self.logger.debug('get_network: {}'.format(net_id))
- res = self.get_network_list(filter_dict={'id': net_id})
+ self.logger.debug("get_network: {}".format(net_id))
+ res = self.get_network_list(filter_dict={"id": net_id})
+
if len(res) == 0:
- raise vimconn.VimConnNotFoundException("Network {} not found at VIM".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "Network {} not found at VIM".format(net_id)
+ )
+
return res[0]
def delete_network(self, net_id, created_items=None):
"""Deletes a tenant network from VIM
Returns the network identifier or raises an exception upon error or when network is not found
"""
- self.logger.debug('delete_network: {}'.format(net_id))
+ self.logger.debug("delete_network: {}".format(net_id))
+
try:
self.fos_api.network.remove_network(net_id)
except fimapi.FIMNotFoundException as fnfe:
raise vimconn.VimConnNotFoundException(
- "Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe))
+ "Network {} not found at VIM (already deleted?). Error {}".format(
+ net_id, fnfe
+ )
+ )
except Exception as e:
- raise vimconn.VimConnException("Cannot delete network {} from VIM. Error {}".format(net_id, e))
+ raise vimconn.VimConnException(
+ "Cannot delete network {} from VIM. Error {}".format(net_id, e)
+ )
+
return net_id
def refresh_nets_status(self, net_list):
vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
'net_id2': ...
"""
- self.logger.debug('Refeshing network status with args: {}'.format(locals()))
+        self.logger.debug("Refreshing network status with args: {}".format(locals()))
r = {}
+
for n in net_list:
try:
osm_n = self.get_network(n)
- r[osm_n.get('id')] = {'status': osm_n.get('status')}
+ r[osm_n.get("id")] = {"status": osm_n.get("status")}
except vimconn.VimConnNotFoundException:
- r[n] = {'status': 'VIM_ERROR'}
+ r[n] = {"status": "VIM_ERROR"}
+
return r
def get_flavor(self, flavor_id):
Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
Raises an exception upon error or if not found
"""
- self.logger.debug('VIM get_flavor with args: {}'.format(locals()))
+ self.logger.debug("VIM get_flavor with args: {}".format(locals()))
+
try:
r = self.fos_api.flavor.get(flavor_id)
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
if r is None:
raise vimconn.VimConnNotFoundException("Flavor not found at VIM")
- return {'id': r.get('uuid'), 'name': r.get('name'), 'fos': r}
+
+ return {"id": r.get("uuid"), "name": r.get("name"), "fos": r}
def get_flavor_id_from_data(self, flavor_dict):
"""Obtain flavor id that match the flavor description
#TODO: complete parameters for EPA
Returns the flavor_id or raises a vimconnNotFoundException
"""
- self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals()))
+ self.logger.debug("VIM get_flavor_id_from_data with args : {}".format(locals()))
try:
flvs = self.fos_api.flavor.list()
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
- r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and
- x.get('ram_size_mb') == flavor_dict.get('ram') and
- x.get('storage_size_gb') == flavor_dict.get('disk'))]
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
+ r = [
+ x.get("uuid")
+ for x in flvs
+ if (
+ x.get("cpu_min_count") == flavor_dict.get("vcpus")
+ and x.get("ram_size_mb") == flavor_dict.get("ram")
+ and x.get("storage_size_gb") == flavor_dict.get("disk")
+ )
+ ]
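+        # A flavor is considered a match only when its cpu_min_count, ram_size_mb and
+        # storage_size_gb equal the requested vcpus, ram and disk; the first matching
+        # uuid is returned below.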
+
if len(r) == 0:
raise vimconn.VimConnNotFoundException("No flavor found")
+
return r[0]
def new_flavor(self, flavor_data):
is_public:
#TODO to concrete
Returns the flavor identifier"""
- self.logger.debug('VIM new_flavor with args: {}'.format(locals()))
- flv_id = '{}'.format(uuid.uuid4())
+ self.logger.debug("VIM new_flavor with args: {}".format(locals()))
+ flv_id = "{}".format(uuid.uuid4())
desc = {
- 'uuid': flv_id,
- 'name': flavor_data.get('name'),
- 'cpu_arch': self.arch,
- 'cpu_min_count': flavor_data.get('vcpus'),
- 'cpu_min_freq': 0,
- 'ram_size_mb': float(flavor_data.get('ram')),
- 'storage_size_gb': float(flavor_data.get('disk'))
+ "uuid": flv_id,
+ "name": flavor_data.get("name"),
+ "cpu_arch": self.arch,
+ "cpu_min_count": flavor_data.get("vcpus"),
+ "cpu_min_freq": 0,
+ "ram_size_mb": float(flavor_data.get("ram")),
+ "storage_size_gb": float(flavor_data.get("disk")),
}
+
try:
self.fos_api.flavor.add(desc)
except fimapi.FIMAResouceExistingException as free:
- raise vimconn.VimConnConflictException("Flavor {} already exist at VIM. Error {}".format(flv_id, free))
+ raise vimconn.VimConnConflictException(
+                "Flavor {} already exists at VIM. Error {}".format(flv_id, free)
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
return flv_id
def delete_flavor(self, flavor_id):
self.fos_api.flavor.remove(flavor_id)
except fimapi.FIMNotFoundException as fnfe:
raise vimconn.VimConnNotFoundException(
- "Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe))
+ "Flavor {} not found at VIM (already deleted?). Error {}".format(
+ flavor_id, fnfe
+ )
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
return flavor_id
def new_image(self, image_dict):
- """ Adds a tenant image to VIM. imge_dict is a dictionary with:
+        """Adds a tenant image to VIM. image_dict is a dictionary with:
name: name
disk_format: qcow2, vhd, vmdk, raw (by default), ...
location: path or URI
metadata: metadata of the image
Returns the image id or raises an exception if failed
"""
- self.logger.debug('VIM new_image with args: {}'.format(locals()))
- img_id = '{}'.format(uuid.uuid4())
+ self.logger.debug("VIM new_image with args: {}".format(locals()))
+ img_id = "{}".format(uuid.uuid4())
desc = {
- 'name': image_dict.get('name'),
- 'uuid': img_id,
- 'uri': image_dict.get('location'),
- 'format': image_dict.get('disk_format')
+ "name": image_dict.get("name"),
+ "uuid": img_id,
+ "uri": image_dict.get("location"),
+ "format": image_dict.get("disk_format"),
}
+
try:
self.fos_api.image.add(desc)
except fimapi.FIMAResouceExistingException as free:
- raise vimconn.VimConnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free))
+ raise vimconn.VimConnConflictException(
+                "Image {} already exists at VIM. Error {}".format(img_id, free)
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
return img_id
def get_image_id_from_path(self, path):
-
"""Get the image id from image path in the VIM database.
- Returns the image_id or raises a vimconnNotFoundException
+ Returns the image_id or raises a vimconnNotFoundException
"""
- self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals()))
+ self.logger.debug("VIM get_image_id_from_path with args: {}".format(locals()))
+
try:
imgs = self.fos_api.image.list()
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
- res = [x.get('uuid') for x in imgs if x.get('uri') == path]
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
+ res = [x.get("uuid") for x in imgs if x.get("uri") == path]
+
if len(res) == 0:
raise vimconn.VimConnNotFoundException("Image with this path was not found")
+
return res[0]
def get_image_list(self, filter_dict={}):
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
"""
- self.logger.debug('VIM get_image_list args: {}'.format(locals()))
+ self.logger.debug("VIM get_image_list args: {}".format(locals()))
r = []
+
try:
fimgs = self.fos_api.image.list()
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
filters = [
- partial(self.__name_filter, filter_name=filter_dict.get('name')),
- partial(self.__id_filter, filter_id=filter_dict.get('id')),
- partial(self.__checksum_filter, filter_checksum=filter_dict.get('checksum'))
+ partial(self.__name_filter, filter_name=filter_dict.get("name")),
+ partial(self.__id_filter, filter_id=filter_dict.get("id")),
+ partial(
+ self.__checksum_filter, filter_checksum=filter_dict.get("checksum")
+ ),
]
r1 = []
for i in fimgs:
match = True
+
for f in filters:
match = match and f(i)
+
if match:
r1.append(i)
for i in r1:
img_info = {
- 'name': i.get('name'),
- 'id': i.get('uuid'),
- 'checksum': i.get('checksum'),
- 'location': i.get('uri'),
- 'fos': i
+ "name": i.get("name"),
+ "id": i.get("uuid"),
+ "checksum": i.get("checksum"),
+ "location": i.get("uri"),
+ "fos": i,
}
r.append(img_info)
+
return r
# raise VimConnNotImplemented( "Should have implemented this" )
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Adds a VM instance to VIM
:param start: (boolean) indicates if VM must start or created in pause mode.
:param image_id: :param flavor_id: image and flavor VIM id to use for the VM
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
- self.logger.debug('new_vminstance with args: {}'.format(locals()))
- fdu_uuid = '{}'.format(uuid.uuid4())
+ self.logger.debug("new_vminstance with args: {}".format(locals()))
+ fdu_uuid = "{}".format(uuid.uuid4())
flv = self.fos_api.flavor.get(flavor_id)
img = self.fos_api.image.get(image_id)
if flv is None:
- raise vimconn.VimConnNotFoundException("Flavor {} not found at VIM".format(flavor_id))
+ raise vimconn.VimConnNotFoundException(
+ "Flavor {} not found at VIM".format(flavor_id)
+ )
+
if img is None:
- raise vimconn.VimConnNotFoundException("Image {} not found at VIM".format(image_id))
+ raise vimconn.VimConnNotFoundException(
+ "Image {} not found at VIM".format(image_id)
+ )
created_items = {
- 'fdu_id': '',
- 'node_id': '',
- 'connection_points': []
+ "fdu_id": "",
+ "node_id": "",
+ "connection_points": [],
}
fdu_desc = {
- 'name': name,
- 'id': fdu_uuid,
- 'uuid': fdu_uuid,
- 'computation_requirements': flv,
- 'image': img,
- 'hypervisor': self.hv,
- 'migration_kind': 'LIVE',
- 'interfaces': [],
- 'io_ports': [],
- 'connection_points': [],
- 'depends_on': [],
- 'storage': []
+ "name": name,
+ "id": fdu_uuid,
+ "uuid": fdu_uuid,
+ "computation_requirements": flv,
+ "image": img,
+ "hypervisor": self.hv,
+ "migration_kind": "LIVE",
+ "interfaces": [],
+ "io_ports": [],
+ "connection_points": [],
+ "depends_on": [],
+ "storage": [],
}
nets = []
cps = []
intf_id = 0
for n in net_list:
- cp_id = '{}'.format(uuid.uuid4())
- n['vim_id'] = cp_id
- pair_id = n.get('net_id')
+ cp_id = "{}".format(uuid.uuid4())
+ n["vim_id"] = cp_id
+ pair_id = n.get("net_id")
cp_d = {
- 'id': cp_id,
- 'name': cp_id,
- 'vld_ref': pair_id
+ "id": cp_id,
+ "name": cp_id,
+ "vld_ref": pair_id,
}
intf_d = {
- 'name': n.get('name', 'eth{}'.format(intf_id)),
- 'is_mgmt': False,
- 'if_type': 'INTERNAL',
- 'virtual_interface': {
- 'intf_type': n.get('model', 'VIRTIO'),
- 'vpci': n.get('vpci', '0:0:0'),
- 'bandwidth': int(n.get('bw', 100))
+ "name": n.get("name", "eth{}".format(intf_id)),
+ "is_mgmt": False,
+ "if_type": "INTERNAL",
+ "virtual_interface": {
+ "intf_type": n.get("model", "VIRTIO"),
+ "vpci": n.get("vpci", "0:0:0"),
+ "bandwidth": int(n.get("bw", 100)),
},
- 'cp_id': cp_id
+ "cp_id": cp_id,
}
- if n.get('mac_address', None) is not None:
- intf_d['mac_address'] = n['mac_address']
+ if n.get("mac_address", None) is not None:
+ intf_d["mac_address"] = n["mac_address"]
- created_items['connection_points'].append(cp_id)
- fdu_desc['connection_points'].append(cp_d)
- fdu_desc['interfaces'].append(intf_d)
+ created_items["connection_points"].append(cp_id)
+ fdu_desc["connection_points"].append(cp_d)
+ fdu_desc["interfaces"].append(intf_d)
intf_id = intf_id + 1
if cloud_config is not None:
- configuration = {'conf_type': 'CLOUD_INIT'}
- if cloud_config.get('user-data') is not None:
- configuration['script'] = cloud_config.get('user-data')
- if cloud_config.get('key-pairs') is not None:
- configuration['ssh_keys'] = cloud_config.get('key-pairs')
+ configuration = {"conf_type": "CLOUD_INIT"}
+ if cloud_config.get("user-data") is not None:
+ configuration["script"] = cloud_config.get("user-data")
+
+ if cloud_config.get("key-pairs") is not None:
+ configuration["ssh_keys"] = cloud_config.get("key-pairs")
- if 'script' in configuration:
- fdu_desc['configuration'] = configuration
+ if "script" in configuration:
+ fdu_desc["configuration"] = configuration
- self.logger.debug('Eclipse fog05 FDU Descriptor: {}'.format(fdu_desc))
+ self.logger.debug("Eclipse fog05 FDU Descriptor: {}".format(fdu_desc))
fdu = FDU(fdu_desc)
self.fos_api.fdu.onboard(fdu)
instance = self.fos_api.fdu.define(fdu_uuid)
instance_list = self.fos_api.fdu.instance_list(fdu_uuid)
- selected_node = ''
+ selected_node = ""
+
for n in instance_list:
instances = instance_list[n]
if instance.uuid in instances:
selected_node = n
- if selected_node == '':
+
+ if selected_node == "":
raise ValueError("Unable to find node for network creation")
- self.logger.debug('Selected node by VIM: {}'.format(selected_node))
- created_items['fdu_id'] = fdu_uuid
- created_items['node_id'] = selected_node
+ self.logger.debug("Selected node by VIM: {}".format(selected_node))
+ created_items["fdu_id"] = fdu_uuid
+ created_items["node_id"] = selected_node
- for cp in fdu_desc['connection_points']:
+ for cp in fdu_desc["connection_points"]:
nets = self.fos_api.network.list()
for net in nets:
- if net.get('uuid') == cp['vld_ref']:
+ if net.get("uuid") == cp["vld_ref"]:
self.fos_api.network.add_network_to_node(net, selected_node)
self.fos_api.fdu.configure(instance.uuid)
self.fos_api.fdu.start(instance.uuid)
- self.logger.debug('Eclipse fog05 FDU Started {}'.format(instance.uuid))
+ self.logger.debug("Eclipse fog05 FDU Started {}".format(instance.uuid))
- created_items['instance_id'] = str(instance.uuid)
+ created_items["instance_id"] = str(instance.uuid)
self.fdu_node_map[instance.uuid] = selected_node
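+            # The instance-to-node mapping is kept in memory because later calls such
+            # as refresh_vms_status() and action_vminstance() look the node up again
+            # through self.fdu_node_map.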
- self.logger.debug('new_vminstance returns: {} {}'.format(instance.uuid, created_items))
+ self.logger.debug(
+ "new_vminstance returns: {} {}".format(instance.uuid, created_items)
+ )
+
return str(instance.uuid), created_items
except fimapi.FIMAResouceExistingException as free:
- raise vimconn.VimConnConflictException("VM already exists at VIM. Error {}".format(free))
+ raise vimconn.VimConnConflictException(
+ "VM already exists at VIM. Error {}".format(free)
+ )
except Exception as e:
- raise vimconn.VimConnException("Error while instantiating VM {}. Error {}".format(name, e))
+ raise vimconn.VimConnException(
+ "Error while instantiating VM {}. Error {}".format(name, e)
+ )
def get_vminstance(self, vm_id):
"""Returns the VM instance information from VIM"""
- self.logger.debug('VIM get_vminstance with args: {}'.format(locals()))
+ self.logger.debug("VIM get_vminstance with args: {}".format(locals()))
try:
instance = self.fos_api.fdu.instance_info(vm_id)
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
+
if instance is None:
- raise vimconn.VimConnNotFoundException('VM with id {} not found!'.format(vm_id))
+ raise vimconn.VimConnNotFoundException(
+ "VM with id {} not found!".format(vm_id)
+ )
+
return instance.to_json()
def delete_vminstance(self, vm_id, created_items=None):
action_vminstance
:return: None or the same vm_id. Raises an exception on fail
"""
- self.logger.debug('FOS delete_vminstance with args: {}'.format(locals()))
- fduid = created_items.get('fdu_id')
+ self.logger.debug("FOS delete_vminstance with args: {}".format(locals()))
+ fduid = created_items.get("fdu_id")
+
try:
instance = self.fos_api.fdu.instance_info(vm_id)
instance_list = self.fos_api.fdu.instance_list(instance.fdu_id)
- selected_node = ''
+ selected_node = ""
+
for n in instance_list:
instances = instance_list[n]
+
if instance.uuid in instances:
selected_node = n
- if selected_node == '':
+
+ if selected_node == "":
raise ValueError("Unable to find node for the given Instance")
self.fos_api.fdu.stop(vm_id)
- for cp in instance.to_json()['connection_points']:
+ for cp in instance.to_json()["connection_points"]:
nets = self.fos_api.network.list()
for net in nets:
- if net.get('uuid') == cp['vld_ref']:
- self.fos_api.network.remove_network_from_node(net.get('uuid'), selected_node)
+ if net.get("uuid") == cp["vld_ref"]:
+ self.fos_api.network.remove_network_from_node(
+ net.get("uuid"), selected_node
+ )
self.fos_api.fdu.clean(vm_id)
self.fos_api.fdu.undefine(vm_id)
-
self.fos_api.fdu.offload(fduid)
except Exception as e:
- raise vimconn.VimConnException("Error on deleting VM with id {}. Error {}".format(vm_id, e))
+ raise vimconn.VimConnException(
+ "Error on deleting VM with id {}. Error {}".format(vm_id, e)
+ )
+
return vm_id
# raise VimConnNotImplemented( "Should have implemented this" )
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # BUILD (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces: list with interface info. Each item a dictionary with:
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected, if provided at creation
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
- compute_node: #identification of compute node where PF,VF interface is allocated
- pci: #PCI address of the NIC that hosts the PF,VF
- vlan: #physical VLAN used for VF
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # BUILD (on building process), ERROR
+                  # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces: list with interface info. Each item a dictionary with:
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected, if provided at creation
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
"""
- self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals()))
+ self.logger.debug("FOS refresh_vms_status with args: {}".format(locals()))
fos2osm_status = {
- 'DEFINE': 'OTHER',
- 'CONFIGURE': 'INACTIVE',
- 'RUN': 'ACTIVE',
- 'PAUSE': 'PAUSED',
- 'ERROR': 'ERROR'
+ "DEFINE": "OTHER",
+ "CONFIGURE": "INACTIVE",
+ "RUN": "ACTIVE",
+ "PAUSE": "PAUSED",
+ "ERROR": "ERROR",
}
r = {}
for vm in vm_list:
- self.logger.debug('FOS refresh_vms_status for {}'.format(vm))
+ self.logger.debug("FOS refresh_vms_status for {}".format(vm))
info = {}
nid = self.fdu_node_map.get(vm)
if nid is None:
r[vm] = {
- 'status': 'VIM_ERROR',
- 'error_msg': 'Not compute node associated for VM'
+ "status": "VIM_ERROR",
+                    "error_msg": "No compute node associated with VM",
}
continue
try:
vm_info = self.fos_api.fdu.instance_info(vm)
except Exception:
- r[vm] = {
- 'status': 'VIM_ERROR',
- 'error_msg': 'unable to connect to VIM'
- }
+ r[vm] = {"status": "VIM_ERROR", "error_msg": "unable to connect to VIM"}
continue
if vm_info is None:
- r[vm:] = {'status': 'DELETED'}
+                r[vm] = {"status": "DELETED"}
continue
desc = self.fos_api.fdu.info(str(vm_info.fdu_id))
vm_info = vm_info.to_json()
desc = desc.to_json()
- osm_status = fos2osm_status.get(vm_info.get('status'))
+ osm_status = fos2osm_status.get(vm_info.get("status"))
+
+ self.logger.debug("FOS status info {}".format(vm_info))
+ self.logger.debug(
+ "FOS status is {} <-> OSM Status {}".format(
+ vm_info.get("status"), osm_status
+ )
+ )
+ info["status"] = osm_status
+
+ if vm_info.get("status") == "ERROR":
+ info["error_msg"] = vm_info.get("error_code")
- self.logger.debug('FOS status info {}'.format(vm_info))
- self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status))
- info['status'] = osm_status
- if vm_info.get('status') == 'ERROR':
- info['error_msg'] = vm_info.get('error_code')
# yaml.safe_dump(json.loads(json.dumps(vm_info)))
- # info['vim_info'] = ''
+ # info["vim_info"] = ""
faces = []
i = 0
- for intf_name in vm_info.get('hypervisor_info').get('network', []):
- intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name)
+ for intf_name in vm_info.get("hypervisor_info").get("network", []):
+ intf_info = vm_info.get("hypervisor_info").get("network").get(intf_name)
face = {}
- face['compute_node'] = nid
- # face['vim_info'] = '' #yaml.safe_dump(json.loads(json.dumps(intf_info)))
- face['mac_address'] = intf_info.get('hwaddr')
+ face["compute_node"] = nid
+ # face["vim_info"] = "" #yaml.safe_dump(json.loads(json.dumps(intf_info)))
+ face["mac_address"] = intf_info.get("hwaddr")
addrs = []
- for a in intf_info.get('addresses'):
- addrs.append(a.get('address'))
+
+ for a in intf_info.get("addresses"):
+ addrs.append(a.get("address"))
+
if len(addrs) >= 0:
- face['ip_address'] = ','.join(addrs)
+ face["ip_address"] = ",".join(addrs)
else:
- face['ip_address'] = ''
- face['pci'] = '0:0:0.0'
+ face["ip_address"] = ""
+
+ face["pci"] = "0:0:0.0"
try:
- cp_info = vm_info.get('connection_points')[i]
+ cp_info = vm_info.get("connection_points")[i]
except IndexError:
cp_info = None
+
if cp_info is not None:
- cp_id = cp_info['cp_id']
- cps_d = desc['connection_points']
- matches = [x for x in cps_d if x['id'] == cp_id]
+ cp_id = cp_info["cp_id"]
+ cps_d = desc["connection_points"]
+ matches = [x for x in cps_d if x["id"] == cp_id]
+
if len(matches) > 0:
cpd = matches[0]
- face['vim_net_id'] = cpd.get('vld_ref', '')
+ face["vim_net_id"] = cpd.get("vld_ref", "")
else:
- face['vim_net_id'] = ''
- face['vim_interface_id'] = cp_id
- # cp_info.get('uuid')
+ face["vim_net_id"] = ""
+
+ face["vim_interface_id"] = cp_id
+ # cp_info.get("uuid")
else:
- face['vim_net_id'] = ''
- face['vim_interface_id'] = intf_name
+ face["vim_net_id"] = ""
+ face["vim_interface_id"] = intf_name
+
faces.append(face)
i += 1
- info['interfaces'] = faces
+ info["interfaces"] = faces
r[vm] = info
- self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info))
- self.logger.debug('FOS refresh_vms_status res is {}'.format(r))
+ self.logger.debug(
+ "FOS refresh_vms_status res for {} is {}".format(vm, info)
+ )
+
+ self.logger.debug("FOS refresh_vms_status res is {}".format(r))
+
return r
def action_vminstance(self, vm_id, action_dict, created_items={}):
method can modify this value
:return: None, or a console dict
"""
- self.logger.debug('VIM action_vminstance with args: {}'.format(locals()))
+ self.logger.debug("VIM action_vminstance with args: {}".format(locals()))
nid = self.fdu_node_map.get(vm_id)
+
if nid is None:
- raise vimconn.VimConnNotFoundException('No node for this VM')
+ raise vimconn.VimConnNotFoundException("No node for this VM")
+
try:
instance = self.fos_api.fdu.instance_info(vm_id)
if "start" in action_dict:
- if instance.get('status') == 'CONFIGURE':
+ if instance.get("status") == "CONFIGURE":
self.fos_api.fdu.start(vm_id)
- elif instance.get('status') == 'PAUSE':
+ elif instance.get("status") == "PAUSE":
self.fos_api.fdu.resume(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot start from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot start from current state: {}".format(
+ instance.get("status")
+ )
+ )
elif "pause" in action_dict:
- if instance.get('status') == 'RUN':
+ if instance.get("status") == "RUN":
self.fos_api.fdu.pause(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot pause from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot pause from current state: {}".format(
+ instance.get("status")
+ )
+ )
elif "resume" in action_dict:
- if instance.get('status') == 'PAUSE':
+ if instance.get("status") == "PAUSE":
self.fos_api.fdu.resume(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot resume from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot resume from current state: {}".format(
+ instance.get("status")
+ )
+ )
elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict:
- if instance.get('status') == 'RUN':
+ if instance.get("status") == "RUN":
self.fos_api.fdu.stop(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot shutoff from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot shutoff from current state: {}".format(
+ instance.get("status")
+ )
+ )
elif "terminate" in action_dict:
- if instance.get('status') == 'RUN':
+ if instance.get("status") == "RUN":
self.fos_api.fdu.stop(vm_id)
self.fos_api.fdu.clean(vm_id)
self.fos_api.fdu.undefine(vm_id)
# self.fos_api.fdu.offload(vm_id)
- elif instance.get('status') == 'CONFIGURE':
+ elif instance.get("status") == "CONFIGURE":
self.fos_api.fdu.clean(vm_id)
self.fos_api.fdu.undefine(vm_id)
# self.fos_api.fdu.offload(vm_id)
- elif instance.get('status') == 'PAUSE':
+ elif instance.get("status") == "PAUSE":
self.fos_api.fdu.resume(vm_id)
self.fos_api.fdu.stop(vm_id)
self.fos_api.fdu.clean(vm_id)
self.fos_api.fdu.undefine(vm_id)
# self.fos_api.fdu.offload(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot terminate from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot terminate from current state: {}".format(
+ instance.get("status")
+ )
+ )
elif "rebuild" in action_dict:
raise vimconn.VimConnNotImplemented("Rebuild not implemented")
elif "reboot" in action_dict:
- if instance.get('status') == 'RUN':
+ if instance.get("status") == "RUN":
self.fos_api.fdu.stop(vm_id)
self.fos_api.fdu.start(vm_id)
else:
- raise vimconn.VimConnConflictException('Cannot reboot from current state: {}'.format(
- instance.get('status')))
+ raise vimconn.VimConnConflictException(
+ "Cannot reboot from current state: {}".format(
+ instance.get("status")
+ )
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e))
+ raise vimconn.VimConnConnectionException(
+ "VIM not reachable. Error {}".format(e)
+ )
setup(
name=_name,
- description='OSM ro vim plugin for Eclipse fog05',
+ description="OSM ro vim plugin for Eclipse fog05",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
- # python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ # python_requires=">3.5.0",
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"fog05==0.2.0",
"pyangbind",
"sphinx",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_fos = osm_rovim_fos.vimconn_fos:vimconnector'],
+ "osm_rovim.plugins": ["rovim_fos = osm_rovim_fos.vimconn_fos:vimconnector"],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rovim_fos --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
"""
vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API.
"""
-__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \
- "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
+__author__ = (
+ "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez "
+ "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
+)
__date__ = "$13-dec-2017 11:09:29$"
from osm_ro_plugin import vimconn
import logging
import requests
+
# import logging
import oca
+
# import untangle
import math
import random
class vimconnector(vimconn.VimConnector):
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
- log_level="DEBUG", config={}, persistent_info={}):
-
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level="DEBUG",
+ config={},
+ persistent_info={},
+ ):
"""Constructor of VIM
Params:
        'uuid': id assigned to this VIM
        Returns: Raises an exception if some needed parameter is missing, but it must not do any connectivity
check against the VIM
"""
-
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config)
-
- self.logger = logging.getLogger('ro.vim.openstack')
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ )
+
+        self.logger = logging.getLogger("ro.vim.opennebula")
def _new_one_connection(self):
- return pyone.OneServer(self.url, session=self.user + ':' + self.passwd)
+ return pyone.OneServer(self.url, session=self.user + ":" + self.passwd)
def new_tenant(self, tenant_name, tenant_description):
- # '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+ # """Adds a new tenant to VIM with this name and description, returns the tenant identifier"""
try:
- client = oca.Client(self.user + ':' + self.passwd, self.url)
+ client = oca.Client(self.user + ":" + self.passwd, self.url)
group_list = oca.GroupPool(client)
user_list = oca.UserPool(client)
group_list.info()
user_list.info()
create_primarygroup = 1
+
# create group-tenant
for group in group_list:
if str(group.name) == str(tenant_name):
create_primarygroup = 0
break
+
if create_primarygroup == 1:
oca.Group.allocate(client, tenant_name)
+
group_list.info()
+
# set to primary_group the tenant_group and oneadmin to secondary_group
for group in group_list:
if str(group.name) == str(tenant_name):
else:
self._add_secondarygroup(user.id, group.id)
user.chgrp(group.id)
+
return str(group.id)
except Exception as e:
self.logger.error("Create new tenant error: " + str(e))
+
raise vimconn.VimConnException(e)
def delete_tenant(self, tenant_id):
"""Delete a tenant from VIM. Returns the old tenant identifier"""
try:
- client = oca.Client(self.user + ':' + self.passwd, self.url)
+ client = oca.Client(self.user + ":" + self.passwd, self.url)
group_list = oca.GroupPool(client)
user_list = oca.UserPool(client)
group_list.info()
user_list.info()
+
for group in group_list:
if str(group.id) == str(tenant_id):
for user in user_list:
if str(user.name) == str(self.user):
self._delete_secondarygroup(user.id, group.id)
group.delete(client)
+
return None
- raise vimconn.VimConnNotFoundException("Group {} not found".format(tenant_id))
+
+ raise vimconn.VimConnNotFoundException(
+ "Group {} not found".format(tenant_id)
+ )
except Exception as e:
self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e))
raise vimconn.VimConnException(e)
<value><int>{}</int></value>\
</param>\
</params>\
- </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+ </methodCall>'.format(
+ self.user, self.passwd, (str(id_user)), (str(id_group))
+ )
requests.post(self.url, params)
def _delete_secondarygroup(self, id_user, id_group):
<value><int>{}</int></value>\
</param>\
</params>\
- </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+ </methodCall>'.format(
+ self.user, self.passwd, (str(id_user)), (str(id_group))
+ )
requests.post(self.url, params)
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
-
# oca library method cannot be used in this case (problem with cluster parameters)
try:
vlan = None
+
if provider_network_profile:
vlan = provider_network_profile.get("segmentation-id")
+
created_items = {}
one = self._new_one_connection()
size = "254"
+
if ip_profile is None:
subnet_rand = random.randint(0, 255)
ip_start = "192.168.{}.1".format(subnet_rand)
else:
index = ip_profile["subnet_address"].find("/")
ip_start = ip_profile["subnet_address"][:index]
+
if "dhcp_count" in ip_profile and ip_profile["dhcp_count"] is not None:
size = str(ip_profile["dhcp_count"])
- elif "dhcp_count" not in ip_profile and ip_profile["ip_version"] == "IPv4":
- prefix = ip_profile["subnet_address"][index + 1:]
+ elif (
+ "dhcp_count" not in ip_profile
+ and ip_profile["ip_version"] == "IPv4"
+ ):
+                    prefix = int(ip_profile["subnet_address"][index + 1 :])
size = int(math.pow(2, 32 - prefix))
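+                    # e.g. a /24 subnet_address yields 2 ** (32 - 24) = 256 addresses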
- if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None:
+
+ if (
+ "dhcp_start_address" in ip_profile
+ and ip_profile["dhcp_start_address"] is not None
+ ):
ip_start = str(ip_profile["dhcp_start_address"])
# if ip_profile["ip_version"] == "IPv6":
# ip_prefix_type = "GLOBAL_PREFIX"
vlan_id = vlan
else:
vlan_id = str(random.randint(100, 4095))
+
# if "internal" in net_name:
# OpenNebula not support two networks with same name
random_net_name = str(random.randint(1, 1000000))
net_name = net_name + random_net_name
- net_id = one.vn.allocate({
- 'NAME': net_name,
- 'VN_MAD': '802.1Q',
- 'PHYDEV': self.config["network"]["phydev"],
- 'VLAN_ID': vlan_id
- }, self.config["cluster"]["id"])
- arpool = {
- 'AR_POOL': {
- 'AR': {
- 'TYPE': 'IP4',
- 'IP': ip_start,
- 'SIZE': size
- }
- }
- }
+ net_id = one.vn.allocate(
+ {
+ "NAME": net_name,
+ "VN_MAD": "802.1Q",
+ "PHYDEV": self.config["network"]["phydev"],
+ "VLAN_ID": vlan_id,
+ },
+ self.config["cluster"]["id"],
+ )
+ arpool = {"AR_POOL": {"AR": {"TYPE": "IP4", "IP": ip_start, "SIZE": size}}}
one.vn.add_ar(net_id, arpool)
+
return net_id, created_items
except Exception as e:
self.logger.error("Create new network error: " + str(e))
+
raise vimconn.VimConnException(e)
def get_network_list(self, filter_dict={}):
        List can be empty if no network maps the filter_dict. Raise an exception only upon VIM connectivity,
authorization, or some other unspecific error
"""
-
try:
one = self._new_one_connection()
net_pool = one.vnpool.info(-2, -1, -1).VNET
response = []
+
if "name" in filter_dict:
network_name_filter = filter_dict["name"]
else:
network_name_filter = None
+
if "id" in filter_dict:
network_id_filter = filter_dict["id"]
else:
network_id_filter = None
+
for network in net_pool:
- if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter):
- net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"}
+ if network.NAME == network_name_filter or str(network.ID) == str(
+ network_id_filter
+ ):
+ net_dict = {
+ "name": network.NAME,
+ "id": str(network.ID),
+ "status": "ACTIVE",
+ }
response.append(net_dict)
+
return response
except Exception as e:
self.logger.error("Get network list error: " + str(e))
+
raise vimconn.VimConnException(e)
def get_network(self, net_id):
one = self._new_one_connection()
net_pool = one.vnpool.info(-2, -1, -1).VNET
net = {}
+
for network in net_pool:
if str(network.ID) == str(net_id):
- net['id'] = network.ID
- net['name'] = network.NAME
- net['status'] = "ACTIVE"
+ net["id"] = network.ID
+ net["name"] = network.NAME
+ net["status"] = "ACTIVE"
break
+
if net:
return net
else:
- raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "Network {} not found".format(net_id)
+ )
except Exception as e:
self.logger.error("Get network " + str(net_id) + " error): " + str(e))
+
raise vimconn.VimConnException(e)
def delete_network(self, net_id, created_items=None):
Returns the network identifier or raises an exception upon error or when network is not found
"""
try:
-
one = self._new_one_connection()
one.vn.delete(int(net_id))
+
return net_id
except Exception as e:
- self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e))
+ self.logger.error(
+                "Delete network " + str(net_id) + " error: network not found " + str(e)
+ )
+
raise vimconn.VimConnException(e)
def refresh_nets_status(self, net_list):
try:
for net_id in net_list:
net = {}
+
try:
net_vim = self.get_network(net_id)
net["status"] = net_vim["status"]
net["vim_info"] = None
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting net status: {}".format(str(e)))
- net['status'] = "DELETED"
- net['error_msg'] = str(e)
+ net["status"] = "DELETED"
+ net["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error(e)
net["status"] = "VIM_ERROR"
net["error_msg"] = str(e)
+
net_dict[net_id] = net
+
return net_dict
except vimconn.VimConnException as e:
self.logger.error(e)
+
for k in net_dict:
net_dict[k]["status"] = "VIM_ERROR"
net_dict[k]["error_msg"] = str(e)
+
return net_dict
def get_flavor(self, flavor_id): # Esta correcto
Raises an exception upon error or if not found
"""
try:
-
one = self._new_one_connection()
template = one.template.info(int(flavor_id))
+
if template is not None:
- return {'id': template.ID, 'name': template.NAME}
- raise vimconn.VimConnNotFoundException("Flavor {} not found".format(flavor_id))
+ return {"id": template.ID, "name": template.NAME}
+
+ raise vimconn.VimConnNotFoundException(
+ "Flavor {} not found".format(flavor_id)
+ )
except Exception as e:
self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e))
+
raise vimconn.VimConnException(e)
def new_flavor(self, flavor_data):
is_public:
#TODO to concrete
Returns the flavor identifier"""
-
- disk_size = str(int(flavor_data["disk"])*1024)
+ disk_size = str(int(flavor_data["disk"]) * 1024)
try:
one = self._new_one_connection()
- template_id = one.template.allocate({
- 'TEMPLATE': {
- 'NAME': flavor_data["name"],
- 'CPU': flavor_data["vcpus"],
- 'VCPU': flavor_data["vcpus"],
- 'MEMORY': flavor_data["ram"],
- 'DISK': {
- 'SIZE': disk_size
- },
- 'CONTEXT': {
- 'NETWORK': "YES",
- 'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]'
- },
- 'GRAPHICS': {
- 'LISTEN': '0.0.0.0',
- 'TYPE': 'VNC'
- },
- 'CLUSTER_ID': self.config["cluster"]["id"]
+ template_id = one.template.allocate(
+ {
+ "TEMPLATE": {
+ "NAME": flavor_data["name"],
+ "CPU": flavor_data["vcpus"],
+ "VCPU": flavor_data["vcpus"],
+ "MEMORY": flavor_data["ram"],
+ "DISK": {"SIZE": disk_size},
+ "CONTEXT": {
+ "NETWORK": "YES",
+ "SSH_PUBLIC_KEY": "$USER[SSH_PUBLIC_KEY]",
+ },
+ "GRAPHICS": {"LISTEN": "0.0.0.0", "TYPE": "VNC"},
+ "CLUSTER_ID": self.config["cluster"]["id"],
+ }
}
- })
- return template_id
+ )
+ return template_id
except Exception as e:
self.logger.error("Create new flavor error: " + str(e))
+
raise vimconn.VimConnException(e)
def delete_flavor(self, flavor_id):
- """ Deletes a tenant flavor from VIM
- Returns the old flavor_id
+ """Deletes a tenant flavor from VIM
+ Returns the old flavor_id
"""
try:
one = self._new_one_connection()
one.template.delete(int(flavor_id), False)
+
return flavor_id
except Exception as e:
- self.logger.error("Error deleting flavor " + str(flavor_id) + ". Flavor not found")
+ self.logger.error(
+ "Error deleting flavor " + str(flavor_id) + ". Flavor not found"
+ )
+
raise vimconn.VimConnException(e)
def get_image_list(self, filter_dict={}):
one = self._new_one_connection()
image_pool = one.imagepool.info(-2, -1, -1).IMAGE
images = []
+
if "name" in filter_dict:
image_name_filter = filter_dict["name"]
else:
image_name_filter = None
+
if "id" in filter_dict:
image_id_filter = filter_dict["id"]
else:
image_id_filter = None
+
for image in image_pool:
- if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter):
+ if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(
+ image_id_filter
+ ):
images_dict = {"name": image.NAME, "id": str(image.ID)}
images.append(images_dict)
+
return images
except Exception as e:
self.logger.error("Get image list error: " + str(e))
raise vimconn.VimConnException(e)
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""
Adds a VM instance to VIM
:param name:
as not present.
"""
self.logger.debug(
- "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list)))
+ "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(
+ image_id, flavor_id, str(net_list)
+ )
+ )
+
try:
one = self._new_one_connection()
template_vim = one.template.info(int(flavor_id), True)
one = self._new_one_connection()
template_updated = ""
+
for net in net_list:
net_in_vim = one.vn.info(int(net["net_id"]))
net["vim_id"] = str(net_in_vim.ID)
network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format(
- net_in_vim.NAME, net_in_vim.UNAME)
+ net_in_vim.NAME, net_in_vim.UNAME
+ )
template_updated += network
- template_updated += "DISK = [ IMAGE_ID = {},\n SIZE = {}]".format(image_id, disk_size)
+ template_updated += "DISK = [ IMAGE_ID = {},\n SIZE = {}]".format(
+ image_id, disk_size
+ )
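+
+            # OpenNebula VM templates are plain attribute strings, so the NIC, DISK
+            # and (optionally) CONTEXT sections are concatenated here and handed to
+            # template.instantiate() below.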
if isinstance(cloud_config, dict):
if cloud_config.get("key-pairs"):
context = 'CONTEXT = [\n NETWORK = "YES",\n SSH_PUBLIC_KEY = "'
+
for key in cloud_config["key-pairs"]:
- context += key + '\n'
+ context += key + "\n"
+
# if False:
# context += '"\n USERNAME = '
context += '"]'
template_updated += context
- vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated)
+ vm_instance_id = one.template.instantiate(
+ int(flavor_id), name, False, template_updated
+ )
self.logger.info(
- "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id))
+                "Instantiating in OpenNebula a new VM name:{} id:{}".format(
+ name, flavor_id
+ )
+ )
+
return str(vm_instance_id), None
except pyone.OneNoExistsException as e:
self.logger.error("Network with id " + str(e) + " not found: " + str(e))
+
raise vimconn.VimConnNotFoundException(e)
except Exception as e:
self.logger.error("Create new vm instance error: " + str(e))
+
raise vimconn.VimConnException(e)
def get_vminstance(self, vm_id):
try:
one = self._new_one_connection()
vm = one.vm.info(int(vm_id))
+
return vm
except Exception as e:
- self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found")
+ self.logger.error(
+ "Getting vm instance error: " + str(e) + ": VM Instance not found"
+ )
+
raise vimconn.VimConnException(e)
def delete_vminstance(self, vm_id, created_items=None):
one = self._new_one_connection()
one.vm.recover(int(vm_id), 3)
vm = None
+
while True:
if vm is not None and vm.LCM_STATE == 0:
break
else:
vm = one.vm.info(int(vm_id))
-
except pyone.OneNoExistsException:
- self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted")
- raise vimconn.VimConnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id))
+ self.logger.info(
+ "The vm " + str(vm_id) + " does not exist or is already deleted"
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "The vm {} does not exist or is already deleted".format(vm_id)
+ )
except Exception as e:
self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e))
raise vimconn.VimConnException(e)
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # BUILD (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces: list with interface info. Each item a dictionary with:
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected, if provided at creation
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
- compute_node: #identification of compute node where PF,VF interface is allocated
- pci: #PCI address of the NIC that hosts the PF,VF
- vlan: #physical VLAN used for VF
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # BUILD (on building process), ERROR
+                  # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces: list with interface info. Each item a dictionary with:
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected, if provided at creation
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
"""
vm_dict = {}
try:
for vm_id in vm_list:
vm = {}
+
if self.get_vminstance(vm_id) is not None:
vm_element = self.get_vminstance(vm_id)
else:
self.logger.info("The vm " + str(vm_id) + " does not exist.")
- vm['status'] = "DELETED"
- vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.")
+ vm["status"] = "DELETED"
+ vm["error_msg"] = "The vm " + str(vm_id) + " does not exist."
continue
+
vm["vim_info"] = None
vm_status = vm_element.LCM_STATE
+
if vm_status == 3:
- vm['status'] = "ACTIVE"
+ vm["status"] = "ACTIVE"
elif vm_status == 36:
- vm['status'] = "ERROR"
- vm['error_msg'] = "VM failure"
+ vm["status"] = "ERROR"
+ vm["error_msg"] = "VM failure"
else:
- vm['status'] = "BUILD"
+ vm["status"] = "BUILD"
if vm_element is not None:
interfaces = self._get_networks_vm(vm_element)
vm["interfaces"] = interfaces
+
vm_dict[vm_id] = vm
+
return vm_dict
except Exception as e:
self.logger.error(e)
for k in vm_dict:
vm_dict[k]["status"] = "VIM_ERROR"
vm_dict[k]["error_msg"] = str(e)
+
return vm_dict
def _get_networks_vm(self, vm_element):
try:
if isinstance(vm_element.TEMPLATE["NIC"], list):
for net in vm_element.TEMPLATE["NIC"]:
- interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
- "vim_interface_id": str(net["NETWORK_ID"])}
+ interface = {
+ "vim_info": None,
+ "mac_address": str(net["MAC"]),
+ "vim_net_id": str(net["NETWORK_ID"]),
+ "vim_interface_id": str(net["NETWORK_ID"]),
+ }
+
# maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
- if 'IP' in net:
+ if "IP" in net:
interface["ip_address"] = str(net["IP"])
- if 'IP6_GLOBAL' in net:
+
+ if "IP6_GLOBAL" in net:
interface["ip_address"] = str(net["IP6_GLOBAL"])
+
interfaces.append(interface)
else:
net = vm_element.TEMPLATE["NIC"]
- interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
- "vim_interface_id": str(net["NETWORK_ID"])}
+ interface = {
+ "vim_info": None,
+ "mac_address": str(net["MAC"]),
+ "vim_net_id": str(net["NETWORK_ID"]),
+ "vim_interface_id": str(net["NETWORK_ID"]),
+ }
+
# maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
- if 'IP' in net:
+ if "IP" in net:
interface["ip_address"] = str(net["IP"])
- if 'IP6_GLOBAL' in net:
+
+ if "IP6_GLOBAL" in net:
interface["ip_address"] = str(net["IP6_GLOBAL"])
+
interfaces.append(interface)
return interfaces
except Exception:
- self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID))
+ self.logger.error(
+ "Error getting vm interface_information of vm_id: " + str(vm_element.ID)
+ )
setup(
name=_name,
- description='OSM ro vim plugin for opennebula',
+ description="OSM ro vim plugin for opennebula",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
- # python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ # python_requires=">3.5.0",
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
"requests",
"netaddr",
"PyYAML",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula:vimconnector'],
+ "osm_rovim.plugins": [
+ "rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula:vimconnector"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rovim_opennebula --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
def setUp(self):
# instantiate dummy VIM connector so we can test it
self.vimconn = vimconnector(
- '123', 'openstackvim', '456', '789', 'http://dummy.url', None,
- 'user', 'pass')
-
- def _test_new_sfi(self, create_sfc_port_pair, sfc_encap,
- ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
- egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']):
+ "123",
+ "openstackvim",
+ "456",
+ "789",
+ "http://dummy.url",
+ None,
+ "user",
+ "pass",
+ )
+
+ def _test_new_sfi(
+ self,
+ create_sfc_port_pair,
+ sfc_encap,
+ ingress_ports=["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ egress_ports=["230cdf1b-de37-4891-bc07-f9010cf1f967"],
+ ):
# input to VIM connector
- name = 'osm_sfi'
+ name = "osm_sfi"
# + ingress_ports
# + egress_ports
# TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
- correlation = 'nsh'
+ correlation = "nsh"
if sfc_encap is not None:
if not sfc_encap:
correlation = None
- # what OpenStack is assumed to respond (patch OpenStack's return value)
- dict_from_neutron = {'port_pair': {
- 'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
- 'name': name,
- 'description': '',
- 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'ingress': ingress_ports[0] if len(ingress_ports) else None,
- 'egress': egress_ports[0] if len(egress_ports) else None,
- 'service_function_parameters': {'correlation': correlation}
- }}
+ # what OpenStack is assumed to respond (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_pair": {
+ "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "ingress": ingress_ports[0] if len(ingress_ports) else None,
+ "egress": egress_ports[0] if len(egress_ports) else None,
+ "service_function_parameters": {"correlation": correlation},
+ }
+ }
create_sfc_port_pair.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
- dict_to_neutron = {'port_pair': {
- 'name': name,
- 'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'egress': '230cdf1b-de37-4891-bc07-f9010cf1f967',
- 'service_function_parameters': {'correlation': correlation}
- }}
+ dict_to_neutron = {
+ "port_pair": {
+ "name": name,
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "egress": "230cdf1b-de37-4891-bc07-f9010cf1f967",
+ "service_function_parameters": {"correlation": correlation},
+ }
+ }
# call the VIM connector
if sfc_encap is None:
result = self.vimconn.new_sfi(name, ingress_ports, egress_ports)
else:
- result = self.vimconn.new_sfi(name, ingress_ports, egress_ports,
- sfc_encap)
+ result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, sfc_encap)
# assert that the VIM connector made the expected call to OpenStack
create_sfc_port_pair.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
- self.assertEqual(result, dict_from_neutron['port_pair']['id'])
+ self.assertEqual(result, dict_from_neutron["port_pair"]["id"])
def _test_new_sf(self, create_sfc_port_pair_group):
# input to VIM connector
- name = 'osm_sf'
- instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
- '12ba215e-3987-4892-bd3a-d0fd91eecf98',
- 'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
-
- # what OpenStack is assumed to respond (patch OpenStack's return value)
- dict_from_neutron = {'port_pair_group': {
- 'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
- 'name': name,
- 'description': '',
- 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'port_pairs': instances,
- 'group_id': 1,
- 'port_pair_group_parameters': {
- "lb_fields": [],
- "ppg_n_tuple_mapping": {
- "ingress_n_tuple": {},
- "egress_n_tuple": {}
- }}
- }}
+ name = "osm_sf"
+ instances = [
+ "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
+ "12ba215e-3987-4892-bd3a-d0fd91eecf98",
+ "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
+ ]
+
+ # what OpenStack is assumed to respond (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_pair_group": {
+ "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "port_pairs": instances,
+ "group_id": 1,
+ "port_pair_group_parameters": {
+ "lb_fields": [],
+ "ppg_n_tuple_mapping": {
+ "ingress_n_tuple": {},
+ "egress_n_tuple": {},
+ },
+ },
+ }
+ }
create_sfc_port_pair_group.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
- dict_to_neutron = {'port_pair_group': {
- 'name': name,
- 'port_pairs': ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
- '12ba215e-3987-4892-bd3a-d0fd91eecf98',
- 'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
- }}
+ dict_to_neutron = {
+ "port_pair_group": {
+ "name": name,
+ "port_pairs": [
+ "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
+ "12ba215e-3987-4892-bd3a-d0fd91eecf98",
+ "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
+ ],
+ }
+ }
# call the VIM connector
result = self.vimconn.new_sf(name, instances)
# assert that the VIM connector made the expected call to OpenStack
create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
- self.assertEqual(result, dict_from_neutron['port_pair_group']['id'])
+ self.assertEqual(result, dict_from_neutron["port_pair_group"]["id"])
def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
# input to VIM connector
- name = 'osm_sfp'
- classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
- '00f23389-bdfa-43c2-8b16-5815f2582fa8']
- sfs = ['2314daec-c262-414a-86e3-69bb6fa5bc16',
- 'd8bfdb5d-195e-4f34-81aa-6135705317df']
+ name = "osm_sfp"
+ classifications = [
+ "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
+ "00f23389-bdfa-43c2-8b16-5815f2582fa8",
+ ]
+ sfs = [
+ "2314daec-c262-414a-86e3-69bb6fa5bc16",
+ "d8bfdb5d-195e-4f34-81aa-6135705317df",
+ ]
# TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
- correlation = 'nsh'
+ correlation = "nsh"
chain_id = 33
if spi:
chain_id = spi
- # what OpenStack is assumed to respond (patch OpenStack's return value)
- dict_from_neutron = {'port_chain': {
- 'id': '5bc05721-079b-4b6e-a235-47cac331cbb6',
- 'name': name,
- 'description': '',
- 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
- 'chain_id': chain_id,
- 'flow_classifiers': classifications,
- 'port_pair_groups': sfs,
- 'chain_parameters': {'correlation': correlation}
- }}
+ # what OpenStack is assumed to respond (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_chain": {
+ "id": "5bc05721-079b-4b6e-a235-47cac331cbb6",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "chain_id": chain_id,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+ }
create_sfc_port_chain.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
- dict_to_neutron = {'port_chain': {
- 'name': name,
- 'flow_classifiers': ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
- '00f23389-bdfa-43c2-8b16-5815f2582fa8'],
- 'port_pair_groups': ['2314daec-c262-414a-86e3-69bb6fa5bc16',
- 'd8bfdb5d-195e-4f34-81aa-6135705317df'],
- 'chain_parameters': {'correlation': correlation}
- }}
+ dict_to_neutron = {
+ "port_chain": {
+ "name": name,
+ "flow_classifiers": [
+ "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
+ "00f23389-bdfa-43c2-8b16-5815f2582fa8",
+ ],
+ "port_pair_groups": [
+ "2314daec-c262-414a-86e3-69bb6fa5bc16",
+ "d8bfdb5d-195e-4f34-81aa-6135705317df",
+ ],
+ "chain_parameters": {"correlation": correlation},
+ }
+ }
if spi:
- dict_to_neutron['port_chain']['chain_id'] = spi
+ dict_to_neutron["port_chain"]["chain_id"] = spi
# call the VIM connector
if sfc_encap is None:
if spi is None:
result = self.vimconn.new_sfp(name, classifications, sfs)
else:
- result = self.vimconn.new_sfp(name, classifications, sfs,
- spi=spi)
+ result = self.vimconn.new_sfp(name, classifications, sfs, spi=spi)
else:
if spi is None:
- result = self.vimconn.new_sfp(name, classifications, sfs,
- sfc_encap)
+ result = self.vimconn.new_sfp(name, classifications, sfs, sfc_encap)
else:
- result = self.vimconn.new_sfp(name, classifications, sfs,
- sfc_encap, spi)
+ result = self.vimconn.new_sfp(
+ name, classifications, sfs, sfc_encap, spi
+ )
# assert that the VIM connector made the expected call to OpenStack
create_sfc_port_chain.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
- self.assertEqual(result, dict_from_neutron['port_chain']['id'])
+ self.assertEqual(result, dict_from_neutron["port_chain"]["id"])
def _test_new_classification(self, create_sfc_flow_classifier, ctype):
# input to VIM connector
- name = 'osm_classification'
- definition = {'ethertype': 'IPv4',
- 'logical_source_port':
- 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
- 'protocol': 'tcp',
- 'source_ip_prefix': '192.168.2.0/24',
- 'source_port_range_max': 99,
- 'source_port_range_min': 50}
-
- # what OpenStack is assumed to respond (patch OpenStack's return value)
- dict_from_neutron = {'flow_classifier': copy.copy(definition)}
- dict_from_neutron['flow_classifier'][
- 'id'] = '7735ec2c-fddf-4130-9712-32ed2ab6a372'
- dict_from_neutron['flow_classifier']['name'] = name
- dict_from_neutron['flow_classifier']['description'] = ''
- dict_from_neutron['flow_classifier'][
- 'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
- dict_from_neutron['flow_classifier'][
- 'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
+ name = "osm_classification"
+ definition = {
+ "ethertype": "IPv4",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "protocol": "tcp",
+ "source_ip_prefix": "192.168.2.0/24",
+ "source_port_range_max": 99,
+ "source_port_range_min": 50,
+ }
+
+ # what OpenStack is assumed to respond (patch OpenStack's return value)
+ dict_from_neutron = {"flow_classifier": copy.copy(definition)}
+ dict_from_neutron["flow_classifier"][
+ "id"
+ ] = "7735ec2c-fddf-4130-9712-32ed2ab6a372"
+ dict_from_neutron["flow_classifier"]["name"] = name
+ dict_from_neutron["flow_classifier"]["description"] = ""
+ dict_from_neutron["flow_classifier"][
+ "tenant_id"
+ ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
+ dict_from_neutron["flow_classifier"][
+ "project_id"
+ ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
create_sfc_flow_classifier.return_value = dict_from_neutron
# what the VIM connector is expected to
# send to OpenStack based on the input
- dict_to_neutron = {'flow_classifier': copy.copy(definition)}
- dict_to_neutron['flow_classifier']['name'] = 'osm_classification'
+ dict_to_neutron = {"flow_classifier": copy.copy(definition)}
+ dict_to_neutron["flow_classifier"]["name"] = "osm_classification"
# call the VIM connector
result = self.vimconn.new_classification(name, ctype, definition)
# assert that the VIM connector made the expected call to OpenStack
create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
# assert that the VIM connector had the expected result / return value
- self.assertEqual(result, dict_from_neutron['flow_classifier']['id'])
+ self.assertEqual(result, dict_from_neutron["flow_classifier"]["id"])
- @mock.patch.object(Client, 'create_sfc_flow_classifier')
+ @mock.patch.object(Client, "create_sfc_flow_classifier")
def test_new_classification(self, create_sfc_flow_classifier):
- self._test_new_classification(create_sfc_flow_classifier,
- 'legacy_flow_classifier')
+ self._test_new_classification(
+ create_sfc_flow_classifier, "legacy_flow_classifier"
+ )
- @mock.patch.object(Client, 'create_sfc_flow_classifier')
+ @mock.patch.object(Client, "create_sfc_flow_classifier")
def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_classification,
- create_sfc_flow_classifier, 'h265')
-
- @mock.patch.object(Client, 'create_sfc_port_pair')
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_classification,
+ create_sfc_flow_classifier,
+ "h265",
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
self._test_new_sfi(create_sfc_port_pair, True)
- @mock.patch.object(Client, 'create_sfc_port_pair')
+ @mock.patch.object(Client, "create_sfc_port_pair")
def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
self._test_new_sfi(create_sfc_port_pair, False)
- @mock.patch.object(Client, 'create_sfc_port_pair')
+ @mock.patch.object(Client, "create_sfc_port_pair")
def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
self._test_new_sfi(create_sfc_port_pair, None)
- @mock.patch.object(Client, 'create_sfc_port_pair')
+ @mock.patch.object(Client, "create_sfc_port_pair")
def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
- ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'a0273f64-82c9-11e7-b08f-6328e53f0fa7']
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_sfi,
- create_sfc_port_pair, True, ingress_ports=ingress_ports)
+ ingress_ports = [
+ "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "a0273f64-82c9-11e7-b08f-6328e53f0fa7",
+ ]
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ ingress_ports=ingress_ports,
+ )
ingress_ports = []
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_sfi,
- create_sfc_port_pair, True, ingress_ports=ingress_ports)
-
- @mock.patch.object(Client, 'create_sfc_port_pair')
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ ingress_ports=ingress_ports,
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
- egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967',
- 'b41228fe-82c9-11e7-9b44-17504174320b']
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_sfi,
- create_sfc_port_pair, True, egress_ports=egress_ports)
+ egress_ports = [
+ "230cdf1b-de37-4891-bc07-f9010cf1f967",
+ "b41228fe-82c9-11e7-9b44-17504174320b",
+ ]
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ egress_ports=egress_ports,
+ )
egress_ports = []
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_sfi,
- create_sfc_port_pair, True, egress_ports=egress_ports)
-
- @mock.patch.object(vimconnector, 'get_sfi')
- @mock.patch.object(Client, 'create_sfc_port_pair_group')
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ egress_ports=egress_ports,
+ )
+
+ @mock.patch.object(vimconnector, "get_sfi")
+ @mock.patch.object(Client, "create_sfc_port_pair_group")
def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
- get_sfi.return_value = {'sfc_encap': True}
+ get_sfi.return_value = {"sfc_encap": True}
self._test_new_sf(create_sfc_port_pair_group)
- @mock.patch.object(vimconnector, 'get_sfi')
- @mock.patch.object(Client, 'create_sfc_port_pair_group')
- def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group,
- get_sfi):
- get_sfi.return_value = {'sfc_encap': 'nsh'}
- self.assertRaises(vimconn.VimConnNotSupportedException,
- self._test_new_sf, create_sfc_port_pair_group)
-
- @mock.patch.object(Client, 'create_sfc_port_chain')
+ @mock.patch.object(vimconnector, "get_sfi")
+ @mock.patch.object(Client, "create_sfc_port_pair_group")
+ def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, get_sfi):
+ get_sfi.return_value = {"sfc_encap": "nsh"}
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sf,
+ create_sfc_port_pair_group,
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
self._test_new_sfp(create_sfc_port_chain, True, None)
- @mock.patch.object(Client, 'create_sfc_port_chain')
+ @mock.patch.object(Client, "create_sfc_port_chain")
def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
self._test_new_sfp(create_sfc_port_chain, False, None)
self._test_new_sfp(create_sfc_port_chain, False, 25)
- @mock.patch.object(Client, 'create_sfc_port_chain')
+ @mock.patch.object(Client, "create_sfc_port_chain")
def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
self._test_new_sfp(create_sfc_port_chain, None, None)
- @mock.patch.object(Client, 'create_sfc_port_chain')
+ @mock.patch.object(Client, "create_sfc_port_chain")
def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
self._test_new_sfp(create_sfc_port_chain, True, 25)
- @mock.patch.object(Client, 'create_sfc_port_chain')
+ @mock.patch.object(Client, "create_sfc_port_chain")
def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
self._test_new_sfp(create_sfc_port_chain, None, 25)
- @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
def test_get_classification_list(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
- {'source_port_range_min': 2000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'description': '',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 2000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'destination_port_range_max': None,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
- 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
- 'name': 'fc1'}]}
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ }
+ ]
+ }
# call the VIM connector
- filter_dict = {'protocol': 'tcp', 'ethertype': 'IPv4'}
+ filter_dict = {"protocol": "tcp", "ethertype": "IPv4"}
result = self.vimconn.get_classification_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
list_sfc_flow_classifiers.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
- self.assertEqual(result, [
- {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
- 'name': 'fc1',
- 'description': '',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'ctype': 'legacy_flow_classifier',
- 'definition': {
- 'source_port_range_min': 2000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 2000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
- }])
+ self.assertEqual(
+ result,
+ [
+ {
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ "description": "",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "ctype": "legacy_flow_classifier",
+ "definition": {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "destination_port_range_max": None,
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ },
+ }
+ ],
+ )
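+ # The connector is expected to lift id/name/description/project_id/tenant_id to
+ # the top level, tag the entry as "legacy_flow_classifier" and fold the remaining
+ # classifier fields into "definition".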
def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap):
# what OpenStack is assumed to return to the VIM connector
- list_port_pair.return_value = {'port_pairs': [
- {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'service_function_parameters': {'correlation': correlation},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
- 'name': 'osm_sfi'}]}
+ list_port_pair.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": correlation},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi",
+ }
+ ]
+ }
# call the VIM connector
- filter_dict = {'name': 'osm_sfi', 'description': ''}
+ filter_dict = {"name": "osm_sfi", "description": ""}
result = self.vimconn.get_sfi_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
list_port_pair.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
- self.assertEqual(result, [
- {'ingress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'egress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
- 'sfc_encap': sfc_encap,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
- 'name': 'osm_sfi'}])
-
- @mock.patch.object(Client, 'list_sfc_port_pairs')
+ self.assertEqual(
+ result,
+ [
+ {
+ "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "sfc_encap": sfc_encap,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi",
+ }
+ ],
+ )
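+ # sfc_encap in the translated result is derived from
+ # service_function_parameters["correlation"]: "nsh" maps to True and a null
+ # correlation to False, as the two tests below exercise.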
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
- self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True)
+ self._test_get_sfi_list(list_sfc_port_pairs, "nsh", True)
- @mock.patch.object(Client, 'list_sfc_port_pairs')
+ @mock.patch.object(Client, "list_sfc_port_pairs")
def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
self._test_get_sfi_list(list_sfc_port_pairs, None, False)
- @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
def test_get_sf_list(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
- {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
- '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'port_pair_group_parameters': {},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
- 'name': 'osm_sf'}]}
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": [
+ "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
+ "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
+ "name": "osm_sf",
+ }
+ ]
+ }
# call the VIM connector
- filter_dict = {'name': 'osm_sf', 'description': ''}
+ filter_dict = {"name": "osm_sf", "description": ""}
result = self.vimconn.get_sf_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pair_groups.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
- self.assertEqual(result, [
- {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
- '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
- 'name': 'osm_sf'}])
+ self.assertEqual(
+ result,
+ [
+ {
+ "sfis": [
+ "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
+ "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
+ "name": "osm_sf",
+ }
+ ],
+ )
def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_chains.return_value = {'port_chains': [
- {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
- '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
- 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
- '1387ab44-82d7-11e7-9bb0-476337183905'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'chain_parameters': {'correlation': correlation},
- 'chain_id': 40,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
- 'name': 'osm_sfp'}]}
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": [
+ "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
+ "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
+ ],
+ "flow_classifiers": [
+ "1333c2f4-82d7-11e7-a5df-9327f33d104e",
+ "1387ab44-82d7-11e7-9bb0-476337183905",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": correlation},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp",
+ }
+ ]
+ }
# call the VIM connector
- filter_dict = {'name': 'osm_sfp', 'description': ''}
+ filter_dict = {"name": "osm_sfp", "description": ""}
result = self.vimconn.get_sfp_list(filter_dict.copy())
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_chains.assert_called_with(**filter_dict)
# assert that the VIM connector successfully
# translated and returned the OpenStack result
- self.assertEqual(result, [
- {'service_functions': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
- '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
- 'classifications': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
- '1387ab44-82d7-11e7-9bb0-476337183905'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'sfc_encap': sfc_encap,
- 'spi': 40,
- 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
- 'name': 'osm_sfp'}])
-
- @mock.patch.object(Client, 'list_sfc_port_chains')
+ self.assertEqual(
+ result,
+ [
+ {
+ "service_functions": [
+ "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
+ "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
+ ],
+ "classifications": [
+ "1333c2f4-82d7-11e7-a5df-9327f33d104e",
+ "1387ab44-82d7-11e7-9bb0-476337183905",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfc_encap": sfc_encap,
+ "spi": 40,
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp",
+ }
+ ],
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
- self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True)
+ self._test_get_sfp_list(list_sfc_port_chains, "nsh", True)
- @mock.patch.object(Client, 'list_sfc_port_chains')
+ @mock.patch.object(Client, "list_sfc_port_chains")
def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
self._test_get_sfp_list(list_sfc_port_chains, None, False)
- @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
def test_get_classification(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
- {'source_port_range_min': 2000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'description': '',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 2000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'destination_port_range_max': None,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
- 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
- 'name': 'fc1'}
- ]}
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ }
+ ]
+ }
# call the VIM connector
- result = self.vimconn.get_classification(
- '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
+ result = self.vimconn.get_classification("22198366-d4e8-4d6b-b4d2-637d5d6cbb7d")
# assert that VIM connector called OpenStack with the expected filter
list_sfc_flow_classifiers.assert_called_with(
- id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
+ id="22198366-d4e8-4d6b-b4d2-637d5d6cbb7d"
+ )
# assert that VIM connector successfully returned the OpenStack result
- self.assertEqual(result,
- {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
- 'name': 'fc1',
- 'description': '',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'ctype': 'legacy_flow_classifier',
- 'definition': {
- 'source_port_range_min': 2000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 2000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'destination_port_range_max': None,
- 'logical_source_port':
- 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
- })
-
- @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ self.assertEqual(
+ result,
+ {
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ "description": "",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "ctype": "legacy_flow_classifier",
+ "definition": {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "destination_port_range_max": None,
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ },
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
def test_get_classification_many_results(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
- {'source_port_range_min': 2000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'description': '',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 2000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'destination_port_range_max': None,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
- 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
- 'name': 'fc1'},
- {'source_port_range_min': 1000,
- 'destination_ip_prefix': '192.168.3.0/24',
- 'protocol': 'udp',
- 'description': '',
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': 1000,
- 'destination_port_range_min': 3000,
- 'source_ip_prefix': '192.168.2.0/24',
- 'logical_destination_port': None,
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'destination_port_range_max': None,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
- 'id': '3196bafc-82dd-11e7-a205-9bf6c14b0721',
- 'name': 'fc2'}
- ]}
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ },
+ {
+ "source_port_range_min": 1000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 1000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ "name": "fc2",
+ },
+ ]
+ }
# call the VIM connector
- self.assertRaises(vimconn.VimConnConflictException,
- self.vimconn.get_classification,
- '3196bafc-82dd-11e7-a205-9bf6c14b0721')
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_classification,
+ "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ )
# assert the VIM connector called OpenStack with the expected filter
list_sfc_flow_classifiers.assert_called_with(
- id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
+ id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
+ )
- @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
def test_get_classification_no_results(self, list_sfc_flow_classifiers):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_flow_classifiers.return_value = {'flow_classifiers': []}
+ list_sfc_flow_classifiers.return_value = {"flow_classifiers": []}
# call the VIM connector
- self.assertRaises(vimconn.VimConnNotFoundException,
- self.vimconn.get_classification,
- '3196bafc-82dd-11e7-a205-9bf6c14b0721')
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_classification,
+ "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ )
# assert the VIM connector called OpenStack with the expected filter
list_sfc_flow_classifiers.assert_called_with(
- id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
+ id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
+ )
- @mock.patch.object(Client, 'list_sfc_port_pairs')
+ @mock.patch.object(Client, "list_sfc_port_pairs")
def test_get_sfi(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pairs.return_value = {'port_pairs': [
- {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'service_function_parameters': {'correlation': 'nsh'},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
- 'name': 'osm_sfi1'},
- ]}
+ list_sfc_port_pairs.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ ]
+ }
# call the VIM connector
- result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966')
+ result = self.vimconn.get_sfi("c121ebdd-7f2d-4213-b933-3325298a6966")
# assert the VIM connector called OpenStack with the expected filter
list_sfc_port_pairs.assert_called_with(
- id='c121ebdd-7f2d-4213-b933-3325298a6966')
+ id="c121ebdd-7f2d-4213-b933-3325298a6966"
+ )
# assert the VIM connector successfully returned the OpenStack result
- self.assertEqual(result,
- {'ingress_ports': [
- '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
- 'egress_ports': [
- '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
- 'sfc_encap': True,
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
- 'name': 'osm_sfi1'})
-
- @mock.patch.object(Client, 'list_sfc_port_pairs')
+ self.assertEqual(
+ result,
+ {
+ "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "sfc_encap": True,
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
def test_get_sfi_many_results(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pairs.return_value = {'port_pairs': [
- {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'service_function_parameters': {'correlation': 'nsh'},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
- 'name': 'osm_sfi1'},
- {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
- 'service_function_parameters': {'correlation': 'nsh'},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'c0436d92-82db-11e7-8f9c-5fa535f1261f',
- 'name': 'osm_sfi2'}
- ]}
+ list_sfc_port_pairs.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c0436d92-82db-11e7-8f9c-5fa535f1261f",
+ "name": "osm_sfi2",
+ },
+ ]
+ }
# call the VIM connector
- self.assertRaises(vimconn.VimConnConflictException,
- self.vimconn.get_sfi,
- 'c0436d92-82db-11e7-8f9c-5fa535f1261f')
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sfi,
+ "c0436d92-82db-11e7-8f9c-5fa535f1261f",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pairs.assert_called_with(
- id='c0436d92-82db-11e7-8f9c-5fa535f1261f')
+ id="c0436d92-82db-11e7-8f9c-5fa535f1261f"
+ )
- @mock.patch.object(Client, 'list_sfc_port_pairs')
+ @mock.patch.object(Client, "list_sfc_port_pairs")
def test_get_sfi_no_results(self, list_sfc_port_pairs):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pairs.return_value = {'port_pairs': []}
+ list_sfc_port_pairs.return_value = {"port_pairs": []}
# call the VIM connector
- self.assertRaises(vimconn.VimConnNotFoundException,
- self.vimconn.get_sfi,
- 'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sfi,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pairs.assert_called_with(
- id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
- @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
def test_get_sf(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
- {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'port_pair_group_parameters': {},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
- 'name': 'osm_sf1'}
- ]}
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ }
+ ]
+ }
# call the VIM connector
- result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ result = self.vimconn.get_sf("b22892fc-82d9-11e7-ae85-0fea6a3b3757")
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pair_groups.assert_called_with(
- id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
# assert that VIM connector successfully returned the OpenStack result
- self.assertEqual(result,
- {'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
- 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
- 'name': 'osm_sf1'})
-
- @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ self.assertEqual(
+ result,
+ {
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfis": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
def test_get_sf_many_results(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
- {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'port_pair_group_parameters': {},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
- 'name': 'osm_sf1'},
- {'port_pairs': ['0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'port_pair_group_parameters': {},
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': 'b22892fc-82d9-11e7-ae85-0fea6a3b3757',
- 'name': 'osm_sf2'}
- ]}
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ },
+ {
+ "port_pairs": ["0d63799c-82d6-11e7-8deb-a746bb3ae9f5"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ "name": "osm_sf2",
+ },
+ ]
+ }
# call the VIM connector
- self.assertRaises(vimconn.VimConnConflictException,
- self.vimconn.get_sf,
- 'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sf,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pair_groups.assert_called_with(
- id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
- @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
def test_get_sf_no_results(self, list_sfc_port_pair_groups):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_pair_groups.return_value = {'port_pair_groups': []}
+ list_sfc_port_pair_groups.return_value = {"port_pair_groups": []}
# call the VIM connector
- self.assertRaises(vimconn.VimConnNotFoundException,
- self.vimconn.get_sf,
- 'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sf,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_pair_groups.assert_called_with(
- id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
- @mock.patch.object(Client, 'list_sfc_port_chains')
+ @mock.patch.object(Client, "list_sfc_port_chains")
def test_get_sfp(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_chains.return_value = {'port_chains': [
- {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
- 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'chain_parameters': {'correlation': 'nsh'},
- 'chain_id': 40,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
- 'name': 'osm_sfp1'}]}
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ }
+ ]
+ }
# call the VIM connector
- result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47')
+ result = self.vimconn.get_sfp("821bc9be-82d7-11e7-8ce3-23a08a27ab47")
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_chains.assert_called_with(
- id='821bc9be-82d7-11e7-8ce3-23a08a27ab47')
+ id="821bc9be-82d7-11e7-8ce3-23a08a27ab47"
+ )
# assert that VIM connector successfully returned the OpenStack result
- self.assertEqual(result,
- {'service_functions': [
- '7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
- 'classifications': [
- '1333c2f4-82d7-11e7-a5df-9327f33d104e'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'sfc_encap': True,
- 'spi': 40,
- 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
- 'name': 'osm_sfp1'})
-
- @mock.patch.object(Client, 'list_sfc_port_chains')
+ self.assertEqual(
+ result,
+ {
+ "service_functions": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "classifications": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfc_encap": True,
+ "spi": 40,
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
def test_get_sfp_many_results(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_chains.return_value = {'port_chains': [
- {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
- 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'chain_parameters': {'correlation': 'nsh'},
- 'chain_id': 40,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
- 'name': 'osm_sfp1'},
- {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
- 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
- 'description': '',
- 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'chain_parameters': {'correlation': 'nsh'},
- 'chain_id': 50,
- 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
- 'id': '5d002f38-82de-11e7-a770-f303f11ce66a',
- 'name': 'osm_sfp2'}
- ]}
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ },
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 50,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "5d002f38-82de-11e7-a770-f303f11ce66a",
+ "name": "osm_sfp2",
+ },
+ ]
+ }
# call the VIM connector
- self.assertRaises(vimconn.VimConnConflictException,
- self.vimconn.get_sfp,
- '5d002f38-82de-11e7-a770-f303f11ce66a')
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sfp,
+ "5d002f38-82de-11e7-a770-f303f11ce66a",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_chains.assert_called_with(
- id='5d002f38-82de-11e7-a770-f303f11ce66a')
+ id="5d002f38-82de-11e7-a770-f303f11ce66a"
+ )
- @mock.patch.object(Client, 'list_sfc_port_chains')
+ @mock.patch.object(Client, "list_sfc_port_chains")
def test_get_sfp_no_results(self, list_sfc_port_chains):
# what OpenStack is assumed to return to the VIM connector
- list_sfc_port_chains.return_value = {'port_chains': []}
+ list_sfc_port_chains.return_value = {"port_chains": []}
# call the VIM connector
- self.assertRaises(vimconn.VimConnNotFoundException,
- self.vimconn.get_sfp,
- '5d002f38-82de-11e7-a770-f303f11ce66a')
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sfp,
+ "5d002f38-82de-11e7-a770-f303f11ce66a",
+ )
# assert that VIM connector called OpenStack with the expected filter
list_sfc_port_chains.assert_called_with(
- id='5d002f38-82de-11e7-a770-f303f11ce66a')
+ id="5d002f38-82de-11e7-a770-f303f11ce66a"
+ )
- @mock.patch.object(Client, 'delete_sfc_flow_classifier')
+ @mock.patch.object(Client, "delete_sfc_flow_classifier")
def test_delete_classification(self, delete_sfc_flow_classifier):
result = self.vimconn.delete_classification(
- '638f957c-82df-11e7-b7c8-132706021464')
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
delete_sfc_flow_classifier.assert_called_with(
- '638f957c-82df-11e7-b7c8-132706021464')
- self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
- @mock.patch.object(Client, 'delete_sfc_port_pair')
+ @mock.patch.object(Client, "delete_sfc_port_pair")
def test_delete_sfi(self, delete_sfc_port_pair):
- result = self.vimconn.delete_sfi(
- '638f957c-82df-11e7-b7c8-132706021464')
- delete_sfc_port_pair.assert_called_with(
- '638f957c-82df-11e7-b7c8-132706021464')
- self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+ result = self.vimconn.delete_sfi("638f957c-82df-11e7-b7c8-132706021464")
+ delete_sfc_port_pair.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
- @mock.patch.object(Client, 'delete_sfc_port_pair_group')
+ @mock.patch.object(Client, "delete_sfc_port_pair_group")
def test_delete_sf(self, delete_sfc_port_pair_group):
- result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464')
+ result = self.vimconn.delete_sf("638f957c-82df-11e7-b7c8-132706021464")
delete_sfc_port_pair_group.assert_called_with(
- '638f957c-82df-11e7-b7c8-132706021464')
- self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
- @mock.patch.object(Client, 'delete_sfc_port_chain')
+ @mock.patch.object(Client, "delete_sfc_port_chain")
def test_delete_sfp(self, delete_sfc_port_chain):
- result = self.vimconn.delete_sfp(
- '638f957c-82df-11e7-b7c8-132706021464')
- delete_sfc_port_chain.assert_called_with(
- '638f957c-82df-11e7-b7c8-132706021464')
- self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+ result = self.vimconn.delete_sfp("638f957c-82df-11e7-b7c8-132706021464")
+ delete_sfc_port_chain.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
"""
from osm_ro_plugin import vimconn
+
# import json
import logging
import netaddr
import re
import copy
from pprint import pformat
-
from novaclient import client as nClient, exceptions as nvExceptions
from keystoneauth1.identity import v2, v3
from keystoneauth1 import session
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from cinderclient import client as cClient
-from http.client import HTTPException # TODO py3 check that this base exception matches python2 httplib.HTTPException
+
+# TODO py3 check that this base exception matches python2 httplib.HTTPException
+from http.client import HTTPException
from neutronclient.neutron import client as neClient
from neutronclient.common import exceptions as neExceptions
from requests.exceptions import ConnectionError
__date__ = "$22-sep-2017 23:59:59$"
"""contain the openstack virtual machine status to openmano status"""
-vmStatus2manoFormat = {'ACTIVE': 'ACTIVE',
- 'PAUSED': 'PAUSED',
- 'SUSPENDED': 'SUSPENDED',
- 'SHUTOFF': 'INACTIVE',
- 'BUILD': 'BUILD',
- 'ERROR': 'ERROR',
- 'DELETED': 'DELETED'
- }
-netStatus2manoFormat = {'ACTIVE': 'ACTIVE',
- 'PAUSED': 'PAUSED',
- 'INACTIVE': 'INACTIVE',
- 'BUILD': 'BUILD',
- 'ERROR': 'ERROR',
- 'DELETED': 'DELETED'
- }
-
-supportedClassificationTypes = ['legacy_flow_classifier']
+vmStatus2manoFormat = {
+ "ACTIVE": "ACTIVE",
+ "PAUSED": "PAUSED",
+ "SUSPENDED": "SUSPENDED",
+ "SHUTOFF": "INACTIVE",
+ "BUILD": "BUILD",
+ "ERROR": "ERROR",
+ "DELETED": "DELETED",
+}
+netStatus2manoFormat = {
+ "ACTIVE": "ACTIVE",
+ "PAUSED": "PAUSED",
+ "INACTIVE": "INACTIVE",
+ "BUILD": "BUILD",
+ "ERROR": "ERROR",
+ "DELETED": "DELETED",
+}
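+ # Translation tables from OpenStack server/network states to the openmano states
+ # used by RO; note that an OpenStack SHUTOFF server is reported as INACTIVE.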
+
+supportedClassificationTypes = ["legacy_flow_classifier"]
# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
class vimconnector(vimconn.VimConnector):
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
- log_level=None, config={}, persistent_info={}):
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
"""using common constructor parameters. In this case
'url' is the keystone authorization url,
'url_admin' is not used
"""
- api_version = config.get('APIversion')
- if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
- raise vimconn.VimConnException("Invalid value '{}' for config:APIversion. "
- "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
- vim_type = config.get('vim_type')
- if vim_type and vim_type not in ('vio', 'VIO'):
- raise vimconn.VimConnException("Invalid value '{}' for config:vim_type."
- "Allowed values are 'vio' or 'VIO'".format(vim_type))
-
- if config.get('dataplane_net_vlan_range') is not None:
- # validate vlan ranges provided by user
- self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
+ api_version = config.get("APIversion")
+
+ if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
+ raise vimconn.VimConnException(
+ "Invalid value '{}' for config:APIversion. "
+ "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
+ )
- if config.get('multisegment_vlan_range') is not None:
+ vim_type = config.get("vim_type")
+
+ if vim_type and vim_type not in ("vio", "VIO"):
+ raise vimconn.VimConnException(
+                "Invalid value '{}' for config:vim_type. "
+ "Allowed values are 'vio' or 'VIO'".format(vim_type)
+ )
+
+ if config.get("dataplane_net_vlan_range") is not None:
# validate vlan ranges provided by user
- self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
+ self._validate_vlan_ranges(
+ config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
+ )
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config)
+ if config.get("multisegment_vlan_range") is not None:
+ # validate vlan ranges provided by user
+ self._validate_vlan_ranges(
+ config.get("multisegment_vlan_range"), "multisegment_vlan_range"
+ )
+
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ )
if self.config.get("insecure") and self.config.get("ca_cert"):
- raise vimconn.VimConnException("options insecure and ca_cert are mutually exclusive")
+ raise vimconn.VimConnException(
+ "options insecure and ca_cert are mutually exclusive"
+ )
+
self.verify = True
+
if self.config.get("insecure"):
self.verify = False
+
if self.config.get("ca_cert"):
self.verify = self.config.get("ca_cert")
if not url:
- raise TypeError('url param can not be NoneType')
+ raise TypeError("url param can not be NoneType")
+
self.persistent_info = persistent_info
- self.availability_zone = persistent_info.get('availability_zone', None)
- self.session = persistent_info.get('session', {'reload_client': True})
- self.my_tenant_id = self.session.get('my_tenant_id')
- self.nova = self.session.get('nova')
- self.neutron = self.session.get('neutron')
- self.cinder = self.session.get('cinder')
- self.glance = self.session.get('glance')
- # self.glancev1 = self.session.get('glancev1')
- self.keystone = self.session.get('keystone')
- self.api_version3 = self.session.get('api_version3')
+ self.availability_zone = persistent_info.get("availability_zone", None)
+ self.session = persistent_info.get("session", {"reload_client": True})
+ self.my_tenant_id = self.session.get("my_tenant_id")
+ self.nova = self.session.get("nova")
+ self.neutron = self.session.get("neutron")
+ self.cinder = self.session.get("cinder")
+ self.glance = self.session.get("glance")
+ # self.glancev1 = self.session.get("glancev1")
+ self.keystone = self.session.get("keystone")
+ self.api_version3 = self.session.get("api_version3")
self.vim_type = self.config.get("vim_type")
+
if self.vim_type:
self.vim_type = self.vim_type.upper()
+
if self.config.get("use_internal_endpoint"):
self.endpoint_type = "internalURL"
else:
self.endpoint_type = None
- logging.getLogger('urllib3').setLevel(logging.WARNING)
- logging.getLogger('keystoneauth').setLevel(logging.WARNING)
- logging.getLogger('novaclient').setLevel(logging.WARNING)
- self.logger = logging.getLogger('ro.vim.openstack')
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("keystoneauth").setLevel(logging.WARNING)
+ logging.getLogger("novaclient").setLevel(logging.WARNING)
+ self.logger = logging.getLogger("ro.vim.openstack")
# allow security_groups to be a list or a single string
- if isinstance(self.config.get('security_groups'), str):
- self.config['security_groups'] = [self.config['security_groups']]
+ if isinstance(self.config.get("security_groups"), str):
+ self.config["security_groups"] = [self.config["security_groups"]]
+
self.security_groups_id = None
# ###### VIO Specific Changes #########
if self.vim_type == "VIO":
- self.logger = logging.getLogger('ro.vim.vio')
+ self.logger = logging.getLogger("ro.vim.vio")
if log_level:
self.logger.setLevel(getattr(logging, log_level))
def __getitem__(self, index):
"""Get individuals parameters.
Throw KeyError"""
- if index == 'project_domain_id':
+ if index == "project_domain_id":
return self.config.get("project_domain_id")
- elif index == 'user_domain_id':
+ elif index == "user_domain_id":
return self.config.get("user_domain_id")
else:
return vimconn.VimConnector.__getitem__(self, index)
def __setitem__(self, index, value):
"""Set individuals parameters and it is marked as dirty so to force connection reload.
Throw KeyError"""
- if index == 'project_domain_id':
+ if index == "project_domain_id":
self.config["project_domain_id"] = value
- elif index == 'user_domain_id':
+ elif index == "user_domain_id":
self.config["user_domain_id"] = value
else:
vimconn.VimConnector.__setitem__(self, index, value)
- self.session['reload_client'] = True
+
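+        # flag checked by _reload_connection() so that the clients are rebuilt with the new value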
+ self.session["reload_client"] = True
def serialize(self, value):
"""Serialization of python basic types.
return value
try:
- return yaml.dump(value, Dumper=SafeDumper,
- default_flow_style=True, width=256)
+ return yaml.dump(
+ value, Dumper=SafeDumper, default_flow_style=True, width=256
+ )
except yaml.representer.RepresenterError:
- self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value),
- exc_info=True)
+ self.logger.debug(
+ "The following entity cannot be serialized in YAML:\n\n%s\n\n",
+ pformat(value),
+ exc_info=True,
+ )
+
return str(value)
def _reload_connection(self):
Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
"""
        # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
- if self.session['reload_client']:
- if self.config.get('APIversion'):
- self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
+ if self.session["reload_client"]:
+ if self.config.get("APIversion"):
+ self.api_version3 = (
+ self.config["APIversion"] == "v3.3"
+ or self.config["APIversion"] == "3"
+ )
else: # get from ending auth_url that end with v3 or with v2.0
- self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/")
- self.session['api_version3'] = self.api_version3
+ self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
+ "/v3/"
+ )
+
+ self.session["api_version3"] = self.api_version3
+
if self.api_version3:
- if self.config.get('project_domain_id') or self.config.get('project_domain_name'):
+ if self.config.get("project_domain_id") or self.config.get(
+ "project_domain_name"
+ ):
project_domain_id_default = None
else:
- project_domain_id_default = 'default'
- if self.config.get('user_domain_id') or self.config.get('user_domain_name'):
+ project_domain_id_default = "default"
+
+ if self.config.get("user_domain_id") or self.config.get(
+ "user_domain_name"
+ ):
user_domain_id_default = None
else:
- user_domain_id_default = 'default'
- auth = v3.Password(auth_url=self.url,
- username=self.user,
- password=self.passwd,
- project_name=self.tenant_name,
- project_id=self.tenant_id,
- project_domain_id=self.config.get('project_domain_id', project_domain_id_default),
- user_domain_id=self.config.get('user_domain_id', user_domain_id_default),
- project_domain_name=self.config.get('project_domain_name'),
- user_domain_name=self.config.get('user_domain_name'))
+ user_domain_id_default = "default"
+ auth = v3.Password(
+ auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ project_name=self.tenant_name,
+ project_id=self.tenant_id,
+ project_domain_id=self.config.get(
+ "project_domain_id", project_domain_id_default
+ ),
+ user_domain_id=self.config.get(
+ "user_domain_id", user_domain_id_default
+ ),
+ project_domain_name=self.config.get("project_domain_name"),
+ user_domain_name=self.config.get("user_domain_name"),
+ )
else:
- auth = v2.Password(auth_url=self.url,
- username=self.user,
- password=self.passwd,
- tenant_name=self.tenant_name,
- tenant_id=self.tenant_id)
+ auth = v2.Password(
+ auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ tenant_name=self.tenant_name,
+ tenant_id=self.tenant_id,
+ )
+
sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
# Titanium cloud and StarlingX
- region_name = self.config.get('region_name')
+ region_name = self.config.get("region_name")
+
if self.api_version3:
- self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type,
- region_name=region_name)
+ self.keystone = ksClient_v3.Client(
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
else:
- self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
- self.session['keystone'] = self.keystone
- # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+ self.keystone = ksClient_v2.Client(
+ session=sess, endpoint_type=self.endpoint_type
+ )
+
+ self.session["keystone"] = self.keystone
+ # In order to enable microversion functionality an explicit microversion must be specified in "config".
# This implementation approach is due to the warning message in
# https://developer.openstack.org/api-guide/compute/microversions.html
# where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
- # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+ # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
version = self.config.get("microversion")
+
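+            # fall back to the base compute microversion when none is configured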
if not version:
version = "2.1"
+
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
# Titanium cloud and StarlingX
- self.nova = self.session['nova'] = nClient.Client(str(version), session=sess,
- endpoint_type=self.endpoint_type, region_name=region_name)
- self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name)
- self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type,
- region_name=region_name)
+ self.nova = self.session["nova"] = nClient.Client(
+ str(version),
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ self.neutron = self.session["neutron"] = neClient.Client(
+ "2.0",
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+
try:
- self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+ self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
except Exception:
self.logger.error("Cannot get project_id from session", exc_info=True)
+
if self.endpoint_type == "internalURL":
glance_service_id = self.keystone.services.list(name="glance")[0].id
- glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
+ glance_endpoint = self.keystone.endpoints.list(
+ glance_service_id, interface="internal"
+ )[0].url
else:
glance_endpoint = None
- self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
+
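+            # if glance_endpoint is None, the glance client discovers the endpoint from the session's service catalog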
+ self.glance = self.session["glance"] = glClient.Client(
+ 2, session=sess, endpoint=glance_endpoint
+ )
# using version 1 of glance client in new_image()
- # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+ # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
# endpoint=glance_endpoint)
- self.session['reload_client'] = False
- self.persistent_info['session'] = self.session
+ self.session["reload_client"] = False
+ self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
self._set_availablity_zones()
- self.persistent_info['availability_zone'] = self.availability_zone
- self.security_groups_id = None # force to get again security_groups_ids next time they are needed
+ self.persistent_info["availability_zone"] = self.availability_zone
+ # force to get again security_groups_ids next time they are needed
+ self.security_groups_id = None
def __net_os2mano(self, net_list_dict):
"""Transform the net openstack format to mano format
else:
raise TypeError("param net_list_dict must be a list or a dictionary")
for net in net_list_:
- if net.get('provider:network_type') == "vlan":
- net['type'] = 'data'
+ if net.get("provider:network_type") == "vlan":
+ net["type"] = "data"
else:
- net['type'] = 'bridge'
+ net["type"] = "bridge"
def __classification_os2mano(self, class_list_dict):
"""Transform the openstack format (Flow Classifier) to mano format
else:
raise TypeError("param class_list_dict must be a list or a dictionary")
for classification in class_list_:
- id = classification.pop('id')
- name = classification.pop('name')
- description = classification.pop('description')
- project_id = classification.pop('project_id')
- tenant_id = classification.pop('tenant_id')
+ id = classification.pop("id")
+ name = classification.pop("name")
+ description = classification.pop("description")
+ project_id = classification.pop("project_id")
+ tenant_id = classification.pop("tenant_id")
original_classification = copy.deepcopy(classification)
classification.clear()
- classification['ctype'] = 'legacy_flow_classifier'
- classification['definition'] = original_classification
- classification['id'] = id
- classification['name'] = name
- classification['description'] = description
- classification['project_id'] = project_id
- classification['tenant_id'] = tenant_id
+ classification["ctype"] = "legacy_flow_classifier"
+ classification["definition"] = original_classification
+ classification["id"] = id
+ classification["name"] = name
+ classification["description"] = description
+ classification["project_id"] = project_id
+ classification["tenant_id"] = tenant_id
def __sfi_os2mano(self, sfi_list_dict):
"""Transform the openstack format (Port Pair) to mano format (SFI)
elif isinstance(sfi_list_dict, list):
sfi_list_ = sfi_list_dict
else:
- raise TypeError(
- "param sfi_list_dict must be a list or a dictionary")
+ raise TypeError("param sfi_list_dict must be a list or a dictionary")
+
for sfi in sfi_list_:
- sfi['ingress_ports'] = []
- sfi['egress_ports'] = []
- if sfi.get('ingress'):
- sfi['ingress_ports'].append(sfi['ingress'])
- if sfi.get('egress'):
- sfi['egress_ports'].append(sfi['egress'])
- del sfi['ingress']
- del sfi['egress']
- params = sfi.get('service_function_parameters')
+ sfi["ingress_ports"] = []
+ sfi["egress_ports"] = []
+
+ if sfi.get("ingress"):
+ sfi["ingress_ports"].append(sfi["ingress"])
+
+ if sfi.get("egress"):
+ sfi["egress_ports"].append(sfi["egress"])
+
+ del sfi["ingress"]
+ del sfi["egress"]
+ params = sfi.get("service_function_parameters")
sfc_encap = False
+
if params:
- correlation = params.get('correlation')
+ correlation = params.get("correlation")
+
if correlation:
sfc_encap = True
- sfi['sfc_encap'] = sfc_encap
- del sfi['service_function_parameters']
+
+ sfi["sfc_encap"] = sfc_encap
+ del sfi["service_function_parameters"]
def __sf_os2mano(self, sf_list_dict):
"""Transform the openstack format (Port Pair Group) to mano format (SF)
elif isinstance(sf_list_dict, list):
sf_list_ = sf_list_dict
else:
- raise TypeError(
- "param sf_list_dict must be a list or a dictionary")
+ raise TypeError("param sf_list_dict must be a list or a dictionary")
+
for sf in sf_list_:
- del sf['port_pair_group_parameters']
- sf['sfis'] = sf['port_pairs']
- del sf['port_pairs']
+ del sf["port_pair_group_parameters"]
+ sf["sfis"] = sf["port_pairs"]
+ del sf["port_pairs"]
def __sfp_os2mano(self, sfp_list_dict):
"""Transform the openstack format (Port Chain) to mano format (SFP)
elif isinstance(sfp_list_dict, list):
sfp_list_ = sfp_list_dict
else:
- raise TypeError(
- "param sfp_list_dict must be a list or a dictionary")
+ raise TypeError("param sfp_list_dict must be a list or a dictionary")
+
for sfp in sfp_list_:
- params = sfp.pop('chain_parameters')
+ params = sfp.pop("chain_parameters")
sfc_encap = False
+
if params:
- correlation = params.get('correlation')
+ correlation = params.get("correlation")
+
if correlation:
sfc_encap = True
- sfp['sfc_encap'] = sfc_encap
- sfp['spi'] = sfp.pop('chain_id')
- sfp['classifications'] = sfp.pop('flow_classifiers')
- sfp['service_functions'] = sfp.pop('port_pair_groups')
+
+ sfp["sfc_encap"] = sfc_encap
+ sfp["spi"] = sfp.pop("chain_id")
+ sfp["classifications"] = sfp.pop("flow_classifiers")
+ sfp["service_functions"] = sfp.pop("port_pair_groups")
# placeholder for now; read TODO note below
def _validate_classification(self, type, definition):
def _format_exception(self, exception):
"""Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
-
message_error = str(exception)
tip = ""
- if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
- gl1Exceptions.HTTPNotFound)):
- raise vimconn.VimConnNotFoundException(type(exception).__name__ + ": " + message_error)
- elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
- ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
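+        # map OpenStack client exceptions onto the corresponding vimconn exception types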
+ if isinstance(
+ exception,
+ (
+ neExceptions.NetworkNotFoundClient,
+ nvExceptions.NotFound,
+ ksExceptions.NotFound,
+ gl1Exceptions.HTTPNotFound,
+ ),
+ ):
+ raise vimconn.VimConnNotFoundException(
+ type(exception).__name__ + ": " + message_error
+ )
+ elif isinstance(
+ exception,
+ (
+ HTTPException,
+ gl1Exceptions.HTTPException,
+ gl1Exceptions.CommunicationError,
+ ConnectionError,
+ ksExceptions.ConnectionError,
+ neExceptions.ConnectionFailed,
+ ),
+ ):
if type(exception).__name__ == "SSLError":
tip = " (maybe option 'insecure' must be added to the VIM)"
- raise vimconn.VimConnConnectionException("Invalid URL or credentials{}: {}".format(tip, message_error))
- elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
- raise vimconn.VimConnException(type(exception).__name__ + ": " + message_error)
- elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
- neExceptions.NeutronException)):
- raise vimconn.VimConnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
+
+ raise vimconn.VimConnConnectionException(
+ "Invalid URL or credentials{}: {}".format(tip, message_error)
+ )
+ elif isinstance(
+ exception,
+ (
+ KeyError,
+ nvExceptions.BadRequest,
+ ksExceptions.BadRequest,
+ ),
+ ):
+ raise vimconn.VimConnException(
+ type(exception).__name__ + ": " + message_error
+ )
+ elif isinstance(
+ exception,
+ (
+ nvExceptions.ClientException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ),
+ ):
+ raise vimconn.VimConnUnexpectedResponse(
+ type(exception).__name__ + ": " + message_error
+ )
elif isinstance(exception, nvExceptions.Conflict):
- raise vimconn.VimConnConflictException(type(exception).__name__ + ": " + message_error)
+ raise vimconn.VimConnConflictException(
+ type(exception).__name__ + ": " + message_error
+ )
elif isinstance(exception, vimconn.VimConnException):
raise exception
else: # ()
self.logger.error("General Exception " + message_error, exc_info=True)
- raise vimconn.VimConnConnectionException(type(exception).__name__ + ": " + message_error)
+
+ raise vimconn.VimConnConnectionException(
+ type(exception).__name__ + ": " + message_error
+ )
def _get_ids_from_name(self):
"""
"""
# get tenant_id if only tenant_name is supplied
self._reload_connection()
+
if not self.my_tenant_id:
- raise vimconn.VimConnConnectionException("Error getting tenant information from name={} id={}".
- format(self.tenant_name, self.tenant_id))
- if self.config.get('security_groups') and not self.security_groups_id:
+ raise vimconn.VimConnConnectionException(
+ "Error getting tenant information from name={} id={}".format(
+ self.tenant_name, self.tenant_id
+ )
+ )
+
+ if self.config.get("security_groups") and not self.security_groups_id:
# convert from name to id
- neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+ neutron_sg_list = self.neutron.list_security_groups(
+ tenant_id=self.my_tenant_id
+ )["security_groups"]
self.security_groups_id = []
- for sg in self.config.get('security_groups'):
+ for sg in self.config.get("security_groups"):
for neutron_sg in neutron_sg_list:
if sg in (neutron_sg["id"], neutron_sg["name"]):
self.security_groups_id.append(neutron_sg["id"])
break
else:
self.security_groups_id = None
- raise vimconn.VimConnConnectionException("Not found security group {} for this tenant".format(sg))
+
+ raise vimconn.VimConnConnectionException(
+ "Not found security group {} for this tenant".format(sg)
+ )
def check_vim_connectivity(self):
# just get network list to check connectivity and credentials
Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
"""
self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
+
try:
self._reload_connection()
+
if self.api_version3:
- project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
+ project_class_list = self.keystone.projects.list(
+ name=filter_dict.get("name")
+ )
else:
project_class_list = self.keystone.tenants.findall(**filter_dict)
+
project_list = []
+
for project in project_class_list:
- if filter_dict.get('id') and filter_dict["id"] != project.id:
+ if filter_dict.get("id") and filter_dict["id"] != project.id:
continue
+
project_list.append(project.to_dict())
+
return project_list
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (
+ ksExceptions.ConnectionError,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def new_tenant(self, tenant_name, tenant_description):
"""Adds a new tenant to openstack VIM. Returns the tenant identifier"""
self.logger.debug("Adding a new tenant name: %s", tenant_name)
+
try:
self._reload_connection()
+
if self.api_version3:
- project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
- description=tenant_description, is_domain=False)
+ project = self.keystone.projects.create(
+ tenant_name,
+ self.config.get("project_domain_id", "default"),
+ description=tenant_description,
+ is_domain=False,
+ )
else:
project = self.keystone.tenants.create(tenant_name, tenant_description)
+
return project.id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)\
- as e:
+ except (
+ ksExceptions.ConnectionError,
+ ksExceptions.ClientException,
+ ksExceptions.BadRequest,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def delete_tenant(self, tenant_id):
"""Delete a tenant from openstack VIM. Returns the old tenant identifier"""
self.logger.debug("Deleting tenant %s from VIM", tenant_id)
+
try:
self._reload_connection()
+
if self.api_version3:
self.keystone.projects.delete(tenant_id)
else:
self.keystone.tenants.delete(tenant_id)
+
return tenant_id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)\
- as e:
+ except (
+ ksExceptions.ConnectionError,
+ ksExceptions.ClientException,
+ ksExceptions.NotFound,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
- self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
+ self.logger.debug(
+ "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
+ )
# self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
try:
vlan = None
+
if provider_network_profile:
vlan = provider_network_profile.get("segmentation-id")
+
new_net = None
created_items = {}
self._reload_connection()
- network_dict = {'name': net_name, 'admin_state_up': True}
+ network_dict = {"name": net_name, "admin_state_up": True}
+
if net_type in ("data", "ptp"):
provider_physical_network = None
- if provider_network_profile and provider_network_profile.get("physical-network"):
- provider_physical_network = provider_network_profile.get("physical-network")
+
+ if provider_network_profile and provider_network_profile.get(
+ "physical-network"
+ ):
+ provider_physical_network = provider_network_profile.get(
+ "physical-network"
+ )
+
                # provider-network must be one of dataplane_physical_net if this is a list. If it is a string
                # or not declared, just skip the check
- if isinstance(self.config.get('dataplane_physical_net'), (tuple, list)) and \
- provider_physical_network not in self.config['dataplane_physical_net']:
+ if (
+ isinstance(
+ self.config.get("dataplane_physical_net"), (tuple, list)
+ )
+ and provider_physical_network
+ not in self.config["dataplane_physical_net"]
+ ):
raise vimconn.VimConnConflictException(
- "Invalid parameter 'provider-network:physical-network' for network creation. '{}' is not "
- "one of the declared list at VIM_config:dataplane_physical_net".format(
- provider_physical_network))
- if not provider_physical_network: # use the default dataplane_physical_net
- provider_physical_network = self.config.get('dataplane_physical_net')
+ "Invalid parameter 'provider-network:physical-network' "
+ "for network creation. '{}' is not one of the declared "
+ "list at VIM_config:dataplane_physical_net".format(
+ provider_physical_network
+ )
+ )
+
+ # use the default dataplane_physical_net
+ if not provider_physical_network:
+ provider_physical_network = self.config.get(
+ "dataplane_physical_net"
+ )
+
                # if it is a non-empty list, use the first value. If it is a string, use the value directly
- if isinstance(provider_physical_network, (tuple, list)) and provider_physical_network:
+ if (
+ isinstance(provider_physical_network, (tuple, list))
+ and provider_physical_network
+ ):
provider_physical_network = provider_physical_network[0]
if not provider_physical_network:
raise vimconn.VimConnConflictException(
- "missing information needed for underlay networks. Provide 'dataplane_physical_net' "
- "configuration at VIM or use the NS instantiation parameter 'provider-network.physical-network'"
- " for the VLD")
-
- if not self.config.get('multisegment_support'):
- network_dict["provider:physical_network"] = provider_physical_network
- if provider_network_profile and "network-type" in provider_network_profile:
- network_dict["provider:network_type"] = provider_network_profile["network-type"]
+ "missing information needed for underlay networks. Provide "
+ "'dataplane_physical_net' configuration at VIM or use the NS "
+ "instantiation parameter 'provider-network.physical-network'"
+ " for the VLD"
+ )
+
+ if not self.config.get("multisegment_support"):
+ network_dict[
+ "provider:physical_network"
+ ] = provider_physical_network
+
+ if (
+ provider_network_profile
+ and "network-type" in provider_network_profile
+ ):
+ network_dict[
+ "provider:network_type"
+ ] = provider_network_profile["network-type"]
else:
- network_dict["provider:network_type"] = self.config.get('dataplane_network_type', 'vlan')
+ network_dict["provider:network_type"] = self.config.get(
+ "dataplane_network_type", "vlan"
+ )
+
if vlan:
network_dict["provider:segmentation_id"] = vlan
else:
# Multi-segment case
segment_list = []
segment1_dict = {
- "provider:physical_network": '',
- "provider:network_type": 'vxlan'
+ "provider:physical_network": "",
+ "provider:network_type": "vxlan",
}
segment_list.append(segment1_dict)
segment2_dict = {
"provider:physical_network": provider_physical_network,
- "provider:network_type": "vlan"
+ "provider:network_type": "vlan",
}
+
if vlan:
segment2_dict["provider:segmentation_id"] = vlan
- elif self.config.get('multisegment_vlan_range'):
+ elif self.config.get("multisegment_vlan_range"):
vlanID = self._generate_multisegment_vlanID()
segment2_dict["provider:segmentation_id"] = vlanID
+
# else
# raise vimconn.VimConnConflictException(
- # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment
+ # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
# network")
segment_list.append(segment2_dict)
network_dict["segments"] = segment_list
# VIO Specific Changes. It needs a concrete VLAN
if self.vim_type == "VIO" and vlan is None:
- if self.config.get('dataplane_net_vlan_range') is None:
+ if self.config.get("dataplane_net_vlan_range") is None:
raise vimconn.VimConnConflictException(
- "You must provide 'dataplane_net_vlan_range' in format [start_ID - end_ID] at VIM_config "
- "for creating underlay networks")
+ "You must provide 'dataplane_net_vlan_range' in format "
+ "[start_ID - end_ID] at VIM_config for creating underlay "
+ "networks"
+ )
+
network_dict["provider:segmentation_id"] = self._generate_vlanID()
network_dict["shared"] = shared
+
if self.config.get("disable_network_port_security"):
network_dict["port_security_enabled"] = False
- new_net = self.neutron.create_network({'network': network_dict})
+
+ new_net = self.neutron.create_network({"network": network_dict})
# print new_net
# create subnetwork, even if there is no profile
+
if not ip_profile:
ip_profile = {}
- if not ip_profile.get('subnet_address'):
+
+ if not ip_profile.get("subnet_address"):
# Fake subnet is required
subnet_rand = random.randint(0, 255)
- ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
- if 'ip_version' not in ip_profile:
- ip_profile['ip_version'] = "IPv4"
- subnet = {"name": net_name+"-subnet",
- "network_id": new_net["network"]["id"],
- "ip_version": 4 if ip_profile['ip_version'] == "IPv4" else 6,
- "cidr": ip_profile['subnet_address']
- }
+ ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
+
+ if "ip_version" not in ip_profile:
+ ip_profile["ip_version"] = "IPv4"
+
+ subnet = {
+ "name": net_name + "-subnet",
+ "network_id": new_net["network"]["id"],
+ "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
+ "cidr": ip_profile["subnet_address"],
+ }
+
# Gateway should be set to None if not needed. Otherwise openstack assigns one by default
- if ip_profile.get('gateway_address'):
- subnet['gateway_ip'] = ip_profile['gateway_address']
+ if ip_profile.get("gateway_address"):
+ subnet["gateway_ip"] = ip_profile["gateway_address"]
else:
- subnet['gateway_ip'] = None
- if ip_profile.get('dns_address'):
- subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
- if 'dhcp_enabled' in ip_profile:
- subnet['enable_dhcp'] = False if \
- ip_profile['dhcp_enabled'] == "false" or ip_profile['dhcp_enabled'] is False else True
- if ip_profile.get('dhcp_start_address'):
- subnet['allocation_pools'] = []
- subnet['allocation_pools'].append(dict())
- subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
- if ip_profile.get('dhcp_count'):
- # parts = ip_profile['dhcp_start_address'].split('.')
+ subnet["gateway_ip"] = None
+
+ if ip_profile.get("dns_address"):
+ subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
+
+ if "dhcp_enabled" in ip_profile:
+ subnet["enable_dhcp"] = (
+ False
+ if ip_profile["dhcp_enabled"] == "false"
+ or ip_profile["dhcp_enabled"] is False
+ else True
+ )
+
+ if ip_profile.get("dhcp_start_address"):
+ subnet["allocation_pools"] = []
+ subnet["allocation_pools"].append(dict())
+ subnet["allocation_pools"][0]["start"] = ip_profile[
+ "dhcp_start_address"
+ ]
+
+ if ip_profile.get("dhcp_count"):
+ # parts = ip_profile["dhcp_start_address"].split(".")
# ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
- ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
- ip_int += ip_profile['dhcp_count'] - 1
+ ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
+ ip_int += ip_profile["dhcp_count"] - 1
ip_str = str(netaddr.IPAddress(ip_int))
- subnet['allocation_pools'][0]['end'] = ip_str
+ subnet["allocation_pools"][0]["end"] = ip_str
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
- if net_type == "data" and self.config.get('multisegment_support'):
- if self.config.get('l2gw_support'):
+ if net_type == "data" and self.config.get("multisegment_support"):
+ if self.config.get("l2gw_support"):
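+                    # create a connection on every L2 gateway known to Neutron and record it in
+                    # created_items so it can be rolled back on failure or removed with the network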
l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
for l2gw in l2gw_list:
l2gw_conn = {
"network_id": new_net["network"]["id"],
"segmentation_id": str(vlanID),
}
- new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
- created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
+ new_l2gw_conn = self.neutron.create_l2_gateway_connection(
+ {"l2_gateway_connection": l2gw_conn}
+ )
+ created_items[
+ "l2gwconn:"
+ + str(new_l2gw_conn["l2_gateway_connection"]["id"])
+ ] = True
+
return new_net["network"]["id"], created_items
except Exception as e:
# delete l2gw connections (if any) before deleting the network
for k, v in created_items.items():
if not v: # skip already deleted
continue
+
try:
k_item, _, k_id = k.partition(":")
+
if k_item == "l2gwconn":
self.neutron.delete_l2_gateway_connection(k_id)
except Exception as e2:
- self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
+ self.logger.error(
+ "Error deleting l2 gateway connection: {}: {}".format(
+ type(e2).__name__, e2
+ )
+ )
+
if new_net:
- self.neutron.delete_network(new_net['network']['id'])
+ self.neutron.delete_network(new_net["network"]["id"])
+
self._format_exception(e)
def get_network_list(self, filter_dict={}):
Returns the network list of dictionaries
"""
self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
+
try:
self._reload_connection()
filter_dict_os = filter_dict.copy()
+
if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') # TODO check
+ # TODO check
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
net_dict = self.neutron.list_networks(**filter_dict_os)
net_list = net_dict["networks"]
self.__net_os2mano(net_list)
+
return net_list
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException,
- ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def get_network(self, net_id):
self.logger.debug(" Getting tenant network %s from VIM", net_id)
filter_dict = {"id": net_id}
net_list = self.get_network_list(filter_dict)
+
if len(net_list) == 0:
- raise vimconn.VimConnNotFoundException("Network '{}' not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "Network '{}' not found".format(net_id)
+ )
elif len(net_list) > 1:
- raise vimconn.VimConnConflictException("Found more than one network with this criteria")
+ raise vimconn.VimConnConflictException(
+ "Found more than one network with this criteria"
+ )
+
net = net_list[0]
subnets = []
for subnet_id in net.get("subnets", ()):
try:
subnet = self.neutron.show_subnet(subnet_id)
except Exception as e:
- self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e)))
+ self.logger.error(
+ "osconnector.get_network(): Error getting subnet %s %s"
+ % (net_id, str(e))
+ )
subnet = {"id": subnet_id, "fault": str(e)}
+
subnets.append(subnet)
+
net["subnets"] = subnets
- net["encapsulation"] = net.get('provider:network_type')
- net["encapsulation_type"] = net.get('provider:network_type')
- net["segmentation_id"] = net.get('provider:segmentation_id')
- net["encapsulation_id"] = net.get('provider:segmentation_id')
+ net["encapsulation"] = net.get("provider:network_type")
+ net["encapsulation_type"] = net.get("provider:network_type")
+ net["segmentation_id"] = net.get("provider:segmentation_id")
+ net["encapsulation_id"] = net.get("provider:segmentation_id")
+
return net
def delete_network(self, net_id, created_items=None):
Returns the network identifier or raises an exception upon error or when network is not found
"""
self.logger.debug("Deleting network '%s' from VIM", net_id)
+
if created_items is None:
created_items = {}
+
try:
self._reload_connection()
# delete l2gw connections (if any) before deleting the network
for k, v in created_items.items():
if not v: # skip already deleted
continue
+
try:
k_item, _, k_id = k.partition(":")
if k_item == "l2gwconn":
self.neutron.delete_l2_gateway_connection(k_id)
except Exception as e:
- self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
+ self.logger.error(
+ "Error deleting l2 gateway connection: {}: {}".format(
+ type(e).__name__, e
+ )
+ )
+
# delete VM ports attached to this networks before the network
ports = self.neutron.list_ports(network_id=net_id)
- for p in ports['ports']:
+ for p in ports["ports"]:
try:
self.neutron.delete_port(p["id"])
except Exception as e:
self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+
self.neutron.delete_network(net_id)
+
return net_id
- except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
- ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NetworkNotFoundClient,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def refresh_nets_status(self, net_list):
"""Get the status of the networks
- Params: the list of network identifiers
- Returns a dictionary with:
- net_id: #VIM id of this network
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, INACTIVE, DOWN (admin down),
- # BUILD (on building process)
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
-
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
net_dict = {}
+
for net_id in net_list:
net = {}
+
try:
net_vim = self.get_network(net_id)
- if net_vim['status'] in netStatus2manoFormat:
- net["status"] = netStatus2manoFormat[net_vim['status']]
+
+ if net_vim["status"] in netStatus2manoFormat:
+ net["status"] = netStatus2manoFormat[net_vim["status"]]
else:
net["status"] = "OTHER"
- net["error_msg"] = "VIM status reported " + net_vim['status']
+ net["error_msg"] = "VIM status reported " + net_vim["status"]
- if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
- net['status'] = 'DOWN'
+ if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
+ net["status"] = "DOWN"
- net['vim_info'] = self.serialize(net_vim)
+ net["vim_info"] = self.serialize(net_vim)
- if net_vim.get('fault'): # TODO
- net['error_msg'] = str(net_vim['fault'])
+ if net_vim.get("fault"): # TODO
+ net["error_msg"] = str(net_vim["fault"])
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting net status: %s", str(e))
- net['status'] = "DELETED"
- net['error_msg'] = str(e)
+ net["status"] = "DELETED"
+ net["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting net status: %s", str(e))
- net['status'] = "VIM_ERROR"
- net['error_msg'] = str(e)
+ net["status"] = "VIM_ERROR"
+ net["error_msg"] = str(e)
net_dict[net_id] = net
return net_dict
def get_flavor(self, flavor_id):
"""Obtain flavor details from the VIM. Returns the flavor dict details"""
self.logger.debug("Getting flavor '%s'", flavor_id)
+
try:
self._reload_connection()
flavor = self.nova.flavors.find(id=flavor_id)
# TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+
return flavor.to_dict()
- except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException,
- ConnectionError) as e:
+ except (
+ nvExceptions.NotFound,
+ nvExceptions.ClientException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
"""Obtain flavor id that match the flavor description
- Returns the flavor_id or raises a vimconnNotFoundException
- flavor_dict: contains the required ram, vcpus, disk
- If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
- and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
- vimconnNotFoundException is raised
+ Returns the flavor_id or raises a vimconnNotFoundException
+ flavor_dict: contains the required ram, vcpus, disk
+ If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
+ and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
+ vimconnNotFoundException is raised
"""
- exact_match = False if self.config.get('use_existing_flavors') else True
+ exact_match = False if self.config.get("use_existing_flavors") else True
+
try:
self._reload_connection()
flavor_candidate_id = None
flavor_candidate_data = (10000, 10000, 10000)
- flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+ flavor_target = (
+ flavor_dict["ram"],
+ flavor_dict["vcpus"],
+ flavor_dict["disk"],
+ )
# numa=None
extended = flavor_dict.get("extended", {})
if extended:
# TODO
- raise vimconn.VimConnNotFoundException("Flavor with EPA still not implemented")
+ raise vimconn.VimConnNotFoundException(
+ "Flavor with EPA still not implemented"
+ )
# if len(numas) > 1:
# raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
# numa=numas[0]
# numas = extended.get("numas")
for flavor in self.nova.flavors.list():
epa = flavor.get_keys()
+
if epa:
continue
# TODO
+
flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
if flavor_data == flavor_target:
return flavor.id
- elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+ elif (
+ not exact_match
+ and flavor_target < flavor_data < flavor_candidate_data
+ ):
flavor_candidate_id = flavor.id
flavor_candidate_data = flavor_data
+
if not exact_match and flavor_candidate_id:
return flavor_candidate_id
- raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(flavor_dict))
- except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException,
- ConnectionError) as e:
+
+ raise vimconn.VimConnNotFoundException(
+ "Cannot find any flavor matching '{}'".format(flavor_dict)
+ )
+ except (
+ nvExceptions.NotFound,
+ nvExceptions.ClientException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def process_resource_quota(self, quota, prefix, extra_specs):
:param extra_specs:
:return:
"""
- if 'limit' in quota:
- extra_specs["quota:" + prefix + "_limit"] = quota['limit']
- if 'reserve' in quota:
- extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
- if 'shares' in quota:
+ if "limit" in quota:
+ extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
+
+ if "reserve" in quota:
+ extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
+
+ if "shares" in quota:
extra_specs["quota:" + prefix + "_shares_level"] = "custom"
- extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+ extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
def new_flavor(self, flavor_data, change_name_if_used=True):
"""Adds a tenant flavor to openstack VIM
retry = 0
max_retries = 3
name_suffix = 0
+
try:
- name = flavor_data['name']
+ name = flavor_data["name"]
while retry < max_retries:
retry += 1
try:
self._reload_connection()
+
if change_name_if_used:
# get used names
fl_names = []
fl = self.nova.flavors.list()
+
for f in fl:
fl_names.append(f.name)
+
while name in fl_names:
name_suffix += 1
- name = flavor_data['name']+"-" + str(name_suffix)
+ name = flavor_data["name"] + "-" + str(name_suffix)
- ram = flavor_data.get('ram', 64)
- vcpus = flavor_data.get('vcpus', 1)
+ ram = flavor_data.get("ram", 64)
+ vcpus = flavor_data.get("vcpus", 1)
extra_specs = {}
extended = flavor_data.get("extended")
if extended:
numas = extended.get("numas")
+
if numas:
numa_nodes = len(numas)
+
if numa_nodes > 1:
return -1, "Can not add flavor with more than one numa"
+
extra_specs["hw:numa_nodes"] = str(numa_nodes)
extra_specs["hw:mem_page_size"] = "large"
extra_specs["hw:cpu_policy"] = "dedicated"
extra_specs["hw:numa_mempolicy"] = "strict"
+
if self.vim_type == "VIO":
- extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+ extra_specs[
+ "vmware:extra_config"
+ ] = '{"numa.nodeAffinity":"0"}'
extra_specs["vmware:latency_sensitivity_level"] = "high"
+
for numa in numas:
# overwrite ram and vcpus
- # check if key 'memory' is present in numa else use ram value at flavor
- if 'memory' in numa:
- ram = numa['memory']*1024
+ # check if key "memory" is present in numa else use ram value at flavor
+ if "memory" in numa:
+ ram = numa["memory"] * 1024
# See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
# implemented/virt-driver-cpu-thread-pinning.html
extra_specs["hw:cpu_sockets"] = 1
- if 'paired-threads' in numa:
- vcpus = numa['paired-threads']*2
- # cpu_thread_policy "require" implies that the compute node must have an
+
+ if "paired-threads" in numa:
+ vcpus = numa["paired-threads"] * 2
+ # cpu_thread_policy "require" implies that the compute node must have an
                            # SMT architecture
extra_specs["hw:cpu_thread_policy"] = "require"
extra_specs["hw:cpu_policy"] = "dedicated"
- elif 'cores' in numa:
- vcpus = numa['cores']
- # cpu_thread_policy "prefer" implies that the host must not have an SMT
+ elif "cores" in numa:
+ vcpus = numa["cores"]
+                            # cpu_thread_policy "isolate" implies that the host must not have an SMT
# architecture, or a non-SMT architecture will be emulated
extra_specs["hw:cpu_thread_policy"] = "isolate"
extra_specs["hw:cpu_policy"] = "dedicated"
- elif 'threads' in numa:
- vcpus = numa['threads']
+ elif "threads" in numa:
+ vcpus = numa["threads"]
# cpu_thread_policy "prefer" implies that the host may or may not have an SMT
# architecture
extra_specs["hw:cpu_thread_policy"] = "prefer"
# if interface["dedicated"]=="yes":
# raise vimconn.VimConnException("Passthrough interfaces are not supported
# for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
+ # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
# when a way to connect it is available
elif extended.get("cpu-quota"):
- self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
+ self.process_resource_quota(
+ extended.get("cpu-quota"), "cpu", extra_specs
+ )
+
if extended.get("mem-quota"):
- self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
+ self.process_resource_quota(
+ extended.get("mem-quota"), "memory", extra_specs
+ )
+
if extended.get("vif-quota"):
- self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
+ self.process_resource_quota(
+ extended.get("vif-quota"), "vif", extra_specs
+ )
+
if extended.get("disk-io-quota"):
- self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
+ self.process_resource_quota(
+ extended.get("disk-io-quota"), "disk_io", extra_specs
+ )
+
# create flavor
- new_flavor = self.nova.flavors.create(name,
- ram,
- vcpus,
- flavor_data.get('disk', 0),
- is_public=flavor_data.get('is_public', True)
- )
+ new_flavor = self.nova.flavors.create(
+ name,
+ ram,
+ vcpus,
+ flavor_data.get("disk", 0),
+ is_public=flavor_data.get("is_public", True),
+ )
# add metadata
if extra_specs:
new_flavor.set_keys(extra_specs)
+
return new_flavor.id
except nvExceptions.Conflict as e:
if change_name_if_used and retry < max_retries:
continue
+
self._format_exception(e)
# except nvExceptions.BadRequest as e:
- except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ConnectionError,
+ KeyError,
+ ) as e:
self._format_exception(e)
def delete_flavor(self, flavor_id):
- """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
- """
+ """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
try:
self._reload_connection()
self.nova.flavors.delete(flavor_id)
+
return flavor_id
# except nvExceptions.BadRequest as e:
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException,
- ConnectionError) as e:
+ except (
+ nvExceptions.NotFound,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def new_image(self, image_dict):
"""
retry = 0
max_retries = 3
+
while retry < max_retries:
retry += 1
try:
self._reload_connection()
+
# determine format http://docs.openstack.org/developer/glance/formats.html
if "disk_format" in image_dict:
disk_format = image_dict["disk_format"]
else: # autodiscover based on extension
- if image_dict['location'].endswith(".qcow2"):
+ if image_dict["location"].endswith(".qcow2"):
disk_format = "qcow2"
- elif image_dict['location'].endswith(".vhd"):
+ elif image_dict["location"].endswith(".vhd"):
disk_format = "vhd"
- elif image_dict['location'].endswith(".vmdk"):
+ elif image_dict["location"].endswith(".vmdk"):
disk_format = "vmdk"
- elif image_dict['location'].endswith(".vdi"):
+ elif image_dict["location"].endswith(".vdi"):
disk_format = "vdi"
- elif image_dict['location'].endswith(".iso"):
+ elif image_dict["location"].endswith(".iso"):
disk_format = "iso"
- elif image_dict['location'].endswith(".aki"):
+ elif image_dict["location"].endswith(".aki"):
disk_format = "aki"
- elif image_dict['location'].endswith(".ari"):
+ elif image_dict["location"].endswith(".ari"):
disk_format = "ari"
- elif image_dict['location'].endswith(".ami"):
+ elif image_dict["location"].endswith(".ami"):
disk_format = "ami"
else:
disk_format = "raw"
- self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
+
+ self.logger.debug(
+ "new_image: '%s' loading from '%s'",
+ image_dict["name"],
+ image_dict["location"],
+ )
if self.vim_type == "VIO":
container_format = "bare"
- if 'container_format' in image_dict:
- container_format = image_dict['container_format']
- new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
- disk_format=disk_format)
+ if "container_format" in image_dict:
+ container_format = image_dict["container_format"]
+
+ new_image = self.glance.images.create(
+ name=image_dict["name"],
+ container_format=container_format,
+ disk_format=disk_format,
+ )
else:
- new_image = self.glance.images.create(name=image_dict['name'])
- if image_dict['location'].startswith("http"):
+ new_image = self.glance.images.create(name=image_dict["name"])
+
+ if image_dict["location"].startswith("http"):
                    # TODO there is no method for direct download. It must be downloaded locally with requests
raise vimconn.VimConnNotImplemented("Cannot create image from URL")
else: # local path
- with open(image_dict['location']) as fimage:
+ with open(image_dict["location"]) as fimage:
self.glance.images.upload(new_image.id, fimage)
- # new_image = self.glancev1.images.create(name=image_dict['name'], is_public=
- # image_dict.get('public',"yes")=="yes",
+ # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
+ # image_dict.get("public","yes")=="yes",
# container_format="bare", data=fimage, disk_format=disk_format)
- metadata_to_load = image_dict.get('metadata')
- # TODO location is a reserved word for current openstack versions. fixed for VIO please check
+
+ metadata_to_load = image_dict.get("metadata")
+
+ # TODO location is a reserved word for current openstack versions. fixed for VIO please check
# for openstack
if self.vim_type == "VIO":
- metadata_to_load['upload_location'] = image_dict['location']
+ metadata_to_load["upload_location"] = image_dict["location"]
else:
- metadata_to_load['location'] = image_dict['location']
+ metadata_to_load["location"] = image_dict["location"]
+
self.glance.images.update(new_image.id, **metadata_to_load)
+
return new_image.id
- except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ except (
+ nvExceptions.Conflict,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ) as e:
self._format_exception(e)
- except (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+ except (
+ HTTPException,
+ gl1Exceptions.HTTPException,
+ gl1Exceptions.CommunicationError,
+ ConnectionError,
+ ) as e:
if retry == max_retries:
continue
+
self._format_exception(e)
except IOError as e: # can not open the file
- raise vimconn.VimConnConnectionException("{}: {} for {}".format(type(e).__name__, e,
- image_dict['location']),
- http_code=vimconn.HTTP_Bad_Request)
+ raise vimconn.VimConnConnectionException(
+ "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
def delete_image(self, image_id):
- """Deletes a tenant image from openstack VIM. Returns the old id
- """
+ """Deletes a tenant image from openstack VIM. Returns the old id"""
try:
self._reload_connection()
self.glance.images.delete(image_id)
+
return image_id
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException,
- gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: # TODO remove
+ except (
+ nvExceptions.NotFound,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ gl1Exceptions.CommunicationError,
+ gl1Exceptions.HTTPNotFound,
+ ConnectionError,
+ ) as e: # TODO remove
self._format_exception(e)
def get_image_id_from_path(self, path):
try:
self._reload_connection()
images = self.glance.images.list()
+
for image in images:
if image.metadata.get("location") == path:
return image.id
- raise vimconn.VimConnNotFoundException("image with location '{}' not found".format(path))
- except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError,
- ConnectionError) as e:
+
+ raise vimconn.VimConnNotFoundException(
+ "image with location '{}' not found".format(path)
+ )
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ gl1Exceptions.CommunicationError,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def get_image_list(self, filter_dict={}):
List can be empty
"""
self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
+
try:
self._reload_connection()
# filter_dict_os = filter_dict.copy()
# First we filter by the available filter fields: name, id. The others are removed.
image_list = self.glance.images.list()
filtered_list = []
+
for image in image_list:
try:
if filter_dict.get("name") and image["name"] != filter_dict["name"]:
continue
+
if filter_dict.get("id") and image["id"] != filter_dict["id"]:
continue
- if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
+
+ if (
+ filter_dict.get("checksum")
+ and image["checksum"] != filter_dict["checksum"]
+ ):
continue
filtered_list.append(image.copy())
except gl1Exceptions.HTTPNotFound:
pass
+
return filtered_list
- except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError,
- ConnectionError) as e:
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ gl1Exceptions.CommunicationError,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
elapsed_time = 0
while elapsed_time < server_timeout:
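+            # poll the server status every 5 seconds until it reaches the requested status, goes to ERROR, or the timeout expires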
vm_status = self.nova.servers.get(vm_id).status
+
if vm_status == status:
return True
- if vm_status == 'ERROR':
+
+ if vm_status == "ERROR":
return False
+
time.sleep(5)
elapsed_time += 5
# if we exceeded the timeout rollback
if elapsed_time >= server_timeout:
- raise vimconn.VimConnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
- http_code=vimconn.HTTP_Request_Timeout)
+ raise vimconn.VimConnException(
+ "Timeout waiting for instance " + vm_id + " to get " + status,
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
def _get_openstack_availablity_zones(self):
"""
"""
try:
openstack_availability_zone = self.nova.availability_zones.list()
- openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
- if zone.zoneName != 'internal']
+ openstack_availability_zone = [
+ str(zone.zoneName)
+ for zone in openstack_availability_zone
+ if zone.zoneName != "internal"
+ ]
+
return openstack_availability_zone
except Exception:
return None
        Set VIM availability zone
:return:
"""
-        if 'availability_zone' in self.config:
-            vim_availability_zones = self.config.get('availability_zone')
+        if "availability_zone" in self.config:
+            vim_availability_zones = self.config.get("availability_zone")
if isinstance(vim_availability_zones, str):
self.availability_zone = [vim_availability_zones]
elif isinstance(vim_availability_zones, list):
else:
self.availability_zone = self._get_openstack_availablity_zones()
- def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+ def _get_vm_availability_zone(
+ self, availability_zone_index, availability_zone_list
+ ):
"""
        Return the availability zone to be used by the created VM.
:return: The VIM availability zone to be used or None
"""
if availability_zone_index is None:
- if not self.config.get('availability_zone'):
+ if not self.config.get("availability_zone"):
return None
- elif isinstance(self.config.get('availability_zone'), str):
- return self.config['availability_zone']
+ elif isinstance(self.config.get("availability_zone"), str):
+ return self.config["availability_zone"]
else:
# TODO consider using a different parameter at config for default AV and AV list match
- return self.config['availability_zone'][0]
+ return self.config["availability_zone"][0]
vim_availability_zones = self.availability_zone
        # check if VIM offers enough availability zones as described in the VNFD
- if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+ if vim_availability_zones and len(availability_zone_list) <= len(
+ vim_availability_zones
+ ):
# check if all the names of NFV AV match VIM AV names
match_by_index = False
for av in availability_zone_list:
if av not in vim_availability_zones:
match_by_index = True
break
+
if match_by_index:
return vim_availability_zones[availability_zone_index]
else:
return availability_zone_list[availability_zone_index]
else:
- raise vimconn.VimConnConflictException("No enough availability zones at VIM for this deployment")
-
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ raise vimconn.VimConnConflictException(
+                "Not enough availability zones at VIM for this deployment"
+ )
+
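A small standalone illustration of the index-based fallback implemented in _get_vm_availability_zone above; the zone names and index are hypothetical:

# Sketch of the selection rule: if every requested zone name exists at the VIM,
# the index selects from the requested list; otherwise it selects from the VIM list.
vim_zones = ["nova-az1", "nova-az2", "nova-az3"]  # zones reported by the VIM
requested = ["nova-az2", "edge-zone"]  # zone names coming from the VNFD
index = 1  # availability_zone_index chosen for this VDU

# "edge-zone" is unknown to the VIM, so the selection falls back to the VIM list.
match_by_index = any(zone not in vim_zones for zone in requested)
selected = vim_zones[index] if match_by_index else requested[index]
print(selected)  # -> nova-az2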
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Adds a VM instance to VIM
Params:
start: indicates if VM must start or boot in pause mode. Ignored
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
- self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
+ self.logger.debug(
+ "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+ image_id,
+ flavor_id,
+ str(net_list),
+ )
+
try:
server = None
created_items = {}
net_list_vim = []
external_network = []
# ^list of external networks to be connected to instance, later on used to create floating_ip
- no_secured_ports = [] # List of port-is with port-security disabled
+            no_secured_ports = []  # List of port ids with port-security disabled
self._reload_connection()
- # metadata_vpci = {} # For a specific neutron plugin
+ # metadata_vpci = {} # For a specific neutron plugin
block_device_mapping = None
for net in net_list:
- if not net.get("net_id"): # skip non connected iface
+ if not net.get("net_id"): # skip non connected iface
continue
port_dict = {
"network_id": net["net_id"],
"name": net.get("name"),
- "admin_state_up": True
+ "admin_state_up": True,
}
- if self.config.get("security_groups") and net.get("port_security") is not False and \
- not self.config.get("no_port_security_extension"):
+
+ if (
+ self.config.get("security_groups")
+ and net.get("port_security") is not False
+ and not self.config.get("no_port_security_extension")
+ ):
if not self.security_groups_id:
self._get_ids_from_name()
+
port_dict["security_groups"] = self.security_groups_id
if net["type"] == "virtual":
# metadata_vpci["VF"]=[]
# metadata_vpci["VF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"] = "direct"
+
# VIO specific Changes
if self.vim_type == "VIO":
# Need to create port with port_security_enabled = False and no-security-groups
port_dict["port_security_enabled"] = False
port_dict["provider_security_groups"] = []
port_dict["security_groups"] = []
- else: # For PT PCI-PASSTHROUGH
+ else: # For PT PCI-PASSTHROUGH
# if "vpci" in net:
# if "PF" not in metadata_vpci:
# metadata_vpci["PF"]=[]
# metadata_vpci["PF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"] = "direct-physical"
+
if not port_dict["name"]:
port_dict["name"] = name
+
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
+
if net.get("ip_address"):
- port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
- # TODO add 'subnet_id': <subnet_id>
+ port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ # TODO add "subnet_id": <subnet_id>
+
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
net["mac_adress"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
                # if trying to use a network without a subnetwork, it will return an empty list
fixed_ips = new_port["port"].get("fixed_ips")
+
if fixed_ips:
net["ip"] = fixed_ips[0].get("ip_address")
else:
port = {"port-id": new_port["port"]["id"]}
if float(self.nova.api_version.get_string()) >= 2.32:
port["tag"] = new_port["port"]["name"]
+
net_list_vim.append(port)
- if net.get('floating_ip', False):
- net['exit_on_floating_ip_error'] = True
+ if net.get("floating_ip", False):
+ net["exit_on_floating_ip_error"] = True
external_network.append(net)
- elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
- net['exit_on_floating_ip_error'] = False
+ elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+ net["exit_on_floating_ip_error"] = False
external_network.append(net)
- net['floating_ip'] = self.config.get('use_floating_ip')
+ net["floating_ip"] = self.config.get("use_floating_ip")
# If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
# is dropped.
# As a workaround we wait until the VM is active and then disable the port-security
- if net.get("port_security") is False and not self.config.get("no_port_security_extension"):
- no_secured_ports.append((new_port["port"]["id"], net.get("port_security_disable_strategy")))
+ if net.get("port_security") is False and not self.config.get(
+ "no_port_security_extension"
+ ):
+ no_secured_ports.append(
+ (
+ new_port["port"]["id"],
+ net.get("port_security_disable_strategy"),
+ )
+ )
# if metadata_vpci:
# metadata = {"pci_assignement": json.dumps(metadata_vpci)}
# self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
# metadata = {}
- self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
- name, image_id, flavor_id, str(net_list_vim), description)
+ self.logger.debug(
+ "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
+ name,
+ image_id,
+ flavor_id,
+ str(net_list_vim),
+ description,
+ )
# cloud config
config_drive, userdata = self._create_user_data(cloud_config)
# Create additional volumes in case these are present in disk_list
- base_disk_index = ord('b')
+ base_disk_index = ord("b")
if disk_list:
block_device_mapping = {}
for disk in disk_list:
- if disk.get('vim_id'):
- block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
+ if disk.get("vim_id"):
+ block_device_mapping["_vd" + chr(base_disk_index)] = disk[
+ "vim_id"
+ ]
else:
- if 'image_id' in disk:
- volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
- chr(base_disk_index), imageRef=disk['image_id'])
+ if "image_id" in disk:
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "_vd" + chr(base_disk_index),
+ imageRef=disk["image_id"],
+ )
else:
- volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
- chr(base_disk_index))
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "_vd" + chr(base_disk_index),
+ )
+
created_items["volume:" + str(volume.id)] = True
- block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+ block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+
base_disk_index += 1
# Wait until created volumes are with status available
while elapsed_time < volume_timeout:
for created_item in created_items:
v, _, volume_id = created_item.partition(":")
- if v == 'volume':
- if self.cinder.volumes.get(volume_id).status != 'available':
+ if v == "volume":
+ if self.cinder.volumes.get(volume_id).status != "available":
break
else: # all ready: break from while
break
+
time.sleep(5)
elapsed_time += 5
+
# If we exceeded the timeout rollback
if elapsed_time >= volume_timeout:
- raise vimconn.VimConnException('Timeout creating volumes for instance ' + name,
- http_code=vimconn.HTTP_Request_Timeout)
+ raise vimconn.VimConnException(
+ "Timeout creating volumes for instance " + name,
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
+
# get availability Zone
- vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
-
- self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
- "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
- "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
- self.config.get("security_groups"), vm_av_zone,
- self.config.get('keypair'), userdata, config_drive,
- block_device_mapping))
- server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
- security_groups=self.config.get("security_groups"),
- # TODO remove security_groups in future versions. Already at neutron port
- availability_zone=vm_av_zone,
- key_name=self.config.get('keypair'),
- userdata=userdata,
- config_drive=config_drive,
- block_device_mapping=block_device_mapping
- ) # , description=description)
+ vm_av_zone = self._get_vm_availability_zone(
+ availability_zone_index, availability_zone_list
+ )
+
+ self.logger.debug(
+ "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+ "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+ "block_device_mapping={})".format(
+ name,
+ image_id,
+ flavor_id,
+ net_list_vim,
+ self.config.get("security_groups"),
+ vm_av_zone,
+ self.config.get("keypair"),
+ userdata,
+ config_drive,
+ block_device_mapping,
+ )
+ )
+ server = self.nova.servers.create(
+ name,
+ image_id,
+ flavor_id,
+ nics=net_list_vim,
+ security_groups=self.config.get("security_groups"),
+ # TODO remove security_groups in future versions. Already at neutron port
+ availability_zone=vm_av_zone,
+ key_name=self.config.get("keypair"),
+ userdata=userdata,
+ config_drive=config_drive,
+ block_device_mapping=block_device_mapping,
+ ) # , description=description)
vm_start_time = time.time()
# Previously mentioned workaround to wait until the VM is active and then disable the port-security
if no_secured_ports:
- self.__wait_for_vm(server.id, 'ACTIVE')
+ self.__wait_for_vm(server.id, "ACTIVE")
for port in no_secured_ports:
port_update = {
- "port": {
- "port_security_enabled": False,
- "security_groups": None
- }
+ "port": {"port_security_enabled": False, "security_groups": None}
}
if port[1] == "allow-address-pairs":
port_update = {
- "port": {
- "allowed_address_pairs": [
- {
- "ip_address": "0.0.0.0/0"
- }
- ]
- }
+ "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
}
try:
self.neutron.update_port(port[0], port_update)
except Exception:
raise vimconn.VimConnException(
- "It was not possible to disable port security for port {}"
- .format(port[0])
+ "It was not possible to disable port security for port {}".format(
+ port[0]
+ )
)
# print "DONE :-)", server
# In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
# several times
while not assigned:
- floating_ips = self.neutron.list_floatingips().get("floatingips", ())
- random.shuffle(floating_ips) # randomize
+ floating_ips = self.neutron.list_floatingips().get(
+ "floatingips", ()
+ )
+ random.shuffle(floating_ips) # randomize
for fip in floating_ips:
- if fip.get("port_id") or fip.get('tenant_id') != server.tenant_id:
+ if (
+ fip.get("port_id")
+ or fip.get("tenant_id") != server.tenant_id
+ ):
continue
- if isinstance(floating_network['floating_ip'], str):
- if fip.get("floating_network_id") != floating_network['floating_ip']:
+
+ if isinstance(floating_network["floating_ip"], str):
+ if (
+ fip.get("floating_network_id")
+ != floating_network["floating_ip"]
+ ):
continue
+
free_floating_ip = fip["id"]
break
else:
- if isinstance(floating_network['floating_ip'], str) and \
- floating_network['floating_ip'].lower() != "true":
- pool_id = floating_network['floating_ip']
+ if (
+ isinstance(floating_network["floating_ip"], str)
+ and floating_network["floating_ip"].lower() != "true"
+ ):
+ pool_id = floating_network["floating_ip"]
else:
# Find the external network
external_nets = list()
- for net in self.neutron.list_networks()['networks']:
- if net['router:external']:
+
+ for net in self.neutron.list_networks()["networks"]:
+ if net["router:external"]:
external_nets.append(net)
if len(external_nets) == 0:
raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since no external network is present",
- http_code=vimconn.HTTP_Conflict)
+ "Cannot create floating_ip automatically since "
+ "no external network is present",
+ http_code=vimconn.HTTP_Conflict,
+ )
+
if len(external_nets) > 1:
raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since multiple external networks are"
- " present", http_code=vimconn.HTTP_Conflict)
+ "Cannot create floating_ip automatically since "
+ "multiple external networks are present",
+ http_code=vimconn.HTTP_Conflict,
+ )
+
-                                pool_id = external_nets[0].get('id')
-                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
+                                pool_id = external_nets[0].get("id")
+
+                            param = {
+                                "floatingip": {
+                                    "floating_network_id": pool_id,
+                                    "tenant_id": server.tenant_id,
+                                }
+                            }
try:
# self.logger.debug("Creating floating IP")
new_floating_ip = self.neutron.create_floatingip(param)
- free_floating_ip = new_floating_ip['floatingip']['id']
- created_items["floating_ip:" + str(free_floating_ip)] = True
+ free_floating_ip = new_floating_ip["floatingip"]["id"]
+ created_items[
+ "floating_ip:" + str(free_floating_ip)
+ ] = True
except Exception as e:
- raise vimconn.VimConnException(type(e).__name__ + ": Cannot create new floating_ip " +
- str(e), http_code=vimconn.HTTP_Conflict)
+ raise vimconn.VimConnException(
+ type(e).__name__
+ + ": Cannot create new floating_ip "
+ + str(e),
+ http_code=vimconn.HTTP_Conflict,
+ )
try:
# for race condition ensure not already assigned
fip = self.neutron.show_floatingip(free_floating_ip)
- if fip['floatingip']['port_id']:
+
+ if fip["floatingip"]["port_id"]:
continue
+
# the vim_id key contains the neutron.port_id
- self.neutron.update_floatingip(free_floating_ip,
- {"floatingip": {"port_id": floating_network["vim_id"]}})
+ self.neutron.update_floatingip(
+ free_floating_ip,
+ {"floatingip": {"port_id": floating_network["vim_id"]}},
+ )
# for race condition ensure not re-assigned to other VM after 5 seconds
time.sleep(5)
fip = self.neutron.show_floatingip(free_floating_ip)
- if fip['floatingip']['port_id'] != floating_network["vim_id"]:
- self.logger.error("floating_ip {} re-assigned to other port".format(free_floating_ip))
+
+ if (
+ fip["floatingip"]["port_id"]
+ != floating_network["vim_id"]
+ ):
+ self.logger.error(
+ "floating_ip {} re-assigned to other port".format(
+ free_floating_ip
+ )
+ )
continue
- self.logger.debug("Assigned floating_ip {} to VM {}".format(free_floating_ip, server.id))
+
+ self.logger.debug(
+ "Assigned floating_ip {} to VM {}".format(
+ free_floating_ip, server.id
+ )
+ )
assigned = True
except Exception as e:
# openstack need some time after VM creation to assign an IP. So retry if fails
vm_status = self.nova.servers.get(server.id).status
- if vm_status not in ('ACTIVE', 'ERROR'):
+
+ if vm_status not in ("ACTIVE", "ERROR"):
if time.time() - vm_start_time < server_timeout:
time.sleep(5)
continue
elif floating_ip_retries > 0:
floating_ip_retries -= 1
continue
+
raise vimconn.VimConnException(
- "Cannot create floating_ip: {} {}".format(type(e).__name__, e),
- http_code=vimconn.HTTP_Conflict)
+ "Cannot create floating_ip: {} {}".format(
+ type(e).__name__, e
+ ),
+ http_code=vimconn.HTTP_Conflict,
+ )
except Exception as e:
- if not floating_network['exit_on_floating_ip_error']:
+ if not floating_network["exit_on_floating_ip_error"]:
self.logger.error("Cannot create floating_ip. %s", str(e))
continue
+
raise
return server.id, created_items
server_id = None
if server:
server_id = server.id
+
try:
self.delete_vminstance(server_id, created_items)
except Exception as e2:
self._reload_connection()
server = self.nova.servers.find(id=vm_id)
# TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+
return server.to_dict()
- except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound,
- ConnectionError) as e:
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
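The created_items dictionary returned together with the server id above doubles as the rollback ledger that delete_vminstance later walks; a sketch of its shape, with hypothetical identifiers:

# Keys are "<kind>:<vim_id>"; a True value means the item still needs to be cleaned up.
created_items = {
    "port:3f1c9a70": True,         # neutron port created for one interface
    "volume:8d2b5e14": True,       # cinder volume created from disk_list
    "floating_ip:a91f0c22": True,  # floating IP created for a mgmt interface
}

# delete_vminstance splits the keys the same way to decide what to delete.
for key, pending in created_items.items():
    kind, _, vim_id = key.partition(":")
    print(kind, vim_id, pending)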
def get_vminstance_console(self, vm_id, console_type="vnc"):
suffix: extra text, e.g. the http path and query string
"""
self.logger.debug("Getting VM CONSOLE from VIM")
+
try:
self._reload_connection()
server = self.nova.servers.find(id=vm_id)
+
if console_type is None or console_type == "novnc":
console_dict = server.get_vnc_console("novnc")
elif console_type == "xvpvnc":
elif console_type == "spice-html5":
console_dict = server.get_spice_console(console_type)
else:
- raise vimconn.VimConnException("console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request)
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
console_dict1 = console_dict.get("console")
+
if console_dict1:
console_url = console_dict1.get("url")
+
if console_url:
# parse console_url
protocol_index = console_url.find("//")
- suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
- port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
+
if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- return -vimconn.HTTP_Internal_Server_Error, "Unexpected response from VIM"
- console_dict = {"protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index+2:port_index],
- "port": console_url[port_index:suffix_index],
- "suffix": console_url[suffix_index+1:]
- }
+ return (
+ -vimconn.HTTP_Internal_Server_Error,
+ "Unexpected response from VIM",
+ )
+
+ console_dict = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": console_url[port_index:suffix_index],
+ "suffix": console_url[suffix_index + 1 :],
+ }
protocol_index += 2
+
return console_dict
raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException,
- nvExceptions.BadRequest, ConnectionError) as e:
+ except (
+ nvExceptions.NotFound,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.BadRequest,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
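For reference, a worked example of the URL slicing performed above, assuming a hypothetical noVNC console URL:

console_url = "http://controller:6080/vnc_auto.html?token=abc123"

protocol_index = console_url.find("//")  # 5
suffix_index = (
    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
)  # index of the "/" that starts the path
port_index = (
    console_url[protocol_index + 2 : suffix_index].find(":") + protocol_index + 2
)  # index of the ":" before the port

console_dict = {
    "protocol": console_url[0:protocol_index],  # "http:"
    "server": console_url[protocol_index + 2 : port_index],  # "controller"
    "port": console_url[port_index:suffix_index],  # ":6080"
    "suffix": console_url[suffix_index + 1 :],  # "vnc_auto.html?token=abc123"
}
print(console_dict)

Note that in this method the port value keeps the leading colon, whereas the same parsing in action_vminstance slices from port_index + 1 and casts the port to int.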
def delete_vminstance(self, vm_id, created_items=None):
- """Removes a VM instance from VIM. Returns the old identifier
- """
+ """Removes a VM instance from VIM. Returns the old identifier"""
# print "osconnector: Getting VM from VIM"
if created_items is None:
created_items = {}
+
try:
self._reload_connection()
# delete VM ports attached to this networks before the virtual machine
for k, v in created_items.items():
if not v: # skip already deleted
continue
+
try:
k_item, _, k_id = k.partition(":")
if k_item == "port":
self.neutron.delete_port(k_id)
except Exception as e:
- self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+ self.logger.error(
+ "Error deleting port: {}: {}".format(type(e).__name__, e)
+ )
# #commented because detaching the volumes makes the servers.delete not work properly ?!?
# #dettach volumes attached
# server = self.nova.servers.get(vm_id)
- # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached'] #volume['id']
+ # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
# #for volume in volumes_attached_dict:
- # # self.cinder.volumes.detach(volume['id'])
+ # # self.cinder.volumes.detach(volume["id"])
if vm_id:
self.nova.servers.delete(vm_id)
# we ensure in this loop
keep_waiting = True
elapsed_time = 0
+
while keep_waiting and elapsed_time < volume_timeout:
keep_waiting = False
+
for k, v in created_items.items():
if not v: # skip already deleted
continue
+
try:
k_item, _, k_id = k.partition(":")
if k_item == "volume":
- if self.cinder.volumes.get(k_id).status != 'available':
+ if self.cinder.volumes.get(k_id).status != "available":
keep_waiting = True
else:
self.cinder.volumes.delete(k_id)
except Exception as e:
self.logger.error("Error deleting {}: {}".format(k, e))
+
if keep_waiting:
time.sleep(1)
elapsed_time += 1
+
return None
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException,
- ConnectionError) as e:
+ except (
+ nvExceptions.NotFound,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # CREATING (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces:
- - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
- compute_node: #identification of compute node where PF,VF interface is allocated
- pci: #PCI address of the NIC that hosts the PF,VF
- vlan: #physical VLAN used for VF
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # CREATING (on building process), ERROR
+                # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces:
+ - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
"""
vm_dict = {}
- self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
+ self.logger.debug(
+ "refresh_vms status: Getting tenant VM instance information from VIM"
+ )
+
for vm_id in vm_list:
vm = {}
+
try:
vm_vim = self.get_vminstance(vm_id)
- if vm_vim['status'] in vmStatus2manoFormat:
- vm['status'] = vmStatus2manoFormat[vm_vim['status']]
+
+ if vm_vim["status"] in vmStatus2manoFormat:
+ vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
else:
- vm['status'] = "OTHER"
- vm['error_msg'] = "VIM status reported " + vm_vim['status']
+ vm["status"] = "OTHER"
+ vm["error_msg"] = "VIM status reported " + vm_vim["status"]
+
vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
vm_vim.pop("user_data", None)
- vm['vim_info'] = self.serialize(vm_vim)
+ vm["vim_info"] = self.serialize(vm_vim)
vm["interfaces"] = []
- if vm_vim.get('fault'):
- vm['error_msg'] = str(vm_vim['fault'])
+ if vm_vim.get("fault"):
+ vm["error_msg"] = str(vm_vim["fault"])
+
# get interfaces
try:
self._reload_connection()
port_dict = self.neutron.list_ports(device_id=vm_id)
+
for port in port_dict["ports"]:
interface = {}
- interface['vim_info'] = self.serialize(port)
+ interface["vim_info"] = self.serialize(port)
interface["mac_address"] = port.get("mac_address")
interface["vim_net_id"] = port["network_id"]
interface["vim_interface_id"] = port["id"]
# check if OS-EXT-SRV-ATTR:host is there,
# in case of non-admin credentials, it will be missing
- if vm_vim.get('OS-EXT-SRV-ATTR:host'):
- interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
+
+ if vm_vim.get("OS-EXT-SRV-ATTR:host"):
+ interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
+
interface["pci"] = None
# check if binding:profile is there,
# in case of non-admin credentials, it will be missing
- if port.get('binding:profile'):
- if port['binding:profile'].get('pci_slot'):
+ if port.get("binding:profile"):
+ if port["binding:profile"].get("pci_slot"):
# TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
# the slot to 0x00
# TODO: This is just a workaround valid for niantinc. Find a better way to do so
# CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
- pci = port['binding:profile']['pci_slot']
+ pci = port["binding:profile"]["pci_slot"]
# interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
interface["pci"] = pci
+
interface["vlan"] = None
- if port.get('binding:vif_details'):
- interface["vlan"] = port['binding:vif_details'].get('vlan')
+
+ if port.get("binding:vif_details"):
+ interface["vlan"] = port["binding:vif_details"].get("vlan")
+
                        # Get vlan from the network in case it is not present in the port, for old
                        # OpenStack versions and cases where the vlan is needed at PT
if not interface["vlan"]:
# if network is of type vlan and port is of type direct (sr-iov) then set vlan id
network = self.neutron.show_network(port["network_id"])
- if network['network'].get('provider:network_type') == 'vlan':
+
+ if (
+ network["network"].get("provider:network_type")
+ == "vlan"
+ ):
# and port.get("binding:vnic_type") in ("direct", "direct-physical"):
- interface["vlan"] = network['network'].get('provider:segmentation_id')
+ interface["vlan"] = network["network"].get(
+ "provider:segmentation_id"
+ )
+
ips = []
# look for floating ip address
try:
- floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
+ floating_ip_dict = self.neutron.list_floatingips(
+ port_id=port["id"]
+ )
+
if floating_ip_dict.get("floatingips"):
- ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address"))
+ ips.append(
+ floating_ip_dict["floatingips"][0].get(
+ "floating_ip_address"
+ )
+ )
except Exception:
pass
for subnet in port["fixed_ips"]:
ips.append(subnet["ip_address"])
+
interface["ip_address"] = ";".join(ips)
vm["interfaces"].append(interface)
except Exception as e:
- self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e),
- exc_info=True)
+ self.logger.error(
+ "Error getting vm interface information {}: {}".format(
+ type(e).__name__, e
+ ),
+ exc_info=True,
+ )
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting vm status: %s", str(e))
- vm['status'] = "DELETED"
- vm['error_msg'] = str(e)
+ vm["status"] = "DELETED"
+ vm["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting vm status: %s", str(e))
- vm['status'] = "VIM_ERROR"
- vm['error_msg'] = str(e)
+ vm["status"] = "VIM_ERROR"
+ vm["error_msg"] = str(e)
+
vm_dict[vm_id] = vm
+
return vm_dict
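To make the docstring above concrete, a hedged sketch of a single entry of the returned dictionary, with hypothetical values (error_msg is only added when the VIM reports a fault, an unknown status or an exception occurs):

vm_dict = {
    "4e7a1b9c-0d3e-4f55-9a2e-1c2d3e4f5a6b": {  # hypothetical VIM id of the VM
        "status": "ACTIVE",
        "vim_info": "<yaml.safe_dump of the nova server dict>",
        "interfaces": [
            {
                "vim_info": "<yaml.safe_dump of the neutron port>",
                "mac_address": "fa:16:3e:11:22:33",
                "vim_net_id": "hypothetical-network-uuid",
                "vim_interface_id": "hypothetical-port-uuid",
                "ip_address": "10.0.0.4;172.24.4.10",  # fixed and floating IPs joined with ";"
                "compute_node": "compute-0",  # only filled with admin credentials
                "pci": None,
                "vlan": None,
            }
        ],
    }
}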
def action_vminstance(self, vm_id, action_dict, created_items={}):
"""Send and action over a VM instance from VIM
Returns None or the console dict if the action was successfully sent to the VIM"""
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+
try:
self._reload_connection()
server = self.nova.servers.find(id=vm_id)
+
if "start" in action_dict:
if action_dict["start"] == "rebuild":
server.rebuild()
# "imageRef": id_schema,
# "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
elif "rebuild" in action_dict:
- server.rebuild(server.image['id'])
+ server.rebuild(server.image["id"])
elif "reboot" in action_dict:
- server.reboot() # reboot_type='SOFT'
+ server.reboot() # reboot_type="SOFT"
elif "console" in action_dict:
console_type = action_dict["console"]
+
if console_type is None or console_type == "novnc":
console_dict = server.get_vnc_console("novnc")
elif console_type == "xvpvnc":
elif console_type == "spice-html5":
console_dict = server.get_spice_console(console_type)
else:
- raise vimconn.VimConnException("console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request)
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
try:
console_url = console_dict["console"]["url"]
# parse console_url
protocol_index = console_url.find("//")
- suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
- port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
+
if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- raise vimconn.VimConnException("Unexpected response from VIM " + str(console_dict))
- console_dict2 = {"protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index+2: port_index],
- "port": int(console_url[port_index+1: suffix_index]),
- "suffix": console_url[suffix_index+1:]
- }
+ raise vimconn.VimConnException(
+ "Unexpected response from VIM " + str(console_dict)
+ )
+
+ console_dict2 = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": int(console_url[port_index + 1 : suffix_index]),
+ "suffix": console_url[suffix_index + 1 :],
+ }
+
return console_dict2
except Exception:
- raise vimconn.VimConnException("Unexpected response from VIM " + str(console_dict))
+ raise vimconn.VimConnException(
+ "Unexpected response from VIM " + str(console_dict)
+ )
return None
- except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound,
- ConnectionError) as e:
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
# TODO insert exception vimconn.HTTP_Unauthorized
# ###### VIO Specific Changes #########
def _generate_vlanID(self):
"""
- Method to get unused vlanID
+ Method to get unused vlanID
Args:
None
Returns:
# Get used VLAN IDs
usedVlanIDs = []
networks = self.get_network_list()
+
for net in networks:
- if net.get('provider:segmentation_id'):
- usedVlanIDs.append(net.get('provider:segmentation_id'))
+ if net.get("provider:segmentation_id"):
+ usedVlanIDs.append(net.get("provider:segmentation_id"))
+
used_vlanIDs = set(usedVlanIDs)
# find unused VLAN ID
- for vlanID_range in self.config.get('dataplane_net_vlan_range'):
+ for vlanID_range in self.config.get("dataplane_net_vlan_range"):
try:
- start_vlanid, end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+ start_vlanid, end_vlanid = map(
+ int, vlanID_range.replace(" ", "").split("-")
+ )
+
for vlanID in range(start_vlanid, end_vlanid + 1):
if vlanID not in used_vlanIDs:
return vlanID
except Exception as exp:
- raise vimconn.VimConnException("Exception {} occurred while generating VLAN ID.".format(exp))
+ raise vimconn.VimConnException(
+ "Exception {} occurred while generating VLAN ID.".format(exp)
+ )
else:
raise vimconn.VimConnConflictException(
"Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
- self.config.get('dataplane_net_vlan_range')))
+ self.config.get("dataplane_net_vlan_range")
+ )
+ )
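The loop above assumes dataplane_net_vlan_range is configured as a list of "start-end" strings; a standalone sketch of that parsing, with hypothetical ranges and used IDs:

dataplane_net_vlan_range = ["3000-3100", "3300 - 3399"]  # spaces are stripped before parsing
used_vlan_ids = {3000, 3001}

for vlan_range in dataplane_net_vlan_range:
    start_vlanid, end_vlanid = map(int, vlan_range.replace(" ", "").split("-"))
    free = next(
        (vlan for vlan in range(start_vlanid, end_vlanid + 1) if vlan not in used_vlan_ids),
        None,
    )
    if free is not None:
        print(free)  # -> 3002
        break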
def _generate_multisegment_vlanID(self):
"""
- Method to get unused vlanID
- Args:
- None
- Returns:
- vlanID
+ Method to get unused vlanID
+ Args:
+ None
+ Returns:
+ vlanID
"""
# Get used VLAN IDs
usedVlanIDs = []
networks = self.get_network_list()
for net in networks:
- if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
- usedVlanIDs.append(net.get('provider:segmentation_id'))
- elif net.get('segments'):
- for segment in net.get('segments'):
- if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
- usedVlanIDs.append(segment.get('provider:segmentation_id'))
+ if net.get("provider:network_type") == "vlan" and net.get(
+ "provider:segmentation_id"
+ ):
+ usedVlanIDs.append(net.get("provider:segmentation_id"))
+ elif net.get("segments"):
+ for segment in net.get("segments"):
+ if segment.get("provider:network_type") == "vlan" and segment.get(
+ "provider:segmentation_id"
+ ):
+ usedVlanIDs.append(segment.get("provider:segmentation_id"))
+
used_vlanIDs = set(usedVlanIDs)
# find unused VLAN ID
- for vlanID_range in self.config.get('multisegment_vlan_range'):
+ for vlanID_range in self.config.get("multisegment_vlan_range"):
try:
- start_vlanid, end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+ start_vlanid, end_vlanid = map(
+ int, vlanID_range.replace(" ", "").split("-")
+ )
+
for vlanID in range(start_vlanid, end_vlanid + 1):
if vlanID not in used_vlanIDs:
return vlanID
except Exception as exp:
- raise vimconn.VimConnException("Exception {} occurred while generating VLAN ID.".format(exp))
+ raise vimconn.VimConnException(
+ "Exception {} occurred while generating VLAN ID.".format(exp)
+ )
else:
raise vimconn.VimConnConflictException(
"Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
- self.config.get('multisegment_vlan_range')))
+ self.config.get("multisegment_vlan_range")
+ )
+ )
def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
"""
for vlanID_range in input_vlan_range:
vlan_range = vlanID_range.replace(" ", "")
# validate format
- vlanID_pattern = r'(\d)*-(\d)*$'
+ vlanID_pattern = r"(\d)*-(\d)*$"
match_obj = re.match(vlanID_pattern, vlan_range)
if not match_obj:
raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}.You must provide '{}' in format [start_ID - end_ID].".format(
- text_vlan_range, vlanID_range, text_vlan_range))
+                    "Invalid VLAN range for {}: {}. You must provide "
+ "'{}' in format [start_ID - end_ID].".format(
+ text_vlan_range, vlanID_range, text_vlan_range
+ )
+ )
start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
if start_vlanid <= 0:
raise vimconn.VimConnConflictException(
"Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
- "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
+ "networks valid IDs are 1 to 4094 ".format(
+ text_vlan_range, vlanID_range
+ )
+ )
+
if end_vlanid > 4094:
raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}. End VLAN ID can not be greater than 4094. For VLAN "
- "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
+ "Invalid VLAN range for {}: {}. End VLAN ID can not be "
+ "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
+ text_vlan_range, vlanID_range
+ )
+ )
if start_vlanid > end_vlanid:
raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}. You must provide '{}' in format start_ID - end_ID and "
- "start_ID < end_ID ".format(text_vlan_range, vlanID_range, text_vlan_range))
+ "Invalid VLAN range for {}: {}. You must provide '{}'"
+ " in format start_ID - end_ID and start_ID < end_ID ".format(
+ text_vlan_range, vlanID_range, text_vlan_range
+ )
+ )
# NOT USED FUNCTIONS
def new_external_port(self, port_data):
"""Adds a external port to VIM
- Returns the port identifier"""
+ Returns the port identifier"""
# TODO openstack if needed
- return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
+ return (
+ -vimconn.HTTP_Internal_Server_Error,
+ "osconnector.new_external_port() not implemented",
+ )
def connect_port_network(self, port_id, network_id, admin=False):
"""Connects a external port to a network
- Returns status code of the VIM response"""
+ Returns status code of the VIM response"""
# TODO openstack if needed
- return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
+ return (
+ -vimconn.HTTP_Internal_Server_Error,
+ "osconnector.connect_port_network() not implemented",
+ )
def new_user(self, user_name, user_passwd, tenant_id=None):
"""Adds a new user to openstack VIM
- Returns the user identifier"""
+ Returns the user identifier"""
self.logger.debug("osconnector: Adding a new user to VIM")
+
try:
self._reload_connection()
- user = self.keystone.users.create(user_name, password=user_passwd, default_project=tenant_id)
+ user = self.keystone.users.create(
+ user_name, password=user_passwd, default_project=tenant_id
+ )
# self.keystone.tenants.add_user(self.k_creds["username"], #role)
+
return user.id
except ksExceptions.ConnectionError as e:
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
except ksExceptions.ClientException as e: # TODO remove
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+
# TODO insert exception vimconn.HTTP_Unauthorized
# if reaching here is because an exception
self.logger.debug("new_user " + error_text)
+
return error_value, error_text
def delete_user(self, user_id):
"""Delete a user from openstack VIM
- Returns the user identifier"""
+ Returns the user identifier"""
if self.debug:
print("osconnector: Deleting a user from VIM")
+
try:
self._reload_connection()
self.keystone.users.delete(user_id)
+
return 1, user_id
except ksExceptions.ConnectionError as e:
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
except ksExceptions.NotFound as e:
error_value = -vimconn.HTTP_Not_Found
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
except ksExceptions.ClientException as e: # TODO remove
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+
# TODO insert exception vimconn.HTTP_Unauthorized
# if reaching here is because an exception
self.logger.debug("delete_tenant " + error_text)
+
return error_value, error_text
def get_hosts_info(self):
Returns the hosts content"""
if self.debug:
print("osconnector: Getting Host info from VIM")
+
try:
h_list = []
self._reload_connection()
hypervisors = self.nova.hypervisors.list()
+
for hype in hypervisors:
h_list.append(hype.to_dict())
+
return 1, {"hosts": h_list}
except nvExceptions.NotFound as e:
error_value = -vimconn.HTTP_Not_Found
- error_text = (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = str(e) if len(e.args) == 0 else str(e.args[0])
except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+
# TODO insert exception vimconn.HTTP_Unauthorized
# if reaching here is because an exception
self.logger.debug("get_hosts_info " + error_text)
+
return error_value, error_text
def get_hosts(self, vim_tenant):
"""Get the hosts and deployed instances
Returns the hosts content"""
r, hype_dict = self.get_hosts_info()
+
if r < 0:
return r, hype_dict
+
hypervisors = hype_dict["hosts"]
+
try:
servers = self.nova.servers.list()
for hype in hypervisors:
for server in servers:
- if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname'] == hype['hypervisor_hostname']:
- if 'vm' in hype:
- hype['vm'].append(server.id)
+ if (
+ server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
+ == hype["hypervisor_hostname"]
+ ):
+ if "vm" in hype:
+ hype["vm"].append(server.id)
else:
- hype['vm'] = [server.id]
+ hype["vm"] = [server.id]
+
return 1, hype_dict
except nvExceptions.NotFound as e:
error_value = -vimconn.HTTP_Not_Found
- error_text = (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = str(e) if len(e.args) == 0 else str(e.args[0])
except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
error_value = -vimconn.HTTP_Bad_Request
- error_text = type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+
# TODO insert exception vimconn.HTTP_Unauthorized
# if reaching here is because an exception
self.logger.debug("get_hosts " + error_text)
+
return error_value, error_text
def new_classification(self, name, ctype, definition):
- self.logger.debug('Adding a new (Traffic) Classification to VIM, named %s', name)
+ self.logger.debug(
+ "Adding a new (Traffic) Classification to VIM, named %s", name
+ )
+
try:
new_class = None
self._reload_connection()
+
if ctype not in supportedClassificationTypes:
raise vimconn.VimConnNotSupportedException(
- 'OpenStack VIM connector does not support provided Classification Type {}, supported ones are: '
- '{}'.format(ctype, supportedClassificationTypes))
+ "OpenStack VIM connector does not support provided "
+ "Classification Type {}, supported ones are: {}".format(
+ ctype, supportedClassificationTypes
+ )
+ )
+
if not self._validate_classification(ctype, definition):
raise vimconn.VimConnException(
- 'Incorrect Classification definition '
- 'for the type specified.')
- classification_dict = definition
- classification_dict['name'] = name
+ "Incorrect Classification definition for the type specified."
+ )
+ classification_dict = definition
+ classification_dict["name"] = name
new_class = self.neutron.create_sfc_flow_classifier(
- {'flow_classifier': classification_dict})
- return new_class['flow_classifier']['id']
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
- self.logger.error(
- 'Creation of Classification failed.')
+ {"flow_classifier": classification_dict}
+ )
+
+ return new_class["flow_classifier"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self.logger.error("Creation of Classification failed.")
self._format_exception(e)
def get_classification(self, class_id):
self.logger.debug(" Getting Classification %s from VIM", class_id)
filter_dict = {"id": class_id}
class_list = self.get_classification_list(filter_dict)
+
if len(class_list) == 0:
raise vimconn.VimConnNotFoundException(
- "Classification '{}' not found".format(class_id))
+ "Classification '{}' not found".format(class_id)
+ )
elif len(class_list) > 1:
raise vimconn.VimConnConflictException(
- "Found more than one Classification with this criteria")
+ "Found more than one Classification with this criteria"
+ )
+
classification = class_list[0]
+
return classification
def get_classification_list(self, filter_dict={}):
- self.logger.debug("Getting Classifications from VIM filter: '%s'",
- str(filter_dict))
+ self.logger.debug(
+ "Getting Classifications from VIM filter: '%s'", str(filter_dict)
+ )
+
try:
filter_dict_os = filter_dict.copy()
self._reload_connection()
+
if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
classification_dict = self.neutron.list_sfc_flow_classifiers(
- **filter_dict_os)
+ **filter_dict_os
+ )
classification_list = classification_dict["flow_classifiers"]
self.__classification_os2mano(classification_list)
+
return classification_list
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def delete_classification(self, class_id):
self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+
try:
self._reload_connection()
self.neutron.delete_sfc_flow_classifier(class_id)
+
return class_id
- except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
- ksExceptions.ClientException, neExceptions.NeutronException,
- ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
- self.logger.debug("Adding a new Service Function Instance to VIM, named '%s'", name)
+ self.logger.debug(
+ "Adding a new Service Function Instance to VIM, named '%s'", name
+ )
+
try:
new_sfi = None
self._reload_connection()
correlation = None
+
if sfc_encap:
- correlation = 'nsh'
+ correlation = "nsh"
+
if len(ingress_ports) != 1:
raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector can only have "
- "1 ingress port per SFI")
+ "OpenStack VIM connector can only have 1 ingress port per SFI"
+ )
+
if len(egress_ports) != 1:
raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector can only have "
- "1 egress port per SFI")
- sfi_dict = {'name': name,
- 'ingress': ingress_ports[0],
- 'egress': egress_ports[0],
- 'service_function_parameters': {
- 'correlation': correlation}}
- new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
- return new_sfi['port_pair']['id']
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ "OpenStack VIM connector can only have 1 egress port per SFI"
+ )
+
+ sfi_dict = {
+ "name": name,
+ "ingress": ingress_ports[0],
+ "egress": egress_ports[0],
+ "service_function_parameters": {"correlation": correlation},
+ }
+ new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
+
+ return new_sfi["port_pair"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
if new_sfi:
try:
- self.neutron.delete_sfc_port_pair(
- new_sfi['port_pair']['id'])
+ self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
except Exception:
self.logger.error(
- 'Creation of Service Function Instance failed, with '
- 'subsequent deletion failure as well.')
+ "Creation of Service Function Instance failed, with "
+ "subsequent deletion failure as well."
+ )
+
self._format_exception(e)
def get_sfi(self, sfi_id):
- self.logger.debug('Getting Service Function Instance %s from VIM', sfi_id)
+ self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
filter_dict = {"id": sfi_id}
sfi_list = self.get_sfi_list(filter_dict)
+
if len(sfi_list) == 0:
- raise vimconn.VimConnNotFoundException("Service Function Instance '{}' not found".format(sfi_id))
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Instance '{}' not found".format(sfi_id)
+ )
elif len(sfi_list) > 1:
raise vimconn.VimConnConflictException(
- 'Found more than one Service Function Instance '
- 'with this criteria')
+ "Found more than one Service Function Instance with this criteria"
+ )
+
sfi = sfi_list[0]
+
return sfi
def get_sfi_list(self, filter_dict={}):
- self.logger.debug("Getting Service Function Instances from VIM filter: '%s'", str(filter_dict))
+ self.logger.debug(
+ "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
+ )
+
try:
self._reload_connection()
filter_dict_os = filter_dict.copy()
+
if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
sfi_list = sfi_dict["port_pairs"]
self.__sfi_os2mano(sfi_list)
+
return sfi_list
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def delete_sfi(self, sfi_id):
- self.logger.debug("Deleting Service Function Instance '%s' "
- "from VIM", sfi_id)
+ self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+
try:
self._reload_connection()
self.neutron.delete_sfc_port_pair(sfi_id)
+
return sfi_id
- except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
- ksExceptions.ClientException, neExceptions.NeutronException,
- ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def new_sf(self, name, sfis, sfc_encap=True):
self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+
try:
new_sf = None
self._reload_connection()
# correlation = None
# if sfc_encap:
- # correlation = 'nsh'
+ # correlation = "nsh"
+
for instance in sfis:
sfi = self.get_sfi(instance)
- if sfi.get('sfc_encap') != sfc_encap:
+
+ if sfi.get("sfc_encap") != sfc_encap:
raise vimconn.VimConnNotSupportedException(
"OpenStack VIM connector requires all SFIs of the "
- "same SF to share the same SFC Encapsulation")
- sf_dict = {'name': name,
- 'port_pairs': sfis}
- new_sf = self.neutron.create_sfc_port_pair_group({
- 'port_pair_group': sf_dict})
- return new_sf['port_pair_group']['id']
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ "same SF to share the same SFC Encapsulation"
+ )
+
+ sf_dict = {"name": name, "port_pairs": sfis}
+ new_sf = self.neutron.create_sfc_port_pair_group(
+ {"port_pair_group": sf_dict}
+ )
+
+ return new_sf["port_pair_group"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
if new_sf:
try:
self.neutron.delete_sfc_port_pair_group(
- new_sf['port_pair_group']['id'])
+ new_sf["port_pair_group"]["id"]
+ )
except Exception:
self.logger.error(
- 'Creation of Service Function failed, with '
- 'subsequent deletion failure as well.')
+ "Creation of Service Function failed, with "
+ "subsequent deletion failure as well."
+ )
+
self._format_exception(e)
def get_sf(self, sf_id):
self.logger.debug("Getting Service Function %s from VIM", sf_id)
filter_dict = {"id": sf_id}
sf_list = self.get_sf_list(filter_dict)
+
if len(sf_list) == 0:
raise vimconn.VimConnNotFoundException(
- "Service Function '{}' not found".format(sf_id))
+ "Service Function '{}' not found".format(sf_id)
+ )
elif len(sf_list) > 1:
raise vimconn.VimConnConflictException(
- "Found more than one Service Function with this criteria")
+ "Found more than one Service Function with this criteria"
+ )
+
sf = sf_list[0]
+
return sf
def get_sf_list(self, filter_dict={}):
- self.logger.debug("Getting Service Function from VIM filter: '%s'",
- str(filter_dict))
+ self.logger.debug(
+ "Getting Service Function from VIM filter: '%s'", str(filter_dict)
+ )
+
try:
self._reload_connection()
filter_dict_os = filter_dict.copy()
+
if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
sf_list = sf_dict["port_pair_groups"]
self.__sf_os2mano(sf_list)
+
return sf_list
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def delete_sf(self, sf_id):
self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+
try:
self._reload_connection()
self.neutron.delete_sfc_port_pair_group(sf_id)
+
return sf_id
- except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
- ksExceptions.ClientException, neExceptions.NeutronException,
- ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+
try:
new_sfp = None
self._reload_connection()
# In networking-sfc the MPLS encapsulation is legacy
# should be used when no full SFC Encapsulation is intended
- correlation = 'mpls'
+ correlation = "mpls"
+
if sfc_encap:
- correlation = 'nsh'
- sfp_dict = {'name': name,
- 'flow_classifiers': classifications,
- 'port_pair_groups': sfs,
- 'chain_parameters': {'correlation': correlation}}
+ correlation = "nsh"
+
+ sfp_dict = {
+ "name": name,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+
if spi:
- sfp_dict['chain_id'] = spi
- new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
+ sfp_dict["chain_id"] = spi
+
+ new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+
return new_sfp["port_chain"]["id"]
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
if new_sfp:
try:
- self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
+ self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
except Exception:
self.logger.error(
- 'Creation of Service Function Path failed, with '
- 'subsequent deletion failure as well.')
+ "Creation of Service Function Path failed, with "
+ "subsequent deletion failure as well."
+ )
+
self._format_exception(e)
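    # Illustrative sketch (not part of the patch): for a call such as
    # new_sfp("chain-1", [fc_id], [ppg_id], sfc_encap=False, spi=100), the
    # request body built above and sent to networking-sfc would be
    # (identifiers are hypothetical):
    #
    #     {"port_chain": {"name": "chain-1",
    #                     "flow_classifiers": [fc_id],
    #                     "port_pair_groups": [ppg_id],
    #                     "chain_parameters": {"correlation": "mpls"},
    #                     "chain_id": 100}}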
def get_sfp(self, sfp_id):
self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+
filter_dict = {"id": sfp_id}
sfp_list = self.get_sfp_list(filter_dict)
+
if len(sfp_list) == 0:
raise vimconn.VimConnNotFoundException(
- "Service Function Path '{}' not found".format(sfp_id))
+ "Service Function Path '{}' not found".format(sfp_id)
+ )
elif len(sfp_list) > 1:
raise vimconn.VimConnConflictException(
- "Found more than one Service Function Path with this criteria")
+ "Found more than one Service Function Path with this criteria"
+ )
+
sfp = sfp_list[0]
+
return sfp
def get_sfp_list(self, filter_dict={}):
- self.logger.debug("Getting Service Function Paths from VIM filter: '%s'", str(filter_dict))
+ self.logger.debug(
+ "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
+ )
+
try:
self._reload_connection()
filter_dict_os = filter_dict.copy()
+
if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
sfp_list = sfp_dict["port_chains"]
self.__sfp_os2mano(sfp_list)
+
return sfp_list
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
- neExceptions.NeutronException, ConnectionError) as e:
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
self._format_exception(e)
def delete_sfp(self, sfp_id):
self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+
try:
self._reload_connection()
self.neutron.delete_sfc_port_chain(sfp_id)
+
return sfp_id
- except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
- ksExceptions.ClientException, neExceptions.NeutronException,
- ConnectionError) as e:
+        except (
+            neExceptions.ConnectionFailed,
+            neExceptions.NeutronException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
self._format_exception(e)
def refresh_sfps_status(self, sfp_list):
"""Get the status of the service function path
- Params: the list of sfp identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function path
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
sfp_dict = {}
- self.logger.debug("refresh_sfps status: Getting tenant SFP information from VIM")
+ self.logger.debug(
+ "refresh_sfps status: Getting tenant SFP information from VIM"
+ )
+
for sfp_id in sfp_list:
sfp = {}
+
try:
sfp_vim = self.get_sfp(sfp_id)
- if sfp_vim['spi']:
- sfp['status'] = vmStatus2manoFormat['ACTIVE']
- else:
- sfp['status'] = "OTHER"
- sfp['error_msg'] = "VIM status reported " + sfp['status']
- sfp['vim_info'] = self.serialize(sfp_vim)
+ if sfp_vim["spi"]:
+ sfp["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfp["status"] = "OTHER"
+ sfp["error_msg"] = "VIM status reported " + sfp["status"]
- if sfp_vim.get('fault'):
- sfp['error_msg'] = str(sfp_vim['fault'])
+ sfp["vim_info"] = self.serialize(sfp_vim)
+ if sfp_vim.get("fault"):
+ sfp["error_msg"] = str(sfp_vim["fault"])
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting sfp status: %s", str(e))
- sfp['status'] = "DELETED"
- sfp['error_msg'] = str(e)
+ sfp["status"] = "DELETED"
+ sfp["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting sfp status: %s", str(e))
- sfp['status'] = "VIM_ERROR"
- sfp['error_msg'] = str(e)
+ sfp["status"] = "VIM_ERROR"
+ sfp["error_msg"] = str(e)
+
sfp_dict[sfp_id] = sfp
+
return sfp_dict
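    # Illustrative sketch (not part of the patch): for an SFP that is found and
    # carries an 'spi', the dictionary returned above looks like
    # (identifier is hypothetical):
    #
    #     {"5f6d...": {"status": "ACTIVE", "vim_info": "<yaml dump of the port chain>"}}
    #
    # while a missing SFP yields {"status": "DELETED", "error_msg": "<exception text>"}.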
def refresh_sfis_status(self, sfi_list):
"""Get the status of the service function instances
- Params: the list of sfi identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function instance
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
sfi_dict = {}
- self.logger.debug("refresh_sfis status: Getting tenant sfi information from VIM")
+ self.logger.debug(
+ "refresh_sfis status: Getting tenant sfi information from VIM"
+ )
+
for sfi_id in sfi_list:
sfi = {}
+
try:
sfi_vim = self.get_sfi(sfi_id)
+
if sfi_vim:
- sfi['status'] = vmStatus2manoFormat['ACTIVE']
+ sfi["status"] = vmStatus2manoFormat["ACTIVE"]
else:
- sfi['status'] = "OTHER"
- sfi['error_msg'] = "VIM status reported " + sfi['status']
-
- sfi['vim_info'] = self.serialize(sfi_vim)
+ sfi["status"] = "OTHER"
+ sfi["error_msg"] = "VIM status reported " + sfi["status"]
- if sfi_vim.get('fault'):
- sfi['error_msg'] = str(sfi_vim['fault'])
+ sfi["vim_info"] = self.serialize(sfi_vim)
+ if sfi_vim.get("fault"):
+ sfi["error_msg"] = str(sfi_vim["fault"])
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting sfi status: %s", str(e))
- sfi['status'] = "DELETED"
- sfi['error_msg'] = str(e)
+ sfi["status"] = "DELETED"
+ sfi["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting sfi status: %s", str(e))
- sfi['status'] = "VIM_ERROR"
- sfi['error_msg'] = str(e)
+ sfi["status"] = "VIM_ERROR"
+ sfi["error_msg"] = str(e)
+
sfi_dict[sfi_id] = sfi
+
return sfi_dict
def refresh_sfs_status(self, sf_list):
"""Get the status of the service functions
- Params: the list of sf identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
sf_dict = {}
self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
for sf_id in sf_list:
sf = {}
+
try:
sf_vim = self.get_sf(sf_id)
+
if sf_vim:
- sf['status'] = vmStatus2manoFormat['ACTIVE']
+ sf["status"] = vmStatus2manoFormat["ACTIVE"]
else:
- sf['status'] = "OTHER"
- sf['error_msg'] = "VIM status reported " + sf_vim['status']
-
- sf['vim_info'] = self.serialize(sf_vim)
+ sf["status"] = "OTHER"
+ sf["error_msg"] = "VIM status reported " + sf_vim["status"]
- if sf_vim.get('fault'):
- sf['error_msg'] = str(sf_vim['fault'])
+ sf["vim_info"] = self.serialize(sf_vim)
+ if sf_vim.get("fault"):
+ sf["error_msg"] = str(sf_vim["fault"])
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting sf status: %s", str(e))
- sf['status'] = "DELETED"
- sf['error_msg'] = str(e)
+ sf["status"] = "DELETED"
+ sf["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting sf status: %s", str(e))
- sf['status'] = "VIM_ERROR"
- sf['error_msg'] = str(e)
+ sf["status"] = "VIM_ERROR"
+ sf["error_msg"] = str(e)
+
sf_dict[sf_id] = sf
+
return sf_dict
def refresh_classifications_status(self, classification_list):
"""Get the status of the classifications
- Params: the list of classification identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this classifier
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
classification_dict = {}
- self.logger.debug("refresh_classifications status: Getting tenant classification information from VIM")
+ self.logger.debug(
+ "refresh_classifications status: Getting tenant classification information from VIM"
+ )
+
for classification_id in classification_list:
classification = {}
+
try:
classification_vim = self.get_classification(classification_id)
+
if classification_vim:
- classification['status'] = vmStatus2manoFormat['ACTIVE']
+ classification["status"] = vmStatus2manoFormat["ACTIVE"]
else:
- classification['status'] = "OTHER"
- classification['error_msg'] = "VIM status reported " + classification['status']
-
- classification['vim_info'] = self.serialize(classification_vim)
+ classification["status"] = "OTHER"
+ classification["error_msg"] = (
+ "VIM status reported " + classification["status"]
+ )
- if classification_vim.get('fault'):
- classification['error_msg'] = str(classification_vim['fault'])
+ classification["vim_info"] = self.serialize(classification_vim)
+ if classification_vim.get("fault"):
+ classification["error_msg"] = str(classification_vim["fault"])
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting classification status: %s", str(e))
- classification['status'] = "DELETED"
- classification['error_msg'] = str(e)
+ classification["status"] = "DELETED"
+ classification["error_msg"] = str(e)
except vimconn.VimConnException as e:
self.logger.error("Exception getting classification status: %s", str(e))
- classification['status'] = "VIM_ERROR"
- classification['error_msg'] = str(e)
+ classification["status"] = "VIM_ERROR"
+ classification["error_msg"] = str(e)
+
classification_dict[classification_id] = classification
+
return classification_dict
setup(
name=_name,
- description='OSM ro vim plugin for openstack',
+ description="OSM ro vim plugin for openstack",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
- # python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ # python_requires=">3.5.0",
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
- "python-openstackclient", "python-neutronclient",
- "requests", "netaddr", "PyYAML",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "python-openstackclient",
+ "python-neutronclient",
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
# TODO py3 "networking-l2gw"
# "python-novaclient", "python-keystoneclient", "python-glanceclient", "python-cinderclient",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_openstack = osm_rovim_openstack.vimconn_openstack:vimconnector'],
+ "osm_rovim.plugins": [
+ "rovim_openstack = osm_rovim_openstack.vimconn_openstack:vimconnector"
+ ],
},
)
basepython = python3
deps = flake8
commands = flake8 osm_rovim_openstack --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
# contact with: nfvlabs@tid.es
##
-'''
+"""
vimconnector implements all the methods to interact with openvim using the openvim API.
-'''
-__author__="Alfonso Tierno, Gerardo Garcia"
-__date__ ="$26-aug-2014 11:09:29$"
+"""
+__author__ = "Alfonso Tierno, Gerardo Garcia"
+__date__ = "$26-aug-2014 11:09:29$"
from osm_ro_plugin import vimconn
import requests
import yaml
import logging
import math
-from osm_ro.openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
- vlan1000_schema, integer0_schema
+from osm_ro.openmano_schemas import (
+ id_schema,
+ name_schema,
+ nameshort_schema,
+ description_schema,
+ vlan1000_schema,
+ integer0_schema,
+)
from jsonschema import validate as js_v, exceptions as js_e
from urllib.parse import quote
-'''contain the openvim virtual machine status to openmano status'''
-vmStatus2manoFormat={'ACTIVE':'ACTIVE',
- 'PAUSED':'PAUSED',
- 'SUSPENDED': 'SUSPENDED',
- 'INACTIVE':'INACTIVE',
- 'CREATING':'BUILD',
- 'ERROR':'ERROR','DELETED':'DELETED'
- }
-netStatus2manoFormat={'ACTIVE':'ACTIVE','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED', 'DOWN':'DOWN'
- }
+"""contain the openvim virtual machine status to openmano status"""
+vmStatus2manoFormat = {
+ "ACTIVE": "ACTIVE",
+ "PAUSED": "PAUSED",
+ "SUSPENDED": "SUSPENDED",
+ "INACTIVE": "INACTIVE",
+ "CREATING": "BUILD",
+ "ERROR": "ERROR",
+ "DELETED": "DELETED",
+}
+netStatus2manoFormat = {
+ "ACTIVE": "ACTIVE",
+ "INACTIVE": "INACTIVE",
+ "BUILD": "BUILD",
+ "ERROR": "ERROR",
+ "DELETED": "DELETED",
+ "DOWN": "DOWN",
+}
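# Illustrative sketch (not part of the patch): the maps above translate openvim
# states into the openmano/RO vocabulary, e.g.:
#
#     vmStatus2manoFormat["CREATING"]   # -> "BUILD"
#     netStatus2manoFormat.get("DOWN")  # -> "DOWN"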
host_schema = {
- "type":"object",
- "properties":{
+ "type": "object",
+ "properties": {
"id": id_schema,
"name": name_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
image_schema = {
- "type":"object",
- "properties":{
+ "type": "object",
+ "properties": {
"id": id_schema,
"name": name_schema,
},
- "required": ["id","name"]
+ "required": ["id", "name"],
}
server_schema = {
- "type":"object",
- "properties":{
- "id":id_schema,
+ "type": "object",
+ "properties": {
+ "id": id_schema,
"name": name_schema,
},
- "required": ["id","name"]
+ "required": ["id", "name"],
}
new_host_response_schema = {
- "title":"host response information schema",
+ "title": "host response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "host": host_schema
- },
+ "type": "object",
+ "properties": {"host": host_schema},
"required": ["host"],
- "additionalProperties": False
+ "additionalProperties": False,
}
get_images_response_schema = {
- "title":"openvim images response information schema",
+ "title": "openvim images response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "images":{
- "type":"array",
+ "type": "object",
+ "properties": {
+ "images": {
+ "type": "array",
"items": image_schema,
}
},
"required": ["images"],
- "additionalProperties": False
+ "additionalProperties": False,
}
get_hosts_response_schema = {
- "title":"openvim hosts response information schema",
+ "title": "openvim hosts response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "hosts":{
- "type":"array",
+ "type": "object",
+ "properties": {
+ "hosts": {
+ "type": "array",
"items": host_schema,
}
},
"required": ["hosts"],
- "additionalProperties": False
+ "additionalProperties": False,
}
-get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet
+get_host_detail_response_schema = (
+ new_host_response_schema # TODO: Content is not parsed yet
+)
get_server_response_schema = {
- "title":"openvim server response information schema",
+ "title": "openvim server response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "servers":{
- "type":"array",
+ "type": "object",
+ "properties": {
+ "servers": {
+ "type": "array",
"items": server_schema,
}
},
"required": ["servers"],
- "additionalProperties": False
+ "additionalProperties": False,
}
new_tenant_response_schema = {
- "title":"tenant response information schema",
+ "title": "tenant response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "tenant":{
- "type":"object",
- "properties":{
+ "type": "object",
+ "properties": {
+ "tenant": {
+ "type": "object",
+ "properties": {
"id": id_schema,
"name": nameshort_schema,
- "description":description_schema,
- "enabled":{"type" : "boolean"}
+ "description": description_schema,
+ "enabled": {"type": "boolean"},
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["tenant"],
- "additionalProperties": False
+ "additionalProperties": False,
}
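# Illustrative sketch (not part of the patch): a response accepted by
# new_tenant_response_schema would look like the following, assuming id_schema
# matches a UUID string and nameshort_schema a short name (values hypothetical):
#
#     js_v({"tenant": {"id": "f7d9...", "name": "osm", "enabled": True}},
#          new_tenant_response_schema)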
new_network_response_schema = {
- "title":"network response information schema",
+ "title": "network response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "network":{
- "type":"object",
- "properties":{
- "id":id_schema,
- "name":name_schema,
- "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
- "shared":{"type":"boolean"},
- "tenant_id":id_schema,
- "admin_state_up":{"type":"boolean"},
- "vlan":vlan1000_schema
+ "type": "object",
+ "properties": {
+ "network": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
+ "name": name_schema,
+ "type": {
+ "type": "string",
+ "enum": ["bridge_man", "bridge_data", "data", "ptp"],
+ },
+ "shared": {"type": "boolean"},
+ "tenant_id": id_schema,
+ "admin_state_up": {"type": "boolean"},
+ "vlan": vlan1000_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["network"],
- "additionalProperties": False
+ "additionalProperties": False,
}
new_port_response_schema = {
- "title":"port response information schema",
+ "title": "port response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "port":{
- "type":"object",
- "properties":{
- "id":id_schema,
+ "type": "object",
+ "properties": {
+ "port": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["port"],
- "additionalProperties": False
+ "additionalProperties": False,
}
get_flavor_response_schema = {
- "title":"openvim flavors response information schema",
+ "title": "openvim flavors response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "flavor":{
- "type":"object",
- "properties":{
- "id": id_schema,
+ "type": "object",
+ "properties": {
+ "flavor": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
"name": name_schema,
- "extended": {"type":"object"},
+ "extended": {"type": "object"},
},
"required": ["id", "name"],
}
},
"required": ["flavor"],
- "additionalProperties": False
+ "additionalProperties": False,
}
new_flavor_response_schema = {
- "title":"flavor response information schema",
+ "title": "flavor response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "flavor":{
- "type":"object",
- "properties":{
- "id":id_schema,
+ "type": "object",
+ "properties": {
+ "flavor": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["flavor"],
- "additionalProperties": False
+ "additionalProperties": False,
}
get_image_response_schema = {
- "title":"openvim images response information schema",
+ "title": "openvim images response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "image":{
- "type":"object",
- "properties":{
- "id": id_schema,
+ "type": "object",
+ "properties": {
+ "image": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
"name": name_schema,
},
"required": ["id", "name"],
}
},
"required": ["flavor"],
- "additionalProperties": False
+ "additionalProperties": False,
}
new_image_response_schema = {
- "title":"image response information schema",
+ "title": "image response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "image":{
- "type":"object",
- "properties":{
- "id":id_schema,
+ "type": "object",
+ "properties": {
+ "image": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["image"],
- "additionalProperties": False
+ "additionalProperties": False,
}
new_vminstance_response_schema = {
- "title":"server response information schema",
+ "title": "server response information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "server":{
- "type":"object",
- "properties":{
- "id":id_schema,
+ "type": "object",
+ "properties": {
+ "server": {
+ "type": "object",
+ "properties": {
+ "id": id_schema,
},
- "required": ["id"]
+ "required": ["id"],
}
},
"required": ["server"],
- "additionalProperties": False
+ "additionalProperties": False,
}
get_processor_rankings_response_schema = {
- "title":"processor rankings information schema",
+ "title": "processor rankings information schema",
"$schema": "http://json-schema.org/draft-04/schema#",
- "type":"object",
- "properties":{
- "rankings":{
- "type":"array",
- "items":{
- "type":"object",
- "properties":{
- "model": description_schema,
- "value": integer0_schema
- },
+ "type": "object",
+ "properties": {
+ "rankings": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {"model": description_schema, "value": integer0_schema},
"additionalProperties": False,
- "required": ["model","value"]
- }
+ "required": ["model", "value"],
+ },
},
"additionalProperties": False,
- "required": ["rankings"]
- }
+ "required": ["rankings"],
+ },
}
class vimconnector(vimconn.VimConnector):
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
- log_level="DEBUG", config={}, persistent_info={}):
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level="DEBUG",
+ config={},
+ persistent_info={},
+ ):
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ )
self.tenant = None
- self.headers_req = {'content-type': 'application/json'}
- self.logger = logging.getLogger('ro.vim.openvim')
+ self.headers_req = {"content-type": "application/json"}
+ self.logger = logging.getLogger("ro.vim.openvim")
self.persistent_info = persistent_info
if tenant_id:
self.tenant = tenant_id
- def __setitem__(self,index, value):
- '''Set individuals parameters
+ def __setitem__(self, index, value):
+ """Set individuals parameters
Throw TypeError, KeyError
- '''
- if index=='tenant_id':
+ """
+ if index == "tenant_id":
self.tenant = value
- elif index=='tenant_name':
+ elif index == "tenant_name":
self.tenant = None
- vimconn.VimConnector.__setitem__(self,index, value)
+ vimconn.VimConnector.__setitem__(self, index, value)
def _get_my_tenant(self):
- '''Obtain uuid of my tenant from name
- '''
+ """Obtain uuid of my tenant from name"""
if self.tenant:
return self.tenant
- url = self.url+'/tenants?name='+ quote(self.tenant_name)
+ url = self.url + "/tenants?name=" + quote(self.tenant_name)
self.logger.info("Getting VIM tenant_id GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
try:
tenant_list = vim_response.json()["tenants"]
if len(tenant_list) == 0:
- raise vimconn.VimConnNotFoundException("No tenant found for name '{}'".format(self.tenant_name))
+ raise vimconn.VimConnNotFoundException(
+ "No tenant found for name '{}'".format(self.tenant_name)
+ )
elif len(tenant_list) > 1:
- raise vimconn.VimConnConflictException ("More that one tenant found for name '{}'".format(self.tenant_name))
+ raise vimconn.VimConnConflictException(
+ "More that one tenant found for name '{}'".format(self.tenant_name)
+ )
self.tenant = tenant_list[0]["id"]
return self.tenant
except Exception as e:
- raise vimconn.VimConnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
+ raise vimconn.VimConnUnexpectedResponse(
+ "Get VIM tenant {} '{}'".format(type(e).__name__, str(e))
+ )
- def _format_jsonerror(self,http_response):
- #DEPRECATED, to delete in the future
+ def _format_jsonerror(self, http_response):
+ # DEPRECATED, to delete in the future
try:
data = http_response.json()
return data["error"]["description"]
- except:
+ except Exception:
return http_response.text
def _format_in(self, http_response, schema):
- #DEPRECATED, to delete in the future
+ # DEPRECATED, to delete in the future
try:
client_data = http_response.json()
js_v(client_data, schema)
- #print "Input data: ", str(client_data)
+ # print "Input data: ", str(client_data)
return True, client_data
except js_e.ValidationError as exc:
- print("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
- return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
-
+ print(
+ "validate_in error, jsonschema exception ", exc.message, "at", exc.path
+ )
+ return False, (
+ "validate_in error, jsonschema exception ",
+ exc.message,
+ "at",
+ exc.path,
+ )
+
def _remove_extra_items(self, data, schema):
- deleted=[]
+ deleted = []
if type(data) is tuple or type(data) is list:
for d in data:
- a= self._remove_extra_items(d, schema['items'])
- if a is not None: deleted.append(a)
+ a = self._remove_extra_items(d, schema["items"])
+ if a is not None:
+ deleted.append(a)
elif type(data) is dict:
to_delete = []
for k in data.keys():
- if 'properties' not in schema or k not in schema['properties'].keys():
+ if "properties" not in schema or k not in schema["properties"].keys():
to_delete.append(k)
deleted.append(k)
else:
- a = self._remove_extra_items(data[k], schema['properties'][k])
- if a is not None: deleted.append({k:a})
+ a = self._remove_extra_items(data[k], schema["properties"][k])
+ if a is not None:
+ deleted.append({k: a})
for k in to_delete:
del data[k]
- if len(deleted) == 0: return None
- elif len(deleted) == 1: return deleted[0]
- else: return deleted
-
+ if len(deleted) == 0:
+ return None
+ elif len(deleted) == 1:
+ return deleted[0]
+ else:
+ return deleted
+
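    # Illustrative sketch (not part of the patch): given a schema whose
    # "properties" only declare "id", _remove_extra_items prunes anything else
    # in place and reports what was dropped, e.g.:
    #
    #     data = {"id": "1", "extra": True}
    #     self._remove_extra_items(data, {"properties": {"id": {}}})
    #     # data is now {"id": "1"} and the call returned "extra"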
def _format_request_exception(self, request_exception):
- '''Transform a request exception into a vimconn exception'''
+ """Transform a request exception into a vimconn exception"""
if isinstance(request_exception, js_e.ValidationError):
- raise vimconn.VimConnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))
+ raise vimconn.VimConnUnexpectedResponse(
+ "jsonschema exception '{}' at '{}'".format(
+ request_exception.message, request_exception.path
+ )
+ )
elif isinstance(request_exception, requests.exceptions.HTTPError):
- raise vimconn.VimConnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
+ raise vimconn.VimConnUnexpectedResponse(
+ type(request_exception).__name__ + ": " + str(request_exception)
+ )
else:
- raise vimconn.VimConnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
+ raise vimconn.VimConnConnectionException(
+ type(request_exception).__name__ + ": " + str(request_exception)
+ )
def _check_http_request_response(self, request_response):
- '''Raise a vimconn exception if the response is not Ok'''
- if request_response.status_code >= 200 and request_response.status_code < 300:
+ """Raise a vimconn exception if the response is not Ok"""
+ if request_response.status_code >= 200 and request_response.status_code < 300:
return
if request_response.status_code == vimconn.HTTP_Unauthorized:
raise vimconn.VimConnAuthException(request_response.text)
raise vimconn.VimConnNotFoundException(request_response.text)
elif request_response.status_code == vimconn.HTTP_Conflict:
raise vimconn.VimConnConflictException(request_response.text)
- else:
- raise vimconn.VimConnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
+ else:
+ raise vimconn.VimConnUnexpectedResponse(
+ "VIM HTTP_response {}, {}".format(
+ request_response.status_code, str(request_response.text)
+ )
+ )
- def new_tenant(self,tenant_name,tenant_description):
- '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
- #print "VIMConnector: Adding a new tenant to VIM"
- payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
+ def new_tenant(self, tenant_name, tenant_description):
+ """Adds a new tenant to VIM with this name and description, returns the tenant identifier"""
+ # print "VIMConnector: Adding a new tenant to VIM"
+ payload_dict = {
+ "tenant": {
+ "name": tenant_name,
+ "description": tenant_description,
+ "enabled": True,
+ }
+ }
payload_req = json.dumps(payload_dict)
try:
- url = self.url_admin+'/tenants'
+ url = self.url_admin + "/tenants"
self.logger.info("Adding a new tenant %s", url)
- vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_tenant_response_schema)
- #r = self._remove_extra_items(response, new_tenant_response_schema)
- #if r is not None:
+ # r = self._remove_extra_items(response, new_tenant_response_schema)
+ # if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
- tenant_id = response['tenant']['id']
+ tenant_id = response["tenant"]["id"]
return tenant_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
- def delete_tenant(self,tenant_id):
- '''Delete a tenant from VIM. Returns the old tenant identifier'''
+ def delete_tenant(self, tenant_id):
+ """Delete a tenant from VIM. Returns the old tenant identifier"""
try:
- url = self.url_admin+'/tenants/'+tenant_id
+ url = self.url_admin + "/tenants/" + tenant_id
self.logger.info("Delete a tenant DELETE %s", url)
- vim_response = requests.delete(url, headers = self.headers_req)
+ vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
return tenant_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_tenant_list(self, filter_dict={}):
- '''Obtain tenants of VIM
+ """Obtain tenants of VIM
filter_dict can contain the following keys:
name: filter by tenant name
id: filter by tenant uuid/id
<other VIM specific>
Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
- '''
- filterquery=[]
- filterquery_text=''
- for k,v in filter_dict.items():
- filterquery.append(str(k)+'='+str(v))
- if len(filterquery)>0:
- filterquery_text='?'+ '&'.join(filterquery)
+ """
+ filterquery = []
+ filterquery_text = ""
+ for k, v in filter_dict.items():
+ filterquery.append(str(k) + "=" + str(v))
+ if len(filterquery) > 0:
+ filterquery_text = "?" + "&".join(filterquery)
try:
- url = self.url+'/tenants'+filterquery_text
+ url = self.url + "/tenants" + filterquery_text
self.logger.info("get_tenant_list GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
return vim_response.json()["tenants"]
except requests.exceptions.RequestException as e:
self._format_request_exception(e)
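    # Illustrative sketch (not part of the patch): the filter_dict is flattened
    # into a query string, so get_tenant_list({"name": "osm", "enabled": True})
    # issues GET <url>/tenants?name=osm&enabled=True against openvim.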
- def new_network(self,net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): #, **vim_specific):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ): # , **vim_specific):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
vlan = provider_network_profile.get("segmentation-id")
created_items = {}
self._get_my_tenant()
- if net_type=="bridge":
- net_type="bridge_data"
- payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
+ if net_type == "bridge":
+ net_type = "bridge_data"
+ payload_req = {
+ "name": net_name,
+ "type": net_type,
+ "tenant_id": self.tenant,
+ "shared": shared,
+ }
if vlan:
payload_req["provider:vlan"] = vlan
# payload_req.update(vim_specific)
- url = self.url+'/networks'
- self.logger.info("Adding a new network POST: %s DATA: %s", url, str(payload_req))
- vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
+ url = self.url + "/networks"
+ self.logger.info(
+ "Adding a new network POST: %s DATA: %s", url, str(payload_req)
+ )
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=json.dumps({"network": payload_req})
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_network_response_schema)
- #r = self._remove_extra_items(response, new_network_response_schema)
- #if r is not None:
+ # r = self._remove_extra_items(response, new_network_response_schema)
+ # if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
- network_id = response['network']['id']
+ network_id = response["network"]["id"]
return network_id, created_items
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def get_network_list(self, filter_dict={}):
- '''Obtain tenant networks of VIM
+ """Obtain tenant networks of VIM
Filter_dict can be:
name: network name
id: network uuid
admin_state_up: boolean
status: 'ACTIVE'
Returns the network list of dictionaries
- '''
+ """
try:
- if 'tenant_id' not in filter_dict:
+ if "tenant_id" not in filter_dict:
filter_dict["tenant_id"] = self._get_my_tenant()
elif not filter_dict["tenant_id"]:
del filter_dict["tenant_id"]
- filterquery=[]
- filterquery_text=''
- for k,v in filter_dict.items():
- filterquery.append(str(k)+'='+str(v))
- if len(filterquery)>0:
- filterquery_text='?'+ '&'.join(filterquery)
- url = self.url+'/networks'+filterquery_text
+ filterquery = []
+ filterquery_text = ""
+ for k, v in filter_dict.items():
+ filterquery.append(str(k) + "=" + str(v))
+ if len(filterquery) > 0:
+ filterquery_text = "?" + "&".join(filterquery)
+ url = self.url + "/networks" + filterquery_text
self.logger.info("Getting network list GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
- return response['networks']
+ return response["networks"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_network(self, net_id):
- '''Obtain network details of network id'''
+ """Obtain network details of network id"""
try:
- url = self.url+'/networks/'+net_id
+ url = self.url + "/networks/" + net_id
self.logger.info("Getting network GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
- return response['network']
+ return response["network"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def delete_network(self, net_id, created_items=None):
"""
Removes a tenant network from VIM and its associated elements
"""
try:
self._get_my_tenant()
- url = self.url+'/networks/'+net_id
+ url = self.url + "/networks/" + net_id
self.logger.info("Deleting VIM network DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
- #self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # self.logger.debug(vim_response.text)
+ # print json.dumps(vim_response.json(), indent=4)
return net_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_flavor(self, flavor_id):
- '''Obtain flavor details from the VIM'''
+ """Obtain flavor details from the VIM"""
try:
self._get_my_tenant()
- url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+ url = self.url + "/" + self.tenant + "/flavors/" + flavor_id
self.logger.info("Getting flavor GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_flavor_response_schema)
r = self._remove_extra_items(response, get_flavor_response_schema)
- if r is not None:
+ if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
- return response['flavor']
+ return response["flavor"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def new_flavor(self, flavor_data):
- '''Adds a tenant flavor to VIM'''
- '''Returns the flavor identifier'''
+ """Adds a tenant flavor to VIM"""
+ """Returns the flavor identifier"""
try:
new_flavor_dict = flavor_data.copy()
- for device in new_flavor_dict.get('extended', {}).get('devices', ()):
- if 'image name' in device:
- del device['image name']
- if 'name' in device:
- del device['name']
- numas = new_flavor_dict.get('extended', {}).get('numas')
+ for device in new_flavor_dict.get("extended", {}).get("devices", ()):
+ if "image name" in device:
+ del device["image name"]
+ if "name" in device:
+ del device["name"]
+ numas = new_flavor_dict.get("extended", {}).get("numas")
if numas:
numa = numas[0]
# translate memory, cpus to EPA
- if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
+ if (
+ "cores" not in numa
+ and "threads" not in numa
+ and "paired-threads" not in numa
+ ):
numa["paired-threads"] = new_flavor_dict["vcpus"]
if "memory" not in numa:
numa["memory"] = int(math.ceil(new_flavor_dict["ram"] / 1024.0))
new_flavor_dict["name"] = flavor_data["name"][:64]
self._get_my_tenant()
- payload_req = json.dumps({'flavor': new_flavor_dict})
- url = self.url+'/'+self.tenant+'/flavors'
+ payload_req = json.dumps({"flavor": new_flavor_dict})
+ url = self.url + "/" + self.tenant + "/flavors"
self.logger.info("Adding a new VIM flavor POST %s", url)
- vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_flavor_response_schema)
r = self._remove_extra_items(response, new_flavor_response_schema)
- if r is not None:
+ if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
- flavor_id = response['flavor']['id']
+ flavor_id = response["flavor"]["id"]
return flavor_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
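    # Illustrative sketch (not part of the patch): with flavor_data such as
    # {"name": "f1", "vcpus": 2, "ram": 2048, "extended": {"numas": [{}]}},
    # the EPA translation above fills the first numa as
    # {"paired-threads": 2, "memory": 2} before POSTing the flavor to openvim.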
- def delete_flavor(self,flavor_id):
- '''Deletes a tenant flavor from VIM'''
- '''Returns the old flavor_id'''
+ def delete_flavor(self, flavor_id):
+ """Deletes a tenant flavor from VIM"""
+ """Returns the old flavor_id"""
try:
self._get_my_tenant()
- url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+ url = self.url + "/" + self.tenant + "/flavors/" + flavor_id
self.logger.info("Deleting VIM flavor DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
- #self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # self.logger.debug(vim_response.text)
+ # print json.dumps(vim_response.json(), indent=4)
return flavor_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image(self, image_id):
- '''Obtain image details from the VIM'''
+ """Obtain image details from the VIM"""
try:
self._get_my_tenant()
- url = self.url+'/'+self.tenant+'/images/'+image_id
+ url = self.url + "/" + self.tenant + "/images/" + image_id
self.logger.info("Getting image GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_image_response_schema)
r = self._remove_extra_items(response, get_image_response_schema)
- if r is not None:
+ if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
- return response['image']
+ return response["image"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
- def new_image(self,image_dict):
- ''' Adds a tenant image to VIM, returns image_id'''
+ def new_image(self, image_dict):
+ """ Adds a tenant image to VIM, returns image_id"""
try:
self._get_my_tenant()
- new_image_dict={'name': image_dict['name'][:64]}
- if image_dict.get('description'):
- new_image_dict['description'] = image_dict['description']
- if image_dict.get('metadata'):
- new_image_dict['metadata'] = yaml.load(image_dict['metadata'], Loader=yaml.SafeLoader)
- if image_dict.get('location'):
- new_image_dict['path'] = image_dict['location']
- payload_req = json.dumps({"image":new_image_dict})
- url=self.url + '/' + self.tenant + '/images'
+ new_image_dict = {"name": image_dict["name"][:64]}
+ if image_dict.get("description"):
+ new_image_dict["description"] = image_dict["description"]
+ if image_dict.get("metadata"):
+ new_image_dict["metadata"] = yaml.load(
+ image_dict["metadata"], Loader=yaml.SafeLoader
+ )
+ if image_dict.get("location"):
+ new_image_dict["path"] = image_dict["location"]
+ payload_req = json.dumps({"image": new_image_dict})
+ url = self.url + "/" + self.tenant + "/images"
self.logger.info("Adding a new VIM image POST %s", url)
- vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_image_response_schema)
r = self._remove_extra_items(response, new_image_response_schema)
- if r is not None:
+ if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
- image_id = response['image']['id']
+ image_id = response["image"]["id"]
return image_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def delete_image(self, image_id):
- '''Deletes a tenant image from VIM'''
- '''Returns the deleted image_id'''
+ """Deletes a tenant image from VIM"""
+ """Returns the deleted image_id"""
try:
self._get_my_tenant()
- url = self.url + '/'+ self.tenant +'/images/'+image_id
+ url = self.url + "/" + self.tenant + "/images/" + image_id
self.logger.info("Deleting VIM image DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
- #self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # self.logger.debug(vim_response.text)
+ # print json.dumps(vim_response.json(), indent=4)
return image_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image_id_from_path(self, path):
- '''Get the image id from image path in the VIM database. Returns the image_id'''
+ """Get the image id from image path in the VIM database. Returns the image_id"""
try:
self._get_my_tenant()
- url=self.url + '/' + self.tenant + '/images?path='+quote(path)
+ url = self.url + "/" + self.tenant + "/images?path=" + quote(path)
self.logger.info("Getting images GET %s", url)
vim_response = requests.get(url)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_images_response_schema)
- #r = self._remove_extra_items(response, get_images_response_schema)
- #if r is not None:
+ # r = self._remove_extra_items(response, get_images_response_schema)
+ # if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
- if len(response['images'])==0:
- raise vimconn.VimConnNotFoundException("Image not found at VIM with path '{}'".format(path))
- elif len(response['images'])>1:
- raise vimconn.VimConnConflictException("More than one image found at VIM with path '{}'".format(path))
- return response['images'][0]['id']
+ if len(response["images"]) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Image not found at VIM with path '{}'".format(path)
+ )
+ elif len(response["images"]) > 1:
+ raise vimconn.VimConnConflictException(
+ "More than one image found at VIM with path '{}'".format(path)
+ )
+ return response["images"][0]["id"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image_list(self, filter_dict={}):
- '''Obtain tenant images from VIM
+ """Obtain tenant images from VIM
Filter_dict can be:
name: image name
id: image uuid
Returns the image list of dictionaries:
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
- '''
+ """
try:
self._get_my_tenant()
- filterquery=[]
- filterquery_text=''
- for k,v in filter_dict.items():
- filterquery.append(str(k)+'='+str(v))
- if len(filterquery)>0:
- filterquery_text='?'+ '&'.join(filterquery)
- url = self.url+'/'+self.tenant+'/images'+filterquery_text
+ filterquery = []
+ filterquery_text = ""
+ for k, v in filter_dict.items():
+ filterquery.append(str(k) + "=" + str(v))
+ if len(filterquery) > 0:
+ filterquery_text = "?" + "&".join(filterquery)
+ url = self.url + "/" + self.tenant + "/images" + filterquery_text
self.logger.info("Getting image list GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
- return response['images']
+ return response["images"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def new_vminstancefromJSON(self, vm_data):
- '''Adds a VM instance to VIM'''
- '''Returns the instance identifier'''
+ """Adds a VM instance to VIM"""
+ """Returns the instance identifier"""
try:
self._get_my_tenant()
except Exception as e:
print("VIMConnector: Adding a new VM instance from JSON to VIM")
payload_req = vm_data
try:
- vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ self.url + "/" + self.tenant + "/servers",
+ headers=self.headers_req,
+ data=payload_req,
+ )
except requests.exceptions.RequestException as e:
- print( "new_vminstancefromJSON Exception: ", e.args)
+ print("new_vminstancefromJSON Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
# print vim_response
- #print vim_response.status_code
+ # print vim_response.status_code
if vim_response.status_code == 200:
- #print vim_response.json()
- #print json.dumps(vim_response.json(), indent=4)
- res,http_content = self._format_in(vim_response, new_image_response_schema)
- #print http_content
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(vim_response, new_image_response_schema)
+ # print http_content
if res:
r = self._remove_extra_items(http_content, new_image_response_schema)
- if r is not None: print("Warning: remove extra items ", r)
- #print http_content
- vminstance_id = http_content['server']['id']
- print( "Tenant image id: ",vminstance_id)
- return vim_response.status_code,vminstance_id
- else: return -vimconn.HTTP_Bad_Request,http_content
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ vminstance_id = http_content["server"]["id"]
+ print("Tenant image id: ", vminstance_id)
+ return vim_response.status_code, vminstance_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
else:
- #print vim_response.text
+ # print vim_response.text
jsonerror = self._format_jsonerror(vim_response)
text = 'Error in VIM "{}": not possible to add new vm instance. HTTP Response: {}. Error: {}'.format(
- self.url, vim_response.status_code, jsonerror)
- #print text
- return -vim_response.status_code,text
+ self.url, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Adds a VM instance to VIM
Params:
start: indicates if VM must start or boot in pause mode. Ignored
net_id: network uuid to connect
vpci: virtual vcpi to assign
model: interface model, virtio, e1000, ...
- mac_address:
+ mac_address:
use: 'data', 'bridge', 'mgmt'
type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
vim_id: filled/added by this function
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
"""
- self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
+ self.logger.debug(
+ "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+ image_id,
+ flavor_id,
+ str(net_list),
+ )
try:
self._get_my_tenant()
-# net_list = []
-# for k,v in net_dict.items():
-# print k,v
-# net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
-# net_list_string = ', '.join(net_list)
- virtio_net_list=[]
+ # net_list = []
+ # for k,v in net_dict.items():
+ # print k,v
+ # net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
+ # net_list_string = ', '.join(net_list)
+ virtio_net_list = []
for net in net_list:
if not net.get("net_id"):
continue
- net_dict = {'uuid': net["net_id"]}
+ net_dict = {"uuid": net["net_id"]}
if net.get("type"):
if net["type"] == "SR-IOV":
net_dict["type"] = "VF"
if net.get("ip_address"):
net_dict["ip_address"] = net["ip_address"]
virtio_net_list.append(net_dict)
- payload_dict={ "name": name[:64],
- "description": description,
- "imageRef": image_id,
- "flavorRef": flavor_id,
- "networks": virtio_net_list
- }
- if start != None:
+ payload_dict = {
+ "name": name[:64],
+ "description": description,
+ "imageRef": image_id,
+ "flavorRef": flavor_id,
+ "networks": virtio_net_list,
+ }
+ if start is not None:
payload_dict["start"] = start
payload_req = json.dumps({"server": payload_dict})
- url = self.url+'/'+self.tenant+'/servers'
+ url = self.url + "/" + self.tenant + "/servers"
self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
- vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_vminstance_response_schema)
- #r = self._remove_extra_items(response, new_vminstance_response_schema)
- #if r is not None:
+ # r = self._remove_extra_items(response, new_vminstance_response_schema)
+ # if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
- vminstance_id = response['server']['id']
+ vminstance_id = response["server"]["id"]
- #connect data plane interfaces to network
+ # connect data plane interfaces to network
for net in net_list:
- if net["type"]=="virtual":
+ if net["type"] == "virtual":
if not net.get("net_id"):
continue
- for iface in response['server']['networks']:
+ for iface in response["server"]["networks"]:
if "name" in net:
- if net["name"]==iface["name"]:
- net["vim_id"] = iface['iface_id']
+ if net["name"] == iface["name"]:
+ net["vim_id"] = iface["iface_id"]
break
elif "net_id" in net:
- if net["net_id"]==iface["net_id"]:
- net["vim_id"] = iface['iface_id']
+ if net["net_id"] == iface["net_id"]:
+ net["vim_id"] = iface["iface_id"]
break
- else: #dataplane
- for numa in response['server'].get('extended',{}).get('numas',() ):
- for iface in numa.get('interfaces',() ):
- if net['name'] == iface['name']:
- net['vim_id'] = iface['iface_id']
- #Code bellow is not needed, current openvim connect dataplane interfaces
- #if net.get("net_id"):
- ##connect dataplane interface
+ else: # dataplane
+ for numa in response["server"].get("extended", {}).get("numas", ()):
+ for iface in numa.get("interfaces", ()):
+ if net["name"] == iface["name"]:
+ net["vim_id"] = iface["iface_id"]
+ # Code below is not needed; current openvim connects dataplane interfaces
+ # if net.get("net_id"):
+ # connect dataplane interface
# result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
# if result < 0:
- # error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
+ # error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id']
+ # , net["net_id"], port_id)
# print "new_vminstance: " + error_text
# self.delete_vminstance(vminstance_id)
# return result, error_text
break
-
+
return vminstance_id, None
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
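For orientation, here is a minimal sketch of the net_list entry format described in the new_vminstance docstring above, together with the request body the method assembles from it. All ids and names are invented for illustration; only the keys come from the patch.

    # Hypothetical net_list entry; values are made up, keys follow the docstring above.
    net_list = [
        {
            "name": "mgmt",                                    # interface name (illustrative)
            "net_id": "11111111-2222-3333-4444-555555555555",  # made-up network uuid
            "type": "virtual",                                 # or "SR-IOV" / "PCI-PASSTHROUGH"
            "use": "mgmt",                                     # 'data', 'bridge' or 'mgmt'
        }
    ]

    # new_vminstance() reduces each entry to {"uuid": ..., optional "type": "VF",
    # optional "ip_address": ...} and POSTs:
    # {"server": {"name", "description", "imageRef", "flavorRef", "networks", optional "start"}}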
def get_vminstance(self, vm_id):
- '''Returns the VM instance information from VIM'''
+ """Returns the VM instance information from VIM"""
try:
self._get_my_tenant()
- url = self.url+'/'+self.tenant+'/servers/'+vm_id
+ url = self.url + "/" + self.tenant + "/servers/" + vm_id
self.logger.info("Getting vm GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_vminstance_response_schema)
- #r = self._remove_extra_items(response, new_vminstance_response_schema)
- #if r is not None:
+ # r = self._remove_extra_items(response, new_vminstance_response_schema)
+ # if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
- return response['server']
+ return response["server"]
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def delete_vminstance(self, vm_id, created_items=None):
- '''Removes a VM instance from VIM, returns the deleted vm_id'''
+ """Removes a VM instance from VIM, returns the deleted vm_id"""
try:
self._get_my_tenant()
- url = self.url+'/'+self.tenant+'/servers/'+vm_id
+ url = self.url + "/" + self.tenant + "/servers/" + vm_id
self.logger.info("Deleting VIM vm DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
- #self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # self.logger.debug(vim_response.text)
+ # print json.dumps(vim_response.json(), indent=4)
return vm_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def refresh_vms_status(self, vm_list):
- '''Refreshes the status of the virtual machines'''
+ """Refreshes the status of the virtual machines"""
try:
self._get_my_tenant()
except requests.exceptions.RequestException as e:
self._format_request_exception(e)
- vm_dict={}
+ vm_dict = {}
for vm_id in vm_list:
- vm={}
- #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
+ vm = {}
+ # print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
try:
- url = self.url + '/' + self.tenant + '/servers/' + vm_id
+ url = self.url + "/" + self.tenant + "/servers/" + vm_id
self.logger.info("Getting vm GET %s", url)
- vim_response = requests.get(url, headers = self.headers_req)
+ vim_response = requests.get(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
response = vim_response.json()
js_v(response, new_vminstance_response_schema)
- if response['server']['status'] in vmStatus2manoFormat:
- vm['status'] = vmStatus2manoFormat[ response['server']['status'] ]
+ if response["server"]["status"] in vmStatus2manoFormat:
+ vm["status"] = vmStatus2manoFormat[response["server"]["status"]]
else:
- vm['status'] = "OTHER"
- vm['error_msg'] = "VIM status reported " + response['server']['status']
- if response['server'].get('last_error'):
- vm['error_msg'] = response['server']['last_error']
- vm["vim_info"] = yaml.safe_dump(response['server'])
- #get interfaces info
+ vm["status"] = "OTHER"
+ vm["error_msg"] = (
+ "VIM status reported " + response["server"]["status"]
+ )
+ if response["server"].get("last_error"):
+ vm["error_msg"] = response["server"]["last_error"]
+ vm["vim_info"] = yaml.safe_dump(response["server"])
+ # get interfaces info
try:
management_ip = False
- url2 = self.url + '/ports?device_id=' + quote(vm_id)
+ url2 = self.url + "/ports?device_id=" + quote(vm_id)
self.logger.info("Getting PORTS GET %s", url2)
- vim_response2 = requests.get(url2, headers = self.headers_req)
+ vim_response2 = requests.get(url2, headers=self.headers_req)
self._check_http_request_response(vim_response2)
client_data = vim_response2.json()
if isinstance(client_data.get("ports"), list):
- vm["interfaces"]=[]
+ vm["interfaces"] = []
for port in client_data.get("ports"):
- interface={}
- interface['vim_info'] = yaml.safe_dump(port)
+ interface = {}
+ interface["vim_info"] = yaml.safe_dump(port)
interface["mac_address"] = port.get("mac_address")
interface["vim_net_id"] = port.get("network_id")
interface["vim_interface_id"] = port["id"]
if interface["ip_address"] == "0.0.0.0":
interface["ip_address"] = None
vm["interfaces"].append(interface)
-
+
except Exception as e:
- self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))
+ self.logger.error(
+ "refresh_vms_and_nets. Port get %s: %s",
+ type(e).__name__,
+ str(e),
+ )
+
+ if vm["status"] == "ACTIVE" and not management_ip:
+ vm["status"] = "ACTIVE:NoMgmtIP"
- if vm['status'] == "ACTIVE" and not management_ip:
- vm['status'] = "ACTIVE:NoMgmtIP"
-
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting vm status: %s", str(e))
- vm['status'] = "DELETED"
- vm['error_msg'] = str(e)
- except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.VimConnException) as e:
+ vm["status"] = "DELETED"
+ vm["error_msg"] = str(e)
+ except (
+ requests.exceptions.RequestException,
+ js_e.ValidationError,
+ vimconn.VimConnException,
+ ) as e:
self.logger.error("Exception getting vm status: %s", str(e))
- vm['status'] = "VIM_ERROR"
- vm['error_msg'] = str(e)
+ vm["status"] = "VIM_ERROR"
+ vm["error_msg"] = str(e)
vm_dict[vm_id] = vm
return vm_dict
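A sketch of the per-VM dictionary that refresh_vms_status builds above; the uuid and every value are invented, the keys mirror those set in the loop.

    # Illustrative only: made-up uuid and values, keys as assigned above.
    example_vm_dict = {
        "53a529b2-10d8-4d56-a7ad-8182acdbe71c": {   # hypothetical vm_id
            "status": "ACTIVE",                      # mapped through vmStatus2manoFormat
            "vim_info": "... yaml dump of the VIM 'server' object ...",
            "interfaces": [
                {
                    "vim_interface_id": "port-uuid",       # port["id"]
                    "vim_net_id": "net-uuid",              # port["network_id"]
                    "mac_address": "52:54:00:00:00:01",
                    "ip_address": None,
                    "vim_info": "... yaml dump of the port ...",
                }
            ],
        }
    }

"error_msg" is added only when the VIM reports an error or an unknown status.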
def refresh_nets_status(self, net_list):
- '''Get the status of the networks
- Params: the list of network identifiers
- Returns a dictionary with:
- net_id: #VIM id of this network
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, INACTIVE, DOWN (admin down),
- # BUILD (on building process)
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
-
- '''
+ """Get the status of the networks
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+
+ """
try:
self._get_my_tenant()
except requests.exceptions.RequestException as e:
self._format_request_exception(e)
-
- net_dict={}
+
+ net_dict = {}
for net_id in net_list:
net = {}
- #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
+ # print "VIMConnector refresh_tenant_vms_and_nets:
+ # Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
try:
net_vim = self.get_network(net_id)
- if net_vim['status'] in netStatus2manoFormat:
- net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+ if net_vim["status"] in netStatus2manoFormat:
+ net["status"] = netStatus2manoFormat[net_vim["status"]]
else:
net["status"] = "OTHER"
- net["error_msg"] = "VIM status reported " + net_vim['status']
-
- if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
+ net["error_msg"] = "VIM status reported " + net_vim["status"]
+
+ if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
net["status"] = "DOWN"
- if net_vim.get('last_error'):
- net['error_msg'] = net_vim['last_error']
+ if net_vim.get("last_error"):
+ net["error_msg"] = net_vim["last_error"]
net["vim_info"] = yaml.safe_dump(net_vim)
except vimconn.VimConnNotFoundException as e:
self.logger.error("Exception getting net status: %s", str(e))
- net['status'] = "DELETED"
- net['error_msg'] = str(e)
- except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.VimConnException) as e:
+ net["status"] = "DELETED"
+ net["error_msg"] = str(e)
+ except (
+ requests.exceptions.RequestException,
+ js_e.ValidationError,
+ vimconn.VimConnException,
+ ) as e:
self.logger.error("Exception getting net status: %s", str(e))
- net['status'] = "VIM_ERROR"
- net['error_msg'] = str(e)
+ net["status"] = "VIM_ERROR"
+ net["error_msg"] = str(e)
net_dict[net_id] = net
return net_dict
-
+
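Correspondingly, a sketch of one refresh_nets_status entry, following the docstring above; the network uuid and values are invented.

    example_net_dict = {
        "c2d0f28f-d38b-4588-aecc-88af3d4af58b": {   # hypothetical net_id
            "status": "ACTIVE",                      # or DELETED, VIM_ERROR, OTHER, DOWN, BUILD
            "error_msg": "",                         # present when the VIM reports an error
            "vim_info": "... yaml dump of the VIM network ...",
        }
    }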
def action_vminstance(self, vm_id, action_dict, created_items={}):
- '''Send and action over a VM instance from VIM'''
- '''Returns the status'''
+ """Send an action over a VM instance from VIM. Returns the status."""
try:
self._get_my_tenant()
if "console" in action_dict:
- raise vimconn.VimConnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
- url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
+ raise vimconn.VimConnException(
+ "getting console is not available at openvim",
+ http_code=vimconn.HTTP_Service_Unavailable,
+ )
+ url = self.url + "/" + self.tenant + "/servers/" + vm_id + "/action"
self.logger.info("Action over VM instance POST %s", url)
- vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=json.dumps(action_dict)
+ )
self._check_http_request_response(vim_response)
return None
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
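For context, action_vminstance simply POSTs the caller's action_dict to /servers/<id>/action. A hypothetical usage sketch follows; "vim" is an already-instantiated vimconnector, and the uuid and the action key are assumptions, since the exact action payloads accepted by openvim are not shown in this patch.

    # Hypothetical call; uuid and action key are illustrative assumptions.
    vim.action_vminstance(
        "47d12505-5968-4e16-95a7-18743edb0c8b",   # made-up VM uuid
        {"start": None},                           # assumed openvim-style action payload
    )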
-#NOT USED METHODS in current version
-
+ # NOT USED METHODS in current version
+
def host_vim2gui(self, host, server_dict):
- '''Transform host dictionary from VIM format to GUI format,
+ """Transform host dictionary from VIM format to GUI format,
and append to the server_dict
- '''
- if type(server_dict) is not dict:
- print( 'vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary')
+ """
+ if type(server_dict) is not dict:
+ print(
+ "vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary"
+ )
return
- RAD={}
- occupation={}
- for numa in host['host']['numas']:
- RAD_item={}
- occupation_item={}
- #memory
- RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
- occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
- #cpus
- RAD_item['cpus']={}
- RAD_item['cpus']['cores'] = []
- RAD_item['cpus']['eligible_cores'] = []
- occupation_item['cores']=[]
- for _ in range(0, len(numa['cores']) // 2):
- RAD_item['cpus']['cores'].append( [] )
- for core in numa['cores']:
- RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
- if not 'status' in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
- if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
- #ports
- RAD_item['ports']={}
- occupation_item['ports']={}
- for iface in numa['interfaces']:
- RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
- occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] // iface['Mbps']) + "%" }
-
- RAD[ numa['numa_socket'] ] = RAD_item
- occupation[ numa['numa_socket'] ] = occupation_item
- server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
+ RAD = {}
+ occupation = {}
+ for numa in host["host"]["numas"]:
+ RAD_item = {}
+ occupation_item = {}
+ # memory
+ RAD_item["memory"] = {
+ "size": str(numa["memory"]) + "GB",
+ "eligible": str(numa["hugepages"]) + "GB",
+ }
+ occupation_item["memory"] = str(numa["hugepages_consumed"]) + "GB"
+ # cpus
+ RAD_item["cpus"] = {}
+ RAD_item["cpus"]["cores"] = []
+ RAD_item["cpus"]["eligible_cores"] = []
+ occupation_item["cores"] = []
+ for _ in range(0, len(numa["cores"]) // 2):
+ RAD_item["cpus"]["cores"].append([])
+ for core in numa["cores"]:
+ RAD_item["cpus"]["cores"][core["core_id"]].append(core["thread_id"])
+ if "status" not in core:
+ RAD_item["cpus"]["eligible_cores"].append(core["thread_id"])
+ if "instance_id" in core:
+ occupation_item["cores"].append(core["thread_id"])
+ # ports
+ RAD_item["ports"] = {}
+ occupation_item["ports"] = {}
+ for iface in numa["interfaces"]:
+ RAD_item["ports"][iface["pci"]] = "speed:" + str(iface["Mbps"]) + "M"
+ occupation_item["ports"][iface["pci"]] = {
+ "occupied": str(100 * iface["Mbps_consumed"] // iface["Mbps"]) + "%"
+ }
+
+ RAD[numa["numa_socket"]] = RAD_item
+ occupation[numa["numa_socket"]] = occupation_item
+ server_dict[host["host"]["name"]] = {"RAD": RAD, "occupation": occupation}
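To make the transformation above easier to follow, this is roughly the server_dict entry host_vim2gui produces for one host; the host name and all figures are invented, the structure is taken from the code.

    # Shape produced for a host named "host-01" with one NUMA socket (all figures illustrative).
    example_server_dict = {
        "host-01": {
            "RAD": {
                0: {
                    "memory": {"size": "64GB", "eligible": "32GB"},
                    "cpus": {"cores": [[0, 20], [1, 21]], "eligible_cores": [20, 21]},
                    "ports": {"0000:05:00.0": "speed:10000M"},
                }
            },
            "occupation": {
                0: {
                    "memory": "8GB",
                    "cores": [0],
                    "ports": {"0000:05:00.0": {"occupied": "10%"}},
                }
            },
        }
    }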
def get_hosts_info(self):
- '''Get the information of deployed hosts
- Returns the hosts content'''
- #obtain hosts list
- url=self.url+'/hosts'
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ # obtain hosts list
+ url = self.url + "/hosts"
try:
vim_response = requests.get(url)
except requests.exceptions.RequestException as e:
- print( "get_hosts_info Exception: ", e.args)
+ print("get_hosts_info Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print("vim get", url, "response:", vim_response.status_code, vim_response.json())
- #print vim_response.status_code
- #print json.dumps(vim_response.json(), indent=4)
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
if vim_response.status_code != 200:
# TODO: get error
- print('vimconnector.get_hosts_info error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+ print(
+ "vimconnector.get_hosts_info error getting host list {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
return -vim_response.status_code, "Error getting host list"
-
- res,hosts = self._format_in(vim_response, get_hosts_response_schema)
-
- if res==False:
- print("vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts)
+
+ res, hosts = self._format_in(vim_response, get_hosts_response_schema)
+
+ if not res:
+ print(
+ "vimconnector.get_hosts_info error parsing GET HOSTS vim response",
+ hosts,
+ )
return vimconn.HTTP_Internal_Server_Error, hosts
- #obtain hosts details
- hosts_dict={}
- for host in hosts['hosts']:
- url=self.url+'/hosts/'+host['id']
+ # obtain hosts details
+ hosts_dict = {}
+ for host in hosts["hosts"]:
+ url = self.url + "/hosts/" + host["id"]
try:
vim_response = requests.get(url)
except requests.exceptions.RequestException as e:
- print( "get_hosts_info Exception: ", e.args)
+ print("get_hosts_info Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print("vim get", url, "response:", vim_response.status_code, vim_response.json())
+ print(
+ "vim get",
+ url,
+ "response:",
+ vim_response.status_code,
+ vim_response.json(),
+ )
if vim_response.status_code != 200:
- print('vimconnector.get_hosts_info error getting detailed host {} {}'.format(vim_response.status_code, vim_response.json()))
+ print(
+ "vimconnector.get_hosts_info error getting detailed host {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
continue
- res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
- if res==False:
- print ("vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(host['id']), host_detail)
+ res, host_detail = self._format_in(
+ vim_response, get_host_detail_response_schema
+ )
+ if not res:
+ print(
+ "vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(
+ host["id"], host_detail
+ ),
+ )
continue
- #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+ # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
self.host_vim2gui(host_detail, hosts_dict)
return 200, hosts_dict
def get_hosts(self, vim_tenant):
- '''Get the hosts and deployed instances
- Returns the hosts content'''
- #obtain hosts list
- url=self.url+'/hosts'
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ # obtain hosts list
+ url = self.url + "/hosts"
try:
vim_response = requests.get(url)
except requests.exceptions.RequestException as e:
print("get_hosts Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print("vim get", url, "response:", vim_response.status_code, vim_response.json())
- #print vim_response.status_code
- #print json.dumps(vim_response.json(), indent=4)
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
if vim_response.status_code != 200:
- #TODO: get error
- print('vimconnector.get_hosts error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+ # TODO: get error
+ print(
+ "vimconnector.get_hosts error getting host list {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
return -vim_response.status_code, "Error getting host list"
-
- res,hosts = self._format_in(vim_response, get_hosts_response_schema)
-
- if res==False:
+
+ res, hosts = self._format_in(vim_response, get_hosts_response_schema)
+
+ if not res:
print("vimconnector.get_host error parsing GET HOSTS vim response", hosts)
return vimconn.HTTP_Internal_Server_Error, hosts
- #obtain instances from hosts
- for host in hosts['hosts']:
- url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
+ # obtain instances from hosts
+ for host in hosts["hosts"]:
+ url = self.url + "/" + vim_tenant + "/servers?hostId=" + host["id"]
try:
vim_response = requests.get(url)
except requests.exceptions.RequestException as e:
print("get_hosts Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print("vim get", url, "response:", vim_response.status_code, vim_response.json())
+ print(
+ "vim get",
+ url,
+ "response:",
+ vim_response.status_code,
+ vim_response.json(),
+ )
if vim_response.status_code != 200:
- print('vimconnector.get_hosts error getting instances at host {} {}'.format(vim_response.status_code, vim_response.json()))
+ print(
+ "vimconnector.get_hosts error getting instances at host {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
continue
- res,servers = self._format_in(vim_response, get_server_response_schema)
- if res==False:
- print("vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(host['id']), servers)
+ res, servers = self._format_in(vim_response, get_server_response_schema)
+ if not res:
+ print(
+ "vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(
+ host["id"], servers
+ ),
+ )
continue
- #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
- host['instances'] = servers['servers']
- return 200, hosts['hosts']
+ # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+ host["instances"] = servers["servers"]
+ return 200, hosts["hosts"]
def get_processor_rankings(self):
- '''Get the processor rankings in the VIM database'''
- url=self.url+'/processor_ranking'
+ """Get the processor rankings in the VIM database"""
+ url = self.url + "/processor_ranking"
try:
vim_response = requests.get(url)
except requests.exceptions.RequestException as e:
print("get_processor_rankings Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print("vim get", url, "response:", vim_response.status_code, vim_response.json())
- #print vim_response.status_code
- #print json.dumps(vim_response.json(), indent=4)
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
if vim_response.status_code != 200:
- #TODO: get error
- print('vimconnector.get_processor_rankings error getting processor rankings {} {}'.format(vim_response.status_code, vim_response.json()))
+ # TODO: get error
+ print(
+ "vimconnector.get_processor_rankings error getting processor rankings {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
return -vim_response.status_code, "Error getting processor rankings"
-
- res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
- return res, rankings['rankings']
-
+
+ res, rankings = self._format_in(
+ vim_response, get_processor_rankings_response_schema
+ )
+ return res, rankings["rankings"]
+
def new_host(self, host_data):
- '''Adds a new host to VIM'''
- '''Returns status code of the VIM response'''
+ """Adds a new host to VIM. Returns status code of the VIM response."""
payload_req = host_data
try:
- url = self.url_admin+'/hosts'
+ url = self.url_admin + "/hosts"
self.logger.info("Adding a new host POST %s", url)
- vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
- #print json.dumps(vim_response.json(), indent=4)
+ # print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_host_response_schema)
r = self._remove_extra_items(response, new_host_response_schema)
- if r is not None:
+ if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
- host_id = response['host']['id']
+ host_id = response["host"]["id"]
return host_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
-
+
def new_external_port(self, port_data):
- '''Adds a external port to VIM'''
- '''Returns the port identifier'''
- #TODO change to logging exception code policies
- print( "VIMConnector: Adding a new external port")
+ """Adds an external port to VIM. Returns the port identifier."""
+ # TODO change to logging exception code policies
+ print("VIMConnector: Adding a new external port")
payload_req = port_data
try:
- vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ self.url_admin + "/ports", headers=self.headers_req, data=payload_req
+ )
except requests.exceptions.RequestException as e:
self.logger.error("new_external_port Exception: ", str(e))
return -vimconn.HTTP_Not_Found, str(e.args[0])
- print( vim_response)
- #print vim_response.status_code
+ print(vim_response)
+ # print vim_response.status_code
if vim_response.status_code == 200:
- #print vim_response.json()
- #print json.dumps(vim_response.json(), indent=4)
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
res, http_content = self._format_in(vim_response, new_port_response_schema)
- #print http_content
+ # print http_content
if res:
r = self._remove_extra_items(http_content, new_port_response_schema)
- if r is not None: print("Warning: remove extra items ", r)
- #print http_content
- port_id = http_content['port']['id']
- print("Port id: ",port_id)
- return vim_response.status_code,port_id
- else: return -vimconn.HTTP_Bad_Request,http_content
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ port_id = http_content["port"]["id"]
+ print("Port id: ", port_id)
+ return vim_response.status_code, port_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
else:
- #print vim_response.text
+ # print vim_response.text
jsonerror = self._format_jsonerror(vim_response)
text = 'Error in VIM "{}": not possible to add new external port. HTTP Response: {}. Error: {}'.format(
- self.url_admin, vim_response.status_code, jsonerror)
- #print text
- return -vim_response.status_code,text
-
- def new_external_network(self,net_name,net_type):
- '''Adds a external network to VIM (shared)'''
- '''Returns the network identifier'''
- #TODO change to logging exception code policies
- print("VIMConnector: Adding external shared network to VIM (type " + net_type + "): "+ net_name)
-
- payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
+ self.url_admin, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
+
+ def new_external_network(self, net_name, net_type):
+ """Adds an external network to VIM (shared). Returns the network identifier."""
+ # TODO change to logging exception code policies
+ print(
+ "VIMConnector: Adding external shared network to VIM (type "
+ + net_type
+ + "): "
+ + net_name
+ )
+
+ payload_req = (
+ '{"network":{"name": "'
+ + net_name
+ + '","shared":true,"type": "'
+ + net_type
+ + '"}}'
+ )
try:
- vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
+ vim_response = requests.post(
+ self.url + "/networks", headers=self.headers_req, data=payload_req
+ )
except requests.exceptions.RequestException as e:
- self.logger.error( "new_external_network Exception: ", e.args)
+ self.logger.error("new_external_network Exception: %s", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
print(vim_response)
- #print vim_response.status_code
+ # print vim_response.status_code
if vim_response.status_code == 200:
- #print vim_response.json()
- #print json.dumps(vim_response.json(), indent=4)
- res,http_content = self._format_in(vim_response, new_network_response_schema)
- #print http_content
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(
+ vim_response, new_network_response_schema
+ )
+ # print http_content
if res:
r = self._remove_extra_items(http_content, new_network_response_schema)
- if r is not None: print("Warning: remove extra items ", r)
- #print http_content
- network_id = http_content['network']['id']
- print( "Network id: ",network_id)
- return vim_response.status_code,network_id
- else: return -vimconn.HTTP_Bad_Request,http_content
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ network_id = http_content["network"]["id"]
+ print("Network id: ", network_id)
+ return vim_response.status_code, network_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
else:
- #print vim_response.text
+ # print vim_response.text
jsonerror = self._format_jsonerror(vim_response)
text = 'Error in VIM "{}": not possible to add new external network. HTTP Response: {}. Error: {}'.format(
- self.url, vim_response.status_code, jsonerror)
- #print text
- return -vim_response.status_code,text
-
+ self.url, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
+
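As an aside, the hand-concatenated JSON payload built above could equally be produced with json.dumps, which avoids quoting mistakes. A small sketch, with illustrative values for the two parameters:

    import json

    net_name = "external-net"   # illustrative
    net_type = "bridge"         # illustrative

    # Equivalent to the string-concatenated payload in new_external_network above.
    payload_req = json.dumps(
        {"network": {"name": net_name, "shared": True, "type": net_type}}
    )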
def connect_port_network(self, port_id, network_id, admin=False):
- '''Connects a external port to a network'''
- '''Returns status code of the VIM response'''
- #TODO change to logging exception code policies
+ """Connects an external port to a network. Returns status code of the VIM response."""
+ # TODO change to logging exception code policies
print("VIMConnector: Connecting external port to network")
-
+
payload_req = '{"port":{"network_id":"' + network_id + '"}}'
if admin:
- if self.url_admin==None:
- return -vimconn.HTTP_Unauthorized, "datacenter cannot contain admin URL"
- url= self.url_admin
+ if self.url_admin is None:
+ return (
+ -vimconn.HTTP_Unauthorized,
+ "datacenter cannot contain admin URL",
+ )
+ url = self.url_admin
else:
- url= self.url
+ url = self.url
try:
- vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
+ vim_response = requests.put(
+ url + "/ports/" + port_id, headers=self.headers_req, data=payload_req
+ )
except requests.exceptions.RequestException as e:
print("connect_port_network Exception: ", e.args)
return -vimconn.HTTP_Not_Found, str(e.args[0])
print(vim_response)
- #print vim_response.status_code
+ # print vim_response.status_code
if vim_response.status_code == 200:
- #print vim_response.json()
- #print json.dumps(vim_response.json(), indent=4)
- res,http_content = self._format_in(vim_response, new_port_response_schema)
- #print http_content
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(vim_response, new_port_response_schema)
+ # print http_content
if res:
r = self._remove_extra_items(http_content, new_port_response_schema)
- if r is not None: print("Warning: remove extra items ", r)
- #print http_content
- port_id = http_content['port']['id']
- print("Port id: ",port_id)
- return vim_response.status_code,port_id
- else: return -vimconn.HTTP_Bad_Request,http_content
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ port_id = http_content["port"]["id"]
+ print("Port id: ", port_id)
+ return vim_response.status_code, port_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
else:
print(vim_response.text)
jsonerror = self._format_jsonerror(vim_response)
- text = 'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.' \
- ' Error: {}'.format(self.url_admin, vim_response.status_code, jsonerror)
+ text = (
+ 'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.'
+ " Error: {}".format(self.url_admin, vim_response.status_code, jsonerror)
+ )
print(text)
- return -vim_response.status_code,text
-
-
+ return -vim_response.status_code, text
setup(
name=_name,
- description='OSM ro vim plugin for openvim',
+ description="OSM ro vim plugin for openvim",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
- "requests", "netaddr", "PyYAML",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_openvim = osm_rovim_openvim.vimconn_openvim:vimconnector'],
+ "osm_rovim.plugins": [
+ "rovim_openvim = osm_rovim_openvim.vimconn_openvim:vimconnector"
+ ],
},
)
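For reference, the entry point declared above is what makes the plugin discoverable once the package is installed. A minimal lookup sketch, assuming the RO engine resolves vim plugins through the osm_rovim.plugins group as the entry_points section suggests:

    # Minimal discovery sketch; requires the package (and pkg_resources) to be installed.
    from pkg_resources import iter_entry_points

    for ep in iter_entry_points("osm_rovim.plugins"):
        if ep.name == "rovim_openvim":
            vimconnector_cls = ep.load()  # osm_rovim_openvim.vimconn_openvim:vimconnector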
basepython = python3
deps = flake8
commands = flake8 osm_rovim_openvim --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
from osm_rovim_vmware.vimconn_vmware import vimconnector
-from osm_ro_plugin.vimconn import VimConnUnexpectedResponse, VimConnNotFoundException,VimConnException
+from osm_ro_plugin.vimconn import (
+ VimConnUnexpectedResponse,
+ VimConnNotFoundException,
+ VimConnException,
+)
from pyvcloud.vcd.client import Client
from lxml import etree as lxmlElementTree
from pyvcloud.vcd.org import Org
__author__ = "Prakash Kasar"
+
class TestVimconn_VMware(unittest.TestCase):
def setUp(self):
- config = { "admin_password": "admin",
- "admin_username":"user",
- "nsx_user": "nsx",
- "nsx_password": "nsx",
- "nsx_manager":"https://test-nsx" }
+ config = {
+ "admin_password": "admin",
+ "admin_username": "user",
+ "nsx_user": "nsx",
+ "nsx_password": "nsx",
+ "nsx_manager": "https://test-nsx",
+ }
- self.client = Client('test', verify_ssl_certs=False)
+ self.client = Client("test", verify_ssl_certs=False)
# get vcd org object
org_resp = xml_resp.org_xml_response
get_org = lxmlElementTree.fromstring(org_resp)
self.org = Org(self.client, resource=get_org)
- self.vim = vimconnector(uuid='12354',
- name='test',
- tenant_id='abc1234',
- tenant_name='test',
- url='https://test',
- config=config)
-
-
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'perform_request')
+ self.vim = vimconnector(
+ uuid="12354",
+ name="test",
+ tenant_id="abc1234",
+ tenant_name="test",
+ url="https://test",
+ config=config,
+ )
+
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "perform_request")
def test_get_network_not_found(self, perform_request, connect, get_vdc_details):
"""
Testcase to get network with invalid network id
perform_request.return_value.content = xml_resp.vdc_xml_response
# call to VIM connector method with invalid id
- self.assertRaises(VimConnNotFoundException,self.vim.get_network,'mgmt-net')
+ self.assertRaises(VimConnNotFoundException, self.vim.get_network, "mgmt-net")
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
def test_get_network(self, connect, get_vdc_details, perform_request):
"""
Testcase to get network with valid network id
"""
- net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+ net_id = "5c04dc6d-6096-47c6-b72b-68f19013d491"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
self.vim.client = self.vim.connect()
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.vdc_xml_response),
- mock.Mock(status_code = 200,
- content = xml_resp.network_xml_response)]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.vdc_xml_response),
+ mock.Mock(status_code=200, content=xml_resp.network_xml_response),
+ ]
# call to VIM connector method with network_id
result = self.vim.get_network(net_id)
# assert verified expected and return result from VIM connector
- self.assertEqual(net_id, result['id'])
+ self.assertEqual(net_id, result["id"])
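The tests above and below rely on the standard unittest.mock behaviour that a list passed as side_effect is consumed one element per call, so the first perform_request() returns the vdc response and the second the network response. A self-contained reminder:

    from unittest import mock

    m = mock.Mock(side_effect=["first response", "second response"])
    assert m() == "first response"
    assert m() == "second response"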
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
- def test_get_network_list_not_found(self, connect, get_vdc_details, perform_request):
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
+ def test_get_network_list_not_found(
+ self, connect, get_vdc_details, perform_request
+ ):
"""
Testcase to get list of available networks by invalid network id
"""
perform_request.return_value.content = network_xml_resp
# call to VIM connector method with network_id
- result = self.vim.get_network_list({'id':'45hdfg-345nb-345'})
+ result = self.vim.get_network_list({"id": "45hdfg-345nb-345"})
# assert verified expected and return result from VIM connector
self.assertEqual(list(), result)
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
def test_get_network_list(self, connect, get_vdc_details, perform_request):
"""
Testcase to get list of available networks by valid network id
"""
- #import pdb;pdb.set_trace() ## Not working
+ # import pdb;pdb.set_trace() ## Not working
vdc_xml_resp = xml_resp.vdc_xml_response
- net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+ net_id = "5c04dc6d-6096-47c6-b72b-68f19013d491"
# created vdc object
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
# created network object
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
self.vim.client = self.vim.connect()
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.vdc_xml_response),
- mock.Mock(status_code = 200,
- content = network_xml_resp)]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.vdc_xml_response),
+ mock.Mock(status_code=200, content=network_xml_resp),
+ ]
perform_request.reset_mock()
perform_request()
# call to VIM connector method with network_id
- result = self.vim.get_network_list({'id': net_id})
+ result = self.vim.get_network_list({"id": net_id})
# assert verified expected and return result from VIM connector
for item in result:
- self.assertEqual(item.get('id'), net_id)
- self.assertEqual(item.get('status'), 'ACTIVE')
- self.assertEqual(item.get('shared'), False)
+ self.assertEqual(item.get("id"), net_id)
+ self.assertEqual(item.get("status"), "ACTIVE")
+ self.assertEqual(item.get("shared"), False)
- @mock.patch.object(vimconnector,'create_network_rest')
+ @mock.patch.object(vimconnector, "create_network_rest")
def test_new_network(self, create_network_rest):
"""
Testcase to create new network by passing network name and type
"""
# create network response
create_net_xml_resp = xml_resp.create_network_xml_response
- net_name = 'Test_network'
- net_type = 'bridge'
+ net_name = "Test_network"
+ net_type = "bridge"
# assumed return value from VIM connector
create_network_rest.return_value = create_net_xml_resp
# call to VIM connector method with network name and type
result = self.vim.new_network(net_name, net_type)
# assert verified expected and return result from VIM connector
- self.assertEqual(result, ('df1956fa-da04-419e-a6a2-427b6f83788f', {}))
+ self.assertEqual(result, ("df1956fa-da04-419e-a6a2-427b6f83788f", {}))
- @mock.patch.object(vimconnector, 'create_network_rest')
+ @mock.patch.object(vimconnector, "create_network_rest")
def test_new_network_not_created(self, create_network_rest):
"""
Testcase to create new network by assigning empty xml data
<OrgVdcNetwork></OrgVdcNetwork>"""
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnUnexpectedResponse,self.vim.new_network,
- 'test_net',
- 'bridge')
+ self.assertRaises(
+ VimConnUnexpectedResponse, self.vim.new_network, "test_net", "bridge"
+ )
- @mock.patch.object(vimconnector, 'connect')
- @mock.patch.object(vimconnector, 'get_network_action')
- @mock.patch.object(vimconnector, 'delete_network_action')
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_network_action")
+ @mock.patch.object(vimconnector, "delete_network_action")
def test_delete_network(self, delete_network_action, get_network_action, connect):
"""
Testcase to delete network by network id
"""
- net_uuid = '0a55e5d1-43a2-4688-bc92-cb304046bf87'
+ net_uuid = "0a55e5d1-43a2-4688-bc92-cb304046bf87"
# delete network response
delete_net_xml_resp = xml_resp.delete_network_xml_response
# assert verified expected and return result from VIM connector
self.assertEqual(result, net_uuid)
- @mock.patch.object(vimconnector, 'get_vcd_network')
+ @mock.patch.object(vimconnector, "get_vcd_network")
def test_delete_network_not_found(self, get_vcd_network):
"""
Testcase to delete network by invalid network id
# assumed return value from VIM connector
get_vcd_network.return_value = False
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnNotFoundException,self.vim.delete_network,
- '2a23e5d1-42a2-0648-bc92-cb508046bf87')
+ self.assertRaises(
+ VimConnNotFoundException,
+ self.vim.delete_network,
+ "2a23e5d1-42a2-0648-bc92-cb508046bf87",
+ )
def test_get_flavor(self):
"""
Testcase to get flavor data
"""
- flavor_data = {'a646eb8a-95bd-4e81-8321-5413ee72b62e': {'disk': 10,
- 'vcpus': 1,
- 'ram': 1024}}
+ flavor_data = {
+ "a646eb8a-95bd-4e81-8321-5413ee72b62e": {
+ "disk": 10,
+ "vcpus": 1,
+ "ram": 1024,
+ }
+ }
vimconnector.flavorlist = flavor_data
- result = self.vim.get_flavor('a646eb8a-95bd-4e81-8321-5413ee72b62e')
+ result = self.vim.get_flavor("a646eb8a-95bd-4e81-8321-5413ee72b62e")
# assert verified expected and return result from VIM connector
- self.assertEqual(result, flavor_data['a646eb8a-95bd-4e81-8321-5413ee72b62e'])
+ self.assertEqual(result, flavor_data["a646eb8a-95bd-4e81-8321-5413ee72b62e"])
def test_get_flavor_not_found(self):
"""
"""
vimconnector.flavorlist = {}
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnNotFoundException,self.vim.get_flavor,
- 'a646eb8a-95bd-4e81-8321-5413ee72b62e')
+ self.assertRaises(
+ VimConnNotFoundException,
+ self.vim.get_flavor,
+ "a646eb8a-95bd-4e81-8321-5413ee72b62e",
+ )
def test_new_flavor(self):
"""
Testcase to create new flavor data
"""
- flavor_data = {'disk': 10, 'vcpus': 1, 'ram': 1024}
+ flavor_data = {"disk": 10, "vcpus": 1, "ram": 1024}
result = self.vim.new_flavor(flavor_data)
# assert verified expected and return result from VIM connector
self.assertIsNotNone(result)
"""
Testcase to delete flavor data
"""
- flavor_data = {'2cb3dffb-5c51-4355-8406-28553ead28ac': {'disk': 10,
- 'vcpus': 1,
- 'ram': 1024}}
+ flavor_data = {
+ "2cb3dffb-5c51-4355-8406-28553ead28ac": {
+ "disk": 10,
+ "vcpus": 1,
+ "ram": 1024,
+ }
+ }
vimconnector.flavorlist = flavor_data
# return value from VIM connector
- result = self.vim.delete_flavor('2cb3dffb-5c51-4355-8406-28553ead28ac')
+ result = self.vim.delete_flavor("2cb3dffb-5c51-4355-8406-28553ead28ac")
# assert verified expected and return result from VIM connector
- self.assertEqual(result, '2cb3dffb-5c51-4355-8406-28553ead28ac')
+ self.assertEqual(result, "2cb3dffb-5c51-4355-8406-28553ead28ac")
- @mock.patch.object(vimconnector,'connect_as_admin')
- @mock.patch.object(vimconnector,'perform_request')
+ @mock.patch.object(vimconnector, "connect_as_admin")
+ @mock.patch.object(vimconnector, "perform_request")
def test_delete_image_not_found(self, perform_request, connect_as_admin):
"""
Testcase to delete image by invalid image id
self.vim.client = self.vim.connect_as_admin()
# assumed return value from VIM connector
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.delete_catalog_xml_response),
- mock.Mock(status_code = 201,
- content = xml_resp.delete_catalog_item_xml_response)
- ]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.delete_catalog_xml_response),
+ mock.Mock(
+ status_code=201, content=xml_resp.delete_catalog_item_xml_response
+ ),
+ ]
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnNotFoundException, self.vim.delete_image, 'invali3453')
+ self.assertRaises(VimConnNotFoundException, self.vim.delete_image, "invali3453")
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(Org,'list_catalogs')
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(Org, "list_catalogs")
def test_get_image_list(self, list_catalogs, connect, get_vdc_details):
"""
Testcase to get image list by valid image id
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
- list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2018-01-26T02:09:12.387-08:00', 'id': 'b139ed82-7ca4-49fb-9882-5f841f59c890', 'name': 'Ubuntu_plugtest-1'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org2', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-06-18T21:33:16.430-07:00', 'id': 'b31e6973-86d2-404b-a522-b16846d099dc', 'name': 'Ubuntu_Cat'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '0', 'creationDate': '2018-02-15T22:26:28.910-08:00', 'id': 'c3b56180-f980-4256-9109-a93168d73ff2', 'name': 'de4ffcf2ad21f1a5d0714d6b868e2645'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-08-23T05:54:56.780-07:00', 'id': 'd0eb0b02-718d-42e0-b889-56575000b52d', 'name': 'Test_Cirros'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-03-08T21:25:05.923-08:00', 'id': 'd3fa3df2-b311-4571-9138-4c66541d7f46', 'name': 'cirros_10'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-07-12T22:45:20.537-07:00', 'id': 'd64b2617-ea4b-4b90-910b-102c99dd2031', 'name': 'Ubuntu16'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '1', 'creationDate': '2017-10-14T23:52:37.260-07:00', 'id': 'e8d953db-8dc9-46d5-9cab-329774cd2ad9', 'name': 'Ubuntu_no_nic'}]
-
- result = self.vim.get_image_list({'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a'})
+ list_catalogs.return_value = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "34925a30-0f4a-4018-9759-0d6799063b51",
+ "name": "Ubuntu_1nic",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "orgadmin",
+ "numberOfMedia": "1",
+ "creationDate": "2018-02-15T02:16:58.300-08:00",
+ "id": "4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a",
+ "name": "cirros034",
+ },
+ {
+ "isShared": "true",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "true",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2018-01-26T02:09:12.387-08:00",
+ "id": "b139ed82-7ca4-49fb-9882-5f841f59c890",
+ "name": "Ubuntu_plugtest-1",
+ },
+ {
+ "isShared": "true",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org2",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-06-18T21:33:16.430-07:00",
+ "id": "b31e6973-86d2-404b-a522-b16846d099dc",
+ "name": "Ubuntu_Cat",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "orgadmin",
+ "numberOfMedia": "0",
+ "creationDate": "2018-02-15T22:26:28.910-08:00",
+ "id": "c3b56180-f980-4256-9109-a93168d73ff2",
+ "name": "de4ffcf2ad21f1a5d0714d6b868e2645",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "0",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-08-23T05:54:56.780-07:00",
+ "id": "d0eb0b02-718d-42e0-b889-56575000b52d",
+ "name": "Test_Cirros",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "0",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-03-08T21:25:05.923-08:00",
+ "id": "d3fa3df2-b311-4571-9138-4c66541d7f46",
+ "name": "cirros_10",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "0",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-07-12T22:45:20.537-07:00",
+ "id": "d64b2617-ea4b-4b90-910b-102c99dd2031",
+ "name": "Ubuntu16",
+ },
+ {
+ "isShared": "true",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "true",
+ "ownerName": "system",
+ "numberOfMedia": "1",
+ "creationDate": "2017-10-14T23:52:37.260-07:00",
+ "id": "e8d953db-8dc9-46d5-9cab-329774cd2ad9",
+ "name": "Ubuntu_no_nic",
+ },
+ ]
+
+ result = self.vim.get_image_list({"id": "4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a"})
# assert verified expected and return result from VIM connector
for item in result:
- self.assertEqual(item['id'], '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a')
+ self.assertEqual(item["id"], "4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a")
- @mock.patch.object(vimconnector,'get_vapp_details_rest')
- @mock.patch.object(vimconnector,'get_vdc_details')
+ @mock.patch.object(vimconnector, "get_vapp_details_rest")
+ @mock.patch.object(vimconnector, "get_vdc_details")
def test_get_vminstance(self, get_vdc_details, get_vapp_details_rest):
"""
Testcase to get vminstance by valid vm id
"""
- vapp_info = {'status': '4',
- 'acquireMksTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket',
- 'type': 'application/vnd.vmware.vcloud.mksTicket+xml', 'rel': 'screen:acquireMksTicket'},
- 'vm_virtual_hardware': {'disk_edit_href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks', 'disk_size': '40960'},
- 'name': 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa',
- 'created': '2017-09-21T01:15:31.627-07:00',
- 'IsEnabled': 'true',
- 'EndAddress': '12.16.24.199',
- 'interfaces': [{'MACAddress': '00:50:56:01:12:a2',
- 'NetworkConnectionIndex': '0',
- 'network': 'testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d',
- 'IpAddressAllocationMode': 'DHCP',
- 'IsConnected': 'true',
- 'IpAddress': '12.16.24.200'}],
- 'ovfDescriptorUploaded': 'true',
- 'nestedHypervisorEnabled': 'false',
- 'Gateway': '12.16.24.1',
- 'acquireTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket',
- 'rel': 'screen:acquireTicket'},
- 'vmuuid': '47d12505-5968-4e16-95a7-18743edb0c8b',
- 'Netmask': '255.255.255.0',
- 'StartAddress': '12.16.24.100',
- 'primarynetwork': '0',
- 'networkname': 'External-Network-1074',
- 'IsInherited': 'false',
- 'deployed': 'true'}
+ vapp_info = {
+ "status": "4",
+ "acquireMksTicket": {
+ "href": "https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7"
+ "-18743edb0c8b/screen/action/acquireMksTicket",
+ "type": "application/vnd.vmware.vcloud.mksTicket+xml",
+ "rel": "screen:acquireMksTicket",
+ },
+ "vm_virtual_hardware": {
+ "disk_edit_href": "https://localhost/api/vApp/vm-47d12505-5968"
+ "-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks",
+ "disk_size": "40960",
+ },
+ "name": "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa",
+ "created": "2017-09-21T01:15:31.627-07:00",
+ "IsEnabled": "true",
+ "EndAddress": "12.16.24.199",
+ "interfaces": [
+ {
+ "MACAddress": "00:50:56:01:12:a2",
+ "NetworkConnectionIndex": "0",
+ "network": "testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d",
+ "IpAddressAllocationMode": "DHCP",
+ "IsConnected": "true",
+ "IpAddress": "12.16.24.200",
+ }
+ ],
+ "ovfDescriptorUploaded": "true",
+ "nestedHypervisorEnabled": "false",
+ "Gateway": "12.16.24.1",
+ "acquireTicket": {
+ "href": "https://localhost/api/vApp/vm-47d12505-5968-4e16"
+ "-95a7-18743edb0c8b/screen/action/acquireTicket",
+ "rel": "screen:acquireTicket",
+ },
+ "vmuuid": "47d12505-5968-4e16-95a7-18743edb0c8b",
+ "Netmask": "255.255.255.0",
+ "StartAddress": "12.16.24.100",
+ "primarynetwork": "0",
+ "networkname": "External-Network-1074",
+ "IsInherited": "false",
+ "deployed": "true",
+ }
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
get_vdc_details.return_value = self.org, vdc
get_vapp_details_rest.return_value = vapp_info
- result = self.vim.get_vminstance('47d12505-5968-4e16-95a7-18743edb0c8b')
+ result = self.vim.get_vminstance("47d12505-5968-4e16-95a7-18743edb0c8b")
# assert verified expected and return result from VIM connector
- self.assertEqual(result['status'], 'ACTIVE')
- self.assertEqual(result['hostId'], '47d12505-5968-4e16-95a7-18743edb0c8b')
-
-
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'get_namebyvappid')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(VDC,'get_vapp')
- @mock.patch.object(VApp,'power_off')
- @mock.patch.object(VApp,'undeploy')
- @mock.patch.object(VDC,'delete_vapp')
- @mock.patch.object(Client,'get_task_monitor')
- def x_test_delete_vminstance(self, get_task_monitor, delete_vapp,
- undeploy, poweroff,
- get_vapp, get_vdc_details,
- get_namebyvappid, connect):
+ self.assertEqual(result["status"], "ACTIVE")
+ self.assertEqual(result["hostId"], "47d12505-5968-4e16-95a7-18743edb0c8b")
+
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_namebyvappid")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(VDC, "get_vapp")
+ @mock.patch.object(VApp, "power_off")
+ @mock.patch.object(VApp, "undeploy")
+ @mock.patch.object(VDC, "delete_vapp")
+ @mock.patch.object(Client, "get_task_monitor")
+ def x_test_delete_vminstance(
+ self,
+ get_task_monitor,
+ delete_vapp,
+ undeploy,
+ poweroff,
+ get_vapp,
+ get_vdc_details,
+ get_namebyvappid,
+ connect,
+ ):
"""
Testcase to delete vminstance by valid vm id
"""
- vm_id = '4f6a9b49-e92d-4935-87a1-0e4dc9c3a069'
- vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+ vm_id = "4f6a9b49-e92d-4935-87a1-0e4dc9c3a069"
+ vm_name = "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
status_resp = xml_resp.status_task_xml
status = lxmlElementTree.fromstring(status_resp)
- self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+ self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = (
+ status
+ )
# call to VIM connector method
result = self.vim.delete_vminstance(vm_id)
# assert verified expected and return result from VIM connector
self.assertEqual(result, vm_id)
- @mock.patch.object(vimconnector,'get_network_id_by_name')
- @mock.patch.object(vimconnector,'get_vm_pci_details')
- @mock.patch.object(VDC,'get_vapp')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'get_namebyvappid')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(VApp,'get_all_vms')
- def test_refresh_vms_status(self, get_all_vms, perform_request, get_vdc_details,
- get_namebyvappid, connect,
- get_vapp, get_vm_pci_details,
- get_network_id_by_name):
+ @mock.patch.object(vimconnector, "get_network_id_by_name")
+ @mock.patch.object(vimconnector, "get_vm_pci_details")
+ @mock.patch.object(VDC, "get_vapp")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_namebyvappid")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(VApp, "get_all_vms")
+ def test_refresh_vms_status(
+ self,
+ get_all_vms,
+ perform_request,
+ get_vdc_details,
+ get_namebyvappid,
+ connect,
+ get_vapp,
+ get_vm_pci_details,
+ get_network_id_by_name,
+ ):
"""
Testcase to refresh vms status by valid vm id
"""
- vm_id = '53a529b2-10d8-4d56-a7ad-8182acdbe71c'
+ vm_id = "53a529b2-10d8-4d56-a7ad-8182acdbe71c"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
self.vim.client = self.vim.connect()
get_vdc_details.return_value = self.org, vdc
- get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
- get_vm_pci_details.return_value = {'host_name': 'test-esx-1.corp.local', 'host_ip': '12.19.24.31'}
+ get_namebyvappid.return_value = "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa"
+ get_vm_pci_details.return_value = {
+ "host_name": "test-esx-1.corp.local",
+ "host_ip": "12.19.24.31",
+ }
vapp_resp = xml_resp.vapp_xml_response
vapp = lxmlElementTree.fromstring(vapp_resp)
get_vapp.return_value = vapp
- get_network_id_by_name.return_value = '47d12505-5968-4e16-95a7-18743edb0c8b'
+ get_network_id_by_name.return_value = "47d12505-5968-4e16-95a7-18743edb0c8b"
vm_resp = xml_resp.vm_xml_response
vm_list = lxmlElementTree.fromstring(vm_resp)
# call to VIM connector method
result = self.vim.refresh_vms_status([vm_id])
for attr in result[vm_id]:
- if attr == 'status':
+ if attr == "status":
# assert verified expected and return result from VIM connector
- self.assertEqual(result[vm_id][attr], 'ACTIVE')
+ self.assertEqual(result[vm_id][attr], "ACTIVE")
- @mock.patch.object(vimconnector,'get_vcd_network')
+ @mock.patch.object(vimconnector, "get_vcd_network")
def test_refresh_nets_status(self, get_vcd_network):
- net_id = 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'
- network_dict = {'status': '1','isShared': 'false','IpScope': '',
- 'EndAddress':'12.19.21.15',
- 'name': 'testing_gwyRXlvWYL1-9ebb6d7b-5c74-472f-be77-963ed050d44d',
- 'Dns1': '12.19.21.10', 'IpRanges': '',
- 'Gateway': '12.19.21.23', 'Netmask': '255.255.255.0',
- 'RetainNetInfoAcrossDeployments': 'false',
- 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local',
- 'StartAddress': '12.19.21.11', 'IpRange': '',
- 'Configuration': '', 'FenceMode': 'bridged',
- 'IsInherited': 'true', 'uuid': 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'}
+ net_id = "c2d0f28f-d38b-4588-aecc-88af3d4af58b"
+ network_dict = {
+ "status": "1",
+ "isShared": "false",
+ "IpScope": "",
+ "EndAddress": "12.19.21.15",
+ "name": "testing_gwyRXlvWYL1-9ebb6d7b-5c74-472f-be77-963ed050d44d",
+ "Dns1": "12.19.21.10",
+ "IpRanges": "",
+ "Gateway": "12.19.21.23",
+ "Netmask": "255.255.255.0",
+ "RetainNetInfoAcrossDeployments": "false",
+ "IpScopes": "",
+ "IsEnabled": "true",
+ "DnsSuffix": "corp.local",
+ "StartAddress": "12.19.21.11",
+ "IpRange": "",
+ "Configuration": "",
+ "FenceMode": "bridged",
+ "IsInherited": "true",
+ "uuid": "c2d0f28f-d38b-4588-aecc-88af3d4af58b",
+ }
# assumed return value from VIM connector
get_vcd_network.return_value = network_dict
result = self.vim.refresh_nets_status([net_id])
# assert verified expected and return result from VIM connector
for attr in result[net_id]:
- if attr == 'status':
- self.assertEqual(result[net_id][attr], 'ACTIVE')
+ if attr == "status":
+ self.assertEqual(result[net_id][attr], "ACTIVE")
- @mock.patch.object(VDC,'get_vapp')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'get_namebyvappid')
- @mock.patch.object(vimconnector,'get_vdc_details')
- def test_action_vminstance(self, get_vdc_details, get_namebyvappid,
- connect,
- get_vapp):
+ @mock.patch.object(VDC, "get_vapp")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_namebyvappid")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ def test_action_vminstance(
+ self, get_vdc_details, get_namebyvappid, connect, get_vapp
+ ):
"""
Testcase for action vm instance by vm id
"""
- task_resp = xml_resp.poweroff_task_xml
- vm_id = '05e6047b-6938-4275-8940-22d1ea7245b8'
+ # task_resp = xml_resp.poweroff_task_xml
+ vm_id = "05e6047b-6938-4275-8940-22d1ea7245b8"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
- get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+ get_namebyvappid.return_value = "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa"
self.vim.client = self.vim.connect()
power_off_resp = xml_resp.poweroff_task_xml
power_off = lxmlElementTree.fromstring(power_off_resp)
status_resp = xml_resp.status_task_xml
status = lxmlElementTree.fromstring(status_resp)
- self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+ self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = (
+ status
+ )
# call to VIM connector method
- result = self.vim.action_vminstance(vm_id,{'shutdown': None})
+ result = self.vim.action_vminstance(vm_id, {"shutdown": None})
# assert verified expected and return result from VIM connector
self.assertEqual(result, vm_id)
- @mock.patch.object(vimconnector,'get_org')
+ @mock.patch.object(vimconnector, "get_org")
def test_get_tenant_list(self, get_org):
"""
Test case for get tenant list
"""
- org_dict = {'catalogs': {'4c4fdb5d-0c7d-4fee-9efd-cb061f327a01': '80d8488f67ba1de98b7f485fba6abbd2', '1b98ca02-b0a6-4ca7-babe-eadc0ae59677': 'Ubuntu', 'e7f27dfe-14b7-49e1-918e-173bda02683a': '834bdd1f28fd15dcbe830456ec58fbca', '9441ee69-0486-4438-ac62-8d8082c51302': 'centos', 'e660cce0-47a6-4315-a5b9-97a39299a374': 'cirros01', '0fd96c61-c3d1-4abf-9a34-0dff8fb65743': 'cirros034', '1c703be3-9bd2-46a2-854c-3e678d5cdda8': 'Ubuntu_plugtest-1', 'bc4e342b-f84c-41bd-a93a-480f35bacf69': 'Cirros', '8a206fb5-3ef9-4571-9bcc-137615f4d930': '255eb079a62ac155e7f942489f14b0c4'}, 'vdcs': {'e6436c6a-d922-4b39-9c1c-b48e766fce5e': 'osm', '3852f762-18ae-4833-a229-42684b6e7373': 'cloud-1-vdc'}, 'networks': {'e203cacd-9320-4422-9be0-12c7def3ab56': 'testing_lNejr37B-38e4ca67-1e26-486f-ad2f-f14bb099e068', 'a6623349-2bef-4367-9fda-d33f9ab927f8': 'Vlan_3151', 'adf780cb-358c-47c2-858d-ae5778ccaf17': 'testing_xwBultc-99b8a2ae-c091-4dd3-bbf7-762a51612385', '721f9efc-11fe-4c13-936d-252ba0ed93c8': 'testing_tLljy8WB5e-a898cb28-e75b-4867-a22e-f2bad285c144', '1512d97a-929d-4b06-b8af-cf5ac42a2aee': 'Managment', 'd9167301-28af-4b89-b9e0-09f612e962fa': 'testing_prMW1VThk-063cb428-eaee-44b8-9d0d-df5fb77a5b4d', '004ae853-f899-43fd-8981-7513a3b40d6b': 'testing_RTtKVi09rld-fab00b16-7996-49af-8249-369c6bbfa02d'}}
- tenant_name = 'osm'
+ org_dict = {
+ "catalogs": {
+ "4c4fdb5d-0c7d-4fee-9efd-cb061f327a01": "80d8488f67ba1de98b7f485fba6abbd2",
+ "1b98ca02-b0a6-4ca7-babe-eadc0ae59677": "Ubuntu",
+ "e7f27dfe-14b7-49e1-918e-173bda02683a": "834bdd1f28fd15dcbe830456ec58fbca",
+ "9441ee69-0486-4438-ac62-8d8082c51302": "centos",
+ "e660cce0-47a6-4315-a5b9-97a39299a374": "cirros01",
+ "0fd96c61-c3d1-4abf-9a34-0dff8fb65743": "cirros034",
+ "1c703be3-9bd2-46a2-854c-3e678d5cdda8": "Ubuntu_plugtest-1",
+ "bc4e342b-f84c-41bd-a93a-480f35bacf69": "Cirros",
+ "8a206fb5-3ef9-4571-9bcc-137615f4d930": "255eb079a62ac155e7f942489f14b0c4",
+ },
+ "vdcs": {
+ "e6436c6a-d922-4b39-9c1c-b48e766fce5e": "osm",
+ "3852f762-18ae-4833-a229-42684b6e7373": "cloud-1-vdc",
+ },
+ "networks": {
+ "e203cacd-9320-4422-9be0-12c7def3ab56": "testing_lNejr37B-38e4ca67-1e26-486f-ad2f-f14bb099e068",
+ "a6623349-2bef-4367-9fda-d33f9ab927f8": "Vlan_3151",
+ "adf780cb-358c-47c2-858d-ae5778ccaf17": "testing_xwBultc-99b8a2ae-c091-4dd3-bbf7-762a51612385",
+ "721f9efc-11fe-4c13-936d-252ba0ed93c8": "testing_tLljy8WB5e-a898cb28-e75b-4867-a22e-f2bad285c144",
+ "1512d97a-929d-4b06-b8af-cf5ac42a2aee": "Managment",
+ "d9167301-28af-4b89-b9e0-09f612e962fa": "testing_prMW1VThk-063cb428-eaee-44b8-9d0d-df5fb77a5b4d",
+ "004ae853-f899-43fd-8981-7513a3b40d6b": "testing_RTtKVi09rld-fab00b16-7996-49af-8249-369c6bbfa02d",
+ },
+ }
+ tenant_name = "osm"
get_org.return_value = org_dict
# call to VIM connector method
- results = self.vim.get_tenant_list({'name' : tenant_name})
+ results = self.vim.get_tenant_list({"name": tenant_name})
# assert verified expected and return result from VIM connector
for result in results:
- self.assertEqual(tenant_name,result['name'])
+ self.assertEqual(tenant_name, result["name"])
- @mock.patch.object(vimconnector,'get_org')
+ @mock.patch.object(vimconnector, "get_org")
def test_get_tenant_list_negative(self, get_org):
"""
Test case for get tenant list negative
"""
- org_dict = {'vdcs': {}}
- tenant_name = 'testosm'
+ org_dict = {"vdcs": {}}
+ tenant_name = "testosm"
get_org.return_value = org_dict
# call to VIM connector method
- results = self.vim.get_tenant_list({'name' : tenant_name})
+ results = self.vim.get_tenant_list({"name": tenant_name})
# assert verified expected and return result from VIM connector
self.assertEqual(results, [])
- @mock.patch.object(vimconnector,'create_vdc')
+ @mock.patch.object(vimconnector, "create_vdc")
def test_new_tenant(self, create_vdc):
"""
Test case for create new tenant
"""
- tenant_name = 'test'
- vdc = {'a493aa2c-3104-4d63-969b-fc9e72304c9f': 'https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4'}
+ tenant_name = "test"
+ vdc = {
+ "a493aa2c-3104-4d63-969b-fc9e72304c9f": "https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4"
+ }
create_vdc.return_value = vdc
# call to VIM connector method
result = self.vim.new_tenant(tenant_name)
# assert verified expected and return result from VIM connector
- self.assertEqual('a493aa2c-3104-4d63-969b-fc9e72304c9f', result)
+ self.assertEqual("a493aa2c-3104-4d63-969b-fc9e72304c9f", result)
- @mock.patch.object(vimconnector,'create_vdc')
+ @mock.patch.object(vimconnector, "create_vdc")
def test_new_tenant_negative(self, create_vdc):
"""
Test case for create new tenant
"""
- tenant_name = 'test'
+ tenant_name = "test"
create_vdc.return_value = None
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnException,self.vim.new_tenant,tenant_name)
+ self.assertRaises(VimConnException, self.vim.new_tenant, tenant_name)
- @mock.patch.object(vimconnector,'connect_as_admin')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'perform_request')
+ @mock.patch.object(vimconnector, "connect_as_admin")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "perform_request")
def test_delete_tenant(self, perform_request, connect, connect_as_admin):
"""
Test case to delete tenant
"""
- tenant_id = '753227f5-d6c6-4478-9546-acc5cfff21e9'
+ tenant_id = "753227f5-d6c6-4478-9546-acc5cfff21e9"
delete_tenant_resp = xml_resp.delete_tenant
self.vim.client = self.vim.connect()
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = delete_tenant_resp),
- mock.Mock(status_code = 202,
- content = None)
- ]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=delete_tenant_resp),
+ mock.Mock(status_code=202, content=None),
+ ]
# call to VIM connector method
result = self.vim.delete_tenant(tenant_id)
# assert verified expected and return result from VIM connector
self.assertEqual(tenant_id, result)
- @mock.patch.object(vimconnector,'connect_as_admin')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'perform_request')
+ @mock.patch.object(vimconnector, "connect_as_admin")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "perform_request")
def test_delete_tenant_negative(self, perform_request, connect, connect_as_admin):
"""
Test case to delete tenant
"""
- tenant_id = 'ten45klsjdf'
+ tenant_id = "ten45klsjdf"
self.vim.client = self.vim.connect()
perform_request.return_value.status_code = 201
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnNotFoundException,self.vim.delete_tenant,tenant_id)
-
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vcd_network')
- @mock.patch.object(Org,'get_vdc')
- @mock.patch.object(Org,'get_catalog_item')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(Client,'get_task_monitor')
- @mock.patch.object(VDC,'get_vapp')
- @mock.patch.object(vimconnector,'get_network_list')
- @mock.patch.object(vimconnector,'power_on_vapp')
- def test_new_vminstance(self, power_on, get_network_list, get_vapp,
- get_task_monitor, perform_request, connect,
- get_catalog_item, get_vdc, get_vcd_network,
- list_catalogs, get_vdc_details):
+ self.assertRaises(VimConnNotFoundException, self.vim.delete_tenant, tenant_id)
+
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vcd_network")
+ @mock.patch.object(Org, "get_vdc")
+ @mock.patch.object(Org, "get_catalog_item")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(Client, "get_task_monitor")
+ @mock.patch.object(VDC, "get_vapp")
+ @mock.patch.object(vimconnector, "get_network_list")
+ @mock.patch.object(vimconnector, "power_on_vapp")
+ def test_new_vminstance(
+ self,
+ power_on,
+ get_network_list,
+ get_vapp,
+ get_task_monitor,
+ perform_request,
+ connect,
+ get_catalog_item,
+ get_vdc,
+ get_vcd_network,
+ list_catalogs,
+ get_vdc_details,
+ ):
"""
Test case for new vm instance
"""
- image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
- vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
- 'disk': 10,
- 'ram': 1024}}
-
- flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
- net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
-
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
-
- network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
-
- network_list = [{'status': 'ACTIVE', 'name': 'default', 'admin_state_up': True, 'shared': False, 'tenant_id': '2584137f-6541-4c04-a2a2-e56bfca14c69', 'type': 'bridge', 'id': '1fd6421e-929a-4576-bc19-a0c48aea1969'}]
+ image_id = "34925a30-0f4a-4018-9759-0d6799063b51"
+ vimconnector.flavorlist = {
+ "123347db-536b-4936-8b62-1fcdc721865d": {
+ "vcpus": 1,
+ "disk": 10,
+ "ram": 1024,
+ }
+ }
+
+ flavor_id = "123347db-536b-4936-8b62-1fcdc721865d"
+ net_list = [
+ {
+ "use": "bridge",
+ "name": "eth0",
+ "floating_ip": False,
+ "vpci": "0000:00:11.0",
+ "port_security": True,
+ "type": "virtual",
+ "net_id": "69c713cb-3eec-452c-9a32-0e95c8ffe567",
+ }
+ ]
+
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "34925a30-0f4a-4018-9759-0d6799063b51",
+ "name": "Ubuntu_1nic",
+ }
+ ]
+
+ network_dict = {
+ "status": "1",
+ "isShared": "false",
+ "IpScope": "",
+ "EndAddress": "192.169.241.150",
+ "name": "testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec",
+ "Dns1": "192.169.241.102",
+ "IpRanges": "",
+ "Gateway": "192.169.241.253",
+ "Netmask": "255.255.255.0",
+ "RetainNetInfoAcrossDeployments": "false",
+ "IpScopes": "",
+ "IsEnabled": "true",
+ "DnsSuffix": "corp.local",
+ "StartAddress": "192.169.241.115",
+ "IpRange": "",
+ "Configuration": "",
+ "FenceMode": "bridged",
+ "IsInherited": "true",
+ "uuid": "69c713cb-3eec-452c-9a32-0e95c8ffe567",
+ }
+
+ network_list = [
+ {
+ "status": "ACTIVE",
+ "name": "default",
+ "admin_state_up": True,
+ "shared": False,
+ "tenant_id": "2584137f-6541-4c04-a2a2-e56bfca14c69",
+ "type": "bridge",
+ "id": "1fd6421e-929a-4576-bc19-a0c48aea1969",
+ }
+ ]
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
get_vdc.return_value = vdc
get_catalog_item.return_value = catalog_list
self.vim.client = self.vim.connect()
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.catalogItem_xml),
- mock.Mock(status_code = 200,
- content = xml_resp.vapp_template_xml),
- mock.Mock(status_code = 201,
- content = xml_resp.deployed_vapp_xml)]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.catalogItem_xml),
+ mock.Mock(status_code=200, content=xml_resp.vapp_template_xml),
+ mock.Mock(status_code=201, content=xml_resp.deployed_vapp_xml),
+ ]
status_resp = xml_resp.status_task_xml
status = lxmlElementTree.fromstring(status_resp)
- self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+ self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = (
+ status
+ )
vapp_resp = xml_resp.vapp_xml_response
vapp = lxmlElementTree.fromstring(vapp_resp)
get_vapp.return_value = vapp
power_on.return_value = poweron
# call to VIM connector method
- result = self.vim.new_vminstance(name='Test1_vm', image_id=image_id,
- flavor_id=flavor_id,
- net_list=net_list)
+ result = self.vim.new_vminstance(
+ name="Test1_vm", image_id=image_id, flavor_id=flavor_id, net_list=net_list
+ )
# assert verified expected and return result from VIM connector
self.assertIsNotNone(result)
-
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vcd_network')
- @mock.patch.object(Org,'get_vdc')
- @mock.patch.object(Org,'get_catalog_item')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'perform_request')
- @mock.patch.object(Client,'get_task_monitor')
- @mock.patch.object(VDC,'get_vapp')
- @mock.patch.object(vimconnector,'get_network_list')
- @mock.patch.object(vimconnector,'power_on_vapp')
- def test_new_vminstance_negative(self, power_on, get_network_list, get_vapp,
- get_task_monitor, perform_request, connect,
- get_catalog_item, get_vdc, get_vcd_network,
- list_catalogs, get_vdc_details):
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vcd_network")
+ @mock.patch.object(Org, "get_vdc")
+ @mock.patch.object(Org, "get_catalog_item")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "perform_request")
+ @mock.patch.object(Client, "get_task_monitor")
+ @mock.patch.object(VDC, "get_vapp")
+ @mock.patch.object(vimconnector, "get_network_list")
+ @mock.patch.object(vimconnector, "power_on_vapp")
+ def test_new_vminstance_negative(
+ self,
+ power_on,
+ get_network_list,
+ get_vapp,
+ get_task_monitor,
+ perform_request,
+ connect,
+ get_catalog_item,
+ get_vdc,
+ get_vcd_network,
+ list_catalogs,
+ get_vdc_details,
+ ):
"""
Test case for new vm instance
"""
- image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
- vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
- 'disk': 10,
- 'ram': 1024}}
- flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
- net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
-
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
-
- network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
+ image_id = "34925a30-0f4a-4018-9759-0d6799063b51"
+ vimconnector.flavorlist = {
+ "123347db-536b-4936-8b62-1fcdc721865d": {
+ "vcpus": 1,
+ "disk": 10,
+ "ram": 1024,
+ }
+ }
+ flavor_id = "123347db-536b-4936-8b62-1fcdc721865d"
+ net_list = [
+ {
+ "use": "bridge",
+ "name": "eth0",
+ "floating_ip": False,
+ "vpci": "0000:00:11.0",
+ "port_security": True,
+ "type": "virtual",
+ "net_id": "69c713cb-3eec-452c-9a32-0e95c8ffe567",
+ }
+ ]
+
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "34925a30-0f4a-4018-9759-0d6799063b51",
+ "name": "Ubuntu_1nic",
+ }
+ ]
+
+ network_dict = {
+ "status": "1",
+ "isShared": "false",
+ "IpScope": "",
+ "EndAddress": "192.169.241.150",
+ "name": "testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec",
+ "Dns1": "192.169.241.102",
+ "IpRanges": "",
+ "Gateway": "192.169.241.253",
+ "Netmask": "255.255.255.0",
+ "RetainNetInfoAcrossDeployments": "false",
+ "IpScopes": "",
+ "IsEnabled": "true",
+ "DnsSuffix": "corp.local",
+ "StartAddress": "192.169.241.115",
+ "IpRange": "",
+ "Configuration": "",
+ "FenceMode": "bridged",
+ "IsInherited": "true",
+ "uuid": "69c713cb-3eec-452c-9a32-0e95c8ffe567",
+ }
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
get_vdc.return_value = vdc
get_catalog_item.return_value = catalog_list
self.vim.client = self.vim.connect()
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.catalogItem_xml),
- mock.Mock(status_code = 200,
- content = xml_resp.vapp_template_xml),
- mock.Mock(status_code = 400,
- content = "Bad request error")]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.catalogItem_xml),
+ mock.Mock(status_code=200, content=xml_resp.vapp_template_xml),
+ mock.Mock(status_code=400, content="Bad request error"),
+ ]
# call to VIM connector method
- self.assertRaises(VimConnUnexpectedResponse,self.vim.new_vminstance,
- name='Test1_vm',
- image_id=image_id,
- flavor_id=flavor_id,
- net_list=net_list)
-
- @mock.patch.object(vimconnector,'get_catalogid')
- @mock.patch.object(vimconnector,'upload_vimimage')
- @mock.patch.object(Org,'create_catalog')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(path,'isfile')
- @mock.patch.object(os,'access')
- def test_new_image(self, access, isfile,
- get_vdc_details,
- list_catalogs,
- create_catalog,
- upload_vimimage,
- get_catalogid):
+ self.assertRaises(
+ VimConnUnexpectedResponse,
+ self.vim.new_vminstance,
+ name="Test1_vm",
+ image_id=image_id,
+ flavor_id=flavor_id,
+ net_list=net_list,
+ )
+
+ @mock.patch.object(vimconnector, "get_catalogid")
+ @mock.patch.object(vimconnector, "upload_vimimage")
+ @mock.patch.object(Org, "create_catalog")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(path, "isfile")
+ @mock.patch.object(os, "access")
+ def test_new_image(
+ self,
+ access,
+ isfile,
+ get_vdc_details,
+ list_catalogs,
+ create_catalog,
+ upload_vimimage,
+ get_catalogid,
+ ):
"""
Test case for create new image
"""
- path = '/tmp/cirros/cirros.ovf'
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '9759-0d6799063b51', 'name': 'cirros'}]
+ path = "/tmp/cirros/cirros.ovf"
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "9759-0d6799063b51",
+ "name": "cirros",
+ }
+ ]
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
list_catalogs.return_value = cat_list
create_catalog.return_value = catalog
upload_vimimage.return_value = True
- get_catalogid.return_value = '9759-0d6799063b51'
- result = self.vim.new_image({'name': 'TestImage', 'location' : path})
+ get_catalogid.return_value = "9759-0d6799063b51"
+ result = self.vim.new_image({"name": "TestImage", "location": path})
# assert verified expected and return result from VIM connector
self.assertIsNotNone(result)
- @mock.patch.object(vimconnector,'get_catalogid')
- @mock.patch.object(vimconnector,'upload_vimimage')
- @mock.patch.object(Org,'create_catalog')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vdc_details')
- def test_new_image_negative(self, get_vdc_details, list_catalogs,
- create_catalog,
- upload_vimimage,
- get_catalogid):
+ @mock.patch.object(vimconnector, "get_catalogid")
+ @mock.patch.object(vimconnector, "upload_vimimage")
+ @mock.patch.object(Org, "create_catalog")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ def test_new_image_negative(
+ self,
+ get_vdc_details,
+ list_catalogs,
+ create_catalog,
+ upload_vimimage,
+ get_catalogid,
+ ):
"""
Test case for create new image with negative scenario
"""
- path = '/tmp/cirros/cirros.ovf'
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org1', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'test'}]
+ path = "/tmp/cirros/cirros.ovf"
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org1",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15",
+ "id": "34925a30-0f4a-4018-9759-0d6799063b51",
+ "name": "test",
+ }
+ ]
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
list_catalogs.return_value = cat_list
create_catalog.return_value = catalog
upload_vimimage.return_value = False
- get_catalogid.return_value = '34925a30-0f4a-4018-9759-0d6799063b51'
+ get_catalogid.return_value = "34925a30-0f4a-4018-9759-0d6799063b51"
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnException,self.vim.new_image,{'name':'TestImage', 'location':path})
-
- @mock.patch.object(vimconnector,'connect_as_admin')
- @mock.patch.object(vimconnector,'perform_request')
+ self.assertRaises(
+ VimConnException,
+ self.vim.new_image,
+ {"name": "TestImage", "location": path},
+ )
+
+ @mock.patch.object(vimconnector, "connect_as_admin")
+ @mock.patch.object(vimconnector, "perform_request")
def test_delete_image(self, perform_request, connect_as_admin):
"""
Testcase to delete image by image id
"""
- image_id = 'f3bf3733-465b-419f-b675-52f91d18edbb'
+ image_id = "f3bf3733-465b-419f-b675-52f91d18edbb"
# creating conn object
self.vim.client = self.vim.connect_as_admin()
# assumed return value from VIM connector
- perform_request.side_effect = [mock.Mock(status_code = 200,
- content = xml_resp.delete_catalog_xml_response),
- mock.Mock(status_code = 200,
- content = xml_resp.delete_catalog_item_xml_response),
- mock.Mock(status_code = 204,
- content = ''),
- mock.Mock(status_code = 204,
- content = '')
- ]
+ perform_request.side_effect = [
+ mock.Mock(status_code=200, content=xml_resp.delete_catalog_xml_response),
+ mock.Mock(
+ status_code=200, content=xml_resp.delete_catalog_item_xml_response
+ ),
+ mock.Mock(status_code=204, content=""),
+ mock.Mock(status_code=204, content=""),
+ ]
# call to vim connctor method
result = self.vim.delete_image(image_id)
# assert verified expected and return result from VIM connector
self.assertEqual(image_id, result)
- @mock.patch.object(vimconnector,'get_catalogid')
- @mock.patch.object(vimconnector,'upload_vimimage')
- @mock.patch.object(Org,'create_catalog')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(path,'isfile')
- @mock.patch.object(os,'access')
- def test_get_image_id_from_path(self, access, isfile,
- get_vdc_details,
- list_catalogs,
- create_catalog,
- upload_vimimage,
- get_catalogid):
+ @mock.patch.object(vimconnector, "get_catalogid")
+ @mock.patch.object(vimconnector, "upload_vimimage")
+ @mock.patch.object(Org, "create_catalog")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(path, "isfile")
+ @mock.patch.object(os, "access")
+ def test_get_image_id_from_path(
+ self,
+ access,
+ isfile,
+ get_vdc_details,
+ list_catalogs,
+ create_catalog,
+ upload_vimimage,
+ get_catalogid,
+ ):
"""
Test case to get image id from image path
"""
- path = '/tmp/ubuntu/ubuntu.ovf'
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+ path = "/tmp/ubuntu/ubuntu.ovf"
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "7208-0f6777052c30",
+ "name": "ubuntu",
+ }
+ ]
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
list_catalogs.return_value = cat_list
create_catalog.return_value = catalog
upload_vimimage.return_value = True
- get_catalogid.return_value = '7208-0f6777052c30'
+ get_catalogid.return_value = "7208-0f6777052c30"
result = self.vim.get_image_id_from_path(path=path)
# assert verified expected and return result from VIM connector
self.assertIsNotNone(result)
- @mock.patch.object(vimconnector,'get_catalogid')
- @mock.patch.object(vimconnector,'upload_vimimage')
- @mock.patch.object(Org,'create_catalog')
- @mock.patch.object(Org,'list_catalogs')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(path,'isfile')
- @mock.patch.object(os,'access')
- def test_get_image_id_from_path_negative(self, access, isfile,
- get_vdc_details,
- list_catalogs,
- create_catalog,
- upload_vimimage,
- get_catalogid):
+ @mock.patch.object(vimconnector, "get_catalogid")
+ @mock.patch.object(vimconnector, "upload_vimimage")
+ @mock.patch.object(Org, "create_catalog")
+ @mock.patch.object(Org, "list_catalogs")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(path, "isfile")
+ @mock.patch.object(os, "access")
+ def test_get_image_id_from_path_negative(
+ self,
+ access,
+ isfile,
+ get_vdc_details,
+ list_catalogs,
+ create_catalog,
+ upload_vimimage,
+ get_catalogid,
+ ):
"""
Test case to get image id from image path with negative scenario
"""
- path = '/tmp/ubuntu/ubuntu.ovf'
- cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+ path = "/tmp/ubuntu/ubuntu.ovf"
+ cat_list = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "7208-0f6777052c30",
+ "name": "ubuntu",
+ }
+ ]
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
list_catalogs.return_value = cat_list
create_catalog.return_value = catalog
upload_vimimage.return_value = False
- get_catalogid.return_value = '7208-0f6777052c30'
+ get_catalogid.return_value = "7208-0f6777052c30"
self.assertRaises(VimConnException, self.vim.get_image_id_from_path, path)
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(Org,'list_catalogs')
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(Org, "list_catalogs")
def test_get_image_list_negative(self, list_catalogs, connect, get_vdc_details):
"""
Testcase to get image list by invalid image id
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
- list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}]
+ list_catalogs.return_value = [
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "system",
+ "numberOfMedia": "0",
+ "creationDate": "2017-10-15T02:03:59.403-07:00",
+ "id": "34925a30-0f4a-4018-9759-0d6799063b51",
+ "name": "Ubuntu_1nic",
+ },
+ {
+ "isShared": "false",
+ "numberOfVAppTemplates": "1",
+ "orgName": "Org3",
+ "isPublished": "false",
+ "ownerName": "orgadmin",
+ "numberOfMedia": "1",
+ "creationDate": "2018-02-15T02:16:58.300-08:00",
+ "id": "4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a",
+ "name": "cirros034",
+ },
+ ]
# call to vim connector method with invalid image id
- self.vim.get_image_list({'id': 'b46c-3f35ba45ca4a'})
+ self.vim.get_image_list({"id": "b46c-3f35ba45ca4a"})
- @mock.patch.object(vimconnector,'get_vapp_details_rest')
- @mock.patch.object(vimconnector,'get_vdc_details')
+ @mock.patch.object(vimconnector, "get_vapp_details_rest")
+ @mock.patch.object(vimconnector, "get_vdc_details")
def test_get_vminstance_negative(self, get_vdc_details, get_vapp_details_rest):
"""
Testcase to get vminstance by invalid vm id
"""
- invalid_vmid = '18743edb0c8b-sdfsf-fg'
+ invalid_vmid = "18743edb0c8b-sdfsf-fg"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
get_vapp_details_rest.return_value = False
# assert verified expected and return result from VIM connector
- self.assertRaises(VimConnNotFoundException, self.vim.get_vminstance,invalid_vmid)
-
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'get_namebyvappid')
- @mock.patch.object(vimconnector,'get_vdc_details')
- @mock.patch.object(VDC,'get_vapp')
- def test_delete_vminstance_negative(self, get_vapp, get_vdc_details,
- get_namebyvappid, connect):
+ self.assertRaises(
+ VimConnNotFoundException, self.vim.get_vminstance, invalid_vmid
+ )
+
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_namebyvappid")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ @mock.patch.object(VDC, "get_vapp")
+ def test_delete_vminstance_negative(
+ self, get_vapp, get_vdc_details, get_namebyvappid, connect
+ ):
"""
Testcase to delete vminstance by invalid vm id
"""
- vm_id = 'sdfrtt4935-87a1-0e4dc9c3a069'
- vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+ vm_id = "sdfrtt4935-87a1-0e4dc9c3a069"
+ vm_name = "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
get_vapp.return_value = None
# call to VIM connector method
- self.assertRaises(VimConnException, self.vim.delete_vminstance,vm_id)
+ self.assertRaises(VimConnException, self.vim.delete_vminstance, vm_id)
- @mock.patch.object(vimconnector,'get_vcd_network')
+ @mock.patch.object(vimconnector, "get_vcd_network")
def test_refresh_nets_status_negative(self, get_vcd_network):
"""
Testcase for refresh nets status by invalid vm id
"""
- net_id = 'sjkldf-456mfd-345'
+ net_id = "sjkldf-456mfd-345"
# assumed return value from VIM connector
get_vcd_network.return_value = None
# assert verified expected and return result from VIM connector
for attr in result[net_id]:
- if attr == 'status':
- self.assertEqual(result[net_id][attr], 'DELETED')
+ if attr == "status":
+ self.assertEqual(result[net_id][attr], "DELETED")
- @mock.patch.object(vimconnector,'connect')
- @mock.patch.object(vimconnector,'get_namebyvappid')
- @mock.patch.object(vimconnector,'get_vdc_details')
- def test_action_vminstance_negative(self, get_vdc_details,
- get_namebyvappid,
- connect):
+ @mock.patch.object(vimconnector, "connect")
+ @mock.patch.object(vimconnector, "get_namebyvappid")
+ @mock.patch.object(vimconnector, "get_vdc_details")
+ def test_action_vminstance_negative(
+ self, get_vdc_details, get_namebyvappid, connect
+ ):
"""
Testcase for action vm instance by invalid action
"""
- vm_id = '8413-4cb8-bad7-b5afaec6f9fa'
+ vm_id = "8413-4cb8-bad7-b5afaec6f9fa"
# created vdc object
vdc_xml_resp = xml_resp.vdc_xml_response
vdc = lxmlElementTree.fromstring(vdc_xml_resp)
# assumed return value from VIM connector
get_vdc_details.return_value = self.org, vdc
- get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+ get_namebyvappid.return_value = "Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa"
self.vim.client = self.vim.connect()
# call to VIM connector method
- self.assertRaises(VimConnException, self.vim.action_vminstance, vm_id,{'invalid': None})
+ self.assertRaises(
+ VimConnException, self.vim.action_vminstance, vm_id, {"invalid": None}
+ )
import yaml
# global variable for vcd connector type
-STANDALONE = 'standalone'
+STANDALONE = "standalone"
# key for flavor dicts
-FLAVOR_RAM_KEY = 'ram'
-FLAVOR_VCPUS_KEY = 'vcpus'
-FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'dhcp_count': 50,
- 'dhcp_enabled': True,
- 'ip_version': "IPv4"
- }
+FLAVOR_RAM_KEY = "ram"
+FLAVOR_VCPUS_KEY = "vcpus"
+FLAVOR_DISK_KEY = "disk"
+DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
# global variable for wait time
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800
-API_VERSION = '27.0'
+API_VERSION = "27.0"
# -1: "Could not be created",
# 0: "Unresolved",
# 15: "Upload quarantine period has expired"
# mapping vCD status to MANO
-vcdStatusCode2manoFormat = {4: 'ACTIVE',
- 7: 'PAUSED',
- 3: 'SUSPENDED',
- 8: 'INACTIVE',
- 12: 'BUILD',
- -1: 'ERROR',
- 14: 'DELETED'}
+vcdStatusCode2manoFormat = {
+ 4: "ACTIVE",
+ 7: "PAUSED",
+ 3: "SUSPENDED",
+ 8: "INACTIVE",
+ 12: "BUILD",
+ -1: "ERROR",
+ 14: "DELETED",
+}
#
-netStatus2manoFormat = {'ACTIVE': 'ACTIVE',
- 'PAUSED': 'PAUSED',
- 'INACTIVE': 'INACTIVE',
- 'BUILD': 'BUILD',
- 'ERROR': 'ERROR',
- 'DELETED': 'DELETED'
- }
+netStatus2manoFormat = {
+ "ACTIVE": "ACTIVE",
+ "PAUSED": "PAUSED",
+ "INACTIVE": "INACTIVE",
+ "BUILD": "BUILD",
+ "ERROR": "ERROR",
+ "DELETED": "DELETED",
+}
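Editorial aside (illustrative sketch, not part of this patch): the two tables above map raw vCD status codes and network states onto the MANO status vocabulary. A typical lookup, with an assumed "OTHER" fallback for codes that are not listed, would be:

    def map_vcd_status(code):
        # Translate a numeric vCD status code to the MANO status string;
        # the "OTHER" default is an assumption for illustration only.
        return vcdStatusCode2manoFormat.get(code, "OTHER")

    # map_vcd_status(4)  -> "ACTIVE"
    # map_vcd_status(14) -> "DELETED"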
class vimconnector(vimconn.VimConnector):
# dict used to store flavor in memory
flavorlist = {}
- def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
- url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
+ def __init__(
+ self,
+ uuid=None,
+ name=None,
+ tenant_id=None,
+ tenant_name=None,
+ url=None,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
"""
Constructor create vmware connector to vCloud director.
Nothing.
"""
- vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
- url_admin, user, passwd, log_level, config)
-
- self.logger = logging.getLogger('ro.vim.vmware')
+ vimconn.VimConnector.__init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ )
+
+ self.logger = logging.getLogger("ro.vim.vmware")
self.logger.setLevel(10)
self.persistent_info = persistent_info
if tenant_name is not None:
orgnameandtenant = tenant_name.split(":")
+
if len(orgnameandtenant) == 2:
self.tenant_name = orgnameandtenant[1]
self.org_name = orgnameandtenant[0]
else:
self.tenant_name = tenant_name
+
if "orgname" in config:
- self.org_name = config['orgname']
+ self.org_name = config["orgname"]
if log_level:
self.logger.setLevel(getattr(logging, log_level))
try:
- self.admin_user = config['admin_username']
- self.admin_password = config['admin_password']
+ self.admin_user = config["admin_username"]
+ self.admin_password = config["admin_password"]
except KeyError:
- raise vimconn.VimConnException(message="Error admin username or admin password is empty.")
+ raise vimconn.VimConnException(
+ message="Error admin username or admin password is empty."
+ )
try:
- self.nsx_manager = config['nsx_manager']
- self.nsx_user = config['nsx_user']
- self.nsx_password = config['nsx_password']
+ self.nsx_manager = config["nsx_manager"]
+ self.nsx_user = config["nsx_user"]
+ self.nsx_password = config["nsx_password"]
except KeyError:
- raise vimconn.VimConnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+ raise vimconn.VimConnException(
+ message="Error: nsx manager or nsx user or nsx password is empty in Config"
+ )
self.vcenter_ip = config.get("vcenter_ip", None)
self.vcenter_port = config.get("vcenter_port", None)
# Set availability zone for Affinity rules
self.availability_zone = self.set_availability_zones()
-# ############# Stub code for SRIOV #################
-# try:
-# self.dvs_name = config['dv_switch_name']
-# except KeyError:
-# raise vimconn.VimConnException(message="Error: distributed virtaul switch name is empty in Config")
-#
-# self.vlanID_range = config.get("vlanID_range", None)
+ # ############# Stub code for SRIOV #################
+ # try:
+ # self.dvs_name = config['dv_switch_name']
+ # except KeyError:
+ # raise vimconn.VimConnException(message="Error:
+ # distributed virtual switch name is empty in Config")
+ #
+ # self.vlanID_range = config.get("vlanID_range", None)
self.org_uuid = None
self.client = None
if not url:
- raise vimconn.VimConnException('url param can not be NoneType')
+ raise vimconn.VimConnException("url param can not be NoneType")
if not self.url_admin: # try to use normal url
self.url_admin = self.url
- logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
- self.tenant_id, self.tenant_name))
- logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
- logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
+ logging.debug(
+ "UUID: {} name: {} tenant_id: {} tenant name {}".format(
+ self.id, self.org_name, self.tenant_id, self.tenant_name
+ )
+ )
+ logging.debug(
+ "vcd url {} vcd username: {} vcd password: {}".format(
+ self.url, self.user, self.passwd
+ )
+ )
+ logging.debug(
+ "vcd admin username {} vcd admin passowrd {}".format(
+ self.admin_user, self.admin_password
+ )
+ )
# initialize organization
if self.user is not None and self.passwd is not None and self.url:
self.init_organization()
def __getitem__(self, index):
- if index == 'name':
+ if index == "name":
return self.name
- if index == 'tenant_id':
+
+ if index == "tenant_id":
return self.tenant_id
- if index == 'tenant_name':
+
+ if index == "tenant_name":
return self.tenant_name
- elif index == 'id':
+ elif index == "id":
return self.id
- elif index == 'org_name':
+ elif index == "org_name":
return self.org_name
- elif index == 'org_uuid':
+ elif index == "org_uuid":
return self.org_uuid
- elif index == 'user':
+ elif index == "user":
return self.user
- elif index == 'passwd':
+ elif index == "passwd":
return self.passwd
- elif index == 'url':
+ elif index == "url":
return self.url
- elif index == 'url_admin':
+ elif index == "url_admin":
return self.url_admin
elif index == "config":
return self.config
raise KeyError("Invalid key '{}'".format(index))
def __setitem__(self, index, value):
- if index == 'name':
+ if index == "name":
self.name = value
- if index == 'tenant_id':
+
+ if index == "tenant_id":
self.tenant_id = value
- if index == 'tenant_name':
+
+ if index == "tenant_name":
self.tenant_name = value
- elif index == 'id':
+ elif index == "id":
self.id = value
- elif index == 'org_name':
+ elif index == "org_name":
self.org_name = value
- elif index == 'org_uuid':
+ elif index == "org_uuid":
self.org_uuid = value
- elif index == 'user':
+ elif index == "user":
self.user = value
- elif index == 'passwd':
+ elif index == "passwd":
self.passwd = value
- elif index == 'url':
+ elif index == "url":
self.url = value
- elif index == 'url_admin':
+ elif index == "url_admin":
self.url_admin = value
else:
raise KeyError("Invalid key '{}'".format(index))
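Editorial aside (usage sketch under the assumption of an already constructed connector instance named conn): the __getitem__/__setitem__ pair above exposes a fixed set of connector attributes through dictionary-style access, and any other key raises KeyError:

    # Read and update connector attributes through the mapping interface.
    tenant = conn["tenant_name"]      # equivalent to conn.tenant_name
    conn["tenant_id"] = "1234-abcd"   # equivalent to conn.tenant_id = "1234-abcd"
    # conn["unknown"]                 # raises KeyError("Invalid key 'unknown'")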
def connect_as_admin(self):
- """ Method connect as pvdc admin user to vCloud director.
- There are certain action that can be done only by provider vdc admin user.
- Organization creation / provider network creation etc.
+ """Method connect as pvdc admin user to vCloud director.
+ There are certain action that can be done only by provider vdc admin user.
+ Organization creation / provider network creation etc.
- Returns:
- The return client object that latter can be used to connect to vcloud director as admin for provider vdc
+ Returns:
+ The client object, which can later be used to connect to vCloud director as admin for the provider vdc
"""
self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
try:
host = self.url
- org = 'System'
- client_as_admin = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
- client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+ org = "System"
+ client_as_admin = Client(
+ host, verify_ssl_certs=False, api_version=API_VERSION
+ )
+ client_as_admin.set_credentials(
+ BasicLoginCredentials(self.admin_user, org, self.admin_password)
+ )
except Exception as e:
raise vimconn.VimConnException(
- "Can't connect to vCloud director as: {} with exception {}".format(self.admin_user, e))
+ "Can't connect to vCloud director as: {} with exception {}".format(
+ self.admin_user, e
+ )
+ )
return client_as_admin
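Editorial aside (hedged sketch of the calling pattern, not runnable on its own and not part of this patch): provider-level operations are expected to obtain an admin client here and reuse its session token for raw REST calls, mirroring what delete_tenant() does later in this file; admin_href below is a placeholder name:

    # Inside a vimconnector method (illustration only):
    # client = self.connect_as_admin()
    # headers = {
    #     "Accept": "application/*+xml;version=" + API_VERSION,
    #     "x-vcloud-authorization": client._session.headers["x-vcloud-authorization"],
    # }
    # response = self.perform_request(req_type="GET", url=admin_href, headers=headers)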
def connect(self):
- """ Method connect as normal user to vCloud director.
+ """Method connect as normal user to vCloud director.
- Returns:
- The return client object that latter can be used to connect to vCloud director as admin for VDC
+ Returns:
+ The client object, which can later be used to connect to vCloud director as admin for the VDC
"""
try:
- self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
- self.user,
- self.org_name))
+ self.logger.debug(
+ "Logging into vCD {} as {} to datacenter {}.".format(
+ self.org_name, self.user, self.org_name
+ )
+ )
host = self.url
client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
- client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+ client.set_credentials(
+ BasicLoginCredentials(self.user, self.org_name, self.passwd)
+ )
except Exception as e:
- raise vimconn.VimConnConnectionException("Can't connect to vCloud director org: "
- "{} as user {} with exception: {}".format(self.org_name,
- self.user,
- e))
+ raise vimconn.VimConnConnectionException(
+ "Can't connect to vCloud director org: "
+ "{} as user {} with exception: {}".format(self.org_name, self.user, e)
+ )
return client
def init_organization(self):
- """ Method initialize organization UUID and VDC parameters.
+ """Method initialize organization UUID and VDC parameters.
- At bare minimum client must provide organization name that present in vCloud director and VDC.
+ At a bare minimum the client must provide an organization name that is present in vCloud director, and a VDC.
- The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
- The Org - UUID will be initialized at the run time if data center present in vCloud director.
+ The VDC UUID (tenant_id) will be initialized at run time if the client didn't set it in the constructor.
+ The Org UUID will be initialized at run time if the data center is present in vCloud director.
- Returns:
- The return vca object that letter can be used to connect to vcloud direct as admin
+ Returns:
+ The vca object, which can later be used to connect to vCloud director as admin
"""
client = self.connect()
+
if not client:
raise vimconn.VimConnConnectionException("Failed to connect vCD.")
org_list = client.get_org_list()
for org in org_list.Org:
# we set org UUID at the init phase but we can do it only when we have valid credential.
- if org.get('name') == self.org_name:
- self.org_uuid = org.get('href').split('/')[-1]
- self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
+ if org.get("name") == self.org_name:
+ self.org_uuid = org.get("href").split("/")[-1]
+ self.logger.debug(
+ "Setting organization UUID {}".format(self.org_uuid)
+ )
break
else:
- raise vimconn.VimConnException("Vcloud director organization {} not found".format(self.org_name))
+ raise vimconn.VimConnException(
+ "Vcloud director organization {} not found".format(
+ self.org_name
+ )
+ )
# if all went well, request the org details
org_details_dict = self.get_org(org_uuid=self.org_uuid)
# we have two case if we want to initialize VDC ID or VDC name at run time
# tenant_name provided but no tenant id
- if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
- vdcs_dict = org_details_dict['vdcs']
+ if (
+ self.tenant_id is None
+ and self.tenant_name is not None
+ and "vdcs" in org_details_dict
+ ):
+ vdcs_dict = org_details_dict["vdcs"]
for vdc in vdcs_dict:
if vdcs_dict[vdc] == self.tenant_name:
self.tenant_id = vdc
- self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
- self.org_name))
+ self.logger.debug(
+ "Setting vdc uuid {} for organization UUID {}".format(
+ self.tenant_id, self.org_name
+ )
+ )
break
else:
- raise vimconn.VimConnException("Tenant name indicated but not present in vcloud director.")
+ raise vimconn.VimConnException(
+ "Tenant name indicated but not present in vcloud director."
+ )
+
# case two we have tenant_id but we don't have tenant name so we find and set it.
- if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
- vdcs_dict = org_details_dict['vdcs']
+ if (
+ self.tenant_id is not None
+ and self.tenant_name is None
+ and "vdcs" in org_details_dict
+ ):
+ vdcs_dict = org_details_dict["vdcs"]
for vdc in vdcs_dict:
if vdc == self.tenant_id:
self.tenant_name = vdcs_dict[vdc]
- self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
- self.org_name))
+ self.logger.debug(
+ "Setting vdc uuid {} for organization UUID {}".format(
+ self.tenant_id, self.org_name
+ )
+ )
break
else:
- raise vimconn.VimConnException("Tenant id indicated but not present in vcloud director")
+ raise vimconn.VimConnException(
+ "Tenant id indicated but not present in vcloud director"
+ )
+
self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
except Exception as e:
- self.logger.debug("Failed initialize organization UUID for org {}: {}".format(self.org_name), e)
+ self.logger.debug(
+ "Failed initialize organization UUID for org {}: {}".format(
+ self.org_name, e
+ ),
+ )
self.logger.debug(traceback.format_exc())
self.org_uuid = None
def new_tenant(self, tenant_name=None, tenant_description=None):
- """ Method adds a new tenant to VIM with this name.
- This action requires access to create VDC action in vCloud director.
+ """Method adds a new tenant to VIM with this name.
+ This action requires access to create VDC action in vCloud director.
- Args:
- tenant_name is tenant_name to be created.
- tenant_description not used for this call
+ Args:
+ tenant_name: name of the tenant to be created.
+ tenant_description not used for this call
- Return:
- returns the tenant identifier in UUID format.
- If action is failed method will throw vimconn.VimConnException method
- """
+ Return:
+ returns the tenant identifier in UUID format.
+ If the action fails, the method raises vimconn.VimConnException
+ """
vdc_task = self.create_vdc(vdc_name=tenant_name)
if vdc_task is not None:
vdc_uuid, _ = vdc_task.popitem()
- self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+ self.logger.info(
+ "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
+ )
+
return vdc_uuid
else:
- raise vimconn.VimConnException("Failed create tenant {}".format(tenant_name))
+ raise vimconn.VimConnException(
+ "Failed create tenant {}".format(tenant_name)
+ )
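Editorial aside (standalone sketch reusing the sample data from test_new_tenant above): create_vdc() is expected to return a single-entry dict that maps the new vdc uuid to its creation-task href, and new_tenant() pops that single item to obtain the uuid:

    # Single-entry dict in the shape returned by create_vdc() in the unit test.
    vdc_task = {
        "a493aa2c-3104-4d63-969b-fc9e72304c9f": "https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4"
    }
    vdc_uuid, task_href = vdc_task.popitem()
    print(vdc_uuid)  # -> a493aa2c-3104-4d63-969b-fc9e72304c9f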
def delete_tenant(self, tenant_id=None):
- """ Delete a tenant from VIM
- Args:
- tenant_id is tenant_id to be deleted.
+ """Delete a tenant from VIM
+ Args:
+ tenant_id: id of the tenant to be deleted.
- Return:
- returns the tenant identifier in UUID format.
- If action is failed method will throw exception
+ Return:
+ returns the tenant identifier in UUID format.
+ If the action fails, the method raises an exception
"""
vca = self.connect_as_admin()
if not vca:
if tenant_id is not None:
if vca._session:
# Get OrgVDC
- url_list = [self.url, '/api/vdc/', tenant_id]
- orgvdc_herf = ''.join(url_list)
-
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=orgvdc_herf,
- headers=headers)
+ url_list = [self.url, "/api/vdc/", tenant_id]
+ orgvdc_herf = "".join(url_list)
+
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=orgvdc_herf, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("delete_tenant():GET REST API call {} failed. "
- "Return status code {}".format(orgvdc_herf,
- response.status_code))
- raise vimconn.VimConnNotFoundException("Fail to get tenant {}".format(tenant_id))
+ self.logger.debug(
+ "delete_tenant():GET REST API call {} failed. "
+ "Return status code {}".format(
+ orgvdc_herf, response.status_code
+ )
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "Fail to get tenant {}".format(tenant_id)
+ )
lxmlroot_respond = lxmlElementTree.fromstring(response.content)
- namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri
+ for prefix, uri in lxmlroot_respond.nsmap.items()
+ if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
- vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
- vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+ vdc_remove_href = lxmlroot_respond.find(
+ "xmlns:Link[@rel='remove']", namespaces
+ ).attrib["href"]
+ vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
- response = self.perform_request(req_type='DELETE',
- url=vdc_remove_href,
- headers=headers)
+ response = self.perform_request(
+ req_type="DELETE", url=vdc_remove_href, headers=headers
+ )
if response.status_code == 202:
time.sleep(5)
+
return tenant_id
else:
- self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "
- "Return status code {}".format(vdc_remove_href,
- response.status_code))
- raise vimconn.VimConnException("Fail to delete tenant with ID {}".format(tenant_id))
+ self.logger.debug(
+ "delete_tenant(): DELETE REST API call {} failed. "
+ "Return status code {}".format(
+ vdc_remove_href, response.status_code
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "Fail to delete tenant with ID {}".format(tenant_id)
+ )
else:
- self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
- raise vimconn.VimConnNotFoundException("Fail to get tenant {}".format(tenant_id))
+ self.logger.debug(
+ "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "Fail to get tenant {}".format(tenant_id)
+ )
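For readers unfamiliar with the namespace handling above, here is a minimal, self-contained sketch of the `Link[@rel='remove']` lookup that delete_tenant performs; the XML payload and host name are hypothetical stand-ins for a real OrgVDC response (requires lxml).

```python
from lxml import etree as lxmlElementTree

# Hypothetical, heavily trimmed OrgVDC response; the real payload is much larger.
payload = b"""<Vdc xmlns="http://www.vmware.com/vcloud/v1.5" name="demo-vdc">
    <Link rel="remove" href="https://vcd.example.com/api/admin/vdc/1234"/>
</Vdc>"""

root = lxmlElementTree.fromstring(payload)
# Keep only prefixed namespaces, then register the default one under "xmlns",
# mirroring what delete_tenant() does before calling find().
namespaces = {prefix: uri for prefix, uri in root.nsmap.items() if prefix}
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"

vdc_remove_href = root.find("xmlns:Link[@rel='remove']", namespaces).attrib["href"]
print(vdc_remove_href + "?recursive=true&force=true")  # DELETE target used above
```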
def get_tenant_list(self, filter_dict={}):
"""Obtain tenants of VIM
"""
org_dict = self.get_org(self.org_uuid)
- vdcs_dict = org_dict['vdcs']
+ vdcs_dict = org_dict["vdcs"]
vdclist = []
try:
for k in vdcs_dict:
- entry = {'name': vdcs_dict[k], 'id': k}
+ entry = {"name": vdcs_dict[k], "id": k}
# if caller didn't specify dictionary we return all tenants.
+
if filter_dict is not None and filter_dict:
filtered_entry = entry.copy()
filtered_dict = set(entry.keys()) - set(filter_dict)
+
for unwanted_key in filtered_dict:
del entry[unwanted_key]
+
if filter_dict == entry:
vdclist.append(filtered_entry)
else:
except Exception:
self.logger.debug("Error in get_tenant_list()")
self.logger.debug(traceback.format_exc())
+
raise vimconn.VimConnException("Incorrect state. {}")
return vdclist
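A standalone sketch of the filter matching used above: the candidate entry is pruned to the keys present in filter_dict and then compared for equality (the tenant names and IDs below are made up).

```python
def matches(entry, filter_dict):
    # Mirrors get_tenant_list(): keep only the filtered keys, then require equality.
    if not filter_dict:
        return True
    pruned = {k: v for k, v in entry.items() if k in filter_dict}
    return pruned == filter_dict

entries = [
    {"name": "tenantA", "id": "1111"},  # hypothetical org VDCs
    {"name": "tenantB", "id": "2222"},
]
print([e for e in entries if matches(e, {"name": "tenantA"})])
# -> [{'name': 'tenantA', 'id': '1111'}]
```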
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
as not present.
"""
- self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
- .format(net_name, net_type, ip_profile, shared, provider_network_profile))
-# vlan = None
-# if provider_network_profile:
-# vlan = provider_network_profile.get("segmentation-id")
+ self.logger.debug(
+ "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
+ net_name, net_type, ip_profile, shared, provider_network_profile
+ )
+ )
+ # vlan = None
+ # if provider_network_profile:
+ # vlan = provider_network_profile.get("segmentation-id")
created_items = {}
- isshared = 'false'
+ isshared = "false"
+
if shared:
- isshared = 'true'
+ isshared = "true"
-# ############# Stub code for SRIOV #################
-# if net_type == "data" or net_type == "ptp":
-# if self.config.get('dv_switch_name') == None:
-# raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
-# network_uuid = self.create_dvPort_group(net_name)
+ # ############# Stub code for SRIOV #################
+ # if net_type == "data" or net_type == "ptp":
+ # if self.config.get('dv_switch_name') == None:
+ # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
+ # network_uuid = self.create_dvPort_group(net_name)
parent_network_uuid = None
if provider_network_profile is not None:
for k, v in provider_network_profile.items():
- if k == 'physical_network':
+ if k == "physical_network":
parent_network_uuid = self.get_physical_network_by_name(v)
- network_uuid = self.create_network(network_name=net_name, net_type=net_type,
- ip_profile=ip_profile, isshared=isshared,
- parent_network_uuid=parent_network_uuid)
+ network_uuid = self.create_network(
+ network_name=net_name,
+ net_type=net_type,
+ ip_profile=ip_profile,
+ isshared=isshared,
+ parent_network_uuid=parent_network_uuid,
+ )
+
if network_uuid is not None:
return network_uuid, created_items
else:
- raise vimconn.VimConnUnexpectedResponse("Failed create a new network {}".format(net_name))
+ raise vimconn.VimConnUnexpectedResponse(
+ "Failed create a new network {}".format(net_name)
+ )
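A hedged usage sketch of the call above; `vim_conn`, the network name and the IP profile values are illustrative assumptions, not values taken from this patch.

```python
# Assumes `vim_conn` is an already-authenticated vimconnector instance.
ip_profile = {
    "subnet_address": "10.10.0.0/24",
    "gateway_address": "10.10.0.1",
    "dhcp_enabled": True,
}
net_id, created_items = vim_conn.new_network(
    net_name="mgmt-net",
    net_type="bridge",
    ip_profile=ip_profile,
    shared=False,
)
print("created network", net_id, created_items)
```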
def get_vcd_network_list(self):
- """ Method available organization for a logged in tenant
+ """Method available organization for a logged in tenant
- Returns:
- The return vca object that letter can be used to connect to vcloud direct as admin
+ Returns:
+ The list of networks available in the organization VDC
"""
- self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+ self.logger.debug(
+ "get_vcd_network_list(): retrieving network list for vcd {}".format(
+ self.tenant_name
+ )
+ )
if not self.tenant_name:
raise vimconn.VimConnConnectionException("Tenant name is empty.")
_, vdc = self.get_vdc_details()
if vdc is None:
- raise vimconn.VimConnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
+ raise vimconn.VimConnConnectionException(
+ "Can't retrieve information for a VDC {}".format(self.tenant_name)
+ )
- vdc_uuid = vdc.get('id').split(":")[3]
+ vdc_uuid = vdc.get("id").split(":")[3]
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vdc.get('href'),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vdc.get("href"), headers=headers
+ )
+
if response.status_code != 200:
self.logger.error("Failed to get vdc content")
raise vimconn.VimConnNotFoundException("Failed to get vdc content")
network_list = []
try:
for item in content:
- if item.tag.split('}')[-1] == 'AvailableNetworks':
+ if item.tag.split("}")[-1] == "AvailableNetworks":
for net in item:
- response = self.perform_request(req_type='GET',
- url=net.get('href'),
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=net.get("href"), headers=headers
+ )
if response.status_code != 200:
self.logger.error("Failed to get network content")
- raise vimconn.VimConnNotFoundException("Failed to get network content")
+ raise vimconn.VimConnNotFoundException(
+ "Failed to get network content"
+ )
else:
net_details = XmlElementTree.fromstring(response.text)
filter_dict = {}
- net_uuid = net_details.get('id').split(":")
+ net_uuid = net_details.get("id").split(":")
+
if len(net_uuid) != 4:
continue
else:
net_uuid = net_uuid[3]
# create dict entry
- self.logger.debug("get_vcd_network_list(): Adding network {} "
- "to a list vcd id {} network {}".format(net_uuid,
- vdc_uuid,
- net_details.get('name')))
- filter_dict["name"] = net_details.get('name')
+ self.logger.debug(
+ "get_vcd_network_list(): Adding network {} "
+ "to a list vcd id {} network {}".format(
+ net_uuid, vdc_uuid, net_details.get("name")
+ )
+ )
+ filter_dict["name"] = net_details.get("name")
filter_dict["id"] = net_uuid
- if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+
+ if [
+ i.text
+ for i in net_details
+ if i.tag.split("}")[-1] == "IsShared"
+ ][0] == "true":
shared = True
else:
shared = False
+
filter_dict["shared"] = shared
filter_dict["tenant_id"] = vdc_uuid
- if int(net_details.get('status')) == 1:
+
+ if int(net_details.get("status")) == 1:
filter_dict["admin_state_up"] = True
else:
filter_dict["admin_state_up"] = False
+
filter_dict["status"] = "ACTIVE"
filter_dict["type"] = "bridge"
network_list.append(filter_dict)
- self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+ self.logger.debug(
+ "get_vcd_network_list adding entry {}".format(
+ filter_dict
+ )
+ )
except Exception:
self.logger.debug("Error in get_vcd_network_list", exc_info=True)
pass
self.logger.debug("get_vcd_network_list returning {}".format(network_list))
+
return network_list
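The repeated `tag.split("}")[-1]` idiom above strips the `{namespace}` prefix that ElementTree keeps on every tag; a tiny, self-contained illustration with a hypothetical OrgVdcNetwork snippet.

```python
import xml.etree.ElementTree as XmlElementTree

# Hypothetical, trimmed OrgVdcNetwork document.
doc = """<OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" name="net-a" status="1">
    <IsShared>true</IsShared>
</OrgVdcNetwork>"""

net_details = XmlElementTree.fromstring(doc)
# Tags come back as '{http://www.vmware.com/vcloud/v1.5}IsShared'; splitting on '}'
# and taking the last piece yields the bare local name, as the loop above does.
shared = [
    i.text for i in net_details if i.tag.split("}")[-1] == "IsShared"
][0] == "true"
print(net_details.get("name"), "shared:", shared, "active:", int(net_details.get("status")) == 1)
```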
def get_network_list(self, filter_dict={}):
List can be empty
"""
- self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+ self.logger.debug(
+ "get_network_list(): retrieving network list for vcd {}".format(
+ self.tenant_name
+ )
+ )
if not self.tenant_name:
raise vimconn.VimConnConnectionException("Tenant name is empty.")
_, vdc = self.get_vdc_details()
if vdc is None:
raise vimconn.VimConnConnectionException(
- "Can't retrieve information for a VDC {}.".format(self.tenant_name))
+ "Can't retrieve information for a VDC {}.".format(self.tenant_name)
+ )
try:
- vdcid = vdc.get('id').split(":")[3]
+ vdcid = vdc.get("id").split(":")[3]
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vdc.get('href'),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vdc.get("href"), headers=headers
+ )
+
if response.status_code != 200:
self.logger.error("Failed to get vdc content")
raise vimconn.VimConnNotFoundException("Failed to get vdc content")
network_list = []
for item in content:
- if item.tag.split('}')[-1] == 'AvailableNetworks':
+ if item.tag.split("}")[-1] == "AvailableNetworks":
for net in item:
- response = self.perform_request(req_type='GET',
- url=net.get('href'),
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=net.get("href"), headers=headers
+ )
if response.status_code != 200:
self.logger.error("Failed to get network content")
- raise vimconn.VimConnNotFoundException("Failed to get network content")
+ raise vimconn.VimConnNotFoundException(
+ "Failed to get network content"
+ )
else:
net_details = XmlElementTree.fromstring(response.text)
filter_entry = {}
- net_uuid = net_details.get('id').split(":")
+ net_uuid = net_details.get("id").split(":")
+
if len(net_uuid) != 4:
continue
else:
net_uuid = net_uuid[3]
# create dict entry
- self.logger.debug("get_network_list(): Adding net {}"
- " to a list vcd id {} network {}".format(net_uuid,
- vdcid,
- net_details.get('name')))
- filter_entry["name"] = net_details.get('name')
+ self.logger.debug(
+ "get_network_list(): Adding net {}"
+ " to a list vcd id {} network {}".format(
+ net_uuid, vdcid, net_details.get("name")
+ )
+ )
+ filter_entry["name"] = net_details.get("name")
filter_entry["id"] = net_uuid
- if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+
+ if [
+ i.text
+ for i in net_details
+ if i.tag.split("}")[-1] == "IsShared"
+ ][0] == "true":
shared = True
else:
shared = False
+
filter_entry["shared"] = shared
filter_entry["tenant_id"] = vdcid
- if int(net_details.get('status')) == 1:
+
+ if int(net_details.get("status")) == 1:
filter_entry["admin_state_up"] = True
else:
filter_entry["admin_state_up"] = False
+
filter_entry["status"] = "ACTIVE"
filter_entry["type"] = "bridge"
filtered_entry = filter_entry.copy()
if filter_dict is not None and filter_dict:
# we remove all the key : value we don't care and match only
# respected field
- filtered_dict = set(filter_entry.keys()) - set(filter_dict)
+ filtered_dict = set(filter_entry.keys()) - set(
+ filter_dict
+ )
+
for unwanted_key in filtered_dict:
del filter_entry[unwanted_key]
+
if filter_dict == filter_entry:
network_list.append(filtered_entry)
else:
network_list.append(filtered_entry)
except Exception as e:
self.logger.debug("Error in get_network_list", exc_info=True)
+
if isinstance(e, vimconn.VimConnException):
raise
else:
- raise vimconn.VimConnNotFoundException("Failed : Networks list not found {} ".format(e))
+ raise vimconn.VimConnNotFoundException(
+ "Failed : Networks list not found {} ".format(e)
+ )
self.logger.debug("Returning {}".format(network_list))
+
return network_list
def get_network(self, net_id):
"""Method obtains network details of net_id VIM network
- Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
-
+ Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
try:
_, vdc = self.get_vdc_details()
- vdc_id = vdc.get('id').split(":")[3]
+ vdc_id = vdc.get("id").split(":")[3]
+
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vdc.get('href'),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vdc.get("href"), headers=headers
+ )
+
if response.status_code != 200:
self.logger.error("Failed to get vdc content")
raise vimconn.VimConnNotFoundException("Failed to get vdc content")
filter_dict = {}
for item in content:
- if item.tag.split('}')[-1] == 'AvailableNetworks':
+ if item.tag.split("}")[-1] == "AvailableNetworks":
for net in item:
- response = self.perform_request(req_type='GET',
- url=net.get('href'),
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=net.get("href"), headers=headers
+ )
if response.status_code != 200:
self.logger.error("Failed to get network content")
- raise vimconn.VimConnNotFoundException("Failed to get network content")
+ raise vimconn.VimConnNotFoundException(
+ "Failed to get network content"
+ )
else:
net_details = XmlElementTree.fromstring(response.text)
- vdc_network_id = net_details.get('id').split(":")
+ vdc_network_id = net_details.get("id").split(":")
if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
- filter_dict["name"] = net_details.get('name')
+ filter_dict["name"] = net_details.get("name")
filter_dict["id"] = vdc_network_id[3]
- if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+
+ if [
+ i.text
+ for i in net_details
+ if i.tag.split("}")[-1] == "IsShared"
+ ][0] == "true":
shared = True
else:
shared = False
+
filter_dict["shared"] = shared
filter_dict["tenant_id"] = vdc_id
- if int(net_details.get('status')) == 1:
+
+ if int(net_details.get("status")) == 1:
filter_dict["admin_state_up"] = True
else:
filter_dict["admin_state_up"] = False
+
filter_dict["status"] = "ACTIVE"
filter_dict["type"] = "bridge"
self.logger.debug("Returning {}".format(filter_dict))
+
return filter_dict
else:
- raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "Network {} not found".format(net_id)
+ )
except Exception as e:
self.logger.debug("Error in get_network")
self.logger.debug(traceback.format_exc())
+
if isinstance(e, vimconn.VimConnException):
raise
else:
- raise vimconn.VimConnNotFoundException("Failed : Network not found {} ".format(e))
+ raise vimconn.VimConnNotFoundException(
+ "Failed : Network not found {} ".format(e)
+ )
return filter_dict
"""
# ############# Stub code for SRIOV #################
-# dvport_group = self.get_dvport_group(net_id)
-# if dvport_group:
-# #delete portgroup
-# status = self.destroy_dvport_group(net_id)
-# if status:
-# # Remove vlanID from persistent info
-# if net_id in self.persistent_info["used_vlanIDs"]:
-# del self.persistent_info["used_vlanIDs"][net_id]
-#
-# return net_id
+ # dvport_group = self.get_dvport_group(net_id)
+ # if dvport_group:
+ # #delete portgroup
+ # status = self.destroy_dvport_group(net_id)
+ # if status:
+ # # Remove vlanID from persistent info
+ # if net_id in self.persistent_info["used_vlanIDs"]:
+ # del self.persistent_info["used_vlanIDs"][net_id]
+ #
+ # return net_id
vcd_network = self.get_vcd_network(network_uuid=net_id)
if vcd_network is not None and vcd_network:
if self.delete_network_action(network_uuid=net_id):
return net_id
else:
- raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "Network {} not found".format(net_id)
+ )
def refresh_nets_status(self, net_list):
"""Get the status of the networks
- Params: the list of network identifiers
- Returns a dictionary with:
- net_id: #VIM id of this network
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, INACTIVE, DOWN (admin down),
- # BUILD (on building process)
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ Params: the list of network identifiers
+ Returns a dictionary with:
+ net_id: #VIM id of this network
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, INACTIVE, DOWN (admin down),
+ # BUILD (on building process)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
-
dict_entry = {}
try:
for net in net_list:
- errormsg = ''
+ errormsg = ""
vcd_network = self.get_vcd_network(network_uuid=net)
if vcd_network is not None and vcd_network:
- if vcd_network['status'] == '1':
- status = 'ACTIVE'
+ if vcd_network["status"] == "1":
+ status = "ACTIVE"
else:
- status = 'DOWN'
+ status = "DOWN"
else:
- status = 'DELETED'
- errormsg = 'Network not found.'
+ status = "DELETED"
+ errormsg = "Network not found."
- dict_entry[net] = {'status': status, 'error_msg': errormsg,
- 'vim_info': yaml.safe_dump(vcd_network)}
+ dict_entry[net] = {
+ "status": status,
+ "error_msg": errormsg,
+ "vim_info": yaml.safe_dump(vcd_network),
+ }
except Exception:
self.logger.debug("Error in refresh_nets_status")
self.logger.debug(traceback.format_exc())
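For orientation, an assumed example of the per-network entry built above (the UUID and values are made up); each requested id maps to a status/error_msg/vim_info triple.

```python
# Hypothetical result shape of refresh_nets_status(["c3f9fd26-..."]):
example = {
    "c3f9fd26-1111-2222-3333-444455556666": {
        "status": "ACTIVE",           # DOWN when status != '1', DELETED when not found
        "error_msg": "",              # "Network not found." accompanies DELETED
        "vim_info": "status: '1'\n",  # yaml.safe_dump() of the raw vCD network record
    }
}
for net_id, entry in example.items():
    print(net_id, entry["status"], entry["error_msg"] or "-")
```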
def get_flavor(self, flavor_id):
"""Obtain flavor details from the VIM
- Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+ Returns the flavor dict details {'id':<>, 'name':<>, other vim specific fields}  # TODO: make concrete
"""
if flavor_id not in vimconnector.flavorlist:
raise vimconn.VimConnNotFoundException("Flavor not found.")
+
return vimconnector.flavorlist[flavor_id]
def new_flavor(self, flavor_data):
if numas:
for numa in numas:
# overwrite ram and vcpus
- if 'memory' in numa:
- ram = numa['memory'] * 1024
- if 'paired-threads' in numa:
- cpu = numa['paired-threads'] * 2
- elif 'cores' in numa:
- cpu = numa['cores']
- elif 'threads' in numa:
- cpu = numa['threads']
+ if "memory" in numa:
+ ram = numa["memory"] * 1024
+
+ if "paired-threads" in numa:
+ cpu = numa["paired-threads"] * 2
+ elif "cores" in numa:
+ cpu = numa["cores"]
+ elif "threads" in numa:
+ cpu = numa["threads"]
new_flavor[FLAVOR_RAM_KEY] = ram
new_flavor[FLAVOR_VCPUS_KEY] = cpu
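A small worked example of the NUMA override logic above (hypothetical flavor data): one NUMA node with 2 GiB of memory and 2 paired threads yields 2048 MiB of RAM and 4 vCPUs.

```python
def apply_numa_overrides(ram, cpu, numas):
    # Mirrors new_flavor(): NUMA memory overrides RAM (scaled by 1024) and paired
    # threads count double; cores/threads are used as-is when paired-threads is absent.
    for numa in numas:
        if "memory" in numa:
            ram = numa["memory"] * 1024
        if "paired-threads" in numa:
            cpu = numa["paired-threads"] * 2
        elif "cores" in numa:
            cpu = numa["cores"]
        elif "threads" in numa:
            cpu = numa["threads"]
    return ram, cpu

print(apply_numa_overrides(ram=1024, cpu=1, numas=[{"memory": 2, "paired-threads": 2}]))
# -> (2048, 4)
```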
def delete_flavor(self, flavor_id):
"""Deletes a tenant flavor from VIM identify by its id
- Returns the used id or raise an exception
+ Returns the used id or raises an exception
"""
if flavor_id not in vimconnector.flavorlist:
raise vimconn.VimConnNotFoundException("Flavor not found.")
vimconnector.flavorlist.pop(flavor_id, None)
+
return flavor_id
def new_image(self, image_dict):
200, image-id if the image is created
<0, message if there is an error
"""
-
- return self.get_image_id_from_path(image_dict['location'])
+ return self.get_image_id_from_path(image_dict["location"])
def delete_image(self, image_id):
"""
- Deletes a tenant image from VIM
- Args:
- image_id is ID of Image to be deleted
- Return:
- returns the image identifier in UUID format or raises an exception on error
+ Deletes a tenant image from VIM
+ Args:
+ image_id: ID of the image to be deleted
+ Return:
+ returns the image identifier in UUID format or raises an exception on error
"""
conn = self.connect_as_admin()
+
if not conn:
raise vimconn.VimConnConnectionException("Failed to connect vCD")
+
# Get Catalog details
- url_list = [self.url, '/api/catalog/', image_id]
- catalog_herf = ''.join(url_list)
+ url_list = [self.url, "/api/catalog/", image_id]
+ catalog_herf = "".join(url_list)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
+ }
- response = self.perform_request(req_type='GET',
- url=catalog_herf,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=catalog_herf, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("delete_image():GET REST API call {} failed. "
- "Return status code {}".format(catalog_herf,
- response.status_code))
- raise vimconn.VimConnNotFoundException("Fail to get image {}".format(image_id))
+ self.logger.debug(
+ "delete_image():GET REST API call {} failed. "
+ "Return status code {}".format(catalog_herf, response.status_code)
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "Fail to get image {}".format(image_id)
+ )
lxmlroot_respond = lxmlElementTree.fromstring(response.content)
- namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
+
for catalogItem in catalogItems:
- catalogItem_href = catalogItem.attrib['href']
+ catalogItem_href = catalogItem.attrib["href"]
- response = self.perform_request(req_type='GET',
- url=catalogItem_href,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=catalogItem_href, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("delete_image():GET REST API call {} failed. "
- "Return status code {}".format(catalog_herf,
- response.status_code))
- raise vimconn.VimConnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
- catalogItem,
- image_id))
+ self.logger.debug(
+ "delete_image():GET REST API call {} failed. "
+ "Return status code {}".format(catalog_herf, response.status_code)
+ )
+ raise vimconn.VimConnNotFoundException(
+ "Fail to get catalogItem {} for catalog {}".format(
+ catalogItem, image_id
+ )
+ )
lxmlroot_respond = lxmlElementTree.fromstring(response.content)
- namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
- catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
+ catalogitem_remove_href = lxmlroot_respond.find(
+ "xmlns:Link[@rel='remove']", namespaces
+ ).attrib["href"]
# Remove catalogItem
- response = self.perform_request(req_type='DELETE',
- url=catalogitem_remove_href,
- headers=headers)
+ response = self.perform_request(
+ req_type="DELETE", url=catalogitem_remove_href, headers=headers
+ )
+
if response.status_code == requests.codes.no_content:
self.logger.debug("Deleted Catalog item {}".format(catalogItem))
else:
- raise vimconn.VimConnException("Fail to delete Catalog Item {}".format(catalogItem))
+ raise vimconn.VimConnException(
+ "Fail to delete Catalog Item {}".format(catalogItem)
+ )
# Remove catalog
- url_list = [self.url, '/api/admin/catalog/', image_id]
- catalog_remove_herf = ''.join(url_list)
- response = self.perform_request(req_type='DELETE',
- url=catalog_remove_herf,
- headers=headers)
+ url_list = [self.url, "/api/admin/catalog/", image_id]
+ catalog_remove_herf = "".join(url_list)
+ response = self.perform_request(
+ req_type="DELETE", url=catalog_remove_herf, headers=headers
+ )
if response.status_code == requests.codes.no_content:
self.logger.debug("Deleted Catalog {}".format(image_id))
+
return image_id
else:
raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
:return:
"""
for catalog in catalogs:
- if catalog['name'] == catalog_name:
- return catalog['id']
+ if catalog["name"] == catalog_name:
+ return catalog["id"]
def create_vimcatalog(self, vca=None, catalog_name=None):
- """ Create new catalog entry in vCloud director.
+ """Create new catalog entry in vCloud director.
- Args
- vca: vCloud director.
- catalog_name catalog that client wish to create. Note no validation done for a name.
- Client must make sure that provide valid string representation.
+ Args
+ vca: vCloud director.
+ catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
+ the client must make sure to provide a valid string representation.
- Returns catalog id if catalog created else None.
+ Returns catalog id if catalog created else None.
"""
try:
lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+
if lxml_catalog_element:
- id_attr_value = lxml_catalog_element.get('id')
- return id_attr_value.split(':')[-1]
+ id_attr_value = lxml_catalog_element.get("id")
+ return id_attr_value.split(":")[-1]
+
catalogs = vca.list_catalogs()
except Exception as ex:
self.logger.error(
- 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+ 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
+ catalog_name, ex
+ )
+ )
raise
return self.catalog_exists(catalog_name, catalogs)
# noinspection PyIncorrectDocstring
- def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
- description='', progress=False, chunk_bytes=128 * 1024):
+ def upload_ovf(
+ self,
+ vca=None,
+ catalog_name=None,
+ image_name=None,
+ media_file_name=None,
+ description="",
+ progress=False,
+ chunk_bytes=128 * 1024,
+ ):
"""
Uploads an OVF file to a vCloud catalog
# if VCD can parse OVF we upload VMDK file
try:
for catalog in vca.list_catalogs():
- if catalog_name != catalog['name']:
+ if catalog_name != catalog["name"]:
continue
- catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
+ catalog_href = "{}/api/catalog/{}/action/upload".format(
+ self.url, catalog["id"]
+ )
data = """
<UploadVAppTemplateParams name="{}"
xmlns="http://www.vmware.com/vcloud/v1.5"
xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
<Description>{} vApp Template</Description>
</UploadVAppTemplateParams>
- """.format(catalog_name, description)
+ """.format(
+ catalog_name, description
+ )
if self.client:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
- response = self.perform_request(req_type='POST',
- url=catalog_href,
- headers=headers,
- data=data)
+ response = self.perform_request(
+ req_type="POST", url=catalog_href, headers=headers, data=data
+ )
if response.status_code == requests.codes.created:
catalogItem = XmlElementTree.fromstring(response.text)
- entity = [child for child in catalogItem if
- child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
- href = entity.get('href')
+ entity = [
+ child
+ for child in catalogItem
+ if child.get("type")
+ == "application/vnd.vmware.vcloud.vAppTemplate+xml"
+ ][0]
+ href = entity.get("href")
template = href
- response = self.perform_request(req_type='GET',
- url=href,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=href, headers=headers
+ )
if response.status_code == requests.codes.ok:
- headers['Content-Type'] = 'Content-Type text/xml'
- result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
+ headers["Content-Type"] = "Content-Type text/xml"
+ result = re.search(
+ 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
+ response.text,
+ )
+
if result:
transfer_href = result.group(1)
- response = self.perform_request(req_type='PUT',
- url=transfer_href,
- headers=headers,
- data=open(media_file_name, 'rb'))
+ response = self.perform_request(
+ req_type="PUT",
+ url=transfer_href,
+ headers=headers,
+ data=open(media_file_name, "rb"),
+ )
+
if response.status_code != requests.codes.ok:
self.logger.debug(
- "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
- media_file_name))
+ "Failed create vApp template for catalog name {} and image {}".format(
+ catalog_name, media_file_name
+ )
+ )
return False
# TODO fix this with async block
time.sleep(5)
- self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name,
- media_file_name))
+ self.logger.debug(
+ "vApp template for catalog name {} and image {}".format(
+ catalog_name, media_file_name
+ )
+ )
# uploading VMDK file
# check status of OVF upload and upload remaining files.
- response = self.perform_request(req_type='GET',
- url=template,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=template, headers=headers
+ )
if response.status_code == requests.codes.ok:
- result = re.search('rel="upload:default"\s*href="(.*?vmdk)"', response.text)
+ result = re.search(
+ 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
+ )
+
if result:
link_href = result.group(1)
+
# we skip ovf since it already uploaded.
- if 'ovf' in link_href:
+ if "ovf" in link_href:
continue
+
# The OVF file and VMDK must be in a same directory
head, _ = os.path.split(media_file_name)
- file_vmdk = head + '/' + link_href.split("/")[-1]
+ file_vmdk = head + "/" + link_href.split("/")[-1]
+
if not os.path.isfile(file_vmdk):
return False
+
statinfo = os.stat(file_vmdk)
if statinfo.st_size == 0:
return False
+
hrefvmdk = link_href
if progress:
- widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
- FileTransferSpeed()]
- progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+ widgets = [
+ "Uploading file: ",
+ Percentage(),
+ " ",
+ Bar(),
+ " ",
+ ETA(),
+ " ",
+ FileTransferSpeed(),
+ ]
+ progress_bar = ProgressBar(
+ widgets=widgets, maxval=statinfo.st_size
+ ).start()
bytes_transferred = 0
- f = open(file_vmdk, 'rb')
+ f = open(file_vmdk, "rb")
+
while bytes_transferred < statinfo.st_size:
my_bytes = f.read(chunk_bytes)
if len(my_bytes) <= chunk_bytes:
- headers['Content-Range'] = 'bytes {}-{}/{}'.format(
- bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
- headers['Content-Length'] = str(len(my_bytes))
- response = requests.put(url=hrefvmdk,
- headers=headers,
- data=my_bytes,
- verify=False)
+ headers["Content-Range"] = "bytes {}-{}/{}".format(
+ bytes_transferred,
+ bytes_transferred + len(my_bytes) - 1,
+ statinfo.st_size,
+ )
+ headers["Content-Length"] = str(len(my_bytes))
+ response = requests.put(
+ url=hrefvmdk,
+ headers=headers,
+ data=my_bytes,
+ verify=False,
+ )
+
if response.status_code == requests.codes.ok:
bytes_transferred += len(my_bytes)
if progress:
progress_bar.update(bytes_transferred)
else:
self.logger.debug(
- 'file upload failed with error: [{}] {}'.format(response.status_code,
- response.text))
+ "file upload failed with error: [{}] {}".format(
+ response.status_code, response.text
+ )
+ )
f.close()
+
return False
+
f.close()
if progress:
progress_bar.finish()
time.sleep(10)
+
return True
else:
- self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
- format(catalog_name, media_file_name))
+ self.logger.debug(
+ "Failed retrieve vApp template for catalog name {} for OVF {}".format(
+ catalog_name, media_file_name
+ )
+ )
return False
except Exception as exp:
- self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
- .format(catalog_name, media_file_name, exp))
+ self.logger.debug(
+ "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
+ catalog_name, media_file_name, exp
+ )
+ )
+
raise vimconn.VimConnException(
- "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
- .format(catalog_name, media_file_name, exp))
+ "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
+ catalog_name, media_file_name, exp
+ )
+ )
+
+ self.logger.debug(
+ "Failed retrieve catalog name {} for OVF file {}".format(
+ catalog_name, media_file_name
+ )
+ )
- self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
return False
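The VMDK loop above streams the file in fixed-size chunks, advertising each slice with Content-Range/Content-Length headers. A simplified, hedged sketch of that pattern (the upload URL and headers are placeholders; the range end is start + chunk length - 1).

```python
import os
import requests  # already a dependency of this connector


def upload_in_chunks(upload_url, file_path, headers, chunk_bytes=128 * 1024):
    """Hedged sketch of the Content-Range chunk loop used for the VMDK upload."""
    total = os.stat(file_path).st_size
    sent = 0
    with open(file_path, "rb") as handle:
        while sent < total:
            chunk = handle.read(chunk_bytes)
            headers["Content-Range"] = "bytes {}-{}/{}".format(
                sent, sent + len(chunk) - 1, total
            )
            headers["Content-Length"] = str(len(chunk))
            resp = requests.put(url=upload_url, headers=headers, data=chunk, verify=False)
            if resp.status_code != requests.codes.ok:
                return False
            sent += len(chunk)
    return True
```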
- def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
+ def upload_vimimage(
+ self,
+ vca=None,
+ catalog_name=None,
+ media_name=None,
+ medial_file_name=None,
+ progress=False,
+ ):
"""Upload media file"""
# TODO add named parameters for readability
-
- return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
- media_file_name=medial_file_name, description='medial_file_name', progress=progress)
+ return self.upload_ovf(
+ vca=vca,
+ catalog_name=catalog_name,
+ image_name=media_name.split(".")[0],
+ media_file_name=medial_file_name,
+ description="medial_file_name",
+ progress=progress,
+ )
def validate_uuid4(self, uuid_string=None):
- """ Method validate correct format of UUID.
+ """Method validate correct format of UUID.
Return: true if string represent valid uuid
"""
uuid.UUID(uuid_string, version=4)
except ValueError:
return False
+
return True
def get_catalogid(self, catalog_name=None, catalogs=None):
- """ Method check catalog and return catalog ID in UUID format.
+ """Method check catalog and return catalog ID in UUID format.
Args
catalog_name: catalog name as string
Return: catalogs uuid
"""
-
for catalog in catalogs:
- if catalog['name'] == catalog_name:
- catalog_id = catalog['id']
+ if catalog["name"] == catalog_name:
+ catalog_id = catalog["id"]
return catalog_id
+
return None
def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
- """ Method check catalog and return catalog name lookup done by catalog UUID.
+ """Method check catalog and return catalog name lookup done by catalog UUID.
Args
catalog_name: catalog name as string
Return: catalogs name or None
"""
-
if not self.validate_uuid4(uuid_string=catalog_uuid):
return None
for catalog in catalogs:
- catalog_id = catalog.get('id')
+ catalog_id = catalog.get("id")
+
if catalog_id == catalog_uuid:
- return catalog.get('name')
+ return catalog.get("name")
+
return None
def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
- """ Method check catalog and return catalog name lookup done by catalog UUID.
+ """Method check catalog and return catalog name lookup done by catalog UUID.
Args
catalog_name: catalog name as string
Return: catalogs name or None
"""
-
if not self.validate_uuid4(uuid_string=catalog_uuid):
return None
for catalog in catalogs:
- catalog_id = catalog.get('id')
+ catalog_id = catalog.get("id")
+
if catalog_id == catalog_uuid:
return catalog
+
return None
def get_image_id_from_path(self, path=None, progress=False):
- """ Method upload OVF image to vCloud director.
+ """Method upload OVF image to vCloud director.
Each OVF image represented as single catalog entry in vcloud director.
The method check for existing catalog entry. The check done by file name without file extension.
Return: if image uploaded correct method will provide image catalog UUID.
"""
-
if not path:
raise vimconn.VimConnException("Image path can't be None.")
raise vimconn.VimConnException("Can't read file. File not found.")
if not os.access(path, os.R_OK):
- raise vimconn.VimConnException("Can't read file. Check file permission to read.")
+ raise vimconn.VimConnException(
+ "Can't read file. Check file permission to read."
+ )
self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
_, filename = os.path.split(path)
_, file_extension = os.path.splitext(path)
- if file_extension != '.ovf':
- self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
- raise vimconn.VimConnException("Wrong container. vCloud director supports only OVF.")
+ if file_extension != ".ovf":
+ self.logger.debug(
+ "Wrong file extension {} connector support only OVF container.".format(
+ file_extension
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "Wrong container. vCloud director supports only OVF."
+ )
catalog_name = os.path.splitext(filename)[0]
- catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
- self.logger.debug("File name {} Catalog Name {} file path {} "
- "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
+ catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
+ self.logger.debug(
+ "File name {} Catalog Name {} file path {} "
+ "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
+ )
try:
org, _ = self.get_vdc_details()
catalogs = org.list_catalogs()
except Exception as exp:
self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
- raise vimconn.VimConnException("Failed get catalogs() with Exception {} ".format(exp))
+
+ raise vimconn.VimConnException(
+ "Failed get catalogs() with Exception {} ".format(exp)
+ )
if len(catalogs) == 0:
- self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
+ self.logger.info(
+ "Creating a new catalog entry {} in vcloud director".format(
+ catalog_name
+ )
+ )
+
if self.create_vimcatalog(org, catalog_md5_name) is None:
- raise vimconn.VimConnException("Failed create new catalog {} ".format(catalog_md5_name))
+ raise vimconn.VimConnException(
+ "Failed create new catalog {} ".format(catalog_md5_name)
+ )
+
+ result = self.upload_vimimage(
+ vca=org,
+ catalog_name=catalog_md5_name,
+ media_name=filename,
+ medial_file_name=path,
+ progress=progress,
+ )
- result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
- media_name=filename, medial_file_name=path, progress=progress)
if not result:
- raise vimconn.VimConnException("Failed create vApp template for catalog {} ".format(catalog_name))
+ raise vimconn.VimConnException(
+ "Failed create vApp template for catalog {} ".format(catalog_name)
+ )
+
return self.get_catalogid(catalog_name, catalogs)
else:
for catalog in catalogs:
# search for existing catalog if we find same name we return ID
# TODO optimize this
- if catalog['name'] == catalog_md5_name:
- self.logger.debug("Found existing catalog entry for {} "
- "catalog id {}".format(catalog_name,
- self.get_catalogid(catalog_md5_name, catalogs)))
+ if catalog["name"] == catalog_md5_name:
+ self.logger.debug(
+ "Found existing catalog entry for {} "
+ "catalog id {}".format(
+ catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
+ )
+ )
+
return self.get_catalogid(catalog_md5_name, catalogs)
# if we didn't find existing catalog we create a new one and upload image.
- self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
+ self.logger.debug(
+ "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
+ )
if self.create_vimcatalog(org, catalog_md5_name) is None:
- raise vimconn.VimConnException("Failed create new catalog {} ".format(catalog_md5_name))
+ raise vimconn.VimConnException(
+ "Failed create new catalog {} ".format(catalog_md5_name)
+ )
- result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
- media_name=filename, medial_file_name=path, progress=progress)
+ result = self.upload_vimimage(
+ vca=org,
+ catalog_name=catalog_md5_name,
+ media_name=filename,
+ medial_file_name=path,
+ progress=progress,
+ )
if not result:
- raise vimconn.VimConnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
+ raise vimconn.VimConnException(
+ "Failed create vApp template for catalog {} ".format(catalog_md5_name)
+ )
return self.get_catalogid(catalog_md5_name, org.list_catalogs())
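Catalog entries are keyed by the MD5 of the OVF path rather than by the bare file name; a quick illustration with a hypothetical path.

```python
import hashlib
import os

path = "/opt/osm/images/ubuntu20.04.ovf"  # hypothetical OVF location
catalog_name = os.path.splitext(os.path.basename(path))[0]        # "ubuntu20.04"
catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()  # becomes the vCD catalog name
print(catalog_name, "->", catalog_md5_name)
```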
def get_image_list(self, filter_dict={}):
- '''Obtain tenant images from VIM
+ """Obtain tenant images from VIM
Filter_dict can be:
name: image name
id: image uuid
Returns the image list of dictionaries:
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
- '''
-
+ """
try:
org, _ = self.get_vdc_details()
image_list = []
catalogs = org.list_catalogs()
+
if len(catalogs) == 0:
return image_list
else:
for catalog in catalogs:
- catalog_uuid = catalog.get('id')
- name = catalog.get('name')
+ catalog_uuid = catalog.get("id")
+ name = catalog.get("name")
filtered_dict = {}
+
if filter_dict.get("name") and filter_dict["name"] != name:
continue
+
if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
continue
+
filtered_dict["name"] = name
filtered_dict["id"] = catalog_uuid
image_list.append(filtered_dict)
- self.logger.debug("List of already created catalog items: {}".format(image_list))
+ self.logger.debug(
+ "List of already created catalog items: {}".format(image_list)
+ )
+
return image_list
except Exception as exp:
- raise vimconn.VimConnException("Exception occured while retriving catalog items {}".format(exp))
+ raise vimconn.VimConnException(
+ "Exception occured while retriving catalog items {}".format(exp)
+ )
def get_vappid(self, vdc=None, vapp_name=None):
- """ Method takes vdc object and vApp name and returns vapp uuid or None
+ """Method takes vdc object and vApp name and returns vapp uuid or None
Args:
vdc: The VDC object.
"""
if vdc is None or vapp_name is None:
return None
+
# UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
try:
- refs = [ref for ref in vdc.ResourceEntities.ResourceEntity
- if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+ refs = [
+ ref
+ for ref in vdc.ResourceEntities.ResourceEntity
+ if ref.name == vapp_name
+ and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
+ ]
+
if len(refs) == 1:
return refs[0].href.split("vapp")[1][1:]
except Exception as e:
self.logger.exception(e)
return False
+
return None
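Both get_vappid() and check_vapp() recover the vApp UUID by splitting the resource href on the literal "vapp" marker; a tiny demonstration using the href format quoted in the comment above.

```python
# Href format documented above: https://host/api/vApp/vapp-<uuid>
href = "https://vcd.example.com/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf"

# split("vapp") leaves ["https://.../api/vApp/", "-30da58a3-..."]; dropping the
# leading "-" gives the bare UUID that the connector stores and compares.
vapp_uuid = href.split("vapp")[1][1:]
print(vapp_uuid)  # 30da58a3-e7c7-4d09-8f68-d4c8201169cf
```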
def check_vapp(self, vdc=None, vapp_uuid=None):
- """ Method Method returns True or False if vapp deployed in vCloud director
+ """Method Method returns True or False if vapp deployed in vCloud director
- Args:
- vca: Connector to VCA
- vdc: The VDC object.
- vappid: vappid is application identifier
+ Args:
+ vca: Connector to VCA
+ vdc: The VDC object.
+ vappid: vappid is application identifier
- Returns:
- The return True if vApp deployed
- :param vdc:
- :param vapp_uuid:
+ Returns:
+ The return True if vApp deployed
+ :param vdc:
+ :param vapp_uuid:
"""
try:
- refs = [ref for ref in vdc.ResourceEntities.ResourceEntity
- if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+ refs = [
+ ref
+ for ref in vdc.ResourceEntities.ResourceEntity
+ if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
+ ]
+
for ref in refs:
vappid = ref.href.split("vapp")[1][1:]
# find vapp with respected vapp uuid
+
if vappid == vapp_uuid:
return True
except Exception as e:
self.logger.exception(e)
+
return False
+
return False
def get_namebyvappid(self, vapp_uuid=None):
try:
if self.client and vapp_uuid:
vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+
+ response = self.perform_request(
+ req_type="GET", url=vapp_call, headers=headers
+ )
- response = self.perform_request(req_type='GET',
- url=vapp_call,
- headers=headers)
# Retry login if session expired & retry sending request
if response.status_code == 403:
- response = self.retry_rest('GET', vapp_call)
+ response = self.retry_rest("GET", vapp_call)
tree = XmlElementTree.fromstring(response.text)
- return tree.attrib['name'] if 'name' in tree.attrib else None
+
+ return tree.attrib["name"] if "name" in tree.attrib else None
except Exception as e:
self.logger.exception(e)
+
return None
+
return None
- def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
- cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name=None,
+ description="",
+ start=False,
+ image_id=None,
+ flavor_id=None,
+ net_list=[],
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Adds a VM instance to VIM
Params:
'start': (boolean) indicates if VM must start or created in pause mode.
as not present.
"""
self.logger.info("Creating new instance for entry {}".format(name))
- self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
- "availability_zone_index {} availability_zone_list {}"
- .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,
- availability_zone_index, availability_zone_list))
+ self.logger.debug(
+ "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
+ "availability_zone_index {} availability_zone_list {}".format(
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config,
+ disk_list,
+ availability_zone_index,
+ availability_zone_list,
+ )
+ )
# new vm name = vmname + tenant_id + uuid
- new_vm_name = [name, '-', str(uuid.uuid4())]
- vmname_andid = ''.join(new_vm_name)
+ new_vm_name = [name, "-", str(uuid.uuid4())]
+ vmname_andid = "".join(new_vm_name)
for net in net_list:
- if net['type'] == "PCI-PASSTHROUGH":
+ if net["type"] == "PCI-PASSTHROUGH":
raise vimconn.VimConnNotSupportedException(
- "Current vCD version does not support type : {}".format(net['type']))
+ "Current vCD version does not support type : {}".format(net["type"])
+ )
if len(net_list) > 10:
raise vimconn.VimConnNotSupportedException(
- "The VM hardware versions 7 and above support upto 10 NICs only")
+ "The VM hardware versions 7 and above support upto 10 NICs only"
+ )
# if vm already deployed we return existing uuid
# we check for presence of VDC, Catalog entry and Flavor.
org, vdc = self.get_vdc_details()
if vdc is None:
raise vimconn.VimConnNotFoundException(
- "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
- catalogs = org.list_catalogs()
+ "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
+ name
+ )
+ )
+
+ catalogs = org.list_catalogs()
if catalogs is None:
# Retry once, if failed by refreshing token
self.get_token()
org = Org(self.client, resource=self.client.get_org())
catalogs = org.list_catalogs()
+
if catalogs is None:
raise vimconn.VimConnNotFoundException(
- "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
+ "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
+ name
+ )
+ )
- catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+ catalog_hash_name = self.get_catalogbyid(
+ catalog_uuid=image_id, catalogs=catalogs
+ )
if catalog_hash_name:
- self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
+ self.logger.info(
+ "Found catalog entry {} for image id {}".format(
+ catalog_hash_name, image_id
+ )
+ )
else:
- raise vimconn.VimConnNotFoundException("new_vminstance(): Failed create vApp {}: "
- "(Failed retrieve catalog information {})".format(name, image_id))
+ raise vimconn.VimConnNotFoundException(
+ "new_vminstance(): Failed create vApp {}: "
+ "(Failed retrieve catalog information {})".format(name, image_id)
+ )
# Set vCPU and Memory based on flavor.
vm_cpus = None
if flavor_id is not None:
if flavor_id not in vimconnector.flavorlist:
- raise vimconn.VimConnNotFoundException("new_vminstance(): Failed create vApp {}: "
- "Failed retrieve flavor information "
- "flavor id {}".format(name, flavor_id))
+ raise vimconn.VimConnNotFoundException(
+ "new_vminstance(): Failed create vApp {}: "
+ "Failed retrieve flavor information "
+ "flavor id {}".format(name, flavor_id)
+ )
else:
try:
flavor = vimconnector.flavorlist[flavor_id]
vm_memory = flavor[FLAVOR_RAM_KEY]
vm_disk = flavor[FLAVOR_DISK_KEY]
extended = flavor.get("extended", None)
+
if extended:
numas = extended.get("numas", None)
-
except Exception as exp:
- raise vimconn.VimConnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
+ raise vimconn.VimConnException(
+ "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
+ )
# image upload creates template name as catalog name space Template.
templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
# network_mode = 'bridged'
if net_list is not None and len(net_list) > 0:
for net in net_list:
- if 'use' in net and net['use'] == 'mgmt' and not primary_net:
+ if "use" in net and net["use"] == "mgmt" and not primary_net:
primary_net = net
+
if primary_net is None:
primary_net = net_list[0]
try:
- primary_net_id = primary_net['net_id']
- url_list = [self.url, '/api/network/', primary_net_id]
- primary_net_href = ''.join(url_list)
+ primary_net_id = primary_net["net_id"]
+ url_list = [self.url, "/api/network/", primary_net_id]
+ primary_net_href = "".join(url_list)
network_dict = self.get_vcd_network(network_uuid=primary_net_id)
- if 'name' in network_dict:
- primary_netname = network_dict['name']
+ if "name" in network_dict:
+ primary_netname = network_dict["name"]
except KeyError:
- raise vimconn.VimConnException("Corrupted flavor. {}".format(primary_net))
+ raise vimconn.VimConnException(
+ "Corrupted flavor. {}".format(primary_net)
+ )
else:
- raise vimconn.VimConnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
+ raise vimconn.VimConnUnexpectedResponse(
+ "new_vminstance(): Failed network list is empty."
+ )
# use: 'data', 'bridge', 'mgmt'
# create vApp. Set vcpu and ram based on flavor id.
try:
vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
if not vdc_obj:
- raise vimconn.VimConnNotFoundException("new_vminstance(): Failed to get VDC object")
+ raise vimconn.VimConnNotFoundException(
+ "new_vminstance(): Failed to get VDC object"
+ )
for retry in (1, 2):
items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
if len(catalog_items) == 1:
if self.client:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
- response = self.perform_request(req_type='GET',
- url=catalog_items[0].get('href'),
- headers=headers)
+ response = self.perform_request(
+ req_type="GET",
+ url=catalog_items[0].get("href"),
+ headers=headers,
+ )
catalogItem = XmlElementTree.fromstring(response.text)
- entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+ entity = [
+ child
+ for child in catalogItem
+ if child.get("type")
+ == "application/vnd.vmware.vcloud.vAppTemplate+xml"
+ ][0]
vapp_tempalte_href = entity.get("href")
- response = self.perform_request(req_type='GET',
- url=vapp_tempalte_href,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=vapp_tempalte_href, headers=headers
+ )
+
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vapp_tempalte_href, response.status_code
+ )
+ )
else:
result = (response.text).replace("\n", " ")
vapp_template_tree = XmlElementTree.fromstring(response.text)
- children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
- vm_element = [child for child in children_element if 'Vm' in child.tag][0]
- vm_name = vm_element.get('name')
- vm_id = vm_element.get('id')
- vm_href = vm_element.get('href')
+ children_element = [
+ child for child in vapp_template_tree if "Children" in child.tag
+ ][0]
+ vm_element = [child for child in children_element if "Vm" in child.tag][
+ 0
+ ]
+ vm_name = vm_element.get("name")
+ vm_id = vm_element.get("id")
+ vm_href = vm_element.get("href")
# cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
# result).group(1)
- memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',
- result).group(1)
+ memory_mb = re.search(
+ "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
+ result,
+ ).group(1)
# cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
- vdc_id = vdc.get('id').split(':')[-1]
- instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
- vdc_id)
- with open(os.path.join(os.path.dirname(__file__), 'InstantiateVAppTemplateParams.xml'), 'r') as f:
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
+ vdc_id = vdc.get("id").split(":")[-1]
+ instantiate_vapp_href = (
+ "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
+ self.url, vdc_id
+ )
+ )
+
+ with open(
+ os.path.join(
+ os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
+ ),
+ "r",
+ ) as f:
template = f.read()
- data = template.format(vmname_andid,
- primary_netname,
- primary_net_href,
- vapp_tempalte_href,
- vm_href,
- vm_id,
- vm_name,
- primary_netname,
- cpu=vm_cpus,
- core=1,
- memory=vm_memory)
-
- response = self.perform_request(req_type='POST',
- url=instantiate_vapp_href,
- headers=headers,
- data=data)
+ data = template.format(
+ vmname_andid,
+ primary_netname,
+ primary_net_href,
+ vapp_tempalte_href,
+ vm_href,
+ vm_id,
+ vm_name,
+ primary_netname,
+ cpu=vm_cpus,
+ core=1,
+ memory=vm_memory,
+ )
+
+ response = self.perform_request(
+ req_type="POST",
+ url=instantiate_vapp_href,
+ headers=headers,
+ data=data,
+ )
if response.status_code != 201:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(instantiate_vapp_href,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("new_vminstance(): Failed to create"
- "vAapp {}".format(vmname_andid))
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ instantiate_vapp_href, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "new_vminstance(): Failed to create"
+ "vAapp {}".format(vmname_andid)
+ )
else:
vapptask = self.get_task_from_response(response.text)
if vapptask is None or vapptask is False:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+ "new_vminstance(): failed to create vApp {}".format(vmname_andid)
+ )
# wait for task to complete
result = self.client.get_task_monitor().wait_for_success(task=vapptask)
- if result.get('status') == 'success':
- self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
+ if result.get("status") == "success":
+ self.logger.debug(
+ "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
+ )
else:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): failed to create vApp {}".format(vmname_andid))
-
+ "new_vminstance(): failed to create vApp {}".format(vmname_andid)
+ )
except Exception as exp:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
+ "new_vminstance(): failed to create vApp {} with Exception:{}".format(
+ vmname_andid, exp
+ )
+ )
# we should have now vapp in undeployed state.
try:
- vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vdc_obj = VDC(self.client, href=vdc.get("href"))
vapp_resource = vdc_obj.get_vapp(vmname_andid)
- vapp_uuid = vapp_resource.get('id').split(':')[-1]
+ vapp_uuid = vapp_resource.get("id").split(":")[-1]
vapp = VApp(self.client, resource=vapp_resource)
-
except Exception as exp:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
- .format(vmname_andid, exp))
+ "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
+ vmname_andid, exp
+ )
+ )
if vapp_uuid is None:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): Failed to retrieve vApp {} after creation".format(vmname_andid))
+ "new_vminstance(): Failed to retrieve vApp {} after creation".format(
+ vmname_andid
+ )
+ )
# Add PCI passthrough/SRIOV configurations
pci_devices_info = []
for net in net_list:
if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
pci_devices_info.append(net)
- elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
+ elif (
+ net["type"] == "VF"
+ or net["type"] == "SR-IOV"
+ or net["type"] == "VFnotShared"
+ ) and "net_id" in net:
reserve_memory = True
# Add PCI
if len(pci_devices_info) > 0:
- self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
- vmname_andid))
- PCI_devices_status, _, _ = self.add_pci_devices(vapp_uuid,
- pci_devices_info,
- vmname_andid)
+ self.logger.info(
+ "Need to add PCI devices {} into VM {}".format(
+ pci_devices_info, vmname_andid
+ )
+ )
+ PCI_devices_status, _, _ = self.add_pci_devices(
+ vapp_uuid, pci_devices_info, vmname_andid
+ )
+
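+ # Memory reservation is required when PCI passthrough / SR-IOV devices are attached;
+ # reserve_memory is set here and applied later via reserve_memory_for_all_vms().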
if PCI_devices_status:
- self.logger.info("Added PCI devives {} to VM {}".format(
- pci_devices_info,
- vmname_andid))
+ self.logger.info(
+ "Added PCI devives {} to VM {}".format(
+ pci_devices_info, vmname_andid
+ )
+ )
reserve_memory = True
else:
- self.logger.info("Fail to add PCI devives {} to VM {}".format(
- pci_devices_info,
- vmname_andid))
+ self.logger.info(
+ "Fail to add PCI devives {} to VM {}".format(
+ pci_devices_info, vmname_andid
+ )
+ )
# Add serial console - this allows cloud images to boot as if we are running under OpenStack
self.add_serial_device(vapp_uuid)
if disk_list:
added_existing_disk = False
for disk in disk_list:
- if 'device_type' in disk and disk['device_type'] == 'cdrom':
- image_id = disk['image_id']
+ if "device_type" in disk and disk["device_type"] == "cdrom":
+ image_id = disk["image_id"]
# Adding CD-ROM to VM
# will revisit code once specification ready to support this feature
self.insert_media_to_vm(vapp, image_id)
elif "image_id" in disk and disk["image_id"] is not None:
- self.logger.debug("Adding existing disk from image {} to vm {} ".format(
- disk["image_id"], vapp_uuid))
- self.add_existing_disk(catalogs=catalogs,
- image_id=disk["image_id"],
- size=disk["size"],
- template_name=templateName,
- vapp_uuid=vapp_uuid
- )
+ self.logger.debug(
+ "Adding existing disk from image {} to vm {} ".format(
+ disk["image_id"], vapp_uuid
+ )
+ )
+ self.add_existing_disk(
+ catalogs=catalogs,
+ image_id=disk["image_id"],
+ size=disk["size"],
+ template_name=templateName,
+ vapp_uuid=vapp_uuid,
+ )
added_existing_disk = True
else:
# Wait till added existing disk gets reflected into vCD database/API
if added_existing_disk:
time.sleep(5)
added_existing_disk = False
- self.add_new_disk(vapp_uuid, disk['size'])
+ self.add_new_disk(vapp_uuid, disk["size"])
if numas:
# Assigning numa affinity setting
for numa in numas:
- if 'paired-threads-id' in numa:
- paired_threads_id = numa['paired-threads-id']
+ if "paired-threads-id" in numa:
+ paired_threads_id = numa["paired-threads-id"]
self.set_numa_affinity(vapp_uuid, paired_threads_id)
# add NICs & connect to networks in netlist
try:
- vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vdc_obj = VDC(self.client, href=vdc.get("href"))
vapp_resource = vdc_obj.get_vapp(vmname_andid)
vapp = VApp(self.client, resource=vapp_resource)
- vapp_id = vapp_resource.get('id').split(':')[-1]
+ vapp_id = vapp_resource.get("id").split(":")[-1]
self.logger.info("Removing primary NIC: ")
# First remove all NICs so that NIC properties can be adjusted as needed
# [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
# 'vpci': '0000:00:11.0', 'name': 'eth0'}]
- if 'net_id' not in net:
+ if "net_id" not in net:
continue
# Using net_id as a vim_id, i.e. vim interface id, as we do not have a separate vim interface id
# Same will be returned in refresh_vms_status() as vim_interface_id
- net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
+ net["vim_id"] = net[
+ "net_id"
+ ] # Provide the same VIM identifier as the VIM network
- interface_net_id = net['net_id']
- interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
- interface_network_mode = net['use']
+ interface_net_id = net["net_id"]
+ interface_net_name = self.get_network_name_by_id(
+ network_uuid=interface_net_id
+ )
+ interface_network_mode = net["use"]
- if interface_network_mode == 'mgmt':
+ if interface_network_mode == "mgmt":
primary_nic_index = nicIndex
"""- POOL (A static IP address is allocated automatically from a pool of addresses.)
- NONE (No IP addressing mode specified.)"""
if primary_netname is not None:
- self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
- nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
+ self.logger.debug(
+ "new_vminstance(): Filtering by net name {}".format(
+ interface_net_name
+ )
+ )
+ nets = [
+ n
+ for n in self.get_network_list()
+ if n.get("name") == interface_net_name
+ ]
+
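+ # The adapter is attached only when exactly one org VDC network matches the requested name.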
if len(nets) == 1:
- self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
+ self.logger.info(
+ "new_vminstance(): Found requested network: {}".format(
+ nets[0].get("name")
+ )
+ )
if interface_net_name != primary_netname:
# connect network to VM - with all DHCP by default
- self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
- self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
-
- type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
- nic_type = 'VMXNET3'
- if 'type' in net and net['type'] not in type_list:
+ self.logger.info(
+ "new_vminstance(): Attaching net {} to vapp".format(
+ interface_net_name
+ )
+ )
+ self.connect_vapp_to_org_vdc_network(
+ vapp_id, nets[0].get("name")
+ )
+
+ type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
+ nic_type = "VMXNET3"
+ if "type" in net and net["type"] not in type_list:
# fetching nic type from vnf
- if 'model' in net:
- if net['model'] is not None:
- if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
- nic_type = 'VMXNET3'
+ if "model" in net:
+ if net["model"] is not None:
+ if (
+ net["model"].lower() == "paravirt"
+ or net["model"].lower() == "virtio"
+ ):
+ nic_type = "VMXNET3"
else:
- nic_type = net['model']
-
- self.logger.info("new_vminstance(): adding network adapter "
- "to a network {}".format(nets[0].get('name')))
- self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
- primary_nic_index,
- nicIndex,
- net,
- nic_type=nic_type)
+ nic_type = net["model"]
+
+ self.logger.info(
+ "new_vminstance(): adding network adapter "
+ "to a network {}".format(nets[0].get("name"))
+ )
+ self.add_network_adapter_to_vms(
+ vapp,
+ nets[0].get("name"),
+ primary_nic_index,
+ nicIndex,
+ net,
+ nic_type=nic_type,
+ )
else:
- self.logger.info("new_vminstance(): adding network adapter "
- "to a network {}".format(nets[0].get('name')))
- if net['type'] in ['SR-IOV', 'VF']:
- nic_type = net['type']
- self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
- primary_nic_index,
- nicIndex,
- net,
- nic_type=nic_type)
+ self.logger.info(
+ "new_vminstance(): adding network adapter "
+ "to a network {}".format(nets[0].get("name"))
+ )
+
+ if net["type"] in ["SR-IOV", "VF"]:
+ nic_type = net["type"]
+ self.add_network_adapter_to_vms(
+ vapp,
+ nets[0].get("name"),
+ primary_nic_index,
+ nicIndex,
+ net,
+ nic_type=nic_type,
+ )
nicIndex += 1
# cloud-init for ssh-key injection
# Create a catalog which will be carrying the config drive ISO
# This catalog is deleted during vApp deletion. The catalog name carries
# vApp UUID and that's how it gets identified during its deletion.
- config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
- self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
- config_drive_catalog_name))
- config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+ config_drive_catalog_name = "cfg_drv-" + vapp_uuid
+ self.logger.info(
+ 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+ config_drive_catalog_name
+ )
+ )
+ config_drive_catalog_id = self.create_vimcatalog(
+ org, config_drive_catalog_name
+ )
+
if config_drive_catalog_id is None:
- error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
- "ISO".format(config_drive_catalog_name)
+ error_msg = (
+ "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
+ "ISO".format(config_drive_catalog_name)
+ )
raise Exception(error_msg)
# Create config-drive ISO
_, userdata = self._create_user_data(cloud_config)
# self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
iso_path = self.create_config_drive_iso(userdata)
- self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+ self.logger.debug(
+ "new_vminstance(): The ISO is successfully created. Path: {}".format(
+ iso_path
+ )
+ )
- self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+ self.logger.info(
+ "new_vminstance(): uploading iso to catalog {}".format(
+ config_drive_catalog_name
+ )
+ )
self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
# Attach the config-drive ISO to the VM
- self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+ self.logger.info(
+ "new_vminstance(): Attaching the config-drive ISO to the VM"
+ )
self.insert_media_to_vm(vapp, config_drive_catalog_id)
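+ # Clean up the temporary directory holding the config-drive ISO now that the media is attached to the VM.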
shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
if reserve_memory:
self.reserve_memory_for_all_vms(vapp, memory_mb)
- self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
+ self.logger.debug(
+ "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
+ )
poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
- if result.get('status') == 'success':
- self.logger.info("new_vminstance(): Successfully power on "
- "vApp {}".format(vmname_andid))
+ if result.get("status") == "success":
+ self.logger.info(
+ "new_vminstance(): Successfully power on "
+ "vApp {}".format(vmname_andid)
+ )
else:
- self.logger.error("new_vminstance(): failed to power on vApp "
- "{}".format(vmname_andid))
+ self.logger.error(
+ "new_vminstance(): failed to power on vApp "
+ "{}".format(vmname_andid)
+ )
except Exception as exp:
try:
except Exception as exp2:
self.logger.error("new_vminstance rollback fail {}".format(exp2))
# it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
- self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
- .format(name, exp))
- raise vimconn.VimConnException("new_vminstance(): Failed create new vm instance {} with exception {}"
- .format(name, exp))
+ self.logger.error(
+ "new_vminstance(): Failed create new vm instance {} with exception {}".format(
+ name, exp
+ )
+ )
+ raise vimconn.VimConnException(
+ "new_vminstance(): Failed create new vm instance {} with exception {}".format(
+ name, exp
+ )
+ )
# check if the vApp is deployed and, if that is the case, return the vApp UUID; otherwise -1
wait_time = 0
vapp_uuid = None
vapp = VApp(self.client, resource=vapp_resource)
except Exception as exp:
raise vimconn.VimConnUnexpectedResponse(
- "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
- .format(vmname_andid, exp))
+ "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
+ vmname_andid, exp
+ )
+ )
# if vapp and vapp.me.deployed:
- if vapp and vapp_resource.get('deployed') == 'true':
- vapp_uuid = vapp_resource.get('id').split(':')[-1]
+ if vapp and vapp_resource.get("deployed") == "true":
+ vapp_uuid = vapp_resource.get("id").split(":")[-1]
break
else:
- self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
+ self.logger.debug(
+ "new_vminstance(): Wait for vApp {} to deploy".format(name)
+ )
time.sleep(INTERVAL_TIME)
wait_time += INTERVAL_TIME
# Host groups are referred to as availability zones
# With the following procedure, the deployed VM will be added into a VM group.
# Then a VM to Host Affinity rule will be created using the VM group & Host group.
- if(availability_zone_list):
- self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
+ if availability_zone_list:
+ self.logger.debug(
+ "Existing Host Groups in VIM {}".format(
+ self.config.get("availability_zone")
+ )
+ )
# Admin access required for creating Affinity rules
client = self.connect_as_admin()
+
if not client:
- raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
+ raise vimconn.VimConnConnectionException(
+ "Failed to connect vCD as admin"
+ )
else:
self.client = client
+
if self.client:
- headers = {'Accept': 'application/*+xml;version=27.0',
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=27.0",
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+
# Step1: Get provider vdc details from organization
pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
if pvdc_href is not None:
respool_href = self.get_resource_pool_details(pvdc_href, headers)
if respool_href is None:
# Raise error if respool_href not found
- msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(pvdc_href)
+ msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
+ pvdc_href
+ )
self.log_message(msg)
# Step3: Verify requested availability zone(hostGroup) is present in vCD
# get availability Zone
- vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
+ vm_az = self.get_vm_availability_zone(
+ availability_zone_index, availability_zone_list
+ )
+
# check if provided av zone(hostGroup) is present in vCD VIM
status = self.check_availibility_zone(vm_az, respool_href, headers)
if status is False:
- msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "
- "resource pool {} status: {}".format(vm_az, respool_href, status)
+ msg = (
+ "new_vminstance(): Error in finding availability zone(Host Group): {} in "
+ "resource pool {} status: {}"
+ ).format(vm_az, respool_href, status)
self.log_message(msg)
else:
- self.logger.debug("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
+ self.logger.debug(
+ "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
+ )
# Step4: Find VM group references to create vm group
vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
self.log_message(msg)
# Step5: Create a VmGroup with name az_VmGroup
- vmgrp_name = vm_az + "_" + name # Formed VM Group name = Host Group name + VM name
+ vmgrp_name = (
+ vm_az + "_" + name
+ ) # Formed VM Group name = Host Group name + VM name
status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
if status is not True:
- msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
+ msg = "new_vminstance(): Error in creating VM group {}".format(
+ vmgrp_name
+ )
self.log_message(msg)
# VM Group url to add vms to vm group
# Step6: Add VM to VM Group
# Find VM uuid from vapp_uuid
vm_details = self.get_vapp_details_rest(vapp_uuid)
- vm_uuid = vm_details['vmuuid']
+ vm_uuid = vm_details["vmuuid"]
status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
if status is not True:
- msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
+ msg = "new_vminstance(): Error in adding VM to VM group {}".format(
+ vmgrp_name
+ )
self.log_message(msg)
# Step7: Create VM to Host affinity rule
addrule_href = self.get_add_rule_reference(respool_href, headers)
if addrule_href is None:
- msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
- .format(respool_href)
+ msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
+ respool_href
+ )
self.log_message(msg)
- status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
+ status = self.create_vm_to_host_affinity_rule(
+ addrule_href, vmgrp_name, vm_az, "Affinity", headers
+ )
if status is False:
- msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
- .format(name, vm_az)
+ msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
+ name, vm_az
+ )
self.log_message(msg)
else:
- self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"
- .format(name, vm_az))
+ self.logger.debug(
+ "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
+ name, vm_az
+ )
+ )
# Reset token to a normal user to perform other operations
self.get_token()
if vapp_uuid is not None:
return vapp_uuid, None
else:
- raise vimconn.VimConnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+ raise vimconn.VimConnUnexpectedResponse(
+ "new_vminstance(): Failed create new vm instance {}".format(name)
+ )
def create_config_drive_iso(self, user_data):
tmpdir = tempfile.mkdtemp()
- iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
- latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+ iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
+ latest_dir = os.path.join(tmpdir, "openstack", "latest")
os.makedirs(latest_dir)
- with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
- open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+ with open(
+ os.path.join(latest_dir, "meta_data.json"), "w"
+ ) as meta_file_obj, open(
+ os.path.join(latest_dir, "user_data"), "w"
+ ) as userdata_file_obj:
userdata_file_obj.write(user_data)
- meta_file_obj.write(json.dumps({"availability_zone": "nova",
- "launch_index": 0,
- "name": "ConfigDrive",
- "uuid": str(uuid.uuid4())}
- )
- )
- genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
- iso_path=iso_path, source_dir_path=tmpdir)
- self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+ meta_file_obj.write(
+ json.dumps(
+ {
+ "availability_zone": "nova",
+ "launch_index": 0,
+ "name": "ConfigDrive",
+ "uuid": str(uuid.uuid4()),
+ }
+ )
+ )
+ genisoimage_cmd = (
+ "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
+ iso_path=iso_path, source_dir_path=tmpdir
+ )
+ )
+ self.logger.info(
+ 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
+ genisoimage_cmd
+ )
+ )
+
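+ # Note: this relies on the genisoimage tool being installed on the host; if the command
+ # fails, the temporary directory is removed and an exception is raised.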
try:
- FNULL = open(os.devnull, 'w')
+ FNULL = open(os.devnull, "w")
subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
except subprocess.CalledProcessError as e:
shutil.rmtree(tmpdir, ignore_errors=True)
- error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+ error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
+ e
+ )
self.logger.error(error_msg)
raise Exception(error_msg)
+
return iso_path
def upload_iso_to_catalog(self, catalog_id, iso_file_path):
if not os.path.isfile(iso_file_path):
- error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+ error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
+ iso_file_path
+ )
self.logger.error(error_msg)
raise Exception(error_msg)
+
iso_file_stat = os.stat(iso_file_path)
- xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+ xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
<Media
xmlns="http://www.vmware.com/vcloud/v1.5"
name="{iso_name}"
size="{iso_size}"
imageType="iso">
<Description>ISO image for config-drive</Description>
- </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
- catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
- response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+ </Media>""".format(
+ iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
+ catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
+ response = self.perform_request(
+ req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
+ )
if response.status_code != 201:
- error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+ error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
+ catalog_href
+ )
self.logger.error(error_msg)
raise Exception(error_msg)
catalogItem = XmlElementTree.fromstring(response.text)
- entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
- entity_href = entity.get('href')
-
- response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+ entity = [
+ child
+ for child in catalogItem
+ if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
+ ][0]
+ entity_href = entity.get("href")
+
+ response = self.perform_request(
+ req_type="GET", url=entity_href, headers=headers
+ )
if response.status_code != 200:
- raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+ raise Exception(
+ "upload_iso_to_catalog(): Failed to GET entity href {}".format(
+ entity_href
+ )
+ )
- match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+ match = re.search(
+ r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
+ response.text,
+ re.DOTALL,
+ )
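+ # The Media entity's <Files> element advertises the href the ISO content must be uploaded to;
+ # it is extracted here with a regex.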
if match:
media_upload_href = match.group(1)
else:
- raise Exception('Could not parse the upload URL for the media file from the last response')
+ raise Exception(
+ "Could not parse the upload URL for the media file from the last response"
+ )
upload_iso_task = self.get_task_from_response(response.text)
- headers['Content-Type'] = 'application/octet-stream'
- response = self.perform_request(req_type='PUT',
- url=media_upload_href,
- headers=headers,
- data=open(iso_file_path, 'rb'))
+ headers["Content-Type"] = "application/octet-stream"
+ response = self.perform_request(
+ req_type="PUT",
+ url=media_upload_href,
+ headers=headers,
+ data=open(iso_file_path, "rb"),
+ )
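+ # The ISO file is sent in a single PUT with Content-Type application/octet-stream;
+ # the upload task obtained earlier is then polled for completion.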
if response.status_code != 200:
raise Exception('PUT request to "{}" failed'.format(media_upload_href))
+
result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
- if result.get('status') != 'success':
- raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
+ if result.get("status") != "success":
+ raise Exception(
+ "The upload iso task failed with status {}".format(result.get("status"))
+ )
def get_vcd_availibility_zones(self, respool_href, headers):
- """ Method to find presence of av zone is VIM resource pool
+ """Method to find presence of av zone is VIM resource pool
- Args:
- respool_href - resource pool href
- headers - header information
+ Args:
+ respool_href - resource pool href
+ headers - header information
- Returns:
- vcd_az - list of azone present in vCD
+ Returns:
+ vcd_az - list of availability zones present in vCD
"""
vcd_az = []
url = respool_href
- resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+ resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ url, resp.status_code
+ )
+ )
else:
# Get the href to hostGroups and find provided hostGroup is present in it
resp_xml = XmlElementTree.fromstring(resp.content)
for child in resp_xml:
- if 'VMWProviderVdcResourcePool' in child.tag:
+ if "VMWProviderVdcResourcePool" in child.tag:
for schild in child:
- if 'Link' in schild.tag:
- if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
- hostGroup = schild.attrib.get('href')
- hg_resp = self.perform_request(req_type='GET', url=hostGroup, headers=headers)
+ if "Link" in schild.tag:
+ if (
+ schild.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
+ ):
+ hostGroup = schild.attrib.get("href")
+ hg_resp = self.perform_request(
+ req_type="GET", url=hostGroup, headers=headers
+ )
+
if hg_resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(
- hostGroup, hg_resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ hostGroup, hg_resp.status_code
+ )
+ )
else:
- hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+ hg_resp_xml = XmlElementTree.fromstring(
+ hg_resp.content
+ )
for hostGroup in hg_resp_xml:
- if 'HostGroup' in hostGroup.tag:
+ if "HostGroup" in hostGroup.tag:
# append host group name to the list
vcd_az.append(hostGroup.attrib.get("name"))
+
return vcd_az
def set_availability_zones(self):
"""
Set vim availability zone
"""
-
vim_availability_zones = None
availability_zone = None
- if 'availability_zone' in self.config:
- vim_availability_zones = self.config.get('availability_zone')
+
+ if "availability_zone" in self.config:
+ vim_availability_zones = self.config.get("availability_zone")
+
if isinstance(vim_availability_zones, str):
availability_zone = [vim_availability_zones]
elif isinstance(vim_availability_zones, list):
returns: The VIM availability zone to be used or None
"""
if availability_zone_index is None:
- if not self.config.get('availability_zone'):
+ if not self.config.get("availability_zone"):
return None
- elif isinstance(self.config.get('availability_zone'), str):
- return self.config['availability_zone']
+ elif isinstance(self.config.get("availability_zone"), str):
+ return self.config["availability_zone"]
else:
- return self.config['availability_zone'][0]
+ return self.config["availability_zone"][0]
vim_availability_zones = self.availability_zone
# check if VIM offer enough availability zones describe in the VNFD
- if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+ if vim_availability_zones and len(availability_zone_list) <= len(
+ vim_availability_zones
+ ):
# check if all the names of NFV AV match VIM AV names
match_by_index = False
for av in availability_zone_list:
if av not in vim_availability_zones:
match_by_index = True
break
+
if match_by_index:
- self.logger.debug("Required Availability zone or Host Group not found in VIM config")
- self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
- self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
+ self.logger.debug(
+ "Required Availability zone or Host Group not found in VIM config"
+ )
+ self.logger.debug(
+ "Input Availability zone list: {}".format(availability_zone_list)
+ )
+ self.logger.debug(
+ "VIM configured Availability zones: {}".format(
+ vim_availability_zones
+ )
+ )
self.logger.debug("VIM Availability zones will be used by index")
return vim_availability_zones[availability_zone_index]
else:
return availability_zone_list[availability_zone_index]
else:
- raise vimconn.VimConnConflictException("No enough availability zones at VIM for this deployment")
+ raise vimconn.VimConnConflictException(
+ "No enough availability zones at VIM for this deployment"
+ )
- def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
- """ Method to create VM to Host Affinity rule in vCD
+ def create_vm_to_host_affinity_rule(
+ self, addrule_href, vmgrpname, hostgrpname, polarity, headers
+ ):
+ """Method to create VM to Host Affinity rule in vCD
Args:
addrule_href - href to make a POST request
<vcloud:Polarity>{}</vcloud:Polarity>
<vmext:HostGroupName>{}</vmext:HostGroupName>
<vmext:VmGroupName>{}</vmext:VmGroupName>
- </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
+ </vmext:VMWVmHostAffinityRule>""".format(
+ rule_name, polarity, hostgrpname, vmgrpname
+ )
- resp = self.perform_request(req_type='POST', url=addrule_href, headers=headers, data=payload)
+ resp = self.perform_request(
+ req_type="POST", url=addrule_href, headers=headers, data=payload
+ )
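+ # Rule creation is asynchronous: a 202 Accepted response carries a task that is monitored
+ # below before reporting success.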
if resp.status_code != requests.codes.accepted:
- self.logger.debug("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ addrule_href, resp.status_code
+ )
+ )
task_status = False
+
return task_status
else:
affinity_task = self.get_task_from_response(resp.content)
self.logger.debug("affinity_task: {}".format(affinity_task))
+
if affinity_task is None or affinity_task is False:
raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
# wait for task to complete
result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
- if result.get('status') == 'success':
- self.logger.debug("Successfully created affinity rule {}".format(rule_name))
+
+ if result.get("status") == "success":
+ self.logger.debug(
+ "Successfully created affinity rule {}".format(rule_name)
+ )
return True
else:
raise vimconn.VimConnUnexpectedResponse(
- "failed to create affinity rule {}".format(rule_name))
+ "failed to create affinity rule {}".format(rule_name)
+ )
def get_add_rule_reference(self, respool_href, headers):
- """ This method finds href to add vm to host affinity rule to vCD
+ """This method finds href to add vm to host affinity rule to vCD
Args:
respool_href- href to resource pool
addrule_href - href to add vm to host affinity rule of resource pool
"""
addrule_href = None
- resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+ resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ respool_href, resp.status_code
+ )
+ )
else:
-
resp_xml = XmlElementTree.fromstring(resp.content)
for child in resp_xml:
- if 'VMWProviderVdcResourcePool' in child.tag:
+ if "VMWProviderVdcResourcePool" in child.tag:
for schild in child:
- if 'Link' in schild.tag:
- if (schild.attrib.get(
- 'type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and
- schild.attrib.get('rel') == "add"):
- addrule_href = schild.attrib.get('href')
+ if "Link" in schild.tag:
+ if (
+ schild.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
+ and schild.attrib.get("rel") == "add"
+ ):
+ addrule_href = schild.attrib.get("href")
break
return addrule_href
def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
- """ Method to add deployed VM to newly created VM Group.
+ """Method to add deployed VM to newly created VM Group.
This is required to create VM to Host affinity in vCD
Args:
True- if VM added to VM group successfully
False- if any error encounter
"""
-
- addvm_resp = self.perform_request(req_type='GET', url=vmGroupNameURL, headers=headers) # , data=payload)
+ addvm_resp = self.perform_request(
+ req_type="GET", url=vmGroupNameURL, headers=headers
+ ) # , data=payload)
if addvm_resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call to get VM Group Name url {} failed. Return status code {}"
- .format(vmGroupNameURL, addvm_resp.status_code))
+ self.logger.debug(
+ "REST API call to get VM Group Name url {} failed. Return status code {}".format(
+ vmGroupNameURL, addvm_resp.status_code
+ )
+ )
return False
else:
resp_xml = XmlElementTree.fromstring(addvm_resp.content)
for child in resp_xml:
- if child.tag.split('}')[1] == 'Link':
+ if child.tag.split("}")[1] == "Link":
if child.attrib.get("rel") == "addVms":
addvmtogrpURL = child.attrib.get("href")
# Get vm details
- url_list = [self.url, '/api/vApp/vm-', vm_uuid]
- vmdetailsURL = ''.join(url_list)
+ url_list = [self.url, "/api/vApp/vm-", vm_uuid]
+ vmdetailsURL = "".join(url_list)
- resp = self.perform_request(req_type='GET', url=vmdetailsURL, headers=headers)
+ resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vmdetailsURL, resp.status_code
+ )
+ )
return False
# Parse VM details
resp_xml = XmlElementTree.fromstring(resp.content)
- if resp_xml.tag.split('}')[1] == "Vm":
+ if resp_xml.tag.split("}")[1] == "Vm":
vm_id = resp_xml.attrib.get("id")
vm_name = resp_xml.attrib.get("name")
vm_href = resp_xml.attrib.get("href")
# print vm_id, vm_name, vm_href
+
# Add VM into VMgroup
payload = """<?xml version="1.0" encoding="UTF-8"?>\
<ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
<ns2:VmReference href="{}" id="{}" name="{}" \
type="application/vnd.vmware.vcloud.vm+xml" />\
- </ns2:Vms>""".format(vm_href, vm_id, vm_name)
+ </ns2:Vms>""".format(
+ vm_href, vm_id, vm_name
+ )
- addvmtogrp_resp = self.perform_request(req_type='POST', url=addvmtogrpURL, headers=headers, data=payload)
+ addvmtogrp_resp = self.perform_request(
+ req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
+ )
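+ # The VM is referenced by href/id/name in the Vms payload and POSTed to the group's
+ # addVms link obtained above.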
if addvmtogrp_resp.status_code != requests.codes.accepted:
- self.logger.debug("REST API call {} failed. Return status code {}".format(addvmtogrpURL,
- addvmtogrp_resp.
- status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ addvmtogrpURL, addvmtogrp_resp.status_code
+ )
+ )
+
return False
else:
- self.logger.debug("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
+ self.logger.debug(
+ "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
+ )
+
return True
def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
"""Method to create a VM group in vCD
- Args:
- vmgroup_name : Name of VM group to be created
- vmgroup_href : href for vmgroup
- headers- Headers for REST request
+ Args:
+ vmgroup_name : Name of VM group to be created
+ vmgroup_href : href for vmgroup
+ headers- Headers for REST request
"""
# POST to add URL with required data
vmgroup_status = False
payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
<vmCount>1</vmCount>\
- </VMWVmGroup>""".format(vmgroup_name)
- resp = self.perform_request(req_type='POST', url=vmgroup_href, headers=headers, data=payload)
+ </VMWVmGroup>""".format(
+ vmgroup_name
+ )
+ resp = self.perform_request(
+ req_type="POST", url=vmgroup_href, headers=headers, data=payload
+ )
if resp.status_code != requests.codes.accepted:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vmgroup_href, resp.status_code
+ )
+ )
+
return vmgroup_status
else:
vmgroup_task = self.get_task_from_response(resp.content)
if vmgroup_task is None or vmgroup_task is False:
raise vimconn.VimConnUnexpectedResponse(
- "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+ "create_vmgroup(): failed to create VM group {}".format(
+ vmgroup_name
+ )
+ )
# wait for task to complete
result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
- if result.get('status') == 'success':
- self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
+ if result.get("status") == "success":
+ self.logger.debug(
+ "create_vmgroup(): Successfully created VM group {}".format(
+ vmgroup_name
+ )
+ )
# time.sleep(10)
vmgroup_status = True
+
return vmgroup_status
else:
raise vimconn.VimConnUnexpectedResponse(
- "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+ "create_vmgroup(): failed to create VM group {}".format(
+ vmgroup_name
+ )
+ )
def find_vmgroup_reference(self, url, headers):
- """ Method to create a new VMGroup which is required to add created VM
- Args:
- url- resource pool href
- headers- header information
+ """Method to create a new VMGroup which is required to add created VM
+ Args:
+ url- resource pool href
+ headers- header information
- Returns:
- returns href to VM group to create VM group
+ Returns:
+ returns href to VM group to create VM group
"""
# Perform GET on resource pool to find 'add' link to create VMGroup
# https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
vmgrp_href = None
- resp = self.perform_request(req_type='GET', url=url, headers=headers)
+ resp = self.perform_request(req_type="GET", url=url, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ url, resp.status_code
+ )
+ )
else:
# Get the href to add vmGroup to vCD
resp_xml = XmlElementTree.fromstring(resp.content)
for child in resp_xml:
- if 'VMWProviderVdcResourcePool' in child.tag:
+ if "VMWProviderVdcResourcePool" in child.tag:
for schild in child:
- if 'Link' in schild.tag:
+ if "Link" in schild.tag:
# Find href with type VMGroup and rel with add
- if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
- and schild.attrib.get('rel') == "add":
- vmgrp_href = schild.attrib.get('href')
+ if (
+ schild.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwVmGroupType+xml"
+ and schild.attrib.get("rel") == "add"
+ ):
+ vmgrp_href = schild.attrib.get("href")
+
return vmgrp_href
def check_availibility_zone(self, az, respool_href, headers):
- """ Method to verify requested av zone is present or not in provided
- resource pool
+ """Method to verify requested av zone is present or not in provided
+ resource pool
- Args:
- az - name of hostgroup (availibility_zone)
- respool_href - Resource Pool href
- headers - Headers to make REST call
- Returns:
- az_found - True if availibility_zone is found else False
+ Args:
+ az - name of hostgroup (availability_zone)
+ respool_href - Resource Pool href
+ headers - Headers to make REST call
+ Returns:
+ az_found - True if availability_zone is found else False
"""
az_found = False
- headers['Accept'] = 'application/*+xml;version=27.0'
- resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+ headers["Accept"] = "application/*+xml;version=27.0"
+ resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ respool_href, resp.status_code
+ )
+ )
else:
# Get the href to hostGroups and find provided hostGroup is present in it
resp_xml = XmlElementTree.fromstring(resp.content)
for child in resp_xml:
- if 'VMWProviderVdcResourcePool' in child.tag:
+ if "VMWProviderVdcResourcePool" in child.tag:
for schild in child:
- if 'Link' in schild.tag:
- if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
- hostGroup_href = schild.attrib.get('href')
- hg_resp = self.perform_request(req_type='GET', url=hostGroup_href, headers=headers)
+ if "Link" in schild.tag:
+ if (
+ schild.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
+ ):
+ hostGroup_href = schild.attrib.get("href")
+ hg_resp = self.perform_request(
+ req_type="GET", url=hostGroup_href, headers=headers
+ )
+
if hg_resp.status_code != requests.codes.ok:
self.logger.debug(
- "REST API call {} failed. Return status code {}".format(hostGroup_href,
- hg_resp.status_code))
+ "REST API call {} failed. Return status code {}".format(
+ hostGroup_href, hg_resp.status_code
+ )
+ )
else:
- hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+ hg_resp_xml = XmlElementTree.fromstring(
+ hg_resp.content
+ )
for hostGroup in hg_resp_xml:
- if 'HostGroup' in hostGroup.tag:
+ if "HostGroup" in hostGroup.tag:
if hostGroup.attrib.get("name") == az:
az_found = True
break
+
return az_found
def get_pvdc_for_org(self, org_vdc, headers):
- """ This method gets provider vdc references from organisation
+ """This method gets provider vdc references from organisation
- Args:
- org_vdc - name of the organisation VDC to find pvdc
- headers - headers to make REST call
+ Args:
+ org_vdc - name of the organisation VDC to find pvdc
+ headers - headers to make REST call
- Returns:
- None - if no pvdc href found else
- pvdc_href - href to pvdc
+ Returns:
+ None - if no pvdc href found else
+ pvdc_href - href to pvdc
"""
-
# Get provider VDC references from vCD
pvdc_href = None
# url = '<vcd url>/api/admin/extension/providerVdcReferences'
- url_list = [self.url, '/api/admin/extension/providerVdcReferences']
- url = ''.join(url_list)
+ url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
+ url = "".join(url_list)
- response = self.perform_request(req_type='GET', url=url, headers=headers)
+ response = self.perform_request(req_type="GET", url=url, headers=headers)
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}"
- .format(url, response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ url, response.status_code
+ )
+ )
else:
xmlroot_response = XmlElementTree.fromstring(response.text)
for child in xmlroot_response:
- if 'ProviderVdcReference' in child.tag:
- pvdc_href = child.attrib.get('href')
+ if "ProviderVdcReference" in child.tag:
+ pvdc_href = child.attrib.get("href")
# Get vdcReferences to find org
- pvdc_resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
+ pvdc_resp = self.perform_request(
+ req_type="GET", url=pvdc_href, headers=headers
+ )
+
if pvdc_resp.status_code != requests.codes.ok:
- raise vimconn.VimConnException("REST API call {} failed. "
- "Return status code {}"
- .format(url, pvdc_resp.status_code))
+ raise vimconn.VimConnException(
+ "REST API call {} failed. "
+ "Return status code {}".format(url, pvdc_resp.status_code)
+ )
pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
for child in pvdc_resp_xml:
- if 'Link' in child.tag:
- if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
- vdc_href = child.attrib.get('href')
+ if "Link" in child.tag:
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.admin.vdcReferences+xml"
+ ):
+ vdc_href = child.attrib.get("href")
# Check if provided org is present in vdc
- vdc_resp = self.perform_request(req_type='GET',
- url=vdc_href,
- headers=headers)
+ vdc_resp = self.perform_request(
+ req_type="GET", url=vdc_href, headers=headers
+ )
+
if vdc_resp.status_code != requests.codes.ok:
- raise vimconn.VimConnException("REST API call {} failed. "
- "Return status code {}"
- .format(url, vdc_resp.status_code))
- vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
+ raise vimconn.VimConnException(
+ "REST API call {} failed. "
+ "Return status code {}".format(
+ url, vdc_resp.status_code
+ )
+ )
+ vdc_resp_xml = XmlElementTree.fromstring(
+ vdc_resp.content
+ )
+
for child in vdc_resp_xml:
- if 'VdcReference' in child.tag:
- if child.attrib.get('name') == org_vdc:
+ if "VdcReference" in child.tag:
+ if child.attrib.get("name") == org_vdc:
return pvdc_href
def get_resource_pool_details(self, pvdc_href, headers):
- """ Method to get resource pool information.
- Host groups are property of resource group.
- To get host groups, we need to GET details of resource pool.
+ """Method to get resource pool information.
+ Host groups are property of resource group.
+ To get host groups, we need to GET details of resource pool.
- Args:
- pvdc_href: href to pvdc details
- headers: headers
+ Args:
+ pvdc_href: href to pvdc details
+ headers: headers
- Returns:
- respool_href - Returns href link reference to resource pool
+ Returns:
+ respool_href - Returns href link reference to resource pool
"""
respool_href = None
- resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
+ resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
if resp.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}"
- .format(pvdc_href, resp.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ pvdc_href, resp.status_code
+ )
+ )
else:
respool_resp_xml = XmlElementTree.fromstring(resp.content)
for child in respool_resp_xml:
- if 'Link' in child.tag:
- if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
+ if "Link" in child.tag:
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
+ ):
respool_href = child.attrib.get("href")
break
+
return respool_href
def log_message(self, msg):
"""
- Method to log error messages related to Affinity rule creation
- in new_vminstance & raise Exception
- Args :
- msg - Error message to be logged
+ Method to log error messages related to Affinity rule creation
+ in new_vminstance & raise Exception
+ Args :
+ msg - Error message to be logged
"""
# get token to connect vCD as a normal user
self.get_token()
self.logger.debug(msg)
+
raise vimconn.VimConnException(msg)
# #
#
def get_vminstance(self, vim_vm_uuid=None):
"""Returns the VM instance information from VIM"""
-
self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
_, vdc = self.get_vdc_details()
if vdc is None:
raise vimconn.VimConnConnectionException(
- "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+ "Failed to get a reference of VDC for a tenant {}".format(
+ self.tenant_name
+ )
+ )
vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
if not vm_info_dict:
- self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
- raise vimconn.VimConnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+ self.logger.debug(
+ "get_vminstance(): Failed to get vApp name by UUID {}".format(
+ vim_vm_uuid
+ )
+ )
+ raise vimconn.VimConnNotFoundException(
+ "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
+ )
- status_key = vm_info_dict['status']
- error = ''
+ status_key = vm_info_dict["status"]
+ error = ""
try:
- vm_dict = {'created': vm_info_dict['created'],
- 'description': vm_info_dict['name'],
- 'status': vcdStatusCode2manoFormat[int(status_key)],
- 'hostId': vm_info_dict['vmuuid'],
- 'error_msg': error,
- 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
-
- if 'interfaces' in vm_info_dict:
- vm_dict['interfaces'] = vm_info_dict['interfaces']
+ vm_dict = {
+ "created": vm_info_dict["created"],
+ "description": vm_info_dict["name"],
+ "status": vcdStatusCode2manoFormat[int(status_key)],
+ "hostId": vm_info_dict["vmuuid"],
+ "error_msg": error,
+ "vim_info": yaml.safe_dump(vm_info_dict),
+ "interfaces": [],
+ }
+
+ if "interfaces" in vm_info_dict:
+ vm_dict["interfaces"] = vm_info_dict["interfaces"]
else:
- vm_dict['interfaces'] = []
+ vm_dict["interfaces"] = []
except KeyError:
- vm_dict = {'created': '',
- 'description': '',
- 'status': vcdStatusCode2manoFormat[int(-1)],
- 'hostId': vm_info_dict['vmuuid'],
- 'error_msg': "Inconsistency state",
- 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+ vm_dict = {
+ "created": "",
+ "description": "",
+ "status": vcdStatusCode2manoFormat[int(-1)],
+ "hostId": vm_info_dict["vmuuid"],
+ "error_msg": "Inconsistency state",
+ "vim_info": yaml.safe_dump(vm_info_dict),
+ "interfaces": [],
+ }
return vm_dict
Returns:
Returns the instance identifier
"""
-
- self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
+ self.logger.debug(
+ "Client requesting delete vm instance {} ".format(vm__vim_uuid)
+ )
_, vdc = self.get_vdc_details()
- vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vdc_obj = VDC(self.client, href=vdc.get("href"))
if vdc_obj is None:
- self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
- self.tenant_name))
+ self.logger.debug(
+ "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
+ self.tenant_name
+ )
+ )
raise vimconn.VimConnException(
- "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+ "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
+ self.tenant_name
+ )
+ )
try:
vapp_name = self.get_namebyvappid(vm__vim_uuid)
if vapp_name is None:
- self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
- return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
- self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+ self.logger.debug(
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ )
+ )
+
+ return (
+ -1,
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ ),
+ )
+
+ self.logger.info(
+ "Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
+ )
vapp_resource = vdc_obj.get_vapp(vapp_name)
vapp = VApp(self.client, resource=vapp_resource)
# Delete vApp and wait for status change if task executed and vApp is None.
-
if vapp:
- if vapp_resource.get('deployed') == 'true':
+ if vapp_resource.get("deployed") == "true":
self.logger.info("Powering off vApp {}".format(vapp_name))
# Power off vApp
powered_off = False
wait_time = 0
+
while wait_time <= MAX_WAIT_TIME:
power_off_task = vapp.power_off()
- result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=power_off_task
+ )
- if result.get('status') == 'success':
+ if result.get("status") == "success":
powered_off = True
break
else:
- self.logger.info("Wait for vApp {} to power off".format(vapp_name))
+ self.logger.info(
+ "Wait for vApp {} to power off".format(vapp_name)
+ )
time.sleep(INTERVAL_TIME)
wait_time += INTERVAL_TIME
+
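+ # Power-off is polled up to MAX_WAIT_TIME before moving on to undeploy and delete.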
if not powered_off:
self.logger.debug(
- "delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
+ "delete_vminstance(): Failed to power off VM instance {} ".format(
+ vm__vim_uuid
+ )
+ )
else:
- self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
+ self.logger.info(
+ "delete_vminstance(): Powered off VM instance {} ".format(
+ vm__vim_uuid
+ )
+ )
# Undeploy vApp
self.logger.info("Undeploy vApp {}".format(vapp_name))
vapp = VApp(self.client, resource=vapp_resource)
if not vapp:
self.logger.debug(
- "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
- return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ )
+ )
+
+ return (
+ -1,
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ ),
+ )
+
undeploy_task = vapp.undeploy()
+ result = self.client.get_task_monitor().wait_for_success(
+ task=undeploy_task
+ )
- result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
- if result.get('status') == 'success':
+ if result.get("status") == "success":
undeployed = True
break
else:
- self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
+ self.logger.debug(
+ "Wait for vApp {} to undeploy".format(vapp_name)
+ )
time.sleep(INTERVAL_TIME)
wait_time += INTERVAL_TIME
if not undeployed:
- self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
+ self.logger.debug(
+ "delete_vminstance(): Failed to undeploy vApp {} ".format(
+ vm__vim_uuid
+ )
+ )
# delete vapp
self.logger.info("Start deletion of vApp {} ".format(vapp_name))
-
if vapp is not None:
wait_time = 0
result = False
vapp = VApp(self.client, resource=vapp_resource)
if not vapp:
self.logger.debug(
- "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
- return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ )
+ )
- delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
+ return (
+ -1,
+ "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ ),
+ )
- result = self.client.get_task_monitor().wait_for_success(task=delete_task)
- if result.get('status') == 'success':
+ delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=delete_task
+ )
+ if result.get("status") == "success":
break
else:
- self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
+ self.logger.debug(
+ "Wait for vApp {} to delete".format(vapp_name)
+ )
time.sleep(INTERVAL_TIME)
wait_time += INTERVAL_TIME
if result is None:
- self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
+ self.logger.debug(
+ "delete_vminstance(): Failed delete uuid {} ".format(
+ vm__vim_uuid
+ )
+ )
else:
- self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
- config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+ self.logger.info(
+ "Deleted vm instance {} sccessfully".format(vm__vim_uuid)
+ )
+ config_drive_catalog_name, config_drive_catalog_id = (
+ "cfg_drv-" + vm__vim_uuid,
+ None,
+ )
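+ # The config-drive catalog created by new_vminstance() is named "cfg_drv-<vapp uuid>";
+ # look it up in the image list and delete it as well.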
catalog_list = self.get_image_list()
+
try:
- config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
- if catalog_['name'] == config_drive_catalog_name][0]
+ config_drive_catalog_id = [
+ catalog_["id"]
+ for catalog_ in catalog_list
+ if catalog_["name"] == config_drive_catalog_name
+ ][0]
except IndexError:
pass
+
if config_drive_catalog_id:
- self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
- 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+ self.logger.debug(
+ "delete_vminstance(): Found a config drive catalog {} matching "
+ 'vapp_name"{}". Deleting it.'.format(
+ config_drive_catalog_id, vapp_name
+ )
+ )
self.delete_image(config_drive_catalog_id)
+
return vm__vim_uuid
except Exception:
self.logger.debug(traceback.format_exc())
- raise vimconn.VimConnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
+
+ raise vimconn.VimConnException(
+ "delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid)
+ )
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # CREATING (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces:
- - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # CREATING (on building process), ERROR
+ # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces:
+ - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
"""
-
self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
_, vdc = self.get_vdc_details()
if vdc is None:
- raise vimconn.VimConnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+ raise vimconn.VimConnException(
+ "Failed to get a reference of VDC for a tenant {}".format(
+ self.tenant_name
+ )
+ )
vms_dict = {}
nsx_edge_list = []
for vmuuid in vm_list:
vapp_name = self.get_namebyvappid(vmuuid)
if vapp_name is not None:
-
try:
vm_pci_details = self.get_vm_pci_details(vmuuid)
- vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vdc_obj = VDC(self.client, href=vdc.get("href"))
vapp_resource = vdc_obj.get_vapp(vapp_name)
the_vapp = VApp(self.client, resource=vapp_resource)
vm_details = {}
for vm in the_vapp.get_all_vms():
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vm.get('href'),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vm.get("href"), headers=headers
+ )
if response.status_code != 200:
- self.logger.error("refresh_vms_status : REST call {} failed reason : {}"
- "status code : {}".format(vm.get('href'),
- response.text,
- response.status_code))
- raise vimconn.VimConnException("refresh_vms_status : Failed to get VM details")
- xmlroot = XmlElementTree.fromstring(response.text)
+ self.logger.error(
+ "refresh_vms_status : REST call {} failed reason : {}"
+ "status code : {}".format(
+ vm.get("href"), response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "refresh_vms_status : Failed to get VM details"
+ )
+ xmlroot = XmlElementTree.fromstring(response.text)
result = response.text.replace("\n", " ")
hdd_match = re.search(
- 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=', result)
+ 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
+ result,
+ )
+
if hdd_match:
hdd_mb = hdd_match.group(1)
- vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+ vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
+
cpus_match = re.search(
- '<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result)
+ "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
+ result,
+ )
+
if cpus_match:
cpus = cpus_match.group(1)
- vm_details['cpus'] = int(cpus) if cpus else None
+ vm_details["cpus"] = int(cpus) if cpus else None
+
memory_mb = re.search(
- '<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
- vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
- vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
- vm_details['id'] = xmlroot.get('id')
- vm_details['name'] = xmlroot.get('name')
+ "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
+ result,
+ ).group(1)
+ vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
+ vm_details["status"] = vcdStatusCode2manoFormat[
+ int(xmlroot.get("status"))
+ ]
+ vm_details["id"] = xmlroot.get("id")
+ vm_details["name"] = xmlroot.get("name")
vm_info = [vm_details]
+
if vm_pci_details:
vm_info[0].update(vm_pci_details)
- vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
- 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
- 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+ vm_dict = {
+ "status": vcdStatusCode2manoFormat[
+ int(vapp_resource.get("status"))
+ ],
+ "error_msg": vcdStatusCode2manoFormat[
+ int(vapp_resource.get("status"))
+ ],
+ "vim_info": yaml.safe_dump(vm_info),
+ "interfaces": [],
+ }
# get networks
vm_ip = None
vm_mac = None
- networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>', result)
+ networks = re.findall(
+ "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
+ result,
+ )
+
for network in networks:
- mac_s = re.search('<MACAddress>(.*?)</MACAddress>', network)
+ mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
vm_mac = mac_s.group(1) if mac_s else None
- ip_s = re.search('<IpAddress>(.*?)</IpAddress>', network)
+ ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
vm_ip = ip_s.group(1) if ip_s else None
if vm_ip is None:
if not nsx_edge_list:
nsx_edge_list = self.get_edge_details()
if nsx_edge_list is None:
- raise vimconn.VimConnException("refresh_vms_status:"
- "Failed to get edge details from NSX Manager")
+ raise vimconn.VimConnException(
+ "refresh_vms_status:"
+ "Failed to get edge details from NSX Manager"
+ )
+
if vm_mac is not None:
- vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+ vm_ip = self.get_ipaddr_from_NSXedge(
+ nsx_edge_list, vm_mac
+ )
net_s = re.search('network="(.*?)"', network)
network_name = net_s.group(1) if net_s else None
-
vm_net_id = self.get_network_id_by_name(network_name)
- interface = {"mac_address": vm_mac,
- "vim_net_id": vm_net_id,
- "vim_interface_id": vm_net_id,
- "ip_address": vm_ip}
-
+ interface = {
+ "mac_address": vm_mac,
+ "vim_net_id": vm_net_id,
+ "vim_interface_id": vm_net_id,
+ "ip_address": vm_ip,
+ }
vm_dict["interfaces"].append(interface)
# add a vm to vm dict
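# Illustrative shape (all values hypothetical) of one entry that refresh_vms_status()
# accumulates into vms_dict, following the docstring above: "status" and "error_msg"
# are both taken from vcdStatusCode2manoFormat, "vim_info" is the YAML dump of
# vm_details, and each interface carries the MAC, network id and the IP resolved
# either from vCloud or from the NSX edge DHCP leases.
example_entry = {
    "status": "ACTIVE",
    "error_msg": "ACTIVE",
    "vim_info": "<yaml.safe_dump of vm_details>",
    "interfaces": [
        {
            "mac_address": "00:50:56:aa:bb:cc",
            "vim_net_id": "net-uuid",
            "vim_interface_id": "net-uuid",
            "ip_address": "192.168.1.10",
        }
    ],
}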
def get_edge_details(self):
"""Get the NSX edge list from NSX Manager
- Returns list of NSX edges
+ Returns list of NSX edges
"""
edge_list = []
- rheaders = {'Content-Type': 'application/xml'}
- nsx_api_url = '/api/4.0/edges'
+ rheaders = {"Content-Type": "application/xml"}
+ nsx_api_url = "/api/4.0/edges"
- self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+ self.logger.debug(
+ "Get edge details from NSX Manager {} {}".format(
+ self.nsx_manager, nsx_api_url
+ )
+ )
try:
- resp = requests.get(self.nsx_manager + nsx_api_url,
- auth=(self.nsx_user, self.nsx_password),
- verify=False, headers=rheaders)
+ resp = requests.get(
+ self.nsx_manager + nsx_api_url,
+ auth=(self.nsx_user, self.nsx_password),
+ verify=False,
+ headers=rheaders,
+ )
if resp.status_code == requests.codes.ok:
paged_Edge_List = XmlElementTree.fromstring(resp.text)
for edge_pages in paged_Edge_List:
- if edge_pages.tag == 'edgePage':
+ if edge_pages.tag == "edgePage":
for edge_summary in edge_pages:
- if edge_summary.tag == 'pagingInfo':
+ if edge_summary.tag == "pagingInfo":
for element in edge_summary:
- if element.tag == 'totalCount' and element.text == '0':
+ if (
+ element.tag == "totalCount"
+ and element.text == "0"
+ ):
raise vimconn.VimConnException(
- "get_edge_details: No NSX edges details found: {}"
- .format(self.nsx_manager))
+ "get_edge_details: No NSX edges details found: {}".format(
+ self.nsx_manager
+ )
+ )
- if edge_summary.tag == 'edgeSummary':
+ if edge_summary.tag == "edgeSummary":
for element in edge_summary:
- if element.tag == 'id':
+ if element.tag == "id":
edge_list.append(element.text)
else:
- raise vimconn.VimConnException("get_edge_details: No NSX edge details found: {}"
- .format(self.nsx_manager))
+ raise vimconn.VimConnException(
+ "get_edge_details: No NSX edge details found: {}".format(
+ self.nsx_manager
+ )
+ )
if not edge_list:
- raise vimconn.VimConnException("get_edge_details: "
- "No NSX edge details found: {}"
- .format(self.nsx_manager))
+ raise vimconn.VimConnException(
+ "get_edge_details: "
+ "No NSX edge details found: {}".format(self.nsx_manager)
+ )
else:
- self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+ self.logger.debug(
+ "get_edge_details: Found NSX edges {}".format(edge_list)
+ )
+
return edge_list
else:
- self.logger.debug("get_edge_details: "
- "Failed to get NSX edge details from NSX Manager: {}"
- .format(resp.content))
+ self.logger.debug(
+ "get_edge_details: "
+ "Failed to get NSX edge details from NSX Manager: {}".format(
+ resp.content
+ )
+ )
+
return None
except Exception as exp:
- self.logger.debug("get_edge_details: "
- "Failed to get NSX edge details from NSX Manager: {}"
- .format(exp))
- raise vimconn.VimConnException("get_edge_details: "
- "Failed to get NSX edge details from NSX Manager: {}"
- .format(exp))
+ self.logger.debug(
+ "get_edge_details: "
+ "Failed to get NSX edge details from NSX Manager: {}".format(exp)
+ )
+ raise vimconn.VimConnException(
+ "get_edge_details: "
+ "Failed to get NSX edge details from NSX Manager: {}".format(exp)
+ )
def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
"""Get IP address details from NSX edges, using the MAC address
- PARAMS: nsx_edges : List of NSX edges
- mac_address : Find IP address corresponding to this MAC address
- Returns: IP address corrresponding to the provided MAC address
+ PARAMS: nsx_edges : List of NSX edges
+ mac_address : Find IP address corresponding to this MAC address
+        Returns: IP address corresponding to the provided MAC address
"""
-
ip_addr = None
- rheaders = {'Content-Type': 'application/xml'}
+ rheaders = {"Content-Type": "application/xml"}
self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
try:
for edge in nsx_edges:
- nsx_api_url = '/api/4.0/edges/' + edge + '/dhcp/leaseInfo'
+ nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
- resp = requests.get(self.nsx_manager + nsx_api_url,
- auth=(self.nsx_user, self.nsx_password),
- verify=False, headers=rheaders)
+ resp = requests.get(
+ self.nsx_manager + nsx_api_url,
+ auth=(self.nsx_user, self.nsx_password),
+ verify=False,
+ headers=rheaders,
+ )
if resp.status_code == requests.codes.ok:
dhcp_leases = XmlElementTree.fromstring(resp.text)
for child in dhcp_leases:
- if child.tag == 'dhcpLeaseInfo':
+ if child.tag == "dhcpLeaseInfo":
dhcpLeaseInfo = child
for leaseInfo in dhcpLeaseInfo:
for elem in leaseInfo:
- if (elem.tag) == 'macAddress':
+ if (elem.tag) == "macAddress":
edge_mac_addr = elem.text
- if (elem.tag) == 'ipAddress':
+
+ if (elem.tag) == "ipAddress":
ip_addr = elem.text
+
if edge_mac_addr is not None:
if edge_mac_addr == mac_address:
- self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
- .format(ip_addr, mac_address, edge))
+ self.logger.debug(
+ "Found ip addr {} for mac {} at NSX edge {}".format(
+ ip_addr, mac_address, edge
+ )
+ )
+
return ip_addr
else:
- self.logger.debug("get_ipaddr_from_NSXedge: "
- "Error occurred while getting DHCP lease info from NSX Manager: {}"
- .format(resp.content))
+ self.logger.debug(
+ "get_ipaddr_from_NSXedge: "
+ "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
+ resp.content
+ )
+ )
+
-            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            self.logger.debug(
+                "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
+            )
return None
except XmlElementTree.ParseError as Err:
- self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+ self.logger.debug(
+                "ParseError in response from NSX Manager {}".format(Err),
+ exc_info=True,
+ )
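# Usage sketch (identifiers hypothetical, "vim_conn" is a vimconnector instance):
# when a NetworkConnection carries no <IpAddress>, refresh_vms_status() falls back
# to the NSX Manager DHCP leases, roughly the equivalent of:
nsx_edges = vim_conn.get_edge_details()                # e.g. ["edge-1", "edge-7"]
vm_ip = vim_conn.get_ipaddr_from_NSXedge(nsx_edges, "00:50:56:aa:bb:cc")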
def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
"""Send and action over a VM instance from VIM
Returns the vm_id if the action was successfully sent to the VIM"""
- self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
+ self.logger.debug(
+ "Received action for vm {} and action dict {}".format(
+ vm__vim_uuid, action_dict
+ )
+ )
+
if vm__vim_uuid is None or action_dict is None:
raise vimconn.VimConnException("Invalid request. VM id or action is None.")
_, vdc = self.get_vdc_details()
if vdc is None:
- raise vimconn.VimConnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+ raise vimconn.VimConnException(
+ "Failed to get a reference of VDC for a tenant {}".format(
+ self.tenant_name
+ )
+ )
vapp_name = self.get_namebyvappid(vm__vim_uuid)
if vapp_name is None:
- self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
- raise vimconn.VimConnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+ self.logger.debug(
+ "action_vminstance(): Failed to get vm by given {} vm uuid".format(
+ vm__vim_uuid
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+ )
else:
- self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+ self.logger.info(
+ "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
+ )
try:
- vdc_obj = VDC(self.client, href=vdc.get('href'))
+ vdc_obj = VDC(self.client, href=vdc.get("href"))
vapp_resource = vdc_obj.get_vapp(vapp_name)
vapp = VApp(self.client, resource=vapp_resource)
+
if "start" in action_dict:
- self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+ self.logger.info(
+ "action_vminstance: Power on vApp: {}".format(vapp_name)
+ )
poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
- result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=poweron_task
+ )
self.instance_actions_result("start", result, vapp_name)
elif "rebuild" in action_dict:
- self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+ self.logger.info(
+ "action_vminstance: Rebuild vApp: {}".format(vapp_name)
+ )
rebuild_task = vapp.deploy(power_on=True)
- result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=rebuild_task
+ )
self.instance_actions_result("rebuild", result, vapp_name)
elif "pause" in action_dict:
self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
- pause_task = vapp.undeploy(action='suspend')
- result = self.client.get_task_monitor().wait_for_success(task=pause_task)
+ pause_task = vapp.undeploy(action="suspend")
+ result = self.client.get_task_monitor().wait_for_success(
+ task=pause_task
+ )
self.instance_actions_result("pause", result, vapp_name)
elif "resume" in action_dict:
self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
- result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=poweron_task
+ )
self.instance_actions_result("resume", result, vapp_name)
elif "shutoff" in action_dict or "shutdown" in action_dict:
action_name, _ = list(action_dict.items())[0]
- self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+ self.logger.info(
+ "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
+ )
shutdown_task = vapp.shutdown()
- result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
+ result = self.client.get_task_monitor().wait_for_success(
+ task=shutdown_task
+ )
if action_name == "shutdown":
self.instance_actions_result("shutdown", result, vapp_name)
else:
self.instance_actions_result("shutoff", result, vapp_name)
elif "forceOff" in action_dict:
- result = vapp.undeploy(action='powerOff')
+ result = vapp.undeploy(action="powerOff")
self.instance_actions_result("forceOff", result, vapp_name)
elif "reboot" in action_dict:
self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
self.client.get_task_monitor().wait_for_success(task=reboot_task)
else:
raise vimconn.VimConnException(
- "action_vminstance: Invalid action {} or action is None.".format(action_dict))
+ "action_vminstance: Invalid action {} or action is None.".format(
+ action_dict
+ )
+ )
+
return vm__vim_uuid
except Exception as exp:
self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
- raise vimconn.VimConnException("action_vminstance: Failed with Exception {}".format(exp))
+
+ raise vimconn.VimConnException(
+ "action_vminstance: Failed with Exception {}".format(exp)
+ )
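# Usage sketch (uuid hypothetical, "vim_conn" is a vimconnector instance): the action
# is passed as a key of action_dict; the branches above handle "start", "rebuild",
# "pause", "resume", "shutoff", "shutdown", "forceOff" and "reboot".
vim_conn.action_vminstance(
    vm__vim_uuid="1234abcd-0000-0000-0000-000000000000",
    action_dict={"shutdown": None},
)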
def instance_actions_result(self, action, result, vapp_name):
- if result.get('status') == 'success':
- self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+ if result.get("status") == "success":
+ self.logger.info(
+                "action_vminstance: Successfully {} the vApp: {}".format(
+ action, vapp_name
+ )
+ )
else:
- self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+ self.logger.error(
+ "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
+ )
def get_vminstance_console(self, vm_id, console_type="novnc"):
"""
"""
console_dict = {}
- if console_type is None or console_type == 'novnc':
-
- url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
-
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='POST',
- url=url_rest_call,
- headers=headers)
+ if console_type is None or console_type == "novnc":
+ url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
+ self.url, vm_id
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="POST", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("get_vminstance_console : Failed to get "
- "VM Mks ticket details")
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "get_vminstance_console : Failed to get " "VM Mks ticket details"
+ )
+
s = re.search("<Host>(.*?)</Host>", response.text)
- console_dict['server'] = s.group(1) if s else None
+ console_dict["server"] = s.group(1) if s else None
s1 = re.search("<Port>(\d+)</Port>", response.text)
- console_dict['port'] = s1.group(1) if s1 else None
-
- url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
-
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='POST',
- url=url_rest_call,
- headers=headers)
+ console_dict["port"] = s1.group(1) if s1 else None
+ url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
+ self.url, vm_id
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="POST", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("get_vminstance_console : Failed to get "
- "VM console details")
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "get_vminstance_console : Failed to get " "VM console details"
+ )
+
s = re.search(">.*?/(vm-\d+.*)</", response.text)
- console_dict['suffix'] = s.group(1) if s else None
- console_dict['protocol'] = "https"
+ console_dict["suffix"] = s.group(1) if s else None
+ console_dict["protocol"] = "https"
return console_dict
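# Illustrative result of get_vminstance_console() for the default "novnc" type
# (all values hypothetical): "server" and "port" are parsed from the acquireMksTicket
# response, "suffix" from the acquireTicket response, and "protocol" is fixed to https.
console = vim_conn.get_vminstance_console("1234abcd-0000-0000-0000-000000000000")
# console == {"server": "10.0.0.5", "port": "902", "suffix": "vm-15/...", "protocol": "https"}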
def new_host(self, host_data):
"""Adds a new host to VIM"""
- '''Returns status code of the VIM response'''
+ """Returns status code of the VIM response"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
def new_external_port(self, port_data):
"""Adds a external port to VIM"""
- '''Returns the port identifier'''
+ """Returns the port identifier"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
def new_external_network(self, net_name, net_type):
"""Adds a external network to VIM (shared)"""
- '''Returns the network identifier'''
+ """Returns the network identifier"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
def connect_port_network(self, port_id, network_id, admin=False):
"""Connects a external port to a network"""
- '''Returns status code of the VIM response'''
+ """Returns status code of the VIM response"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
def new_vminstancefromJSON(self, vm_data):
"""Adds a VM instance to VIM"""
- '''Returns the instance identifier'''
+ """Returns the instance identifier"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
def get_network_name_by_id(self, network_uuid=None):
try:
org_dict = self.get_org(self.org_uuid)
- if 'networks' in org_dict:
- org_network_dict = org_dict['networks']
+ if "networks" in org_dict:
+ org_network_dict = org_dict["networks"]
+
for net_uuid in org_network_dict:
if net_uuid == network_uuid:
return org_network_dict[net_uuid]
try:
org_dict = self.get_org(self.org_uuid)
- if org_dict and 'networks' in org_dict:
- org_network_dict = org_dict['networks']
+ if org_dict and "networks" in org_dict:
+ org_network_dict = org_dict["networks"]
+
for net_uuid, net_name in org_network_dict.items():
if net_name == network_name:
return net_uuid
return None
def get_physical_network_by_name(self, physical_network_name):
- '''
+ """
        Method returns the UUID of the physical network whose name is passed
Args:
physical_network_name: physical network name
Returns:
UUID of physical_network_name
- '''
+ """
try:
client_as_admin = self.connect_as_admin()
+
if not client_as_admin:
raise vimconn.VimConnConnectionException("Failed to connect vCD.")
- url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
- vm_list_rest_call = ''.join(url_list)
-
- if client_as_admin._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
+ url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
+ vm_list_rest_call = "".join(url_list)
+ if client_as_admin._session:
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": client_as_admin._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
provider_network = None
available_network = None
# add_vdc_rest_url = None
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
return None
else:
try:
vm_list_xmlroot = XmlElementTree.fromstring(response.text)
for child in vm_list_xmlroot:
-
- if child.tag.split("}")[1] == 'ProviderVdcReference':
- provider_network = child.attrib.get('href')
+ if child.tag.split("}")[1] == "ProviderVdcReference":
+ provider_network = child.attrib.get("href")
# application/vnd.vmware.admin.providervdc+xml
- if child.tag.split("}")[1] == 'Link':
- if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
- and child.attrib.get('rel') == 'add':
- child.attrib.get('href')
+
+ if child.tag.split("}")[1] == "Link":
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
+ and child.attrib.get("rel") == "add"
+ ):
+ child.attrib.get("href")
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
self.logger.debug("Respond body {}".format(response.text))
+
return None
# find pvdc provided available network
- response = self.perform_request(req_type='GET',
- url=provider_network,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=provider_network, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
+
return None
try:
vm_list_xmlroot = XmlElementTree.fromstring(response.text)
for child in vm_list_xmlroot.iter():
- if child.tag.split("}")[1] == 'AvailableNetworks':
+ if child.tag.split("}")[1] == "AvailableNetworks":
for networks in child.iter():
- if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
- if networks.attrib.get('name') == physical_network_name:
- network_url = networks.attrib.get('href')
- available_network = network_url[network_url.rindex('/') + 1:]
+ if (
+ networks.attrib.get("href") is not None
+ and networks.attrib.get("name") is not None
+ ):
+ if (
+ networks.attrib.get("name")
+ == physical_network_name
+ ):
+ network_url = networks.attrib.get("href")
+ available_network = network_url[
+ network_url.rindex("/") + 1 :
+ ]
break
- except Exception as e:
+ except Exception:
return None
return available_network
Returns:
The return XML respond
"""
- url_list = [self.url, '/api/org']
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/org"]
+ vm_list_rest_call = "".join(url_list)
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', vm_list_rest_call)
+ response = self.retry_rest("GET", vm_list_rest_call)
if response.status_code == requests.codes.ok:
return response.text
if org_uuid is None:
return None
- url_list = [self.url, '/api/org/', org_uuid]
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/org/", org_uuid]
+ vm_list_rest_call = "".join(url_list)
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
# response = requests.get(vm_list_rest_call, headers=headers, verify=False)
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
+
if response.status_code == 403:
- response = self.retry_rest('GET', vm_list_rest_call)
+ response = self.retry_rest("GET", vm_list_rest_call)
if response.status_code == requests.codes.ok:
return response.text
+
return None
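# The Accept / x-vcloud-authorization header pair is rebuilt by hand before every
# REST call in this module; a small private method along these lines (name
# hypothetical, not part of the change above) would keep those call sites shorter
# without changing behaviour:
def _vcd_headers(self):
    return {
        "Accept": "application/*+xml;version=" + API_VERSION,
        "x-vcloud-authorization": self.client._session.headers[
            "x-vcloud-authorization"
        ],
    }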
def get_org(self, org_uuid=None):
catalog_list = {}
vm_list_xmlroot = XmlElementTree.fromstring(content)
for child in vm_list_xmlroot:
- if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
- vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
- org_dict['vdcs'] = vdc_list
- if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
- network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
- org_dict['networks'] = network_list
- if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
- catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
- org_dict['catalogs'] = catalog_list
+ if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
+ vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
+ "name"
+ ]
+ org_dict["vdcs"] = vdc_list
+
+ if (
+ child.attrib["type"]
+ == "application/vnd.vmware.vcloud.orgNetwork+xml"
+ ):
+ network_list[
+ child.attrib["href"].split("/")[-1:][0]
+ ] = child.attrib["name"]
+ org_dict["networks"] = network_list
+
+ if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
+ catalog_list[
+ child.attrib["href"].split("/")[-1:][0]
+ ] = child.attrib["name"]
+ org_dict["catalogs"] = catalog_list
except Exception:
pass
Returns:
The return dictionary and key for each entry VDC UUID
"""
-
org_dict = {}
content = self.list_org_action()
try:
vm_list_xmlroot = XmlElementTree.fromstring(content)
+
for vm_xml in vm_list_xmlroot:
- if vm_xml.tag.split("}")[1] == 'Org':
- org_uuid = vm_xml.attrib['href'].split('/')[-1:]
- org_dict[org_uuid[0]] = vm_xml.attrib['name']
+ if vm_xml.tag.split("}")[1] == "Org":
+ org_uuid = vm_xml.attrib["href"].split("/")[-1:]
+ org_dict[org_uuid[0]] = vm_xml.attrib["name"]
except Exception:
pass
return org_dict
def vms_view_action(self, vdc_name=None):
- """ Method leverages vCloud director vms query call
+ """Method leverages vCloud director vms query call
Args:
vca - is active VCA connection.
if vdc_name is None:
return None
- url_list = [vca.host, '/api/vms/query']
- vm_list_rest_call = ''.join(url_list)
+ url_list = [vca.host, "/api/vms/query"]
+ vm_list_rest_call = "".join(url_list)
if not (not vca.vcloud_session or not vca.vcloud_session.organization):
- refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
- ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
+ refs = [
+ ref
+ for ref in vca.vcloud_session.organization.Link
+ if ref.name == vdc_name
+ and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
+ ]
+
if len(refs) == 1:
- response = Http.get(url=vm_list_rest_call,
- headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify,
- logger=vca.logger)
+ response = self.perform_request(
+ req_type="GET",
+ url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger,
+ )
+
if response.status_code == requests.codes.ok:
return response.text
Returns:
The return dictionary and key for each entry vapp UUID
"""
-
vapp_dict = {}
+
if vdc_name is None:
return vapp_dict
try:
vm_list_xmlroot = XmlElementTree.fromstring(content)
for vm_xml in vm_list_xmlroot:
- if vm_xml.tag.split("}")[1] == 'VMRecord':
- if vm_xml.attrib['isVAppTemplate'] == 'true':
- rawuuid = vm_xml.attrib['container'].split('/')[-1:]
- if 'vappTemplate-' in rawuuid[0]:
+ if vm_xml.tag.split("}")[1] == "VMRecord":
+ if vm_xml.attrib["isVAppTemplate"] == "true":
+ rawuuid = vm_xml.attrib["container"].split("/")[-1:]
+ if "vappTemplate-" in rawuuid[0]:
# vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
# vm and use raw UUID as key
vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
try:
vm_list_xmlroot = XmlElementTree.fromstring(content)
for vm_xml in vm_list_xmlroot:
- if vm_xml.tag.split("}")[1] == 'VMRecord':
- if vm_xml.attrib['isVAppTemplate'] == 'false':
- rawuuid = vm_xml.attrib['href'].split('/')[-1:]
- if 'vm-' in rawuuid[0]:
+ if vm_xml.tag.split("}")[1] == "VMRecord":
+ if vm_xml.attrib["isVAppTemplate"] == "false":
+ rawuuid = vm_xml.attrib["href"].split("/")[-1:]
+ if "vm-" in rawuuid[0]:
# vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
# vm and use raw UUID as key
vm_dict[rawuuid[0][3:]] = vm_xml.attrib
"""
vm_dict = {}
vca = self.connect()
+
if not vca:
raise vimconn.VimConnConnectionException("self.connect() is failed")
try:
vm_list_xmlroot = XmlElementTree.fromstring(content)
for vm_xml in vm_list_xmlroot:
- if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+ if (
+ vm_xml.tag.split("}")[1] == "VMRecord"
+ and vm_xml.attrib["isVAppTemplate"] == "false"
+ ):
# lookup done by UUID
if isuuid:
- if vapp_name in vm_xml.attrib['container']:
- rawuuid = vm_xml.attrib['href'].split('/')[-1:]
- if 'vm-' in rawuuid[0]:
+ if vapp_name in vm_xml.attrib["container"]:
+ rawuuid = vm_xml.attrib["href"].split("/")[-1:]
+ if "vm-" in rawuuid[0]:
vm_dict[rawuuid[0][3:]] = vm_xml.attrib
break
# lookup done by Name
else:
- if vapp_name in vm_xml.attrib['name']:
- rawuuid = vm_xml.attrib['href'].split('/')[-1:]
- if 'vm-' in rawuuid[0]:
+ if vapp_name in vm_xml.attrib["name"]:
+ rawuuid = vm_xml.attrib["href"].split("/")[-1:]
+ if "vm-" in rawuuid[0]:
vm_dict[rawuuid[0][3:]] = vm_xml.attrib
break
except Exception:
Returns:
The return XML respond
"""
-
if network_uuid is None:
return None
- url_list = [self.url, '/api/network/', network_uuid]
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/network/", network_uuid]
+ vm_list_rest_call = "".join(url_list)
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
-            response = self.perform_request(req_type='GET',
-                                            url=vm_list_rest_call,
-                                            headers=headers)
+            response = self.perform_request(
+                req_type="GET", url=vm_list_rest_call, headers=headers
+            )
# Retry login if session expired & retry sending request
if response.status_code == 403:
- response = self.retry_rest('GET', vm_list_rest_call)
+ response = self.retry_rest("GET", vm_list_rest_call)
if response.status_code == requests.codes.ok:
return response.text
Returns:
The return dictionary and key for each entry vapp UUID
"""
-
network_configuration = {}
+
if network_uuid is None:
return network_uuid
content = self.get_network_action(network_uuid=network_uuid)
if content is not None:
vm_list_xmlroot = XmlElementTree.fromstring(content)
-
- network_configuration['status'] = vm_list_xmlroot.get("status")
- network_configuration['name'] = vm_list_xmlroot.get("name")
- network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+ network_configuration["status"] = vm_list_xmlroot.get("status")
+ network_configuration["name"] = vm_list_xmlroot.get("name")
+ network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
for child in vm_list_xmlroot:
- if child.tag.split("}")[1] == 'IsShared':
- network_configuration['isShared'] = child.text.strip()
- if child.tag.split("}")[1] == 'Configuration':
+ if child.tag.split("}")[1] == "IsShared":
+ network_configuration["isShared"] = child.text.strip()
+
+ if child.tag.split("}")[1] == "Configuration":
for configuration in child.iter():
tagKey = configuration.tag.split("}")[1].strip()
if tagKey != "":
- network_configuration[tagKey] = configuration.text.strip()
+ network_configuration[
+ tagKey
+ ] = configuration.text.strip()
except Exception as exp:
self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
- raise vimconn.VimConnException("get_vcd_network: Failed with Exception {}".format(exp))
+
+ raise vimconn.VimConnException(
+ "get_vcd_network: Failed with Exception {}".format(exp)
+ )
return network_configuration
The return None or XML respond or false
"""
client = self.connect_as_admin()
+
if not client:
raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
+
if network_uuid is None:
return False
- url_list = [self.url, '/api/admin/network/', network_uuid]
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/admin/network/", network_uuid]
+ vm_list_rest_call = "".join(url_list)
if client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='DELETE',
- url=vm_list_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="DELETE", url=vm_list_rest_call, headers=headers
+ )
+
if response.status_code == 202:
return True
return False
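# Sketch of a create_network() call (values hypothetical, "vim_conn" is a
# vimconnector instance). Fields omitted from ip_profile are defaulted further
# below: a missing subnet_address becomes a random 192.168.x.0/24, the gateway
# defaults to .1, DNS to .2, the DHCP range starts at .3, and dhcp_count,
# dhcp_enabled and ip_version fall back to DEFAULT_IP_PROFILE.
net_id = vim_conn.create_network(
    network_name="mgmt",
    net_type="bridge",
    ip_profile={"subnet_address": "10.10.0.0/24", "dhcp_enabled": True},
    isshared="true",
)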
- def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
- ip_profile=None, isshared='true'):
+ def create_network(
+ self,
+ network_name=None,
+ net_type="bridge",
+ parent_network_uuid=None,
+ ip_profile=None,
+ isshared="true",
+ ):
"""
Method create network in vCloud director
Returns:
The return network uuid or return None
"""
-        new_network_name = [network_name, '-', str(uuid.uuid4())]
-        content = self.create_network_rest(network_name=''.join(new_network_name),
-                                           ip_profile=ip_profile,
-                                           net_type=net_type,
-                                           parent_network_uuid=parent_network_uuid,
-                                           isshared=isshared)
+        new_network_name = [network_name, "-", str(uuid.uuid4())]
+        content = self.create_network_rest(
+            network_name="".join(new_network_name),
+            ip_profile=ip_profile,
+            net_type=net_type,
+            parent_network_uuid=parent_network_uuid,
+            isshared=isshared,
+        )
if content is None:
self.logger.debug("Failed create network {}.".format(network_name))
+
return None
try:
vm_list_xmlroot = XmlElementTree.fromstring(content)
- vcd_uuid = vm_list_xmlroot.get('id').split(":")
+ vcd_uuid = vm_list_xmlroot.get("id").split(":")
if len(vcd_uuid) == 4:
- self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+ self.logger.info(
+ "Created new network name: {} uuid: {}".format(
+ network_name, vcd_uuid[3]
+ )
+ )
+
return vcd_uuid[3]
except Exception:
self.logger.debug("Failed create network {}".format(network_name))
+
return None
- def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
- ip_profile=None, isshared='true'):
+ def create_network_rest(
+ self,
+ network_name=None,
+ net_type="bridge",
+ parent_network_uuid=None,
+ ip_profile=None,
+ isshared="true",
+ ):
"""
Method create network in vCloud director
The return network uuid or return None
"""
client_as_admin = self.connect_as_admin()
+
if not client_as_admin:
raise vimconn.VimConnConnectionException("Failed to connect vCD.")
+
if network_name is None:
return None
- url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
+ vm_list_rest_call = "".join(url_list)
if client_as_admin._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
-
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
-
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": client_as_admin._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
provider_network = None
available_networks = None
add_vdc_rest_url = None
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
+
return None
else:
try:
vm_list_xmlroot = XmlElementTree.fromstring(response.text)
for child in vm_list_xmlroot:
-
- if child.tag.split("}")[1] == 'ProviderVdcReference':
- provider_network = child.attrib.get('href')
+ if child.tag.split("}")[1] == "ProviderVdcReference":
+ provider_network = child.attrib.get("href")
# application/vnd.vmware.admin.providervdc+xml
- if child.tag.split("}")[1] == 'Link':
- if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
- and child.attrib.get('rel') == 'add':
- add_vdc_rest_url = child.attrib.get('href')
+
+ if child.tag.split("}")[1] == "Link":
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
+ and child.attrib.get("rel") == "add"
+ ):
+ add_vdc_rest_url = child.attrib.get("href")
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
self.logger.debug("Respond body {}".format(response.text))
+
return None
# find pvdc provided available network
- response = self.perform_request(req_type='GET',
- url=provider_network,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=provider_network, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
+
return None
if parent_network_uuid is None:
try:
vm_list_xmlroot = XmlElementTree.fromstring(response.text)
for child in vm_list_xmlroot.iter():
- if child.tag.split("}")[1] == 'AvailableNetworks':
+ if child.tag.split("}")[1] == "AvailableNetworks":
for networks in child.iter():
# application/vnd.vmware.admin.network+xml
- if networks.attrib.get('href') is not None:
- available_networks = networks.attrib.get('href')
+ if networks.attrib.get("href") is not None:
+ available_networks = networks.attrib.get("href")
break
except Exception:
return None
try:
# Configure IP profile of the network
- ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+ ip_profile = (
+ ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+ )
- if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+ if (
+ "subnet_address" not in ip_profile
+ or ip_profile["subnet_address"] is None
+ ):
subnet_rand = random.randint(0, 255)
ip_base = "192.168.{}.".format(subnet_rand)
- ip_profile['subnet_address'] = ip_base + "0/24"
+ ip_profile["subnet_address"] = ip_base + "0/24"
else:
- ip_base = ip_profile['subnet_address'].rsplit('.', 1)[0] + '.'
-
- if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
- ip_profile['gateway_address'] = ip_base + "1"
- if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
- ip_profile['dhcp_count'] = DEFAULT_IP_PROFILE['dhcp_count']
- if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
- ip_profile['dhcp_enabled'] = DEFAULT_IP_PROFILE['dhcp_enabled']
- if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
- ip_profile['dhcp_start_address'] = ip_base + "3"
- if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
- ip_profile['ip_version'] = DEFAULT_IP_PROFILE['ip_version']
- if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
- ip_profile['dns_address'] = ip_base + "2"
-
- gateway_address = ip_profile['gateway_address']
- dhcp_count = int(ip_profile['dhcp_count'])
- subnet_address = self.convert_cidr_to_netmask(ip_profile['subnet_address'])
-
- if ip_profile['dhcp_enabled'] is True:
- dhcp_enabled = 'true'
+ ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
+
+ if (
+ "gateway_address" not in ip_profile
+ or ip_profile["gateway_address"] is None
+ ):
+ ip_profile["gateway_address"] = ip_base + "1"
+
+ if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
+ ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
+
+ if (
+ "dhcp_enabled" not in ip_profile
+ or ip_profile["dhcp_enabled"] is None
+ ):
+ ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
+
+ if (
+ "dhcp_start_address" not in ip_profile
+ or ip_profile["dhcp_start_address"] is None
+ ):
+ ip_profile["dhcp_start_address"] = ip_base + "3"
+
+ if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
+ ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
+
+ if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
+ ip_profile["dns_address"] = ip_base + "2"
+
+ gateway_address = ip_profile["gateway_address"]
+ dhcp_count = int(ip_profile["dhcp_count"])
+ subnet_address = self.convert_cidr_to_netmask(
+ ip_profile["subnet_address"]
+ )
+
+ if ip_profile["dhcp_enabled"] is True:
+ dhcp_enabled = "true"
else:
- dhcp_enabled = 'false'
- dhcp_start_address = ip_profile['dhcp_start_address']
+ dhcp_enabled = "false"
+
+ dhcp_start_address = ip_profile["dhcp_start_address"]
            # derive dhcp_end_address from dhcp_start_address & dhcp_count
            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
            end_ip_int += dhcp_count - 1
            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
# ip_version = ip_profile['ip_version']
- dns_address = ip_profile['dns_address']
+ dns_address = ip_profile["dns_address"]
except KeyError as exp:
self.logger.debug("Create Network REST: Key error {}".format(exp))
- raise vimconn.VimConnException("Create Network REST: Key error{}".format(exp))
+
+ raise vimconn.VimConnException(
+ "Create Network REST: Key error{}".format(exp)
+ )
# either use client provided UUID or search for a first available
# if both are not defined we return none
provider_network = None
available_networks = None
add_vdc_rest_url = None
-
- url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
- add_vdc_rest_url = ''.join(url_list)
-
- url_list = [self.url, '/api/admin/network/', parent_network_uuid]
- available_networks = ''.join(url_list)
+ url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
+ add_vdc_rest_url = "".join(url_list)
+ url_list = [self.url, "/api/admin/network/", parent_network_uuid]
+ available_networks = "".join(url_list)
# Creating all networks as Direct Org VDC type networks.
# Unused in case of Underlay (data/ptp) network interface.
fence_mode = "isolated"
- is_inherited = 'false'
+ is_inherited = "false"
dns_list = dns_address.split(";")
dns1 = dns_list[0]
dns2_text = ""
+
if len(dns_list) >= 2:
- dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
+ dns2_text = "\n <Dns2>{}</Dns2>\n".format(
+ dns_list[1]
+ )
+
if net_type == "isolated":
fence_mode = "isolated"
data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
<FenceMode>{9:s}</FenceMode>
</Configuration>
<IsShared>{10:s}</IsShared>
- </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
- subnet_address, dns1, dns2_text, dhcp_enabled,
- dhcp_start_address, dhcp_end_address,
- fence_mode, isshared)
+ </OrgVdcNetwork> """.format(
+ escape(network_name),
+ is_inherited,
+ gateway_address,
+ subnet_address,
+ dns1,
+ dns2_text,
+ dhcp_enabled,
+ dhcp_start_address,
+ dhcp_end_address,
+ fence_mode,
+ isshared,
+ )
else:
fence_mode = "bridged"
data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
<FenceMode>{10:s}</FenceMode>
</Configuration>
<IsShared>{11:s}</IsShared>
- </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
- subnet_address, dns1, dns2_text, dhcp_enabled,
- dhcp_start_address, dhcp_end_address, available_networks,
- fence_mode, isshared)
+ </OrgVdcNetwork> """.format(
+ escape(network_name),
+ is_inherited,
+ gateway_address,
+ subnet_address,
+ dns1,
+ dns2_text,
+ dhcp_enabled,
+ dhcp_start_address,
+ dhcp_end_address,
+ available_networks,
+ fence_mode,
+ isshared,
+ )
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
+ headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
try:
- response = self.perform_request(req_type='POST',
- url=add_vdc_rest_url,
- headers=headers,
- data=data)
+ response = self.perform_request(
+ req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
+ )
if response.status_code != 201:
- self.logger.debug("Create Network POST REST API call failed. "
- "Return status code {}, response.text: {}"
- .format(response.status_code, response.text))
+ self.logger.debug(
+ "Create Network POST REST API call failed. "
+ "Return status code {}, response.text: {}".format(
+ response.status_code, response.text
+ )
+ )
else:
network_task = self.get_task_from_response(response.text)
- self.logger.debug("Create Network REST : Waiting for Network creation complete")
+ self.logger.debug(
+ "Create Network REST : Waiting for Network creation complete"
+ )
time.sleep(5)
- result = self.client.get_task_monitor().wait_for_success(task=network_task)
- if result.get('status') == 'success':
+ result = self.client.get_task_monitor().wait_for_success(
+ task=network_task
+ )
+
+ if result.get("status") == "success":
return response.text
else:
- self.logger.debug("create_network_rest task failed. Network Create response : {}"
- .format(response.text))
+ self.logger.debug(
+ "create_network_rest task failed. Network Create response : {}".format(
+ response.text
+ )
+ )
except Exception as exp:
self.logger.debug("create_network_rest : Exception : {} ".format(exp))
netmask : Converted netmask
"""
if cidr_ip is not None:
- if '/' in cidr_ip:
- _, net_bits = cidr_ip.split('/')
- netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+ if "/" in cidr_ip:
+ _, net_bits = cidr_ip.split("/")
+ netmask = socket.inet_ntoa(
+ struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
+ )
else:
netmask = cidr_ip
+
return netmask
+
return None
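# Worked example of the conversion above: for "10.0.0.0/24" the mask is
# (0xFFFFFFFF << (32 - 24)) & 0xFFFFFFFF == 0xFFFFFF00, which inet_ntoa renders
# as "255.255.255.0"; a value without "/" is returned unchanged and None yields None.
# ("vim_conn" is a vimconnector instance, as in the earlier sketches.)
assert vim_conn.convert_cidr_to_netmask("10.0.0.0/24") == "255.255.255.0"
assert vim_conn.convert_cidr_to_netmask("255.255.0.0") == "255.255.0.0"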
def get_provider_rest(self, vca=None):
Returns:
The return xml content of respond or None
"""
-        url_list = [self.url, '/api/admin']
+        url_list = [self.url, "/api/admin"]
if vca:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=''.join(url_list),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url="".join(url_list), headers=headers
+ )
if response.status_code == requests.codes.ok:
return response.text
+
return None
def create_vdc(self, vdc_name=None):
-
vdc_dict = {}
-
xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
+
if xml_content is not None:
try:
task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
for child in task_resp_xmlroot:
- if child.tag.split("}")[1] == 'Owner':
- vdc_id = child.attrib.get('href').split("/")[-1]
- vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
+ if child.tag.split("}")[1] == "Owner":
+ vdc_id = child.attrib.get("href").split("/")[-1]
+ vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
+
return vdc_dict
except Exception:
self.logger.debug("Respond body {}".format(xml_content))
        # prerequisite: at least one vdc template should be available in vCD
self.logger.info("Creating new vdc {}".format(vdc_name))
vca = self.connect_as_admin()
+
if not vca:
raise vimconn.VimConnConnectionException("Failed to connect vCD")
+
if vdc_name is None:
return None
- url_list = [self.url, '/api/vdcTemplates']
- vm_list_rest_call = ''.join(url_list)
-
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
+ url_list = [self.url, "/api/vdcTemplates"]
+ vm_list_rest_call = "".join(url_list)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
# container url to a template
vdc_template_ref = None
for child in vm_list_xmlroot:
# application/vnd.vmware.admin.providervdc+xml
                # we need to find a template from which we instantiate the VDC
- if child.tag.split("}")[1] == 'VdcTemplate':
- if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
- vdc_template_ref = child.attrib.get('href')
+ if child.tag.split("}")[1] == "VdcTemplate":
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.admin.vdcTemplate+xml"
+ ):
+ vdc_template_ref = child.attrib.get("href")
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(vm_list_rest_call)
+ )
self.logger.debug("Respond body {}".format(response.text))
+
return None
        # if we didn't find the required predefined template we return None
try:
# instantiate vdc
- url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
+ vm_list_rest_call = "".join(url_list)
data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
<Source href="{1:s}"></Source>
<Description>opnemano</Description>
- </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
-
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
-
- response = self.perform_request(req_type='POST',
- url=vm_list_rest_call,
- headers=headers,
- data=data)
-
+ </InstantiateVdcTemplateParams>""".format(
+ vdc_name, vdc_template_ref
+ )
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
+ response = self.perform_request(
+ req_type="POST", url=vm_list_rest_call, headers=headers, data=data
+ )
vdc_task = self.get_task_from_response(response.text)
self.client.get_task_monitor().wait_for_success(task=vdc_task)
            # if all is ok we respond with the content, otherwise by default None
if response.status_code >= 200 and response.status_code < 300:
return response.text
+
return None
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(vm_list_rest_call)
+ )
self.logger.debug("Respond body {}".format(response.text))
return None
Returns:
The return response
"""
-
self.logger.info("Creating new vdc {}".format(vdc_name))
-
vca = self.connect_as_admin()
+
if not vca:
raise vimconn.VimConnConnectionException("Failed to connect vCD")
+
if vdc_name is None:
return None
- url_list = [self.url, '/api/admin/org/', self.org_uuid]
- vm_list_rest_call = ''.join(url_list)
+ url_list = [self.url, "/api/admin/org/", self.org_uuid]
+ vm_list_rest_call = "".join(url_list)
if vca._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=vm_list_rest_call,
- headers=headers)
-
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
provider_vdc_ref = None
add_vdc_rest_url = None
# available_networks = None
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
+
return None
else:
try:
vm_list_xmlroot = XmlElementTree.fromstring(response.text)
for child in vm_list_xmlroot:
# application/vnd.vmware.admin.providervdc+xml
- if child.tag.split("}")[1] == 'Link':
- if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
- and child.attrib.get('rel') == 'add':
- add_vdc_rest_url = child.attrib.get('href')
+ if child.tag.split("}")[1] == "Link":
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.admin.createVdcParams+xml"
+ and child.attrib.get("rel") == "add"
+ ):
+ add_vdc_rest_url = child.attrib.get("href")
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+                    "Failed to parse response for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
self.logger.debug("Respond body {}".format(response.text))
+
return None
response = self.get_provider_rest(vca=vca)
try:
vm_list_xmlroot = XmlElementTree.fromstring(response)
for child in vm_list_xmlroot:
- if child.tag.split("}")[1] == 'ProviderVdcReferences':
+ if child.tag.split("}")[1] == "ProviderVdcReferences":
for sub_child in child:
- provider_vdc_ref = sub_child.attrib.get('href')
+ provider_vdc_ref = sub_child.attrib.get("href")
except Exception:
- self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug(
+                    "Failed to parse response for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
self.logger.debug("Respond body {}".format(response))
+
return None
if add_vdc_rest_url is not None and provider_vdc_ref is not None:
<ProviderVdcReference
name="Main Provider"
href="{2:s}" />
- <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
- escape(vdc_name),
- provider_vdc_ref)
-
- headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
-
- response = self.perform_request(req_type='POST',
- url=add_vdc_rest_url,
- headers=headers,
- data=data)
+ <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
+ escape(vdc_name), escape(vdc_name), provider_vdc_ref
+ )
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.admin.createVdcParams+xml"
+ response = self.perform_request(
+ req_type="POST",
+ url=add_vdc_rest_url,
+ headers=headers,
+ data=data,
+ )
# if we all ok we respond with content otherwise by default None
if response.status_code == 201:
return response.text
+
return None
def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
Returns:
The return network uuid or return None
"""
-
parsed_respond = {}
vca = None
if vapp_uuid is None:
return None
- url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
- get_vapp_restcall = ''.join(url_list)
+ url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
+ get_vapp_restcall = "".join(url_list)
if vca._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=get_vapp_restcall,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=get_vapp_restcall, headers=headers
+ )
if response.status_code == 403:
if need_admin_access is False:
- response = self.retry_rest('GET', get_vapp_restcall)
+ response = self.retry_rest("GET", get_vapp_restcall)
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
- response.status_code))
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ get_vapp_restcall, response.status_code
+ )
+ )
+
return parsed_respond
try:
xmlroot_respond = XmlElementTree.fromstring(response.text)
- parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
-
+ parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
+ "ovfDescriptorUploaded"
+ ]
namespaces = {
"vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
- 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
- 'vmw': 'http://www.vmware.com/schema/ovf',
- 'vm': 'http://www.vmware.com/vcloud/v1.5',
- 'rasd': "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+ "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
+ "vmw": "http://www.vmware.com/schema/ovf",
+ "vm": "http://www.vmware.com/vcloud/v1.5",
+ "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
"vmext": "http://www.vmware.com/vcloud/extension/v1.5",
- "xmlns": "http://www.vmware.com/vcloud/v1.5"
+ "xmlns": "http://www.vmware.com/vcloud/v1.5",
}
- created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
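+                # Extract creation time, network configuration and per-VM details using the namespace map above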
+ created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
if created_section is not None:
- parsed_respond['created'] = created_section.text
-
- network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
- if network_section is not None and 'networkName' in network_section.attrib:
- parsed_respond['networkname'] = network_section.attrib['networkName']
+ parsed_respond["created"] = created_section.text
- ipscopes_section = \
- xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
- namespaces)
+ network_section = xmlroot_respond.find(
+ "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
+ )
+ if (
+ network_section is not None
+ and "networkName" in network_section.attrib
+ ):
+ parsed_respond["networkname"] = network_section.attrib[
+ "networkName"
+ ]
+
+ ipscopes_section = xmlroot_respond.find(
+ "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
+ namespaces,
+ )
if ipscopes_section is not None:
for ipscope in ipscopes_section:
for scope in ipscope:
tag_key = scope.tag.split("}")[1]
- if tag_key == 'IpRanges':
+ if tag_key == "IpRanges":
ip_ranges = scope.getchildren()
for ipblock in ip_ranges:
for block in ipblock:
- parsed_respond[block.tag.split("}")[1]] = block.text
+ parsed_respond[
+ block.tag.split("}")[1]
+ ] = block.text
else:
parsed_respond[tag_key] = scope.text
# parse children section for other attrib
- children_section = xmlroot_respond.find('vm:Children/', namespaces)
+ children_section = xmlroot_respond.find("vm:Children/", namespaces)
if children_section is not None:
- parsed_respond['name'] = children_section.attrib['name']
- parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
- if "nestedHypervisorEnabled" in children_section.attrib else None
- parsed_respond['deployed'] = children_section.attrib['deployed']
- parsed_respond['status'] = children_section.attrib['status']
- parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
- network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
+ parsed_respond["name"] = children_section.attrib["name"]
+ parsed_respond["nestedHypervisorEnabled"] = (
+ children_section.attrib["nestedHypervisorEnabled"]
+ if "nestedHypervisorEnabled" in children_section.attrib
+ else None
+ )
+ parsed_respond["deployed"] = children_section.attrib["deployed"]
+ parsed_respond["status"] = children_section.attrib["status"]
+ parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
+ -1
+ ]
+ network_adapter = children_section.find(
+ "vm:NetworkConnectionSection", namespaces
+ )
nic_list = []
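+                    # Collect the primary NIC index and per-NIC connection settings from the NetworkConnectionSection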
for adapters in network_adapter:
adapter_key = adapters.tag.split("}")[1]
- if adapter_key == 'PrimaryNetworkConnectionIndex':
- parsed_respond['primarynetwork'] = adapters.text
- if adapter_key == 'NetworkConnection':
+ if adapter_key == "PrimaryNetworkConnectionIndex":
+ parsed_respond["primarynetwork"] = adapters.text
+
+ if adapter_key == "NetworkConnection":
vnic = {}
- if 'network' in adapters.attrib:
- vnic['network'] = adapters.attrib['network']
+ if "network" in adapters.attrib:
+ vnic["network"] = adapters.attrib["network"]
for adapter in adapters:
setting_key = adapter.tag.split("}")[1]
vnic[setting_key] = adapter.text
nic_list.append(vnic)
for link in children_section:
- if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
- if link.attrib['rel'] == 'screen:acquireTicket':
- parsed_respond['acquireTicket'] = link.attrib
- if link.attrib['rel'] == 'screen:acquireMksTicket':
- parsed_respond['acquireMksTicket'] = link.attrib
-
- parsed_respond['interfaces'] = nic_list
- vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+ if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
+ if link.attrib["rel"] == "screen:acquireTicket":
+ parsed_respond["acquireTicket"] = link.attrib
+
+ if link.attrib["rel"] == "screen:acquireMksTicket":
+ parsed_respond["acquireMksTicket"] = link.attrib
+
+ parsed_respond["interfaces"] = nic_list
+ vCloud_extension_section = children_section.find(
+ "xmlns:VCloudExtension", namespaces
+ )
if vCloud_extension_section is not None:
vm_vcenter_info = {}
- vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
- vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+ vim_info = vCloud_extension_section.find(
+ "vmext:VmVimInfo", namespaces
+ )
+ vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
+
if vmext is not None:
- vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+ vm_vcenter_info["vm_moref_id"] = vmext.find(
+ "vmext:MoRef", namespaces
+ ).text
+
parsed_respond["vm_vcenter_info"] = vm_vcenter_info
- virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+ virtual_hardware_section = children_section.find(
+ "ovf:VirtualHardwareSection", namespaces
+ )
vm_virtual_hardware_info = {}
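+                    # Read the "Hard disk" capacity and the /disks edit link from the OVF VirtualHardwareSection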
if virtual_hardware_section is not None:
- for item in virtual_hardware_section.iterfind('ovf:Item', namespaces):
- if item.find("rasd:Description", namespaces).text == "Hard disk":
+ for item in virtual_hardware_section.iterfind(
+ "ovf:Item", namespaces
+ ):
+ if (
+ item.find("rasd:Description", namespaces).text
+ == "Hard disk"
+ ):
disk_size = item.find(
- "rasd:HostResource", namespaces).attrib["{" + namespaces['vm'] + "}capacity"]
-
+ "rasd:HostResource", namespaces
+ ).attrib["{" + namespaces["vm"] + "}capacity"]
vm_virtual_hardware_info["disk_size"] = disk_size
break
for link in virtual_hardware_section:
- if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
- if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
- vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+ if (
+ link.tag.split("}")[1] == "Link"
+ and "rel" in link.attrib
+ ):
+ if link.attrib["rel"] == "edit" and link.attrib[
+ "href"
+ ].endswith("/disks"):
+ vm_virtual_hardware_info[
+ "disk_edit_href"
+ ] = link.attrib["href"]
break
parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
except Exception as exp:
- self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+ self.logger.info(
+ "Error occurred calling rest api for getting vApp details {}".format(
+ exp
+ )
+ )
+
return parsed_respond
def acquire_console(self, vm_uuid=None):
-
if vm_uuid is None:
return None
+
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
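+            # The screen:acquireTicket link from the vApp details is POSTed below to obtain a console ticket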
- console_dict = vm_dict['acquireTicket']
- console_rest_call = console_dict['href']
+ console_dict = vm_dict["acquireTicket"]
+ console_rest_call = console_dict["href"]
- response = self.perform_request(req_type='POST',
- url=console_rest_call,
- headers=headers)
+ response = self.perform_request(
+ req_type="POST", url=console_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('POST', console_rest_call)
+ response = self.retry_rest("POST", console_rest_call)
if response.status_code == requests.codes.ok:
return response.text
# Flavor disk is in GB convert it into MB
flavor_disk = int(flavor_disk) * 1024
vm_details = self.get_vapp_details_rest(vapp_uuid)
+
if vm_details:
vm_name = vm_details["name"]
self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
if vm_details and "vm_virtual_hardware" in vm_details:
vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
-
self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
if flavor_disk > vm_disk:
status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
- self.logger.info("Modify disk of VM {} from {} to {} MB".format(
- vm_name,
- vm_disk, flavor_disk))
+ self.logger.info(
+ "Modify disk of VM {} from {} to {} MB".format(
+ vm_name, vm_disk, flavor_disk
+ )
+ )
else:
status = True
self.logger.info("No need to modify disk of VM {}".format(vm_name))
return None
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=disk_href,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=disk_href, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', disk_href)
+ response = self.retry_rest("GET", disk_href)
if response.status_code != requests.codes.ok:
- self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
- response.status_code))
+ self.logger.debug(
+ "GET REST API call {} failed. Return status code {}".format(
+ disk_href, response.status_code
+ )
+ )
+
return None
+
try:
lxmlroot_respond = lxmlElementTree.fromstring(response.content)
- namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
- for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
+ for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
if item.find("rasd:Description", namespaces).text == "Hard disk":
disk_item = item.find("rasd:HostResource", namespaces)
if disk_item is not None:
- disk_item.attrib["{" + namespaces['xmlns'] + "}capacity"] = str(disk_size)
+ disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
+ disk_size
+ )
break
- data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
- xml_declaration=True)
+ data = lxmlElementTree.tostring(
+ lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
+ )
# Send PUT request to modify disk size
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
- response = self.perform_request(req_type='PUT',
- url=disk_href,
- headers=headers,
- data=data)
+ response = self.perform_request(
+ req_type="PUT", url=disk_href, headers=headers, data=data
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', disk_href, add_headers, data)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest("PUT", disk_href, add_headers, data)
if response.status_code != 202:
- self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
- response.status_code))
+ self.logger.debug(
+ "PUT REST API call {} failed. Return status code {}".format(
+ disk_href, response.status_code
+ )
+ )
else:
modify_disk_task = self.get_task_from_response(response.text)
- result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
- if result.get('status') == 'success':
+ result = self.client.get_task_monitor().wait_for_success(
+ task=modify_disk_task
+ )
+ if result.get("status") == "success":
return True
else:
return False
- return None
+ return None
except Exception as exp:
- self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
- return None
+ self.logger.info(
+                "Error occurred calling rest api for modifying disk size {}".format(exp)
+ )
+
+ return None
def add_serial_device(self, vapp_uuid):
"""
- Method to attach a serial device to a VM
+ Method to attach a serial device to a VM
- Args:
- vapp_uuid - uuid of vApp/VM
+ Args:
+ vapp_uuid - uuid of vApp/VM
- Returns:
+ Returns:
"""
self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
_, content = self.get_vcenter_content()
vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
if vm_moref_id:
try:
host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
- self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+ self.logger.info(
+ "VM {} is currently on host {}".format(vm_obj, host_obj)
+ )
if host_obj and vm_obj:
spec = vim.vm.ConfigSpec()
spec.deviceChange = []
serial_spec = vim.vm.device.VirtualDeviceSpec()
- serial_spec.operation = 'add'
+ serial_spec.operation = "add"
serial_port = vim.vm.device.VirtualSerialPort()
serial_port.yieldOnPoll = True
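+                    # Back the port with a network URI; direction "server" makes the VM listen on TCP port 65500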
backing = serial_port.URIBackingInfo()
- backing.serviceURI = 'tcp://:65500'
- backing.direction = 'server'
+ backing.serviceURI = "tcp://:65500"
+ backing.direction = "server"
serial_port.backing = backing
serial_spec.device = serial_port
spec.deviceChange.append(serial_spec)
vm_obj.ReconfigVM_Task(spec=spec)
-
self.logger.info("Adding serial device to VM {}".format(vm_obj))
except vmodl.MethodFault as error:
self.logger.error("Error occurred while adding PCI devices {} ", error)
def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
"""
- Method to attach pci devices to VM
+ Method to attach pci devices to VM
- Args:
- vapp_uuid - uuid of vApp/VM
- pci_devices - pci devices infromation as specified in VNFD (flavor)
+ Args:
+ vapp_uuid - uuid of vApp/VM
+            pci_devices - pci devices information as specified in VNFD (flavor)
- Returns:
- The status of add pci device task , vm object and
- vcenter_conect object
+ Returns:
+            The status of the add pci device task, vm object and
+ vcenter_conect object
"""
vm_obj = None
- self.logger.info("Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid))
+ self.logger.info(
+ "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
+ )
vcenter_conect, content = self.get_vcenter_content()
vm_moref_id = self.get_vm_moref_id(vapp_uuid)
if no_of_pci_devices > 0:
# Get VM and its host
host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
- self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+ self.logger.info(
+ "VM {} is currently on host {}".format(vm_obj, host_obj)
+ )
+
if host_obj and vm_obj:
# get PCI devies from host on which vapp is currently installed
- avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+ avilable_pci_devices = self.get_pci_devices(
+ host_obj, no_of_pci_devices
+ )
if avilable_pci_devices is None:
# find other hosts with active pci devices
- new_host_obj, avilable_pci_devices = self.get_host_and_PCIdevices(
- content,
- no_of_pci_devices
+ (
+ new_host_obj,
+ avilable_pci_devices,
+ ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
+
+ if (
+ new_host_obj is not None
+ and avilable_pci_devices is not None
+ and len(avilable_pci_devices) > 0
+ ):
+                            # Migrate VM to the host where PCI devices are available
+ self.logger.info(
+ "Relocate VM {} on new host {}".format(
+ vm_obj, new_host_obj
+ )
)
- if (new_host_obj is not None and
- avilable_pci_devices is not None and
- len(avilable_pci_devices) > 0):
- # Migrate vm to the host where PCI devices are availble
- self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
task = self.relocate_vm(new_host_obj, vm_obj)
if task is not None:
- result = self.wait_for_vcenter_task(task, vcenter_conect)
- self.logger.info("Migrate VM status: {}".format(result))
+ result = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+ self.logger.info(
+ "Migrate VM status: {}".format(result)
+ )
host_obj = new_host_obj
else:
- self.logger.info("Fail to migrate VM : {}".format(result))
+ self.logger.info(
+                                    "Failed to migrate VM : {}".format(result)
+ )
raise vimconn.VimConnNotFoundException(
"Fail to migrate VM : {} to host {}".format(
- vmname_andid,
- new_host_obj)
+ vmname_andid, new_host_obj
)
+ )
- if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices) > 0:
+ if (
+ host_obj is not None
+ and avilable_pci_devices is not None
+ and len(avilable_pci_devices) > 0
+ ):
# Add PCI devices one by one
for pci_device in avilable_pci_devices:
task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
if task:
- status = self.wait_for_vcenter_task(task, vcenter_conect)
+ status = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+
if status:
- self.logger.info("Added PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+ self.logger.info(
+ "Added PCI device {} to VM {}".format(
+ pci_device, str(vm_obj)
+ )
+ )
else:
- self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,
- str(vm_obj)))
+ self.logger.error(
+                                        "Failed to add PCI device {} to VM {}".format(
+ pci_device, str(vm_obj)
+ )
+ )
+
return True, vm_obj, vcenter_conect
else:
- self.logger.error("Currently there is no host with"
- " {} number of avaialble PCI devices required for VM {}".format(
- no_of_pci_devices,
- vmname_andid)
- )
+ self.logger.error(
+ "Currently there is no host with"
+                            " {} number of available PCI devices required for VM {}".format(
+ no_of_pci_devices, vmname_andid
+ )
+ )
+
raise vimconn.VimConnNotFoundException(
"Currently there is no host with {} "
"number of avaialble PCI devices required for VM {}".format(
- no_of_pci_devices,
- vmname_andid))
+ no_of_pci_devices, vmname_andid
+ )
+ )
else:
- self.logger.debug("No infromation about PCI devices {} ", pci_devices)
-
+ self.logger.debug(
+                    "No information about PCI devices {} ", pci_devices
+ )
except vmodl.MethodFault as error:
self.logger.error("Error occurred while adding PCI devices {} ", error)
+
return None, vm_obj, vcenter_conect
def get_vm_obj(self, content, mob_id):
"""
- Method to get the vsphere VM object associated with a given morf ID
- Args:
- vapp_uuid - uuid of vApp/VM
- content - vCenter content object
- mob_id - mob_id of VM
+            Method to get the vSphere VM object associated with a given moref ID
+ Args:
+ vapp_uuid - uuid of vApp/VM
+ content - vCenter content object
+ mob_id - mob_id of VM
- Returns:
- VM and host object
+ Returns:
+ VM and host object
"""
vm_obj = None
host_obj = None
+
try:
- container = content.viewManager.CreateContainerView(content.rootFolder,
- [vim.VirtualMachine], True
- )
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, [vim.VirtualMachine], True
+ )
for vm in container.view:
mobID = vm._GetMoId()
+
if mobID == mob_id:
vm_obj = vm
host_obj = vm_obj.runtime.host
break
except Exception as exp:
self.logger.error("Error occurred while finding VM object : {}".format(exp))
+
return host_obj, vm_obj
def get_pci_devices(self, host, need_devices):
"""
- Method to get the details of pci devices on given host
- Args:
- host - vSphere host object
- need_devices - number of pci devices needed on host
+ Method to get the details of pci devices on given host
+ Args:
+ host - vSphere host object
+ need_devices - number of pci devices needed on host
- Returns:
- array of pci devices
+ Returns:
+ array of pci devices
"""
all_devices = []
all_device_ids = []
for use_device in avalible_devices:
if use_device.id == device.backing.id:
avalible_devices.remove(use_device)
+
used_devices_ids.append(device.backing.id)
- self.logger.debug("Device {} from devices {}"
- "is in use".format(device.backing.id,
- device)
- )
+ self.logger.debug(
+                "Device {} from devices {} "
+ "is in use".format(device.backing.id, device)
+ )
if len(avalible_devices) < need_devices:
- self.logger.debug("Host {} don't have {} number of active devices".format(host,
- need_devices))
- self.logger.debug("found only {} devices {}".format(len(avalible_devices),
- avalible_devices))
+ self.logger.debug(
+                    "Host {} doesn't have {} active devices".format(
+ host, need_devices
+ )
+ )
+ self.logger.debug(
+                    "Found only {} devices: {}".format(
+ len(avalible_devices), avalible_devices
+ )
+ )
+
return None
else:
required_devices = avalible_devices[:need_devices]
- self.logger.info("Found {} PCI devices on host {} but required only {}".format(
- len(avalible_devices),
- host,
- need_devices))
- self.logger.info("Retruning {} devices as {}".format(need_devices,
- required_devices))
- return required_devices
+ self.logger.info(
+ "Found {} PCI devices on host {} but required only {}".format(
+ len(avalible_devices), host, need_devices
+ )
+ )
+ self.logger.info(
+                "Returning {} devices as {}".format(need_devices, required_devices)
+ )
+ return required_devices
except Exception as exp:
- self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+ self.logger.error(
+ "Error {} occurred while finding pci devices on host: {}".format(
+ exp, host
+ )
+ )
return None
def get_host_and_PCIdevices(self, content, need_devices):
"""
- Method to get the details of pci devices infromation on all hosts
+            Method to get the details of pci devices on all hosts
- Args:
- content - vSphere host object
- need_devices - number of pci devices needed on host
+ Args:
+ content - vSphere host object
+ need_devices - number of pci devices needed on host
- Returns:
- array of pci devices and host object
+ Returns:
+ array of pci devices and host object
"""
host_obj = None
pci_device_objs = None
+
try:
if content:
- container = content.viewManager.CreateContainerView(content.rootFolder,
- [vim.HostSystem], True)
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, [vim.HostSystem], True
+ )
for host in container.view:
devices = self.get_pci_devices(host, need_devices)
+
if devices:
host_obj = host
pci_device_objs = devices
break
except Exception as exp:
- self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+ self.logger.error(
+ "Error {} occurred while finding pci devices on host: {}".format(
+ exp, host_obj
+ )
+ )
return host_obj, pci_device_objs
def relocate_vm(self, dest_host, vm):
"""
- Method to get the relocate VM to new host
+            Method to relocate a VM to a new host
- Args:
- dest_host - vSphere host object
- vm - vSphere VM object
+ Args:
+ dest_host - vSphere host object
+ vm - vSphere VM object
- Returns:
- task object
+ Returns:
+ task object
"""
task = None
+
try:
relocate_spec = vim.vm.RelocateSpec(host=dest_host)
task = vm.Relocate(relocate_spec)
- self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+ self.logger.info(
+ "Migrating {} to destination host {}".format(vm, dest_host)
+ )
except Exception as exp:
- self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
- dest_host, vm, exp))
+ self.logger.error(
+                "Error occurred while relocating VM {} to new host {}: {}".format(
+                    vm, dest_host, exp
+ )
+ )
+
return task
- def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+ def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
"""
Waits and provides updates on a vSphere task
"""
if task.info.state == vim.TaskInfo.State.success:
if task.info.result is not None and not hideResult:
- self.logger.info('{} completed successfully, result: {}'.format(
- actionName,
- task.info.result))
+ self.logger.info(
+ "{} completed successfully, result: {}".format(
+ actionName, task.info.result
+ )
+ )
else:
- self.logger.info('Task {} completed successfully.'.format(actionName))
+ self.logger.info("Task {} completed successfully.".format(actionName))
else:
- self.logger.error('{} did not complete successfully: {} '.format(
- actionName,
- task.info.error)
+ self.logger.error(
+ "{} did not complete successfully: {} ".format(
+ actionName, task.info.error
)
+ )
return task.info.result
def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
"""
- Method to add pci device in given VM
+            Method to add a pci device to a given VM
- Args:
- host_object - vSphere host object
- vm_object - vSphere VM object
- host_pci_dev - host_pci_dev must be one of the devices from the
- host_object.hardware.pciDevice list
- which is configured as a PCI passthrough device
+ Args:
+ host_object - vSphere host object
+ vm_object - vSphere VM object
+ host_pci_dev - host_pci_dev must be one of the devices from the
+ host_object.hardware.pciDevice list
+ which is configured as a PCI passthrough device
- Returns:
- task object
+ Returns:
+ task object
"""
task = None
+
if vm_object and host_object and host_pci_dev:
try:
# Add PCI device to VM
- pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
- systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+ pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
+ host=None
+ ).pciPassthrough
+ systemid_by_pciid = {
+ item.pciDevice.id: item.systemId for item in pci_passthroughs
+ }
if host_pci_dev.id not in systemid_by_pciid:
- self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+ self.logger.error(
+ "Device {} is not a passthrough device ".format(host_pci_dev)
+ )
return None
- deviceId = hex(host_pci_dev.deviceId % 2 ** 16).lstrip('0x')
- backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
- id=host_pci_dev.id,
- systemId=systemid_by_pciid[host_pci_dev.id],
- vendorId=host_pci_dev.vendorId,
- deviceName=host_pci_dev.deviceName)
+ deviceId = hex(host_pci_dev.deviceId % 2 ** 16).lstrip("0x")
+ backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
+ deviceId=deviceId,
+ id=host_pci_dev.id,
+ systemId=systemid_by_pciid[host_pci_dev.id],
+ vendorId=host_pci_dev.vendorId,
+ deviceName=host_pci_dev.deviceName,
+ )
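+                # key=-100 is a temporary placeholder; vSphere assigns the real device key during reconfiguration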
hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
-
new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
new_device_config.operation = "add"
vmConfigSpec = vim.vm.ConfigSpec()
vmConfigSpec.deviceChange = [new_device_config]
-
task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
- self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
- host_pci_dev, vm_object, host_object)
+ self.logger.info(
+ "Adding PCI device {} into VM {} from host {} ".format(
+ host_pci_dev, vm_object, host_object
)
+ )
except Exception as exp:
- self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
- host_pci_dev,
- vm_object,
- exp))
+ self.logger.error(
+                    "Error occurred while adding pci device {} to VM {}: {}".format(
+ host_pci_dev, vm_object, exp
+ )
+ )
+
return task
def get_vm_vcenter_info(self):
if self.vcenter_ip is not None:
vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
else:
- raise vimconn.VimConnException(message="vCenter IP is not provided."
- " Please provide vCenter IP while attaching datacenter "
- "to tenant in --config")
+ raise vimconn.VimConnException(
+ message="vCenter IP is not provided."
+ " Please provide vCenter IP while attaching datacenter "
+ "to tenant in --config"
+ )
+
if self.vcenter_port is not None:
vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
else:
- raise vimconn.VimConnException(message="vCenter port is not provided."
- " Please provide vCenter port while attaching datacenter "
- "to tenant in --config")
+ raise vimconn.VimConnException(
+ message="vCenter port is not provided."
+ " Please provide vCenter port while attaching datacenter "
+ "to tenant in --config"
+ )
+
if self.vcenter_user is not None:
vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
else:
- raise vimconn.VimConnException(message="vCenter user is not provided."
- " Please provide vCenter user while attaching datacenter "
- "to tenant in --config")
+ raise vimconn.VimConnException(
+ message="vCenter user is not provided."
+ " Please provide vCenter user while attaching datacenter "
+ "to tenant in --config"
+ )
if self.vcenter_password is not None:
vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
else:
- raise vimconn.VimConnException(message="vCenter user password is not provided."
- " Please provide vCenter user password while attaching datacenter "
- "to tenant in --config")
+ raise vimconn.VimConnException(
+ message="vCenter user password is not provided."
+ " Please provide vCenter user password while attaching datacenter "
+ "to tenant in --config"
+ )
return vm_vcenter_info
def get_vm_pci_details(self, vmuuid):
"""
- Method to get VM PCI device details from vCenter
+ Method to get VM PCI device details from vCenter
- Args:
- vm_obj - vSphere VM object
+ Args:
+            vmuuid - uuid of the VM
- Returns:
- dict of PCI devives attached to VM
+ Returns:
+            dict of PCI devices attached to VM
"""
vm_pci_devices_info = {}
+
try:
_, content = self.get_vcenter_content()
vm_moref_id = self.get_vm_moref_id(vmuuid)
host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
if host_obj and vm_obj:
vm_pci_devices_info["host_name"] = host_obj.name
- vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
+ vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
+ 0
+ ].spec.ip.ipAddress
+
for device in vm_obj.config.hardware.device:
if type(device) == vim.vm.device.VirtualPCIPassthrough:
- device_details = {'devide_id': device.backing.id,
- 'pciSlotNumber': device.slotInfo.pciSlotNumber,
- }
- vm_pci_devices_info[device.deviceInfo.label] = device_details
+ device_details = {
+ "devide_id": device.backing.id,
+ "pciSlotNumber": device.slotInfo.pciSlotNumber,
+ }
+ vm_pci_devices_info[
+ device.deviceInfo.label
+ ] = device_details
else:
- self.logger.error("Can not connect to vCenter while getting "
- "PCI devices infromationn")
+ self.logger.error(
+                "Cannot connect to vCenter while getting "
+                "PCI devices information"
+ )
+
return vm_pci_devices_info
except Exception as exp:
- self.logger.error("Error occurred while getting VM information"
- " for VM : {}".format(exp))
+ self.logger.error(
+ "Error occurred while getting VM information" " for VM : {}".format(exp)
+ )
+
raise vimconn.VimConnException(message=exp)
def reserve_memory_for_all_vms(self, vapp, memory_mb):
"""
- Method to reserve memory for all VMs
- Args :
- vapp - VApp
- memory_mb - Memory in MB
- Returns:
- None
+ Method to reserve memory for all VMs
+ Args :
+ vapp - VApp
+ memory_mb - Memory in MB
+ Returns:
+ None
"""
-
self.logger.info("Reserve memory for all VMs")
- for vms in vapp.get_all_vms():
- vm_id = vms.get('id').split(':')[-1]
- url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
-
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
- response = self.perform_request(req_type='GET',
- url=url_rest_call,
- headers=headers)
+ for vms in vapp.get_all_vms():
+ vm_id = vms.get("id").split(":")[-1]
+ url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
+ self.url, vm_id
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
+ response = self.perform_request(
+ req_type="GET", url=url_rest_call, headers=headers
+ )
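+            # Fetch the VM's memory rasdItem so its reservation can be updated and PUT back below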
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("reserve_memory_for_all_vms : Failed to get "
- "memory")
-
- bytexml = bytes(bytearray(response.text, encoding='utf-8'))
+ self.logger.error(
+                    "REST call {} failed reason : {} "
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "reserve_memory_for_all_vms : Failed to get " "memory"
+ )
+
+ bytexml = bytes(bytearray(response.text, encoding="utf-8"))
contentelem = lxmlElementTree.XML(bytexml)
- namespaces = {prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
# Find the reservation element in the response
newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
- response = self.perform_request(req_type='PUT',
- url=url_rest_call,
- headers=headers,
- data=newdata)
+ response = self.perform_request(
+ req_type="PUT", url=url_rest_call, headers=headers, data=newdata
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {} ".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("reserve_memory_for_all_vms : Failed to update "
- "virtual hardware memory section")
+ self.logger.error(
+                    "REST call {} failed reason : {} "
+ "status code : {} ".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "reserve_memory_for_all_vms : Failed to update "
+ "virtual hardware memory section"
+ )
else:
mem_task = self.get_task_from_response(response.text)
result = self.client.get_task_monitor().wait_for_success(task=mem_task)
- if result.get('status') == 'success':
- self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "
- .format(vm_id))
+
+ if result.get("status") == "success":
+ self.logger.info(
+ "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
+ )
else:
- self.logger.error("reserve_memory_for_all_vms(): VM {} failed "
- .format(vm_id))
+ self.logger.error(
+ "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
+ )
def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
"""
- Configure VApp network config with org vdc network
- Args :
- vapp - VApp
- Returns:
- None
+ Configure VApp network config with org vdc network
+ Args :
+            vapp_id - vApp identifier
+            net_name - name of the org VDC network
+ Returns:
+ None
"""
- self.logger.info("Connecting vapp {} to org vdc network {}".
- format(vapp_id, net_name))
+ self.logger.info(
+ "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
+ )
- url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
+ url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
+ self.url, vapp_id
+ )
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=url_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to get "
- "network config section")
+ self.logger.error(
+                "REST call {} failed reason : {} "
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "connect_vapp_to_org_vdc_network : Failed to get "
+ "network config section"
+ )
data = response.text
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
net_id = self.get_network_id_by_name(net_name)
if not net_id:
- raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to find "
- "existing network")
+ raise vimconn.VimConnException(
+ "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
+ )
- bytexml = bytes(bytearray(data, encoding='utf-8'))
+ bytexml = bytes(bytearray(data, encoding="utf-8"))
newelem = lxmlElementTree.XML(bytexml)
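+        # Append a bridged NetworkConfig entry for the org VDC network to the existing section and PUT it back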
namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
<FenceMode>bridged</FenceMode>
</Configuration>
</NetworkConfig>
- """.format(net_name, self.url, net_id)
+ """.format(
+ net_name, self.url, net_id
+ )
newcfgelem = lxmlElementTree.fromstring(newstr)
if nwcfglist:
nwcfglist[0].addnext(newcfgelem)
newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
- response = self.perform_request(req_type='PUT',
- url=url_rest_call,
- headers=headers,
- data=newdata)
+ response = self.perform_request(
+ req_type="PUT", url=url_rest_call, headers=headers, data=newdata
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {} ".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to update "
- "network config section")
+ self.logger.error(
+                "REST call {} failed reason : {} "
+ "status code : {} ".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "connect_vapp_to_org_vdc_network : Failed to update "
+ "network config section"
+ )
else:
vapp_task = self.get_task_from_response(response.text)
result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
- if result.get('status') == 'success':
- self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "
- "network {}".format(vapp_id, net_name))
+ if result.get("status") == "success":
+ self.logger.info(
+ "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
+ "network {}".format(vapp_id, net_name)
+ )
else:
- self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "
- "connect to network {}".format(vapp_id, net_name))
+ self.logger.error(
+ "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
+ "connect to network {}".format(vapp_id, net_name)
+ )
def remove_primary_network_adapter_from_all_vms(self, vapp):
"""
- Method to remove network adapter type to vm
- Args :
- vapp - VApp
- Returns:
- None
+        Method to remove the primary network adapter from all VMs
+ Args :
+ vapp - VApp
+ Returns:
+ None
"""
-
self.logger.info("Removing network adapter from all VMs")
+
for vms in vapp.get_all_vms():
- vm_id = vms.get('id').split(':')[-1]
+ vm_id = vms.get("id").split(":")[-1]
- url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+ url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
+ self.url, vm_id
+ )
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=url_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("remove_primary_network_adapter : Failed to get "
- "network connection section")
+ self.logger.error(
+                    "REST call {} failed reason : {} "
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "remove_primary_network_adapter : Failed to get "
+ "network connection section"
+ )
data = response.text
data = data.split('<Link rel="edit"')[0]
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
<PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
<Link rel="edit" href="{url}"
type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
- </NetworkConnectionSection>""".format(url=url_rest_call)
- response = self.perform_request(req_type='PUT',
- url=url_rest_call,
- headers=headers,
- data=newdata)
+ </NetworkConnectionSection>""".format(
+ url=url_rest_call
+ )
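+            # The replacement section has no NetworkConnection entries, so the VM's primary NIC connection is dropped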
+ response = self.perform_request(
+ req_type="PUT", url=url_rest_call, headers=headers, data=newdata
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {} ".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("remove_primary_network_adapter : Failed to update "
- "network connection section")
+ self.logger.error(
+                    "REST call {} failed reason : {} "
+ "status code : {} ".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "remove_primary_network_adapter : Failed to update "
+ "network connection section"
+ )
else:
nic_task = self.get_task_from_response(response.text)
result = self.client.get_task_monitor().wait_for_success(task=nic_task)
- if result.get('status') == 'success':
- self.logger.info("remove_primary_network_adapter(): VM {} conneced to "
- "default NIC type".format(vm_id))
+ if result.get("status") == "success":
+ self.logger.info(
+                    "remove_primary_network_adapter(): VM {} connected to "
+ "default NIC type".format(vm_id)
+ )
else:
- self.logger.error("remove_primary_network_adapter(): VM {} failed to "
- "connect NIC type".format(vm_id))
+ self.logger.error(
+ "remove_primary_network_adapter(): VM {} failed to "
+ "connect NIC type".format(vm_id)
+ )
- def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+ def add_network_adapter_to_vms(
+ self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
+ ):
"""
- Method to add network adapter type to vm
- Args :
- network_name - name of network
- primary_nic_index - int value for primary nic index
- nicIndex - int value for nic index
- nic_type - specify model name to which add to vm
- Returns:
- None
+        Method to add a network adapter of the given type to all VMs
+ Args :
+ network_name - name of network
+ primary_nic_index - int value for primary nic index
+ nicIndex - int value for nic index
+            nic_type - network adapter model name to add to the VM
+ Returns:
+ None
"""
- self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".
- format(network_name, nicIndex, nic_type))
+ self.logger.info(
+ "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
+ network_name, nicIndex, nic_type
+ )
+ )
try:
ip_address = None
floating_ip = False
mac_address = None
- if 'floating_ip' in net:
- floating_ip = net['floating_ip']
+ if "floating_ip" in net:
+ floating_ip = net["floating_ip"]
# Stub for ip_address feature
- if 'ip_address' in net:
- ip_address = net['ip_address']
+ if "ip_address" in net:
+ ip_address = net["ip_address"]
- if 'mac_address' in net:
- mac_address = net['mac_address']
+ if "mac_address" in net:
+ mac_address = net["mac_address"]
if floating_ip:
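+                # POOL allocation assigns an address from the org VDC network's static IP pool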
allocation_mode = "POOL"
if not nic_type:
for vms in vapp.get_all_vms():
- vm_id = vms.get('id').split(':')[-1]
+ vm_id = vms.get("id").split(":")[-1]
- url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+ url_rest_call = (
+ "{}/api/vApp/vm-{}/networkConnectionSection/".format(
+ self.url, vm_id
+ )
+ )
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=url_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to get "
- "network connection section")
+ self.logger.error(
+                            "REST call {} failed reason : {} "
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "add_network_adapter_to_vms : Failed to get "
+ "network connection section"
+ )
data = response.text
data = data.split('<Link rel="edit"')[0]
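+                    # If no PrimaryNetworkConnectionIndex exists yet, add this connection as the primary NIC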
- if '<PrimaryNetworkConnectionIndex>' not in data:
+ if "<PrimaryNetworkConnectionIndex>" not in data:
self.logger.debug("add_network_adapter PrimaryNIC not in data")
item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
- </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
- allocation_mode)
+ </NetworkConnection>""".format(
+ primary_nic_index, network_name, nicIndex, allocation_mode
+ )
+
# Stub for ip_address feature
if ip_address:
- ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
- item = item.replace('</NetworkConnectionIndex>\n',
- '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+ ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
+ item = item.replace(
+ "</NetworkConnectionIndex>\n",
+ "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
+ )
if mac_address:
- mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
- item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
-
- data = data.replace('</ovf:Info>\n',
- '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+ mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
+ item = item.replace(
+ "</IsConnected>\n",
+ "</IsConnected>\n{}\n".format(mac_tag),
+ )
+
+ data = data.replace(
+ "</ovf:Info>\n",
+ "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
+ )
else:
self.logger.debug("add_network_adapter PrimaryNIC in data")
new_item = """<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
- </NetworkConnection>""".format(network_name, nicIndex,
- allocation_mode)
+ </NetworkConnection>""".format(
+ network_name, nicIndex, allocation_mode
+ )
+
# Stub for ip_address feature
if ip_address:
- ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
- new_item = new_item.replace('</NetworkConnectionIndex>\n',
- '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+ ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
+ new_item = new_item.replace(
+ "</NetworkConnectionIndex>\n",
+ "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
+ )
if mac_address:
- mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
- new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+ mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
+ new_item = new_item.replace(
+ "</IsConnected>\n",
+ "</IsConnected>\n{}\n".format(mac_tag),
+ )
- data = data + new_item + '</NetworkConnectionSection>'
+ data = data + new_item + "</NetworkConnectionSection>"
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
- response = self.perform_request(req_type='PUT',
- url=url_rest_call,
- headers=headers,
- data=data)
+ response = self.perform_request(
+ req_type="PUT", url=url_rest_call, headers=headers, data=data
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest(
+ "PUT", url_rest_call, add_headers, data
+ )
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {} ".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to update "
- "network connection section")
+ self.logger.error(
+                            "REST call {} failed reason : {} "
+ "status code : {} ".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "add_network_adapter_to_vms : Failed to update "
+ "network connection section"
+ )
else:
nic_task = self.get_task_from_response(response.text)
- result = self.client.get_task_monitor().wait_for_success(task=nic_task)
- if result.get('status') == 'success':
- self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "
- "default NIC type".format(vm_id))
+ result = self.client.get_task_monitor().wait_for_success(
+ task=nic_task
+ )
+
+ if result.get("status") == "success":
+ self.logger.info(
+                            "add_network_adapter_to_vms(): VM {} connected to "
+ "default NIC type".format(vm_id)
+ )
else:
- self.logger.error("add_network_adapter_to_vms(): VM {} failed to "
- "connect NIC type".format(vm_id))
+ self.logger.error(
+ "add_network_adapter_to_vms(): VM {} failed to "
+ "connect NIC type".format(vm_id)
+ )
else:
for vms in vapp.get_all_vms():
- vm_id = vms.get('id').split(':')[-1]
+ vm_id = vms.get("id").split(":")[-1]
- url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+ url_rest_call = (
+ "{}/api/vApp/vm-{}/networkConnectionSection/".format(
+ self.url, vm_id
+ )
+ )
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=url_rest_call,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=url_rest_call, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', url_rest_call)
+ response = self.retry_rest("GET", url_rest_call)
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to get "
- "network connection section")
+ self.logger.error(
+                            "REST call {} failed reason : {} "
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "add_network_adapter_to_vms : Failed to get "
+ "network connection section"
+ )
data = response.text
data = data.split('<Link rel="edit"')[0]
vcd_netadapter_type = nic_type
- if nic_type in ['SR-IOV', 'VF']:
+
+ if nic_type in ["SR-IOV", "VF"]:
vcd_netadapter_type = "SRIOVETHERNETCARD"
- if '<PrimaryNetworkConnectionIndex>' not in data:
- self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
+ if "<PrimaryNetworkConnectionIndex>" not in data:
+ self.logger.debug(
+ "add_network_adapter PrimaryNIC not in data nic_type {}".format(
+ nic_type
+ )
+ )
item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
<NetworkAdapterType>{}</NetworkAdapterType>
- </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
- allocation_mode, vcd_netadapter_type)
+ </NetworkConnection>""".format(
+ primary_nic_index,
+ network_name,
+ nicIndex,
+ allocation_mode,
+ vcd_netadapter_type,
+ )
+
# Stub for ip_address feature
if ip_address:
- ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
- item = item.replace('</NetworkConnectionIndex>\n',
- '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+ ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
+ item = item.replace(
+ "</NetworkConnectionIndex>\n",
+ "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
+ )
if mac_address:
- mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
- item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
-
- data = data.replace('</ovf:Info>\n',
- '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+ mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
+ item = item.replace(
+ "</IsConnected>\n",
+ "</IsConnected>\n{}\n".format(mac_tag),
+ )
+
+ data = data.replace(
+ "</ovf:Info>\n",
+ "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
+ )
else:
- self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
+ self.logger.debug(
+ "add_network_adapter PrimaryNIC in data nic_type {}".format(
+ nic_type
+ )
+ )
new_item = """<NetworkConnection network="{}">
<NetworkConnectionIndex>{}</NetworkConnectionIndex>
<IsConnected>true</IsConnected>
<IpAddressAllocationMode>{}</IpAddressAllocationMode>
<NetworkAdapterType>{}</NetworkAdapterType>
- </NetworkConnection>""".format(network_name, nicIndex,
- allocation_mode, vcd_netadapter_type)
+ </NetworkConnection>""".format(
+ network_name, nicIndex, allocation_mode, vcd_netadapter_type
+ )
+
# Stub for ip_address feature
if ip_address:
- ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
- new_item = new_item.replace('</NetworkConnectionIndex>\n',
- '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+ ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
+ new_item = new_item.replace(
+ "</NetworkConnectionIndex>\n",
+ "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
+ )
if mac_address:
- mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
- new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+ mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
+ new_item = new_item.replace(
+ "</IsConnected>\n",
+ "</IsConnected>\n{}\n".format(mac_tag),
+ )
- data = data + new_item + '</NetworkConnectionSection>'
+ data = data + new_item + "</NetworkConnectionSection>"
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
- response = self.perform_request(req_type='PUT',
- url=url_rest_call,
- headers=headers,
- data=data)
+ response = self.perform_request(
+ req_type="PUT", url=url_rest_call, headers=headers, data=data
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest(
+ "PUT", url_rest_call, add_headers, data
+ )
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to update "
- "network connection section")
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ url_rest_call, response.text, response.status_code
+ )
+ )
+ raise vimconn.VimConnException(
+ "add_network_adapter_to_vms : Failed to update "
+ "network connection section"
+ )
else:
nic_task = self.get_task_from_response(response.text)
- result = self.client.get_task_monitor().wait_for_success(task=nic_task)
- if result.get('status') == 'success':
- self.logger.info("add_network_adapter_to_vms(): VM {} "
- "conneced to NIC type {}".format(vm_id, nic_type))
+ result = self.client.get_task_monitor().wait_for_success(
+ task=nic_task
+ )
+
+ if result.get("status") == "success":
+ self.logger.info(
+ "add_network_adapter_to_vms(): VM {} "
+ "conneced to NIC type {}".format(vm_id, nic_type)
+ )
else:
- self.logger.error("add_network_adapter_to_vms(): VM {} "
- "failed to connect NIC type {}".format(vm_id, nic_type))
+ self.logger.error(
+ "add_network_adapter_to_vms(): VM {} "
+ "failed to connect NIC type {}".format(vm_id, nic_type)
+ )
except Exception as exp:
- self.logger.error("add_network_adapter_to_vms() : exception occurred "
- "while adding Network adapter")
+ self.logger.error(
+ "add_network_adapter_to_vms() : exception occurred "
+ "while adding Network adapter"
+ )
+
raise vimconn.VimConnException(message=exp)
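Reviewer note: the NetworkConnection handling above is plain string templating plus str.replace() for the optional IP/MAC tags. A minimal standalone sketch of that idea, with hypothetical values that are not taken from the connector state, is:

    # All values below are hypothetical; the connector derives them from the vApp/VNFD.
    network_name = "mgmt-net"
    nic_index = 0
    allocation_mode = "DHCP"
    adapter_type = "VMXNET3"
    mac_address = "00:50:56:aa:bb:cc"

    item = """<NetworkConnection network="{}">
    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
    <IsConnected>true</IsConnected>
    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
    <NetworkAdapterType>{}</NetworkAdapterType>
    </NetworkConnection>""".format(
        network_name, nic_index, allocation_mode, adapter_type
    )

    # The optional MAC tag is spliced in after </IsConnected>, mirroring the replace() above.
    item = item.replace(
        "</IsConnected>\n",
        "</IsConnected>\n<MACAddress>{}</MACAddress>\n".format(mac_address),
    )
    print(item)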
def set_numa_affinity(self, vmuuid, paired_threads_id):
"""
- Method to assign numa affinity in vm configuration parammeters
- Args :
- vmuuid - vm uuid
- paired_threads_id - one or more virtual processor
- numbers
- Returns:
- return if True
+ Method to assign numa affinity in vm configuration parameters
+ Args :
+ vmuuid - vm uuid
+ paired_threads_id - one or more virtual processor
+ numbers
+ Returns:
+ return if True
"""
try:
vcenter_conect, content = self.get_vcenter_content()
vm_moref_id = self.get_vm_moref_id(vmuuid)
-
_, vm_obj = self.get_vm_obj(content, vm_moref_id)
+
if vm_obj:
config_spec = vim.vm.ConfigSpec()
config_spec.extraConfig = []
opt = vim.option.OptionValue()
- opt.key = 'numa.nodeAffinity'
+ opt.key = "numa.nodeAffinity"
opt.value = str(paired_threads_id)
config_spec.extraConfig.append(opt)
task = vm_obj.ReconfigVM_Task(config_spec)
+
if task:
self.wait_for_vcenter_task(task, vcenter_conect)
extra_config = vm_obj.config.extraConfig
flag = False
+
for opts in extra_config:
- if 'numa.nodeAffinity' in opts.key:
+ if "numa.nodeAffinity" in opts.key:
flag = True
- self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "
- "value {} for vm {}".format(opt.value, vm_obj))
+ self.logger.info(
+ "set_numa_affinity: Sucessfully assign numa affinity "
+ "value {} for vm {}".format(opt.value, vm_obj)
+ )
+
if flag:
return
else:
self.logger.error("set_numa_affinity: Failed to assign numa affinity")
except Exception as exp:
- self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "
- "for VM {} : {}".format(vm_obj, vm_moref_id))
- raise vimconn.VimConnException("set_numa_affinity : Error {} failed to assign numa "
- "affinity".format(exp))
+ self.logger.error(
+ "set_numa_affinity : exception occurred while setting numa affinity "
+ "for VM {} : {}".format(vm_obj, vm_moref_id)
+ )
+
+ raise vimconn.VimConnException(
+ "set_numa_affinity : Error {} failed to assign numa "
+ "affinity".format(exp)
+ )
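Reviewer note: NUMA affinity ends up as a plain extraConfig option on the VM. A hedged pyVmomi sketch of just the ConfigSpec construction (no live ReconfigVM_Task call; the thread list is a made-up value):

    from pyVmomi import vim

    paired_threads_id = "0,1"  # hypothetical virtual processor list

    opt = vim.option.OptionValue()
    opt.key = "numa.nodeAffinity"
    opt.value = str(paired_threads_id)

    config_spec = vim.vm.ConfigSpec()
    config_spec.extraConfig = [opt]
    # A live call would then be: task = vm_obj.ReconfigVM_Task(config_spec)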
def cloud_init(self, vapp, cloud_config):
"""
"""
try:
if not isinstance(cloud_config, dict):
- raise Exception("cloud_init : parameter cloud_config is not a dictionary")
+ raise Exception(
+ "cloud_init : parameter cloud_config is not a dictionary"
+ )
else:
key_pairs = []
userdata = []
+
if "key-pairs" in cloud_config:
key_pairs = cloud_config["key-pairs"]
userdata = cloud_config["users"]
self.logger.debug("cloud_init : Guest os customization started..")
- customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
+ customize_script = self.format_script(
+ key_pairs=key_pairs, users_list=userdata
+ )
customize_script = customize_script.replace("&", "&amp;")
self.guest_customization(vapp, customize_script)
-
except Exception as exp:
- self.logger.error("cloud_init : exception occurred while injecting "
- "ssh-key")
- raise vimconn.VimConnException("cloud_init : Error {} failed to inject "
- "ssh-key".format(exp))
+ self.logger.error(
+ "cloud_init : exception occurred while injecting " "ssh-key"
+ )
+
+ raise vimconn.VimConnException(
+ "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
+ )
def format_script(self, key_pairs=[], users_list=[]):
bash_script = """#!/bin/sh
chmod 600 /root/.ssh/authorized_keys
fi
echo '{key}' >> /root/.ssh/authorized_keys
- """.format(key=keys)
+ """.format(
+ key=keys
+ )
bash_script += keys_data
for user in users_list:
- if 'name' in user:
- user_name = user['name']
- if 'key-pairs' in user:
- user_keys = "\n".join(user['key-pairs'])
+ if "name" in user:
+ user_name = user["name"]
+
+ if "key-pairs" in user:
+ user_keys = "\n".join(user["key-pairs"])
else:
user_keys = None
add_user_name = """
useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
- """.format(user_name=user_name)
+ """.format(
+ user_name=user_name
+ )
bash_script += add_user_name
# make centos with selinux happy
which restorecon && restorecon -Rv /home/{user_name}/.ssh
echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
- """.format(user_name=user_name, user_key=user_keys)
-
+ """.format(
+ user_name=user_name, user_key=user_keys
+ )
bash_script += user_keys_data
return bash_script + "\n\tfi"
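Reviewer note: a usage sketch for the script builder above; `vim_conn` stands for an already-instantiated connector object (an assumption) and the cloud_config content is made up:

    cloud_config = {
        "key-pairs": ["ssh-rsa AAAA... operator@host"],
        "users": [{"name": "osm", "key-pairs": ["ssh-rsa BBBB... osm@host"]}],
    }
    customize_script = vim_conn.format_script(
        key_pairs=cloud_config["key-pairs"], users_list=cloud_config["users"]
    )
    # The result is the shell script injected through guest customization at first boot.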
customize_script - Customize script to be run at first boot of VM.
"""
for vm in vapp.get_all_vms():
- vm_id = vm.get('id').split(':')[-1]
- vm_name = vm.get('name')
- vm_name = vm_name.replace('_', '-')
+ vm_id = vm.get("id").split(":")[-1]
+ vm_name = vm.get("name")
+ vm_name = vm_name.replace("_", "-")
- vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ vm_customization_url = (
+ "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
- headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
data = """<GuestCustomizationSection
xmlns="http://www.vmware.com/vcloud/v1.5"
<Link href="{}"
type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
</GuestCustomizationSection>
- """.format(vm_customization_url,
- vm_id,
- customize_script,
- vm_name,
- vm_customization_url)
-
- response = self.perform_request(req_type='PUT',
- url=vm_customization_url,
- headers=headers,
- data=data)
+ """.format(
+ vm_customization_url,
+ vm_id,
+ customize_script,
+ vm_name,
+ vm_customization_url,
+ )
+
+ response = self.perform_request(
+ req_type="PUT", url=vm_customization_url, headers=headers, data=data
+ )
if response.status_code == 202:
guest_task = self.get_task_from_response(response.text)
self.client.get_task_monitor().wait_for_success(task=guest_task)
- self.logger.info("guest_customization : customized guest os task "
- "completed for VM {}".format(vm_name))
+ self.logger.info(
+ "guest_customization : customized guest os task "
+ "completed for VM {}".format(vm_name)
+ )
else:
- self.logger.error("guest_customization : task for customized guest os"
- "failed for VM {}".format(vm_name))
- raise vimconn.VimConnException("guest_customization : failed to perform"
- "guest os customization on VM {}".format(vm_name))
+ self.logger.error(
+ "guest_customization : task for customized guest os"
+ "failed for VM {}".format(vm_name)
+ )
+
+ raise vimconn.VimConnException(
+ "guest_customization : failed to perform"
+ "guest os customization on VM {}".format(vm_name)
+ )
def add_new_disk(self, vapp_uuid, disk_size):
"""
- Method to create an empty vm disk
+ Method to create an empty vm disk
- Args:
- vapp_uuid - is vapp identifier.
- disk_size - size of disk to be created in GB
+ Args:
+ vapp_uuid - is vapp identifier.
+ disk_size - size of disk to be created in GB
- Returns:
- None
+ Returns:
+ None
"""
status = False
vm_details = None
vm_details = self.get_vapp_details_rest(vapp_uuid)
if vm_details and "vm_virtual_hardware" in vm_details:
- self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+ self.logger.info(
+ "Adding disk to VM: {} disk size:{}GB".format(
+ vm_details["name"], disk_size
+ )
+ )
disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
status = self.add_new_disk_rest(disk_href, disk_size_mb)
-
except Exception as exp:
msg = "Error occurred while creating new disk {}.".format(exp)
self.rollback_newvm(vapp_uuid, msg)
if status:
- self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+ self.logger.info(
+ "Added new disk to VM: {} disk size:{}GB".format(
+ vm_details["name"], disk_size
+ )
+ )
else:
# If failed to add disk, delete VM
- msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+ msg = "add_new_disk: Failed to add new disk to {}".format(
+ vm_details["name"]
+ )
self.rollback_newvm(vapp_uuid, msg)
def add_new_disk_rest(self, disk_href, disk_size_mb):
"""
status = False
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=disk_href,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=disk_href, headers=headers
+ )
if response.status_code == 403:
- response = self.retry_rest('GET', disk_href)
+ response = self.retry_rest("GET", disk_href)
if response.status_code != requests.codes.ok:
- self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
- .format(disk_href, response.status_code))
+ self.logger.error(
+ "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
+ disk_href, response.status_code
+ )
+ )
+
return status
+
try:
# Find bus type & max of instance IDs assigned to disks
lxmlroot_respond = lxmlElementTree.fromstring(response.content)
- namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+ namespaces = {
+ prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
+ }
namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
instance_id = 0
- for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
+
+ for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
if item.find("rasd:Description", namespaces).text == "Hard disk":
inst_id = int(item.find("rasd:InstanceID", namespaces).text)
+
if inst_id > instance_id:
instance_id = inst_id
disk_item = item.find("rasd:HostResource", namespaces)
- bus_subtype = disk_item.attrib["{" + namespaces['xmlns'] + "}busSubType"]
- bus_type = disk_item.attrib["{" + namespaces['xmlns'] + "}busType"]
+ bus_subtype = disk_item.attrib[
+ "{" + namespaces["xmlns"] + "}busSubType"
+ ]
+ bus_type = disk_item.attrib[
+ "{" + namespaces["xmlns"] + "}busType"
+ ]
instance_id = instance_id + 1
new_item = """<Item>
vcloud:busType="{}"></rasd:HostResource>
<rasd:InstanceID>{}</rasd:InstanceID>
<rasd:ResourceType>17</rasd:ResourceType>
- </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+ </Item>""".format(
+ disk_size_mb, bus_subtype, bus_type, instance_id
+ )
new_data = response.text
# Add new item at the bottom
- new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+ new_data = new_data.replace(
+ "</Item>\n</RasdItemsList>",
+ "</Item>\n{}\n</RasdItemsList>".format(new_item),
+ )
# Send PUT request to modify virtual hardware section with new disk
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
- response = self.perform_request(req_type='PUT',
- url=disk_href,
- data=new_data,
- headers=headers)
+ response = self.perform_request(
+ req_type="PUT", url=disk_href, data=new_data, headers=headers
+ )
if response.status_code == 403:
- add_headers = {'Content-Type': headers['Content-Type']}
- response = self.retry_rest('PUT', disk_href, add_headers, new_data)
+ add_headers = {"Content-Type": headers["Content-Type"]}
+ response = self.retry_rest("PUT", disk_href, add_headers, new_data)
if response.status_code != 202:
- self.logger.error("PUT REST API call {} failed. Return status code {}. response.text:{}"
- .format(disk_href, response.status_code, response.text))
+ self.logger.error(
+ "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
+ disk_href, response.status_code, response.text
+ )
+ )
else:
add_disk_task = self.get_task_from_response(response.text)
- result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
- if result.get('status') == 'success':
- status = True
- else:
- self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+ result = self.client.get_task_monitor().wait_for_success(
+ task=add_disk_task
+ )
+ if result.get("status") == "success":
+ status = True
+ else:
+ self.logger.error(
+ "Add new disk REST task failed to add {} MB disk".format(
+ disk_size_mb
+ )
+ )
except Exception as exp:
- self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+ self.logger.error(
+ "Error occurred calling rest api for creating new disk {}".format(exp)
+ )
return status
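Reviewer note: the namespace-aware scan for the highest hard-disk InstanceID can be shown standalone; this is a minimal lxml sketch over a made-up, trimmed RasdItemsList, not the real vCloud payload:

    from lxml import etree as lxmlElementTree

    xml = b"""<RasdItemsList xmlns="http://www.vmware.com/vcloud/v1.5"
        xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
      <Item>
        <rasd:Description>Hard disk</rasd:Description>
        <rasd:InstanceID>2000</rasd:InstanceID>
      </Item>
    </RasdItemsList>"""

    root = lxmlElementTree.fromstring(xml)
    namespaces = {prefix: uri for prefix, uri in root.nsmap.items() if prefix}
    namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"

    instance_id = 0
    for item in root.iterfind("xmlns:Item", namespaces):
        if item.find("rasd:Description", namespaces).text == "Hard disk":
            instance_id = max(
                instance_id, int(item.find("rasd:InstanceID", namespaces).text)
            )
    print(instance_id + 1)  # next free instance ID, as the method computes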
- def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
- """
- Method to add existing disk to vm
- Args :
- catalogs - List of VDC catalogs
- image_id - Catalog ID
- template_name - Name of template in catalog
- vapp_uuid - UUID of vApp
- Returns:
- None
+ def add_existing_disk(
+ self,
+ catalogs=None,
+ image_id=None,
+ size=None,
+ template_name=None,
+ vapp_uuid=None,
+ ):
+ """
+ Method to add existing disk to vm
+ Args :
+ catalogs - List of VDC catalogs
+ image_id - Catalog ID
+ template_name - Name of template in catalog
+ vapp_uuid - UUID of vApp
+ Returns:
+ None
"""
disk_info = None
vcenter_conect, content = self.get_vcenter_content()
# find moref-id of vm in image
- catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
- image_id=image_id,
- )
+ catalog_vm_info = self.get_vapp_template_details(
+ catalogs=catalogs,
+ image_id=image_id,
+ )
if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
- catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+ catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
+ "vm_moref_id", None
+ )
+
if catalog_vm_moref_id:
- self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+ self.logger.info(
+ "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
+ )
_, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+
if catalog_vm_obj:
# find existing disk
disk_info = self.find_disk(catalog_vm_obj)
# get VM
vm_moref_id = self.get_vm_moref_id(vapp_uuid)
_, vm_obj = self.get_vm_obj(content, vm_moref_id)
+
if vm_obj:
- status = self.add_disk(vcenter_conect=vcenter_conect,
- vm=vm_obj,
- disk_info=disk_info,
- size=size,
- vapp_uuid=vapp_uuid
- )
+ status = self.add_disk(
+ vcenter_conect=vcenter_conect,
+ vm=vm_obj,
+ disk_info=disk_info,
+ size=size,
+ vapp_uuid=vapp_uuid,
+ )
+
if status:
- self.logger.info("Disk from image id {} added to {}".format(image_id,
- vm_obj.config.name)
- )
+ self.logger.info(
+ "Disk from image id {} added to {}".format(
+ image_id, vm_obj.config.name
+ )
+ )
else:
msg = "No disk found with image id {} to add in VM {}".format(
- image_id,
- vm_obj.config.name)
+ image_id, vm_obj.config.name
+ )
self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
def find_disk(self, vm_obj):
"""
- Method to find details of existing disk in VM
- Args :
+ Method to find details of existing disk in VM
+ Args:
vm_obj - vCenter object of VM
- image_id - Catalog ID
Returns:
disk_info : dict of disk details
"""
if vm_obj:
try:
devices = vm_obj.config.hardware.device
+
for device in devices:
if type(device) is vim.vm.device.VirtualDisk:
- if (isinstance(device.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and
- hasattr(device.backing, 'fileName')):
+ if (
+ isinstance(
+ device.backing,
+ vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
+ )
+ and hasattr(device.backing, "fileName")
+ ):
disk_info["full_path"] = device.backing.fileName
disk_info["datastore"] = device.backing.datastore
disk_info["capacityKB"] = device.capacityInKB
break
except Exception as exp:
- self.logger.error("find_disk() : exception occurred while "
- "getting existing disk details :{}".format(exp))
+ self.logger.error(
+ "find_disk() : exception occurred while "
+ "getting existing disk details :{}".format(exp)
+ )
+
return disk_info
- def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+ def add_disk(
+ self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
+ ):
"""
- Method to add existing disk in VM
- Args :
- vcenter_conect - vCenter content object
- vm - vCenter vm object
- disk_info : dict of disk details
- Returns:
- status : status of add disk task
+ Method to add existing disk in VM
+ Args :
+ vcenter_conect - vCenter content object
+ vm - vCenter vm object
+ disk_info : dict of disk details
+ Returns:
+ status : status of add disk task
"""
datastore = disk_info["datastore"] if "datastore" in disk_info else None
fullpath = disk_info["full_path"] if "full_path" in disk_info else None
# Convert size from GB to KB
sizeKB = int(size) * 1024 * 1024
# compare size of existing disk and user given size. Assign whichever is greater
- self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
- sizeKB, capacityKB))
+ self.logger.info(
+ "Add Existing disk : sizeKB {} , capacityKB {}".format(
+ sizeKB, capacityKB
+ )
+ )
+
if sizeKB > capacityKB:
capacityKB = sizeKB
# get all disks on a VM, set unit_number to the next available
unit_number = 0
for dev in vm.config.hardware.device:
- if hasattr(dev.backing, 'fileName'):
+ if hasattr(dev.backing, "fileName"):
unit_number = int(dev.unitNumber) + 1
# unit_number 7 reserved for scsi controller
+
if unit_number == 7:
unit_number += 1
+
if isinstance(dev, vim.vm.device.VirtualDisk):
# vim.vm.device.VirtualSCSIController
controller_key = dev.controllerKey
- self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
- unit_number, controller_key))
+ self.logger.info(
+ "Add Existing disk : unit number {} , controller key {}".format(
+ unit_number, controller_key
+ )
+ )
# add disk here
dev_changes = []
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
- disk_spec.device.backing = \
+ disk_spec.device.backing = (
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+ )
disk_spec.device.backing.thinProvisioned = True
- disk_spec.device.backing.diskMode = 'persistent'
+ disk_spec.device.backing.diskMode = "persistent"
disk_spec.device.backing.datastore = datastore
disk_spec.device.backing.fileName = fullpath
spec.deviceChange = dev_changes
task = vm.ReconfigVM_Task(spec=spec)
status = self.wait_for_vcenter_task(task, vcenter_conect)
+
return status
except Exception as exp:
- exp_msg = "add_disk() : exception {} occurred while adding disk "\
- "{} to vm {}".format(exp,
- fullpath,
- vm.config.name)
+ exp_msg = (
+ "add_disk() : exception {} occurred while adding disk "
+ "{} to vm {}".format(exp, fullpath, vm.config.name)
+ )
self.rollback_newvm(vapp_uuid, exp_msg)
else:
- msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+ msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
+ disk_info
+ )
self.rollback_newvm(vapp_uuid, msg)
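Reviewer note: the attach operation above boils down to a VirtualDeviceSpec with a FlatVer2 backing. A hedged sketch of the spec alone, with placeholder values standing in for what find_disk() and the VM device scan would provide:

    from pyVmomi import vim

    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    disk_spec.device.backing.thinProvisioned = True
    disk_spec.device.backing.diskMode = "persistent"
    disk_spec.device.backing.fileName = "[datastore1] images/base.vmdk"  # placeholder path
    disk_spec.device.capacityInKB = 10 * 1024 * 1024  # placeholder: 10 GB in KB
    disk_spec.device.unitNumber = 1  # placeholder free unit number
    disk_spec.device.controllerKey = 1000  # placeholder SCSI controller key

    spec = vim.vm.ConfigSpec(deviceChange=[disk_spec])
    # A live call would then be: task = vm.ReconfigVM_Task(spec=spec)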
def get_vcenter_content(self):
"""
- Get the vsphere content object
+ Get the vsphere content object
"""
try:
vm_vcenter_info = self.get_vm_vcenter_info()
except Exception as exp:
- self.logger.error("Error occurred while getting vCenter infromationn"
- " for VM : {}".format(exp))
+ self.logger.error(
+ "Error occurred while getting vCenter infromationn"
+ " for VM : {}".format(exp)
+ )
+
raise vimconn.VimConnException(message=exp)
context = None
- if hasattr(ssl, '_create_unverified_context'):
+ if hasattr(ssl, "_create_unverified_context"):
context = ssl._create_unverified_context()
vcenter_conect = SmartConnect(
user=vm_vcenter_info["vm_vcenter_user"],
pwd=vm_vcenter_info["vm_vcenter_password"],
port=int(vm_vcenter_info["vm_vcenter_port"]),
- sslContext=context
- )
+ sslContext=context,
+ )
atexit.register(Disconnect, vcenter_conect)
content = vcenter_conect.RetrieveContent()
+
return vcenter_conect, content
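Reviewer note: the SSL-relaxed vCenter session above follows the usual pyVim pattern; a hedged standalone sketch with placeholder host and credentials (the connector reads the real ones from get_vm_vcenter_info()):

    import atexit
    import ssl

    from pyVim.connect import SmartConnect, Disconnect

    context = None
    if hasattr(ssl, "_create_unverified_context"):
        context = ssl._create_unverified_context()  # skip certificate checks, as the connector does

    si = SmartConnect(
        host="vcenter.example.com",  # placeholder
        user="administrator@vsphere.local",  # placeholder
        pwd="secret",  # placeholder
        port=443,
        sslContext=context,
    )
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()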
def get_vm_moref_id(self, vapp_uuid):
"""
try:
if vapp_uuid:
- vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+ vm_details = self.get_vapp_details_rest(
+ vapp_uuid, need_admin_access=True
+ )
+
if vm_details and "vm_vcenter_info" in vm_details:
vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
- return vm_moref_id
+ return vm_moref_id
except Exception as exp:
- self.logger.error("Error occurred while getting VM moref ID "
- " for VM : {}".format(exp))
+ self.logger.error(
+ "Error occurred while getting VM moref ID " " for VM : {}".format(exp)
+ )
+
return None
- def get_vapp_template_details(self, catalogs=None, image_id=None, template_name=None):
+ def get_vapp_template_details(
+ self, catalogs=None, image_id=None, template_name=None
+ ):
"""
- Method to get vApp template details
- Args :
- catalogs - list of VDC catalogs
- image_id - Catalog ID to find
- template_name : template name in catalog
- Returns:
- parsed_respond : dict of vApp tempalte details
+ Method to get vApp template details
+ Args :
+ catalogs - list of VDC catalogs
+ image_id - Catalog ID to find
+ template_name : template name in catalog
+ Returns:
+ parsed_response : dict of vApp template details
"""
parsed_response = {}
org, _ = self.get_vdc_details()
catalog = self.get_catalog_obj(image_id, catalogs)
if catalog:
- items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+ items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
catalog_items = [items.attrib]
if len(catalog_items) == 1:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-
- response = self.perform_request(req_type='GET',
- url=catalog_items[0].get('href'),
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET",
+ url=catalog_items[0].get("href"),
+ headers=headers,
+ )
catalogItem = XmlElementTree.fromstring(response.text)
- entity = [child for child in catalogItem if child.get("type") ==
- "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+ entity = [
+ child
+ for child in catalogItem
+ if child.get("type")
+ == "application/vnd.vmware.vcloud.vAppTemplate+xml"
+ ][0]
vapp_tempalte_href = entity.get("href")
# get vapp details and parse moref id
namespaces = {
- "vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
- 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
- 'vmw': 'http://www.vmware.com/schema/ovf',
- 'vm': 'http://www.vmware.com/vcloud/v1.5',
- 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
- 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
- 'xmlns':"http://www.vmware.com/vcloud/v1.5"
+ "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
+ "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
+ "vmw": "http://www.vmware.com/schema/ovf",
+ "vm": "http://www.vmware.com/vcloud/v1.5",
+ "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+ "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
+ "xmlns": "http://www.vmware.com/vcloud/v1.5",
}
if vca._session:
- response = self.perform_request(req_type='GET',
- url=vapp_tempalte_href,
- headers=headers)
+ response = self.perform_request(
+ req_type="GET", url=vapp_tempalte_href, headers=headers
+ )
if response.status_code != requests.codes.ok:
- self.logger.debug("REST API call {} failed. Return status code {}".format(
- vapp_tempalte_href, response.status_code))
-
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vapp_tempalte_href, response.status_code
+ )
+ )
else:
xmlroot_respond = XmlElementTree.fromstring(response.text)
- children_section = xmlroot_respond.find('vm:Children/', namespaces)
+ children_section = xmlroot_respond.find(
+ "vm:Children/", namespaces
+ )
+
if children_section is not None:
- vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+ vCloud_extension_section = children_section.find(
+ "xmlns:VCloudExtension", namespaces
+ )
+
if vCloud_extension_section is not None:
vm_vcenter_info = {}
- vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
- vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+ vim_info = vCloud_extension_section.find(
+ "vmext:VmVimInfo", namespaces
+ )
+ vmext = vim_info.find(
+ "vmext:VmVimObjectRef", namespaces
+ )
+
if vmext is not None:
- vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
- parsed_response["vm_vcenter_info"] = vm_vcenter_info
+ vm_vcenter_info["vm_moref_id"] = vmext.find(
+ "vmext:MoRef", namespaces
+ ).text
+ parsed_response["vm_vcenter_info"] = vm_vcenter_info
except Exception as exp:
- self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+ self.logger.info(
+ "Error occurred calling rest api for getting vApp details {}".format(
+ exp
+ )
+ )
return parsed_response
def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
"""
- Method to delete vApp
- Args :
- vapp_uuid - vApp UUID
- msg - Error message to be logged
- exp_type : Exception type
- Returns:
- None
+ Method to delete vApp
+ Args :
+ vapp_uuid - vApp UUID
+ msg - Error message to be logged
+ exp_type : Exception type
+ Returns:
+ None
"""
if vapp_uuid:
self.delete_vminstance(vapp_uuid)
else:
msg = "No vApp ID"
+
self.logger.error(msg)
+
if exp_type == "Genric":
raise vimconn.VimConnException(msg)
elif exp_type == "NotFound":
def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
"""
- Method to attach SRIOV adapters to VM
+ Method to attach SRIOV adapters to VM
- Args:
- vapp_uuid - uuid of vApp/VM
- sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
- vmname_andid - vmname
+ Args:
+ vapp_uuid - uuid of vApp/VM
+ sriov_nets - SRIOV devices information as specified in VNFD (flavor)
+ vmname_andid - vmname
- Returns:
- The status of add SRIOV adapter task , vm object and
- vcenter_conect object
+ Returns:
+ The status of add SRIOV adapter task , vm object and
+ vcenter_conect object
"""
vm_obj = None
vcenter_conect, content = self.get_vcenter_content()
if no_of_sriov_devices > 0:
# Get VM and its host
host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
- self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+ self.logger.info(
+ "VM {} is currently on host {}".format(vm_obj, host_obj)
+ )
+
if host_obj and vm_obj:
# get SRIOV devies from host on which vapp is currently installed
- avilable_sriov_devices = self.get_sriov_devices(host_obj,
- no_of_sriov_devices,
- )
+ avilable_sriov_devices = self.get_sriov_devices(
+ host_obj,
+ no_of_sriov_devices,
+ )
if len(avilable_sriov_devices) == 0:
# find other hosts with active pci devices
- new_host_obj, avilable_sriov_devices = self.get_host_and_sriov_devices(
+ (
+ new_host_obj,
+ avilable_sriov_devices,
+ ) = self.get_host_and_sriov_devices(
content,
no_of_sriov_devices,
- )
+ )
- if new_host_obj is not None and len(avilable_sriov_devices) > 0:
+ if (
+ new_host_obj is not None
+ and len(avilable_sriov_devices) > 0
+ ):
# Migrate vm to the host where SRIOV devices are available
- self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
- new_host_obj))
+ self.logger.info(
+ "Relocate VM {} on new host {}".format(
+ vm_obj, new_host_obj
+ )
+ )
task = self.relocate_vm(new_host_obj, vm_obj)
+
if task is not None:
- result = self.wait_for_vcenter_task(task, vcenter_conect)
- self.logger.info("Migrate VM status: {}".format(result))
+ result = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+ self.logger.info(
+ "Migrate VM status: {}".format(result)
+ )
host_obj = new_host_obj
else:
- self.logger.info("Fail to migrate VM : {}".format(result))
+ self.logger.info(
+ "Fail to migrate VM : {}".format(result)
+ )
+
raise vimconn.VimConnNotFoundException(
"Fail to migrate VM : {} to host {}".format(
- vmname_andid,
- new_host_obj)
+ vmname_andid, new_host_obj
)
+ )
- if (host_obj is not None and
- avilable_sriov_devices is not None and
- len(avilable_sriov_devices) > 0):
+ if (
+ host_obj is not None
+ and avilable_sriov_devices is not None
+ and len(avilable_sriov_devices) > 0
+ ):
# Add SRIOV devices one by one
for sriov_net in sriov_nets:
- network_name = sriov_net.get('net_id')
+ network_name = sriov_net.get("net_id")
self.create_dvPort_group(network_name)
- if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
+
+ if (
+ sriov_net.get("type") == "VF"
+ or sriov_net.get("type") == "SR-IOV"
+ ):
# add vlan ID ,Modify portgroup for vlan ID
- self.configure_vlanID(content, vcenter_conect, network_name)
-
- task = self.add_sriov_to_vm(content,
- vm_obj,
- host_obj,
- network_name,
- avilable_sriov_devices[0]
- )
+ self.configure_vlanID(
+ content, vcenter_conect, network_name
+ )
+
+ task = self.add_sriov_to_vm(
+ content,
+ vm_obj,
+ host_obj,
+ network_name,
+ avilable_sriov_devices[0],
+ )
+
if task:
- status = self.wait_for_vcenter_task(task, vcenter_conect)
+ status = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+
if status:
- self.logger.info("Added SRIOV {} to VM {}".format(
- no_of_sriov_devices,
- str(vm_obj)))
+ self.logger.info(
+ "Added SRIOV {} to VM {}".format(
+ no_of_sriov_devices, str(vm_obj)
+ )
+ )
else:
- self.logger.error("Fail to add SRIOV {} to VM {}".format(
- no_of_sriov_devices,
- str(vm_obj)))
+ self.logger.error(
+ "Fail to add SRIOV {} to VM {}".format(
+ no_of_sriov_devices, str(vm_obj)
+ )
+ )
+
raise vimconn.VimConnUnexpectedResponse(
- "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
+ "Fail to add SRIOV adapter in VM {}".format(
+ str(vm_obj)
)
+ )
+
return True, vm_obj, vcenter_conect
else:
- self.logger.error("Currently there is no host with"
- " {} number of avaialble SRIOV "
- "VFs required for VM {}".format(
- no_of_sriov_devices,
- vmname_andid)
- )
+ self.logger.error(
+ "Currently there is no host with"
+ " {} number of avaialble SRIOV "
+ "VFs required for VM {}".format(
+ no_of_sriov_devices, vmname_andid
+ )
+ )
+
raise vimconn.VimConnNotFoundException(
"Currently there is no host with {} "
"number of avaialble SRIOV devices required for VM {}".format(
- no_of_sriov_devices,
- vmname_andid))
+ no_of_sriov_devices, vmname_andid
+ )
+ )
else:
- self.logger.debug("No infromation about SRIOV devices {} ", sriov_nets)
-
+ self.logger.debug(
+ "No infromation about SRIOV devices {} ", sriov_nets
+ )
except vmodl.MethodFault as error:
self.logger.error("Error occurred while adding SRIOV {} ", error)
+
return None, vm_obj, vcenter_conect
def get_sriov_devices(self, host, no_of_vfs):
"""
- Method to get the details of SRIOV devices on given host
- Args:
- host - vSphere host object
- no_of_vfs - number of VFs needed on host
+ Method to get the details of SRIOV devices on given host
+ Args:
+ host - vSphere host object
+ no_of_vfs - number of VFs needed on host
- Returns:
- array of SRIOV devices
+ Returns:
+ array of SRIOV devices
"""
sriovInfo = []
+
if host:
for device in host.config.pciPassthruInfo:
if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
if device.numVirtualFunction >= no_of_vfs:
sriovInfo.append(device)
break
+
return sriovInfo
def get_host_and_sriov_devices(self, content, no_of_vfs):
"""
- Method to get the details of SRIOV devices infromation on all hosts
+ Method to get the details of SRIOV devices information on all hosts
- Args:
- content - vSphere host object
- no_of_vfs - number of pci VFs needed on host
+ Args:
+ content - vSphere host object
+ no_of_vfs - number of pci VFs needed on host
- Returns:
- array of SRIOV devices and host object
+ Returns:
+ array of SRIOV devices and host object
"""
host_obj = None
sriov_device_objs = None
+
try:
if content:
- container = content.viewManager.CreateContainerView(content.rootFolder,
- [vim.HostSystem], True)
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, [vim.HostSystem], True
+ )
+
for host in container.view:
devices = self.get_sriov_devices(host, no_of_vfs)
+
if devices:
host_obj = host
sriov_device_objs = devices
break
except Exception as exp:
- self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+ self.logger.error(
+ "Error {} occurred while finding SRIOV devices on host: {}".format(
+ exp, host_obj
+ )
+ )
return host_obj, sriov_device_objs
def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
"""
- Method to add SRIOV adapter to vm
+ Method to add SRIOV adapter to vm
- Args:
- host_obj - vSphere host object
- vm_obj - vSphere vm object
- content - vCenter content object
- network_name - name of distributed virtaul portgroup
- sriov_device - SRIOV device info
+ Args:
+ host_obj - vSphere host object
+ vm_obj - vSphere vm object
+ content - vCenter content object
+ network_name - name of distributed virtual portgroup
+ sriov_device - SRIOV device info
- Returns:
- task object
+ Returns:
+ task object
"""
devices = []
vnic_label = "sriov nic"
+
try:
dvs_portgr = self.get_dvport_group(network_name)
network_name = dvs_portgr.name
# VM device
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic.device = vim.vm.device.VirtualSriovEthernetCard()
- nic.device.addressType = 'assigned'
+ nic.device.addressType = "assigned"
# nic.device.key = 13016
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = vnic_label
nic.device.deviceInfo.summary = network_name
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
- nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+ nic.device.backing.network = self.get_obj(
+ content, [vim.Network], network_name
+ )
nic.device.backing.deviceName = network_name
nic.device.backing.useAutoDetect = False
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = True
nic.device.connectable.allowGuestControl = True
- nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
- nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+ nic.device.sriovBacking = (
+ vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+ )
+ nic.device.sriovBacking.physicalFunctionBacking = (
+ vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+ )
nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
devices.append(nic)
vmconf = vim.vm.ConfigSpec(deviceChange=devices)
task = vm_obj.ReconfigVM_Task(vmconf)
+
return task
except Exception as exp:
- self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+ self.logger.error(
+ "Error {} occurred while adding SRIOV adapter in VM: {}".format(
+ exp, vm_obj
+ )
+ )
+
return None
def create_dvPort_group(self, network_name):
"""
- Method to create disributed virtual portgroup
+ Method to create distributed virtual portgroup
- Args:
- network_name - name of network/portgroup
+ Args:
+ network_name - name of network/portgroup
- Returns:
- portgroup key
+ Returns:
+ portgroup key
"""
try:
- new_network_name = [network_name, '-', str(uuid.uuid4())]
- network_name = ''.join(new_network_name)
+ new_network_name = [network_name, "-", str(uuid.uuid4())]
+ network_name = "".join(new_network_name)
vcenter_conect, content = self.get_vcenter_content()
- dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+ dv_switch = self.get_obj(
+ content, [vim.DistributedVirtualSwitch], self.dvs_name
+ )
+
if dv_switch:
dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
dv_pg_spec.name = network_name
- dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
- dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
- dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
- dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
- dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
- dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+ dv_pg_spec.type = (
+ vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+ )
+ dv_pg_spec.defaultPortConfig = (
+ vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy = (
+ vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
+ vim.BoolPolicy(value=False)
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
+ vim.BoolPolicy(value=False)
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
+ value=False
+ )
task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
self.wait_for_vcenter_task(task, vcenter_conect)
- dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+ dvPort_group = self.get_obj(
+ content, [vim.dvs.DistributedVirtualPortgroup], network_name
+ )
+
if dvPort_group:
- self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
+ self.logger.info(
+ "Created disributed virtaul port group: {}".format(dvPort_group)
+ )
return dvPort_group.key
else:
- self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
+ self.logger.debug(
+ "No disributed virtual switch found with name {}".format(
+ network_name
+ )
+ )
except Exception as exp:
- self.logger.error("Error occurred while creating disributed virtaul port group {}"
- " : {}".format(network_name, exp))
+ self.logger.error(
+ "Error occurred while creating disributed virtaul port group {}"
+ " : {}".format(network_name, exp)
+ )
+
return None
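Reviewer note: the less obvious part of the spec above is that each security flag must be wrapped in a vim.BoolPolicy. A hedged pyVmomi sketch of just the locked-down ConfigSpec, without the AddDVPortgroup_Task/wait calls (the portgroup name is hypothetical):

    from pyVmomi import vim

    dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
    dv_pg_spec.name = "sriov-net-example"  # hypothetical portgroup name
    dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
    dv_pg_spec.defaultPortConfig = (
        vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
    )
    dv_pg_spec.defaultPortConfig.securityPolicy = (
        vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
    )
    # Disable promiscuous mode, forged transmits and MAC changes, as the connector does.
    dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
    dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
    dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
    # A live call would then be: task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])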
def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
"""
- Method to reconfigure disributed virtual portgroup
+ Method to reconfigure distributed virtual portgroup
- Args:
- dvPort_group_name - name of disributed virtual portgroup
- content - vCenter content object
- config_info - disributed virtual portgroup configuration
+ Args:
+ dvPort_group_name - name of distributed virtual portgroup
+ content - vCenter content object
+ config_info - distributed virtual portgroup configuration
- Returns:
- task object
+ Returns:
+ task object
"""
try:
dvPort_group = self.get_dvport_group(dvPort_group_name)
+
if dvPort_group:
dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
dv_pg_spec.configVersion = dvPort_group.config.configVersion
- dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+ dv_pg_spec.defaultPortConfig = (
+ vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+ )
+
if "vlanID" in config_info:
- dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
- dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+ dv_pg_spec.defaultPortConfig.vlan = (
+ vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+ )
+ dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+
return task
else:
return None
except Exception as exp:
- self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"
- " : {}".format(dvPort_group_name, exp))
+ self.logger.error(
+ "Error occurred while reconfiguraing disributed virtaul port group {}"
+ " : {}".format(dvPort_group_name, exp)
+ )
+
return None
def destroy_dvport_group(self, dvPort_group_name):
"""
- Method to destroy disributed virtual portgroup
+ Method to destroy distributed virtual portgroup
- Args:
- network_name - name of network/portgroup
+ Args:
+ network_name - name of network/portgroup
- Returns:
- True if portgroup successfully got deleted else false
+ Returns:
+ True if portgroup successfully got deleted else false
"""
vcenter_conect, _ = self.get_vcenter_content()
+
try:
status = None
dvPort_group = self.get_dvport_group(dvPort_group_name)
+
if dvPort_group:
task = dvPort_group.Destroy_Task()
status = self.wait_for_vcenter_task(task, vcenter_conect)
+
return status
except vmodl.MethodFault as exp:
- self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
- exp, dvPort_group_name))
+ self.logger.error(
+ "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
+ exp, dvPort_group_name
+ )
+ )
+
return None
def get_dvport_group(self, dvPort_group_name):
"""
_, content = self.get_vcenter_content()
dvPort_group = None
+
try:
- container = content.viewManager.CreateContainerView(content.rootFolder,
- [vim.dvs.DistributedVirtualPortgroup], True)
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
+ )
+
for item in container.view:
if item.key == dvPort_group_name:
dvPort_group = item
break
+
return dvPort_group
except vmodl.MethodFault as exp:
- self.logger.error("Caught vmodl fault {} for disributed virtual port group {}".format(
- exp, dvPort_group_name))
+ self.logger.error(
+ "Caught vmodl fault {} for disributed virtual port group {}".format(
+ exp, dvPort_group_name
+ )
+ )
+
return None
def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
"""
- Method to get disributed virtual portgroup vlanID
+ Method to get distributed virtual portgroup vlanID
- Args:
- network_name - name of network/portgroup
+ Args:
+ network_name - name of network/portgroup
- Returns:
- vlan ID
+ Returns:
+ vlan ID
"""
vlanId = None
+
try:
dvPort_group = self.get_dvport_group(dvPort_group_name)
+
if dvPort_group:
vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
except vmodl.MethodFault as exp:
- self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
- exp, dvPort_group_name))
+ self.logger.error(
+ "Caught vmodl fault {} for disributed virtaul port group {}".format(
+ exp, dvPort_group_name
+ )
+ )
+
return vlanId
def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
"""
- Method to configure vlanID in disributed virtual portgroup vlanID
+ Method to configure vlanID in distributed virtual portgroup
- Args:
- network_name - name of network/portgroup
+ Args:
+ network_name - name of network/portgroup
- Returns:
- None
+ Returns:
+ None
"""
vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+
if vlanID == 0:
# configure vlanID
vlanID = self.genrate_vlanID(dvPort_group_name)
config = {"vlanID": vlanID}
- task = self.reconfig_portgroup(content, dvPort_group_name,
- config_info=config)
+ task = self.reconfig_portgroup(
+ content, dvPort_group_name, config_info=config
+ )
+
if task:
status = self.wait_for_vcenter_task(task, vcenter_conect)
+
if status:
- self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
- dvPort_group_name, vlanID))
+ self.logger.info(
+ "Reconfigured Port group {} for vlan ID {}".format(
+ dvPort_group_name, vlanID
+ )
+ )
else:
- self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
- dvPort_group_name, vlanID))
+ self.logger.error(
+ "Fail reconfigure portgroup {} for vlanID{}".format(
+ dvPort_group_name, vlanID
+ )
+ )
def genrate_vlanID(self, network_name):
"""
- Method to get unused vlanID
- Args:
- network_name - name of network/portgroup
- Returns:
- vlanID
+ Method to get unused vlanID
+ Args:
+ network_name - name of network/portgroup
+ Returns:
+ vlanID
"""
vlan_id = None
used_ids = []
- if self.config.get('vlanID_range') is None:
- raise vimconn.VimConnConflictException("You must provide a 'vlanID_range' "
- "at config value before creating sriov network with vlan tag")
+
+ if self.config.get("vlanID_range") is None:
+ raise vimconn.VimConnConflictException(
+ "You must provide a 'vlanID_range' "
+ "at config value before creating sriov network with vlan tag"
+ )
+
if "used_vlanIDs" not in self.persistent_info:
- self.persistent_info["used_vlanIDs"] = {}
+ self.persistent_info["used_vlanIDs"] = {}
else:
used_ids = list(self.persistent_info["used_vlanIDs"].values())
- for vlanID_range in self.config.get('vlanID_range'):
+ for vlanID_range in self.config.get("vlanID_range"):
start_vlanid, end_vlanid = vlanID_range.split("-")
+
if start_vlanid > end_vlanid:
- raise vimconn.VimConnConflictException("Invalid vlan ID range {}".format(
- vlanID_range))
+ raise vimconn.VimConnConflictException(
+ "Invalid vlan ID range {}".format(vlanID_range)
+ )
for vid in range(int(start_vlanid), int(end_vlanid) + 1):
if vid not in used_ids:
vlan_id = vid
self.persistent_info["used_vlanIDs"][network_name] = vlan_id
return vlan_id
+
if vlan_id is None:
raise vimconn.VimConnConflictException("All Vlan IDs are in use")
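Reviewer note: the VLAN allocation above is a simple scan over the configured ranges against the persisted set of used IDs. A self-contained sketch of the same algorithm, with hypothetical range and usage values:

    def pick_free_vlan(vlan_ranges, used_ids):
        """Return the first VLAN ID inside vlan_ranges (e.g. ["3000-3100"]) not in used_ids."""
        for vlan_range in vlan_ranges:
            start_vlanid, end_vlanid = vlan_range.split("-")
            for vid in range(int(start_vlanid), int(end_vlanid) + 1):
                if vid not in used_ids:
                    return vid
        raise ValueError("All Vlan IDs are in use")

    print(pick_free_vlan(["3000-3005"], {3000, 3001}))  # -> 3002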
def get_obj(self, content, vimtype, name):
"""
- Get the vsphere object associated with a given text name
+ Get the vsphere object associated with a given text name
"""
obj = None
- container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, vimtype, True
+ )
+
for item in container.view:
if item.name == name:
obj = item
break
+
return obj
def insert_media_to_vm(self, vapp, image_id):
try:
# fetching catalog details
rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+
if vca._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=rest_url,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=rest_url, headers=headers
+ )
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(url_rest_call,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("insert_media_to_vm(): Failed to get "
- "catalog details")
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ rest_url, response.text, response.status_code
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "insert_media_to_vm(): Failed to get " "catalog details"
+ )
+
# searching iso name and id
iso_name, media_id = self.get_media_details(vca, response.text)
name="{}"
id="urn:vcloud:media:{}"
href="https://{}/api/media/{}"/>
- </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
- self.url, media_id)
+ </ns6:MediaInsertOrEjectParams>""".format(
+ iso_name, media_id, self.url, media_id
+ )
for vms in vapp.get_all_vms():
- vm_id = vms.get('id').split(':')[-1]
+ vm_id = vms.get("id").split(":")[-1]
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
- rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url, vm_id)
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
+ rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
+ self.url, vm_id
+ )
- response = self.perform_request(req_type='POST',
- url=rest_url,
- data=data,
- headers=headers)
+ response = self.perform_request(
+ req_type="POST", url=rest_url, data=data, headers=headers
+ )
if response.status_code != 202:
- error_msg = ("insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
- "Status code {}".format(response.text, response.status_code))
+ error_msg = (
+ "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
+ "Status code {}".format(response.text, response.status_code)
+ )
self.logger.error(error_msg)
+
raise vimconn.VimConnException(error_msg)
else:
task = self.get_task_from_response(response.text)
- result = self.client.get_task_monitor().wait_for_success(task=task)
- if result.get('status') == 'success':
- self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"
- " image to vm {}".format(vm_id))
-
+ result = self.client.get_task_monitor().wait_for_success(
+ task=task
+ )
+
+ if result.get("status") == "success":
+ self.logger.info(
+ "insert_media_to_vm(): Sucessfully inserted media ISO"
+ " image to vm {}".format(vm_id)
+ )
except Exception as exp:
- self.logger.error("insert_media_to_vm() : exception occurred "
- "while inserting media CD-ROM")
+ self.logger.error(
+ "insert_media_to_vm() : exception occurred "
+ "while inserting media CD-ROM"
+ )
+
raise vimconn.VimConnException(message=exp)
def get_media_details(self, vca, content):
try:
if content:
vm_list_xmlroot = XmlElementTree.fromstring(content)
+
for child in vm_list_xmlroot.iter():
- if 'CatalogItem' in child.tag:
- cataloghref_list.append(child.attrib.get('href'))
+ if "CatalogItem" in child.tag:
+ cataloghref_list.append(child.attrib.get("href"))
+
if cataloghref_list is not None:
for href in cataloghref_list:
if href:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = self.perform_request(req_type='GET',
- url=href,
- headers=headers)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=href, headers=headers
+ )
+
if response.status_code != 200:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {}".format(href,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("get_media_details : Failed to get "
- "catalogitem details")
+ self.logger.error(
+ "REST call {} failed reason : {}"
+ "status code : {}".format(
+ href, response.text, response.status_code
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "get_media_details : Failed to get "
+ "catalogitem details"
+ )
+
list_xmlroot = XmlElementTree.fromstring(response.text)
+
for child in list_xmlroot.iter():
- if 'Entity' in child.tag:
- if 'media' in child.attrib.get('href'):
- name = child.attrib.get('name')
- media_id = child.attrib.get('href').split('/').pop()
+ if "Entity" in child.tag:
+ if "media" in child.attrib.get("href"):
+ name = child.attrib.get("name")
+ media_id = (
+ child.attrib.get("href").split("/").pop()
+ )
+
return name, media_id
else:
self.logger.debug("Media name and id not found")
+
return False, False
except Exception as exp:
- self.logger.error("get_media_details : exception occurred "
- "getting media details")
+ self.logger.error(
+ "get_media_details : exception occurred " "getting media details"
+ )
+
raise vimconn.VimConnException(message=exp)
def retry_rest(self, method, url, add_headers=None, data=None):
- """ Method to get Token & retry respective REST request
- Args:
- api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
- url - request url to be used
- add_headers - Additional headers (optional)
- data - Request payload data to be passed in request
- Returns:
- response - Response of request
+ """Method to get Token & retry respective REST request
+ Args:
+ method - REST method: one of 'GET', 'PUT', 'POST' or 'DELETE'
+ url - request url to be used
+ add_headers - Additional headers (optional)
+ data - Request payload data to be passed in request
+ Returns:
+ response - Response of request
"""
response = None
self.get_token()
if self.client._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
if add_headers:
headers.update(add_headers)
- if method == 'GET':
- response = self.perform_request(req_type='GET',
- url=url,
- headers=headers)
- elif method == 'PUT':
- response = self.perform_request(req_type='PUT',
- url=url,
- headers=headers,
- data=data)
- elif method == 'POST':
- response = self.perform_request(req_type='POST',
- url=url,
- headers=headers,
- data=data)
- elif method == 'DELETE':
- response = self.perform_request(req_type='DELETE',
- url=url,
- headers=headers)
+ if method == "GET":
+ response = self.perform_request(req_type="GET", url=url, headers=headers)
+ elif method == "PUT":
+ response = self.perform_request(
+ req_type="PUT", url=url, headers=headers, data=data
+ )
+ elif method == "POST":
+ response = self.perform_request(
+ req_type="POST", url=url, headers=headers, data=data
+ )
+ elif method == "DELETE":
+ response = self.perform_request(req_type="DELETE", url=url, headers=headers)
+
return response
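# Illustrative sketch (not part of this patch): the token-plus-header pattern that
# retry_rest() and perform_request() implement above, written as a standalone helper.
# API_VERSION and the token value are placeholders, and requests.request() is used
# here instead of the explicit per-verb if/elif chain.
import requests

API_VERSION = "5.9"  # placeholder version string

def vcd_request(method, url, token, data=None):
    headers = {
        "Accept": "application/*+xml;version=" + API_VERSION,
        "x-vcloud-authorization": token,
    }
    # one call covers the GET/PUT/POST/DELETE dispatch done explicitly above
    return requests.request(method, url, headers=headers, data=data, verify=False)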
def get_token(self):
- """ Generate a new token if expired
+ """Generate a new token if expired
- Returns:
- The return client object that letter can be used to connect to vCloud director as admin for VDC
+ Returns:
+ The returned client object, which can later be used to connect to vCloud Director as admin for the VDC
"""
self.client = self.connect()
def get_vdc_details(self):
- """ Get VDC details using pyVcloud Lib
+ """Get VDC details using pyVcloud Lib
- Returns org and vdc object
+ Returns org and vdc object
"""
vdc = None
+
try:
org = Org(self.client, resource=self.client.get_org())
vdc = org.get_vdc(self.tenant_name)
def perform_request(self, req_type, url, headers=None, data=None):
"""Perform the POST/PUT/GET/DELETE request."""
-
# Log REST request details
self.log_request(req_type, url=url, headers=headers, data=data)
# perform request and return its result
- if req_type == 'GET':
- response = requests.get(url=url,
- headers=headers,
- verify=False)
- elif req_type == 'PUT':
- response = requests.put(url=url,
- headers=headers,
- data=data,
- verify=False)
- elif req_type == 'POST':
- response = requests.post(url=url,
- headers=headers,
- data=data,
- verify=False)
- elif req_type == 'DELETE':
- response = requests.delete(url=url,
- headers=headers,
- verify=False)
+
+ if req_type == "GET":
+ response = requests.get(url=url, headers=headers, verify=False)
+ elif req_type == "PUT":
+ response = requests.put(url=url, headers=headers, data=data, verify=False)
+ elif req_type == "POST":
+ response = requests.post(url=url, headers=headers, data=data, verify=False)
+ elif req_type == "DELETE":
+ response = requests.delete(url=url, headers=headers, verify=False)
+
# Log the REST response
self.log_response(response)
if headers is not None:
for header in headers:
- self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+ self.logger.debug(
+ "Request header: {}: {}".format(header, headers[header])
+ )
if data is not None:
self.logger.debug("Request data: {}".format(data))
return task object
"""
xmlroot = XmlElementTree.fromstring(content)
- if xmlroot.tag.split('}')[1] == "Task":
+
+ if xmlroot.tag.split("}")[1] == "Task":
return xmlroot
else:
for ele in xmlroot:
if ele.tag.split("}")[1] == "Tasks":
task = ele[0]
break
+
return task
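# Illustrative sketch (not part of this patch): how the tag.split("}")[1] checks above
# strip the XML namespace when looking for the Task element; the XML string is a
# hand-made minimal sample, not real vCloud Director output.
import xml.etree.ElementTree as XmlElementTree

sample = (
    '<VApp xmlns="http://www.vmware.com/vcloud/v1.5">'
    '<Tasks><Task status="running"/></Tasks></VApp>'
)
xmlroot = XmlElementTree.fromstring(sample)

for ele in xmlroot:
    if ele.tag.split("}")[1] == "Tasks":
        task = ele[0]
        print(task.tag.split("}")[1], task.get("status"))  # -> Task running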
def power_on_vapp(self, vapp_id, vapp_name):
vapp_name - vApp name
return - Task object
"""
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
- poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
- vapp_id)
- response = self.perform_request(req_type='POST',
- url=poweron_href,
- headers=headers)
+ poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
+ self.url, vapp_id
+ )
+ response = self.perform_request(
+ req_type="POST", url=poweron_href, headers=headers
+ )
if response.status_code != 202:
- self.logger.error("REST call {} failed reason : {}"
- "status code : {} ".format(poweron_href,
- response.text,
- response.status_code))
- raise vimconn.VimConnException("power_on_vapp() : Failed to power on "
- "vApp {}".format(vapp_name))
+ self.logger.error(
+ "REST call {} failed reason : {} "
+ "status code : {} ".format(
+ poweron_href, response.text, response.status_code
+ )
+ )
+
+ raise vimconn.VimConnException(
+ "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
+ )
else:
poweron_task = self.get_task_from_response(response.text)
+
return poweron_task
setup(
name=_name,
- description='OSM ro vim plugin for vmware',
+ description="OSM ro vim plugin for vmware",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='OSM_TECH@LIST.ETSI.ORG',
- maintainer='ETSI OSM',
- maintainer_email='OSM_TECH@LIST.ETSI.ORG',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="OSM_TECH@LIST.ETSI.ORG",
+ maintainer="ETSI OSM",
+ maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
- "pyvcloud==19.1.1", "progressbar", "prettytable", "pyvmomi",
- "requests", "netaddr", "PyYAML",
- "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin"
+ "pyvcloud==19.1.1",
+ "progressbar",
+ "prettytable",
+ "pyvmomi",
+ "requests",
+ "netaddr",
+ "PyYAML",
+ "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_rovim.plugins': ['rovim_vmware = osm_rovim_vmware.vimconn_vmware:vimconnector'],
+ "osm_rovim.plugins": [
+ "rovim_vmware = osm_rovim_vmware.vimconn_vmware:vimconnector"
+ ],
},
)
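# Illustrative sketch (not part of this patch): how the "osm_rovim.plugins" entry point
# registered above can be discovered once the package is installed; the RO engine's real
# loader is not shown in this patch, this only demonstrates the mechanism.
from pkg_resources import iter_entry_points

for ep in iter_entry_points(group="osm_rovim.plugins"):
    print(ep.name)               # e.g. rovim_vmware
    connector_class = ep.load()  # the vimconnector class named in entry_points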
basepython = python3
deps = flake8
commands = flake8 osm_rovim_vmware --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3
class OpenflowConnException(Exception):
"""Common and base class Exception for all vimconnector exceptions"""
+
def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST.value):
Exception.__init__(self, message)
self.http_code = http_code
class OpenflowConnConnectionException(OpenflowConnException):
"""Connectivity error with the VIM"""
+
def __init__(self, message, http_code=HTTPStatus.SERVICE_UNAVAILABLE.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnUnexpectedResponse(OpenflowConnException):
"""Get an wrong response from VIM"""
+
def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnAuthException(OpenflowConnException):
"""Invalid credentials or authorization to perform this action over the VIM"""
+
def __init__(self, message, http_code=HTTPStatus.UNAUTHORIZED.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnNotFoundException(OpenflowConnException):
"""The item is not found at VIM"""
+
def __init__(self, message, http_code=HTTPStatus.NOT_FOUND.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnConflictException(OpenflowConnException):
"""There is a conflict, e.g. more item found than one"""
+
def __init__(self, message, http_code=HTTPStatus.CONFLICT.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnNotSupportedException(OpenflowConnException):
"""The request is not supported by connector"""
+
def __init__(self, message, http_code=HTTPStatus.SERVICE_UNAVAILABLE.value):
OpenflowConnException.__init__(self, message, http_code)
class OpenflowConnNotImplemented(OpenflowConnException):
"""The method is not implemented by the connected"""
+
def __init__(self, message, http_code=HTTPStatus.NOT_IMPLEMENTED.value):
OpenflowConnException.__init__(self, message, http_code)
"""
Openflow controller connector abstract implementation.
"""
+
def __init__(self, params):
self.name = "openflow_conector"
self.pp2ofi = {} # From Physical Port to OpenFlow Index
self.ofi2pp = {} # From OpenFlow Index to Physical Port
- self.logger = logging.getLogger('ro.sdn.openflow_conn')
+ self.logger = logging.getLogger("ro.sdn.openflow_conn")
def get_of_switches(self):
- """"
+ """
Obtain a list of switches or DPIDs detected by this controller
:return: list length, and a list where each element a tuple pair (DPID, IP address), text_error: if fails
"""
raise OpenflowConnNotImplemented("Should have implemented this")
def clear_all_flows(self):
- """"
+ """
Delete all existing rules
:return: None if ok, text_error if fails
"""
"""
This class is the base engine of SDN plugins base on openflow rules
"""
- flow_fields = ('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id')
+
+ flow_fields = (
+ "priority",
+ "vlan",
+ "ingress_port",
+ "actions",
+ "dst_mac",
+ "src_mac",
+ "net_id",
+ )
def __init__(self, wim, wim_account, config=None, logger=None, of_connector=None):
- self.logger = logger or logging.getLogger('ro.sdn.openflow_conn')
+ self.logger = logger or logging.getLogger("ro.sdn.openflow_conn")
self.of_connector = of_connector
config = config or {}
- self.of_controller_nets_with_same_vlan = config.get("of_controller_nets_with_same_vlan", False)
+ self.of_controller_nets_with_same_vlan = config.get(
+ "of_controller_nets_with_same_vlan", False
+ )
def check_credentials(self):
try:
def create_connectivity_service(self, service_type, connection_points, **kwargs):
net_id = str(uuid4())
ports = []
+
for cp in connection_points:
port = {
"uuid": cp["service_endpoint_id"],
"vlan": cp.get("service_endpoint_encapsulation_info", {}).get("vlan"),
"mac": cp.get("service_endpoint_encapsulation_info", {}).get("mac"),
- "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get("switch_port"),
+ "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get(
+ "switch_port"
+ ),
}
ports.append(port)
+
try:
- created_items = self._set_openflow_rules(service_type, net_id, ports, created_items=None)
+ created_items = self._set_openflow_rules(
+ service_type, net_id, ports, created_items=None
+ )
+
return net_id, created_items
except (SdnConnectorError, OpenflowConnException) as e:
raise SdnConnectorError(e, http_code=e.http_code)
try:
service_type = "ELAN"
ports = []
- self._set_openflow_rules(service_type, service_uuid, ports, created_items=conn_info)
+ self._set_openflow_rules(
+ service_type, service_uuid, ports, created_items=conn_info
+ )
+
return None
except (SdnConnectorError, OpenflowConnException) as e:
raise SdnConnectorError(e, http_code=e.http_code)
- def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
ports = []
for cp in connection_points:
port = {
"uuid": cp["service_endpoint_id"],
"vlan": cp.get("service_endpoint_encapsulation_info", {}).get("vlan"),
"mac": cp.get("service_endpoint_encapsulation_info", {}).get("mac"),
- "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get("switch_port"),
+ "switch_port": cp.get("service_endpoint_encapsulation_info", {}).get(
+ "switch_port"
+ ),
}
ports.append(port)
+
service_type = "ELAN" # TODO. Store at conn_info for later use
+
try:
- created_items = self._set_openflow_rules(service_type, service_uuid, ports, created_items=conn_info)
+ created_items = self._set_openflow_rules(
+ service_type, service_uuid, ports, created_items=conn_info
+ )
+
return created_items
except (SdnConnectorError, OpenflowConnException) as e:
raise SdnConnectorError(e, http_code=e.http_code)
def _set_openflow_rules(self, net_type, net_id, ports, created_items=None):
ifaces_nb = len(ports)
+
if not created_items:
- created_items = {"status": None, "error_msg": None, "installed_rules_ids": []}
+ created_items = {
+ "status": None,
+ "error_msg": None,
+ "installed_rules_ids": [],
+ }
rules_to_delete = created_items.get("installed_rules_ids") or []
new_installed_rules_ids = []
error_list = []
step = "Checking ports and network type compatibility"
if ifaces_nb < 2:
pass
- elif net_type == 'ELINE':
+ elif net_type == "ELINE":
if ifaces_nb > 2:
- raise SdnConnectorError("'ELINE' type network cannot connect {} interfaces, only 2".format(
- ifaces_nb))
- elif net_type == 'ELAN':
+ raise SdnConnectorError(
+ "'ELINE' type network cannot connect {} interfaces, only 2".format(
+ ifaces_nb
+ )
+ )
+ elif net_type == "ELAN":
if ifaces_nb > 2 and self.of_controller_nets_with_same_vlan:
# check all ports are VLAN (tagged) or none
vlan_tags = []
+
for port in ports:
if port["vlan"] not in vlan_tags:
vlan_tags.append(port["vlan"])
+
if len(vlan_tags) > 1:
- raise SdnConnectorError("This pluging cannot connect ports with diferent VLAN tags when flag "
- "'of_controller_nets_with_same_vlan' is active")
+ raise SdnConnectorError(
+ "This plugin cannot connect ports with different VLAN tags when flag "
+ "'of_controller_nets_with_same_vlan' is active"
+ )
else:
- raise SdnConnectorError('Only ELINE or ELAN network types are supported for openflow')
+ raise SdnConnectorError(
+ "Only ELINE or ELAN network types are supported for openflow"
+ )
# Get the existing flows at openflow controller
step = "Getting installed openflow rules"
for flow in new_flows:
# 1 check if an equal flow is already present
index = self._check_flow_already_present(flow, existing_flows)
+
if index >= 0:
flow_id = existing_flows[index]["name"]
self.logger.debug("Skipping already present flow %s", str(flow))
else:
# 2 look for a non used name
flow_name = flow["net_id"] + "." + str(name_index)
+
while flow_name in existing_flows_ids:
name_index += 1
flow_name = flow["net_id"] + "." + str(name_index)
- flow['name'] = flow_name
+
+ flow["name"] = flow_name
+
# 3 insert at openflow
try:
self.of_connector.new_flow(flow)
existing_flows_ids.append(flow_id)
except OpenflowConnException as e:
flow_id = None
- error_list.append("Cannot create rule for ingress_port={}, dst_mac={}: {}"
- .format(flow["ingress_port"], flow["dst_mac"], e))
+ error_list.append(
+ "Cannot create rule for ingress_port={}, dst_mac={}: {}".format(
+ flow["ingress_port"], flow["dst_mac"], e
+ )
+ )
# 4 insert at database
if flow_id:
error_text = "Cannot remove rule '{}': {}".format(flow_id, e)
error_list.append(error_text)
self.logger.error(error_text)
+
created_items["installed_rules_ids"] = new_installed_rules_ids
+
if error_list:
created_items["error_msg"] = ";".join(error_list)[:1000]
created_items["error_msg"] = "ERROR"
else:
created_items["error_msg"] = None
created_items["status"] = "ACTIVE"
+
return created_items
except (SdnConnectorError, OpenflowConnException) as e:
raise SdnConnectorError("Error while {}: {}".format(step, e)) from e
# Check switch_port information is right
for port in ports:
nb_ports += 1
- if str(port['switch_port']) not in self.of_connector.pp2ofi:
- raise SdnConnectorError("switch port name '{}' is not valid for the openflow controller".
- format(port['switch_port']))
+
+ if str(port["switch_port"]) not in self.of_connector.pp2ofi:
+ raise SdnConnectorError(
+ "switch port name '{}' is not valid for the openflow controller".format(
+ port["switch_port"]
+ )
+ )
+
priority = 1000 # 1100
for src_port in ports:
# if src_port.get("groups")
- vlan_in = src_port['vlan']
+ vlan_in = src_port["vlan"]
# BROADCAST:
- broadcast_key = src_port['uuid'] + "." + str(vlan_in)
+ broadcast_key = src_port["uuid"] + "." + str(vlan_in)
if broadcast_key in new_broadcast_flows:
flow_broadcast = new_broadcast_flows[broadcast_key]
else:
- flow_broadcast = {'priority': priority,
- 'net_id': net_id,
- 'dst_mac': 'ff:ff:ff:ff:ff:ff',
- "ingress_port": str(src_port['switch_port']),
- 'vlan_id': vlan_in,
- 'actions': []
- }
+ flow_broadcast = {
+ "priority": priority,
+ "net_id": net_id,
+ "dst_mac": "ff:ff:ff:ff:ff:ff",
+ "ingress_port": str(src_port["switch_port"]),
+ "vlan_id": vlan_in,
+ "actions": [],
+ }
new_broadcast_flows[broadcast_key] = flow_broadcast
+
if vlan_in is not None:
- flow_broadcast['vlan_id'] = str(vlan_in)
+ flow_broadcast["vlan_id"] = str(vlan_in)
for dst_port in ports:
- vlan_out = dst_port['vlan']
- if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out:
+ vlan_out = dst_port["vlan"]
+
+ if (
+ src_port["switch_port"] == dst_port["switch_port"]
+ and vlan_in == vlan_out
+ ):
continue
+
flow = {
"priority": priority,
- 'net_id': net_id,
- "ingress_port": str(src_port['switch_port']),
- 'vlan_id': vlan_in,
- 'actions': []
+ "net_id": net_id,
+ "ingress_port": str(src_port["switch_port"]),
+ "vlan_id": vlan_in,
+ "actions": [],
}
+
# allow that one port have no mac
- if dst_port['mac'] is None or nb_ports == 2: # point to point or nets with 2 elements
- flow['priority'] = priority - 5 # less priority
+ # point to point or nets with 2 elements
+ if dst_port["mac"] is None or nb_ports == 2:
+ flow["priority"] = priority - 5 # less priority
else:
- flow['dst_mac'] = str(dst_port['mac'])
+ flow["dst_mac"] = str(dst_port["mac"])
if vlan_out is None:
if vlan_in:
- flow['actions'].append(('vlan', None))
+ flow["actions"].append(("vlan", None))
else:
- flow['actions'].append(('vlan', vlan_out))
- flow['actions'].append(('out', str(dst_port['switch_port'])))
+ flow["actions"].append(("vlan", vlan_out))
+
+ flow["actions"].append(("out", str(dst_port["switch_port"])))
if self._check_flow_already_present(flow, new_flows) >= 0:
self.logger.debug("Skipping repeated flow '%s'", str(flow))
new_flows.append(flow)
# BROADCAST:
- if nb_ports <= 2: # point to multipoint or nets with more than 2 elements
+ # point to multipoint or nets with more than 2 elements
+ if nb_ports <= 2:
continue
- out = (vlan_out, str(dst_port['switch_port']))
- if out not in flow_broadcast['actions']:
- flow_broadcast['actions'].append(out)
+
+ out = (vlan_out, str(dst_port["switch_port"]))
+
+ if out not in flow_broadcast["actions"]:
+ flow_broadcast["actions"].append(out)
# BROADCAST
for flow_broadcast in new_broadcast_flows.values():
- if len(flow_broadcast['actions']) == 0:
+ if len(flow_broadcast["actions"]) == 0:
continue # nothing to do, skip
- flow_broadcast['actions'].sort()
- if 'vlan_id' in flow_broadcast:
- previous_vlan = 0 # indicates that a packet contains a vlan, and the vlan
+
+ flow_broadcast["actions"].sort()
+
+ if "vlan_id" in flow_broadcast:
+ # 0 indicates the incoming packet carries a VLAN tag, so the first output needs a vlan action
+ previous_vlan = 0
else:
previous_vlan = None
+
final_actions = []
action_number = 0
- for action in flow_broadcast['actions']:
+
+ for action in flow_broadcast["actions"]:
if action[0] != previous_vlan:
- final_actions.append(('vlan', action[0]))
+ final_actions.append(("vlan", action[0]))
previous_vlan = action[0]
+
if self.of_controller_nets_with_same_vlan and action_number:
- raise SdnConnectorError("Cannot interconnect different vlan tags in a network when flag "
- "'of_controller_nets_with_same_vlan' is True.")
+ raise SdnConnectorError(
+ "Cannot interconnect different vlan tags in a network when flag "
+ "'of_controller_nets_with_same_vlan' is True."
+ )
+
action_number += 1
- final_actions.append(('out', action[1]))
- flow_broadcast['actions'] = final_actions
+ final_actions.append(("out", action[1]))
+ flow_broadcast["actions"] = final_actions
if self._check_flow_already_present(flow_broadcast, new_flows) >= 0:
self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast))
# UNIFY openflow rules with the same input port and vlan and the same output actions
# These flows differ at the dst_mac; and they are unified by not filtering by dst_mac
# this can happen if there is only two ports. It is converted to a point to point connection
- flow_dict = {} # use as key vlan_id+ingress_port and as value the list of flows matching these values
+ # use as key vlan_id+ingress_port and as value the list of flows matching these values
+ flow_dict = {}
for flow in new_flows:
key = str(flow.get("vlan_id")) + ":" + flow["ingress_port"]
+
if key in flow_dict:
flow_dict[key].append(flow)
else:
flow_dict[key] = [flow]
+
new_flows2 = []
+
for flow_list in flow_dict.values():
convert2ptp = False
+
if len(flow_list) >= 2:
convert2ptp = True
+
for f in flow_list:
- if f['actions'] != flow_list[0]['actions']:
+ if f["actions"] != flow_list[0]["actions"]:
convert2ptp = False
break
+
if convert2ptp: # add only one unified rule without dst_mac
- self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list))
- flow_list[0].pop('dst_mac')
+ self.logger.debug(
+ "Convert flow rules to NON mac dst_address " + str(flow_list)
+ )
+ flow_list[0].pop("dst_mac")
flow_list[0]["priority"] -= 5
new_flows2.append(flow_list[0])
else: # add all the rules
new_flows2 += flow_list
+
return new_flows2
def _check_flow_already_present(self, new_flow, flow_list):
- '''check if the same flow is already present in the flow list
+ """check if the same flow is already present in the flow list
The flow is repeated if all the fields, apart from name, are equal
- Return the index of matching flow, -1 if not match'''
+ Return the index of the matching flow, or -1 if there is no match
+ """
for index, flow in enumerate(flow_list):
for f in self.flow_fields:
if flow.get(f) != new_flow.get(f):
break
else:
return index
+
return -1
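# Illustrative sketch (not part of this patch): the for/else comparison used by
# _check_flow_already_present() above, exercised on two hand-made flow dicts.
FLOW_FIELDS = ("priority", "vlan", "ingress_port", "actions", "dst_mac", "src_mac", "net_id")

def find_flow(new_flow, flow_list):
    for index, flow in enumerate(flow_list):
        for f in FLOW_FIELDS:
            if flow.get(f) != new_flow.get(f):
                break
        else:  # no field differed, so the flow is already present (the name is ignored)
            return index
    return -1

existing = [{"name": "net1.0", "priority": 1000, "ingress_port": "1", "actions": [("out", "2")]}]
candidate = {"priority": 1000, "ingress_port": "1", "actions": [("out", "2")]}
print(find_flow(candidate, existing))  # -> 0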
from uuid import uuid4
from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
from http import HTTPStatus
+
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
The arguments of the constructor are converted to object attributes.
An extra property, ``service_endpoint_mapping`` is created from ``config``.
"""
+
def __init__(self, wim, wim_account, config=None, logger=None):
- self.logger = logger or logging.getLogger('ro.sdn.dummy')
+ self.logger = logger or logging.getLogger("ro.sdn.dummy")
super(SdnDummyConnector, self).__init__(wim, wim_account, config, self.logger)
self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
self.connections = {}
external URLs, etc are detected.
"""
self.logger.debug("check_credentials")
+
return None
def get_connectivity_service_status(self, service_uuid, conn_info=None):
keys can be used to provide additional status explanation or
new information available for the connectivity service.
"""
- self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
- conn_info))
- return {'sdn_status': 'ACTIVE', 'sdn_info': self.connections.get(service_uuid)}
-
- def create_connectivity_service(self, service_type, connection_points,
- **kwargs):
- """
- Stablish WAN connectivity between the endpoints
-
- """
- self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
- format(service_type, connection_points, kwargs))
+ self.logger.debug(
+ "get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(
+ service_uuid, conn_info
+ )
+ )
+
+ return {"sdn_status": "ACTIVE", "sdn_info": self.connections.get(service_uuid)}
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """Establish WAN connectivity between the endpoints"""
+ self.logger.debug(
+ "create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".format(
+ service_type, connection_points, kwargs
+ )
+ )
_id = str(uuid4())
self.connections[_id] = connection_points.copy()
self.counter += 1
+
return _id, None
def delete_connectivity_service(self, service_uuid, conn_info=None):
- """Disconnect multi-site endpoints previously connected
+ """Disconnect multi-site endpoints previously connected"""
+ self.logger.debug(
+ "delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(
+ service_uuid, conn_info
+ )
+ )
- """
- self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
- conn_info))
if service_uuid not in self.connections:
- raise SdnConnectorError("connectivity {} not found".format(service_uuid),
- http_code=HTTPStatus.NOT_FOUND.value)
+ raise SdnConnectorError(
+ "connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value,
+ )
+
self.connections.pop(service_uuid, None)
+
return None
- def edit_connectivity_service(self, service_uuid, conn_info=None,
- connection_points=None, **kwargs):
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
"""Change an existing connectivity service.
This method's arguments and return value follow the same convention as
:meth:`~.create_connectivity_service`.
"""
- self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
- "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
+ self.logger.debug(
+ "edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
+ "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs)
+ )
+
if service_uuid not in self.connections:
- raise SdnConnectorError("connectivity {} not found".format(service_uuid),
- http_code=HTTPStatus.NOT_FOUND.value)
+ raise SdnConnectorError(
+ "connectivity {} not found".format(service_uuid),
+ http_code=HTTPStatus.NOT_FOUND.value,
+ )
+
self.connections[service_uuid] = connection_points.copy()
+
return None
def clear_all_connectivity_services(self):
"""
self.logger.debug("clear_all_connectivity_services")
self.connections.clear()
+
return None
def get_all_active_connectivity_services(self):
SdnConnectorException: In case of error.
"""
self.logger.debug("get_all_active_connectivity_services")
+
return self.connections
This way we can make sure that all the other parts of the program will work
but the user will have all the information available to fix the problem.
"""
+
def __init__(self, error_msg):
self.error_msg = error_msg
raise Exception(self.error_msg)
def check_credentials(self):
- raise SdnConnectorError('Impossible to use WIM:\n' + self.error_msg)
+ raise SdnConnectorError("Impossible to use WIM:\n" + self.error_msg)
def get_connectivity_service_status(self, service_uuid, _conn_info=None):
- raise SdnConnectorError('Impossible to retrieve status for {}: {}'
- .format(service_uuid, self.error_msg))
+ raise SdnConnectorError(
+ "Impossible to retrieve status for {}: {}".format(
+ service_uuid, self.error_msg
+ )
+ )
def create_connectivity_service(self, service_uuid, *args, **kwargs):
- raise SdnConnectorError('Impossible to create connectivity: {}'
- .format(self.error_msg))
+ raise SdnConnectorError(
+ "Impossible to create connectivity: {}".format(self.error_msg)
+ )
def delete_connectivity_service(self, service_uuid, _conn_info=None):
- raise SdnConnectorError('Impossible to delete {}: {}'
- .format(service_uuid, self.error_msg))
+ raise SdnConnectorError(
+ "Impossible to delete {}: {}".format(service_uuid, self.error_msg)
+ )
def edit_connectivity_service(self, service_uuid, *args, **kwargs):
- raise SdnConnectorError('Impossible to change connection {}: {}'
- .format(service_uuid, self.error_msg))
+ raise SdnConnectorError(
+ "Impossible to change connection {}: {}".format(
+ service_uuid, self.error_msg
+ )
+ )
def clear_all_connectivity_services(self):
- raise SdnConnectorError('Impossible to use WIM: {}'.format(self.error_msg))
+ raise SdnConnectorError("Impossible to use WIM: {}".format(self.error_msg))
def get_all_active_connectivity_services(self):
- raise SdnConnectorError('Impossible to use WIM: {}'.format(self.error_msg))
+ raise SdnConnectorError("Impossible to use WIM: {}".format(self.error_msg))
It receives information from the ports to be connected.
"""
+
import logging
from http import HTTPStatus
class SdnConnectorError(Exception):
"""Base Exception for all connector related errors
- provide the parameter 'http_code' (int) with the error code:
- Bad_Request = 400
- Unauthorized = 401 (e.g. credentials are not valid)
- Not_Found = 404 (e.g. try to edit or delete a non existing connectivity service)
- Forbidden = 403
- Method_Not_Allowed = 405
- Not_Acceptable = 406
- Request_Timeout = 408 (e.g timeout reaching server, or cannot reach the server)
- Conflict = 409
- Service_Unavailable = 503
- Internal_Server_Error = 500
+ provide the parameter 'http_code' (int) with the error code:
+ Bad_Request = 400
+ Unauthorized = 401 (e.g. credentials are not valid)
+ Not_Found = 404 (e.g. try to edit or delete a non existing connectivity service)
+ Forbidden = 403
+ Method_Not_Allowed = 405
+ Not_Acceptable = 406
+ Request_Timeout = 408 (e.g. timeout reaching the server, or the server cannot be reached)
+ Conflict = 409
+ Service_Unavailable = 503
+ Internal_Server_Error = 500
"""
+
def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
Exception.__init__(self, message)
self.http_code = http_code
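# Illustrative sketch (not part of this patch): raising SdnConnectorError with one of the
# HTTP codes listed in the docstring above and reading it back; the import path matches
# the "from osm_ro_plugin.sdnconn import ..." line used elsewhere in this patch.
from http import HTTPStatus
from osm_ro_plugin.sdnconn import SdnConnectorError

try:
    raise SdnConnectorError("connectivity abc not found", http_code=HTTPStatus.NOT_FOUND.value)
except SdnConnectorError as e:
    print(e, e.http_code)  # -> connectivity abc not found 404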
The arguments of the constructor are converted to object attributes.
An extra property, ``service_endpoint_mapping`` is created from ``config``.
"""
+
def __init__(self, wim, wim_account, config=None, logger=None):
"""
-
:param wim: (dict). Contains among others 'wim_url'
:param wim_account: (dict). Contains among others 'uuid' (internal id), 'name',
'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'.
:param config: (dict or None): Particular information of plugin. These keys if present have a common meaning:
'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed.
'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is:
- KEY meaning for WIM meaning for SDN assist
+ KEY meaning for WIM meaning for SDN assist
-------- -------- --------
- device_id pop_switch_dpid compute_id
- device_interface_id pop_switch_port compute_pci_address
- service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id
- service_mapping_info wan_service_mapping_info SDN_service_mapping_info
+ device_id pop_switch_dpid compute_id
+ device_interface_id pop_switch_port compute_pci_address
+ service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id
+ service_mapping_info wan_service_mapping_info SDN_service_mapping_info
contains extra information if needed. Text in Yaml format
- switch_dpid wan_switch_dpid SDN_switch_dpid
- switch_port wan_switch_port SDN_switch_port
+ switch_dpid wan_switch_dpid SDN_switch_dpid
+ switch_port wan_switch_port SDN_switch_port
datacenter_id vim_account vim_account
- id: (internal, do not use)
- wim_id: (internal, do not use)
+ id: (internal, do not use)
+ wim_id: (internal, do not use)
:param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used.
"""
- self.logger = logger or logging.getLogger('ro.sdn')
-
+ self.logger = logger or logging.getLogger("ro.sdn")
self.wim = wim
self.wim_account = wim_account
self.config = config or {}
- self.service_endpoint_mapping = (
- self.config.get('service_endpoint_mapping', []))
+ self.service_endpoint_mapping = self.config.get("service_endpoint_mapping", [])
def check_credentials(self):
"""Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url),
def create_connectivity_service(self, service_type, connection_points, **kwargs):
"""
- Stablish SDN/WAN connectivity between the endpoints
+ Establish SDN/WAN connectivity between the endpoints
:param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
:param connection_points: (list): each point corresponds to
an entry point to be connected. For WIM: from the DC to the transport network.
"""
raise NotImplementedError
- def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs):
- """ Change an existing connectivity service.
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service.
This method's arguments and return value follow the same convention as
:meth:`~.create_connectivity_service`.
vm_ip: ip address to provide at VM creation. For some tests must be a valid reachable VM
ssh_key: private ssh key to use for inserting an authorized ssh key
"""
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
- config={}, persistent_info={}):
- super().__init__(uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
- config, persistent_info)
- self.logger = logging.getLogger('ro.vim.dummy')
+
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
+ super().__init__(
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin,
+ user,
+ passwd,
+ log_level,
+ config,
+ persistent_info,
+ )
+ self.logger = logging.getLogger("ro.vim.dummy")
+
if log_level:
self.logger.setLevel(getattr(logging, log_level))
+
self.nets = {
"mgmt": {
"id": "mgmt",
"name": "mgmt",
"status": "ACTIVE",
- "vim_info": '{status: ACTIVE}'
+ "vim_info": "{status: ACTIVE}",
}
}
self.vms = {}
"90681b39-dc09-49b7-ba2e-2c00c6b33b76": {
"id": "90681b39-dc09-49b7-ba2e-2c00c6b33b76",
"name": "cirros034",
- "checksum": "ee1eca47dc88f4879d8a229cc70a07c6"
+ "checksum": "ee1eca47dc88f4879d8a229cc70a07c6",
},
"83a39656-65db-47dc-af03-b55289115a53": {
"id": "",
"name": "cirros040",
- "checksum": "443b7623e27ecf03dc9e01ee93f67afe"
+ "checksum": "443b7623e27ecf03dc9e01ee93f67afe",
},
"208314f2-8eb6-4101-965d-fe2ffbaedf3c": {
"id": "208314f2-8eb6-4101-965d-fe2ffbaedf3c",
"name": "ubuntu18.04",
- "checksum": "b6fc7b9b91bca32e989e1edbcdeecb95"
+ "checksum": "b6fc7b9b91bca32e989e1edbcdeecb95",
},
"c03321f8-4b6e-4045-a309-1b3878bd32c1": {
"id": "c03321f8-4b6e-4045-a309-1b3878bd32c1",
"name": "ubuntu16.04",
- "checksum": "8f08442faebad2d4a99fedb22fca11b5"
+ "checksum": "8f08442faebad2d4a99fedb22fca11b5",
},
"4f6399a2-3554-457e-916e-ada01f8b950b": {
"id": "4f6399a2-3554-457e-916e-ada01f8b950b",
"name": "ubuntu1604",
- "checksum": "8f08442faebad2d4a99fedb22fca11b5"
+ "checksum": "8f08442faebad2d4a99fedb22fca11b5",
},
"59ac0b79-5c7d-4e83-b517-4c6c6a8ac1d3": {
"id": "59ac0b79-5c7d-4e83-b517-4c6c6a8ac1d3",
"name": "hackfest3-mgmt",
- "checksum": "acec1e5d5ad7be9be7e6342a16bcf66a"
+ "checksum": "acec1e5d5ad7be9be7e6342a16bcf66a",
},
"f8818a03-f099-4c18-b1c7-26b1324203c1": {
"id": "f8818a03-f099-4c18-b1c7-26b1324203c1",
"name": "hackfest-pktgen",
- "checksum": "f8818a03-f099-4c18-b1c7-26b1324203c1"
+ "checksum": "f8818a03-f099-4c18-b1c7-26b1324203c1",
},
}
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
net_id = str(uuid4())
- self.logger.debug("new network id={}, name={}, net_type={}, ip_profile={}, provider_network_profile={}".
- format(net_id, net_name, net_type, ip_profile, provider_network_profile))
+ self.logger.debug(
+ "new network id={}, name={}, net_type={}, ip_profile={}, provider_network_profile={}".format(
+ net_id, net_name, net_type, ip_profile, provider_network_profile
+ )
+ )
net = {
"id": net_id,
"name": net_name,
"status": "ACTIVE",
}
self.nets[net_id] = net
+
return net_id, net
def get_network_list(self, filter_dict=None):
nets = []
+
for net_id, net in self.nets.items():
if filter_dict and filter_dict.get("name"):
if net["name"] != filter_dict.get("name"):
continue
+
if filter_dict and filter_dict.get("id"):
if net_id != filter_dict.get("id"):
continue
+
nets.append(net)
+
return nets
def get_network(self, net_id):
if net_id not in self.nets:
- raise vimconn.VimConnNotFoundException("network with id {} not found".format(net_id))
+ raise vimconn.VimConnNotFoundException(
+ "network with id {} not found".format(net_id)
+ )
+
return self.nets[net_id]
def delete_network(self, net_id, created_items=None):
if net_id not in self.nets:
- raise vimconn.VimConnNotFoundException("network with id {} not found".format(net_id))
- self.logger.debug("delete network id={}, created_items={}".format(net_id, created_items))
+ raise vimconn.VimConnNotFoundException(
+ "network with id {} not found".format(net_id)
+ )
+
+ self.logger.debug(
+ "delete network id={}, created_items={}".format(net_id, created_items)
+ )
self.nets.pop(net_id)
+
return net_id
def refresh_nets_status(self, net_list):
nets = {}
+
for net_id in net_list:
if net_id not in self.nets:
net = {"status": "DELETED"}
else:
net = self.nets[net_id].copy()
- net["vim_info"] = yaml.dump({"status": "ACTIVE", "name": net["name"]},
- default_flow_style=True, width=256)
+ net["vim_info"] = yaml.dump(
+ {"status": "ACTIVE", "name": net["name"]},
+ default_flow_style=True,
+ width=256,
+ )
+
nets[net_id] = net
return nets
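# Illustrative sketch (not part of this patch): the single-line "vim_info" string that
# refresh_nets_status() above attaches to an ACTIVE network.
import yaml

print(yaml.dump({"status": "ACTIVE", "name": "mgmt"}, default_flow_style=True, width=256))
# -> {name: mgmt, status: ACTIVE}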
def get_flavor(self, flavor_id):
if flavor_id not in self.flavors:
- raise vimconn.VimConnNotFoundException("flavor with id {} not found".format(flavor_id))
+ raise vimconn.VimConnNotFoundException(
+ "flavor with id {} not found".format(flavor_id)
+ )
+
return self.flavors[flavor_id]
def new_flavor(self, flavor_data):
flavor_id = str(uuid4())
- self.logger.debug("new flavor id={}, flavor_data={}".format(flavor_id, flavor_data))
+ self.logger.debug(
+ "new flavor id={}, flavor_data={}".format(flavor_id, flavor_data)
+ )
flavor = deepcopy(flavor_data)
flavor["id"] = flavor_id
+
if "name" not in flavor:
flavor["name"] = flavor_id
+
self.flavors[flavor_id] = flavor
+
return flavor_id
def delete_flavor(self, flavor_id):
if flavor_id not in self.flavors:
- raise vimconn.VimConnNotFoundException("flavor with id {} not found".format(flavor_id))
+ raise vimconn.VimConnNotFoundException(
+ "flavor with id {} not found".format(flavor_id)
+ )
+
self.logger.debug("delete flavor id={}".format(flavor_id))
self.flavors.pop(flavor_id)
+
return flavor_id
def get_flavor_id_from_data(self, flavor_dict):
break
else:
return flavor_id
- raise vimconn.VimConnNotFoundException("flavor with ram={} cpu={} disk={} {} not found".format(
- flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"],
- "and extended" if flavor_dict.get("extended") else ""))
+
+ raise vimconn.VimConnNotFoundException(
+ "flavor with ram={} cpu={} disk={} {} not found".format(
+ flavor_dict["ram"],
+ flavor_dict["vcpus"],
+ flavor_dict["disk"],
+ "and extended" if flavor_dict.get("extended") else "",
+ )
+ )
def new_tenant(self, tenant_name, tenant_description):
tenant_id = str(uuid4())
- self.logger.debug("new tenant id={}, description={}".format(tenant_id, tenant_description))
- tenant = {'name': tenant_name, 'description': tenant_description, 'id': tenant_id}
+ self.logger.debug(
+ "new tenant id={}, description={}".format(tenant_id, tenant_description)
+ )
+ tenant = {
+ "name": tenant_name,
+ "description": tenant_description,
+ "id": tenant_id,
+ }
self.tenants[tenant_id] = tenant
+
return tenant_id
def delete_tenant(self, tenant_id):
if tenant_id not in self.tenants:
- raise vimconn.VimConnNotFoundException("tenant with id {} not found".format(tenant_id))
+ raise vimconn.VimConnNotFoundException(
+ "tenant with id {} not found".format(tenant_id)
+ )
+
self.tenants.pop(tenant_id)
self.logger.debug("delete tenant id={}".format(tenant_id))
+
return tenant_id
def get_tenant_list(self, filter_dict=None):
tenants = []
+
for tenant_id, tenant in self.tenants.items():
if filter_dict and filter_dict.get("name"):
if tenant["name"] != filter_dict.get("name"):
continue
+
if filter_dict and filter_dict.get("id"):
if tenant_id != filter_dict.get("id"):
continue
+
tenants.append(tenant)
+
return tenants
def new_image(self, image_dict):
self.logger.debug("new image id={}, iamge_dict={}".format(image_id, image_dict))
image = deepcopy(image_dict)
image["id"] = image_id
+
if "name" not in image:
image["id"] = image_id
+
self.images[image_id] = image
+
return image_id
def delete_image(self, image_id):
if image_id not in self.images:
- raise vimconn.VimConnNotFoundException("image with id {} not found".format(image_id))
+ raise vimconn.VimConnNotFoundException(
+ "image with id {} not found".format(image_id)
+ )
+
self.logger.debug("delete image id={}".format(image_id))
self.images.pop(image_id)
+
return image_id
def get_image_list(self, filter_dict=None):
if filter_dict and filter_dict.get("name"):
if image["name"] != filter_dict.get("name"):
continue
+
if filter_dict and filter_dict.get("checksum"):
if image["checksum"] != filter_dict.get("checksum"):
continue
+
if filter_dict and filter_dict.get("id"):
if image_id != filter_dict.get("id"):
continue
+
images.append(image)
+
return images
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
vm_id = str(uuid4())
interfaces = []
- self.logger.debug("new vm id={}, name={}, image_id={}, flavor_id={}, net_list={}, cloud_config={}".
- format(vm_id, name, image_id, flavor_id, net_list, cloud_config))
+ self.logger.debug(
+ "new vm id={}, name={}, image_id={}, flavor_id={}, net_list={}, cloud_config={}".format(
+ vm_id, name, image_id, flavor_id, net_list, cloud_config
+ )
+ )
+
for iface_index, iface in enumerate(net_list):
iface["vim_id"] = str(iface_index)
interface = {
- "ip_address": iface.get("ip_address") or self.config.get("vm_ip") or "192.168.4.2",
- "mac_address": iface.get("mac_address") or self.config.get("vm_mac") or "00:11:22:33:44:55",
+ "ip_address": iface.get("ip_address")
+ or self.config.get("vm_ip")
+ or "192.168.4.2",
+ "mac_address": iface.get("mac_address")
+ or self.config.get("vm_mac")
+ or "00:11:22:33:44:55",
"vim_interface_id": str(iface_index),
"vim_net_id": iface["net_id"],
}
- if iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH") and self.config.get("sdn-port-mapping"):
+
+ if iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH") and self.config.get(
+ "sdn-port-mapping"
+ ):
compute_index = randrange(len(self.config["sdn-port-mapping"]))
- port_index = randrange(len(self.config["sdn-port-mapping"][compute_index]["ports"]))
- interface["compute_node"] = self.config["sdn-port-mapping"][compute_index]["compute_node"]
- interface["pci"] = self.config["sdn-port-mapping"][compute_index]["ports"][port_index]["pci"]
+ port_index = randrange(
+ len(self.config["sdn-port-mapping"][compute_index]["ports"])
+ )
+ interface["compute_node"] = self.config["sdn-port-mapping"][
+ compute_index
+ ]["compute_node"]
+ interface["pci"] = self.config["sdn-port-mapping"][compute_index][
+ "ports"
+ ][port_index]["pci"]
interfaces.append(interface)
+
vm = {
"id": vm_id,
"name": name,
"image_id": image_id,
"flavor_id": flavor_id,
}
+
if image_id not in self.images:
- self.logger.error("vm create, image_id '{}' not found. Skip".format(image_id))
+ self.logger.error(
+ "vm create, image_id '{}' not found. Skip".format(image_id)
+ )
+
if flavor_id not in self.flavors:
- self.logger.error("vm create flavor_id '{}' not found. Skip".format(flavor_id))
+ self.logger.error(
+ "vm create flavor_id '{}' not found. Skip".format(flavor_id)
+ )
+
self.vms[vm_id] = vm
+
return vm_id, vm
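# Illustrative sketch (not part of this patch): the "sdn-port-mapping" config shape that
# new_vminstance() above samples from for SR-IOV/PCI-PASSTHROUGH interfaces; the compute
# node name and PCI addresses are placeholders.
from random import randrange

config = {
    "sdn-port-mapping": [
        {
            "compute_node": "compute-0",
            "ports": [{"pci": "0000:81:00.0"}, {"pci": "0000:81:00.1"}],
        },
    ]
}
compute_index = randrange(len(config["sdn-port-mapping"]))
port_index = randrange(len(config["sdn-port-mapping"][compute_index]["ports"]))
print(config["sdn-port-mapping"][compute_index]["ports"][port_index]["pci"])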
def get_vminstance(self, vm_id):
if vm_id not in self.vms:
- raise vimconn.VimConnNotFoundException("vm with id {} not found".format(vm_id))
+ raise vimconn.VimConnNotFoundException(
+ "vm with id {} not found".format(vm_id)
+ )
+
return self.vms[vm_id]
def delete_vminstance(self, vm_id, created_items=None):
if vm_id not in self.vms:
- raise vimconn.VimConnNotFoundException("vm with id {} not found".format(vm_id))
+ raise vimconn.VimConnNotFoundException(
+ "vm with id {} not found".format(vm_id)
+ )
+
self.vms.pop(vm_id)
- self.logger.debug("delete vm id={}, created_items={}".format(vm_id, created_items))
+ self.logger.debug(
+ "delete vm id={}, created_items={}".format(vm_id, created_items)
+ )
+
return vm_id
def refresh_vms_status(self, vm_list):
vms = {}
+
for vm_id in vm_list:
if vm_id not in self.vms:
vm = {"status": "DELETED"}
else:
vm = deepcopy(self.vms[vm_id])
- vm["vim_info"] = yaml.dump({"status": "ACTIVE", "name": vm["name"]},
- default_flow_style=True, width=256)
+ vm["vim_info"] = yaml.dump(
+ {"status": "ACTIVE", "name": vm["name"]},
+ default_flow_style=True,
+ width=256,
+ )
+
vms[vm_id] = vm
+
return vms
def action_vminstance(self, vm_id, action_dict, created_items={}):
return None
- def inject_user_key(self, ip_addr=None, user=None, key=None, ro_key=None, password=None):
+ def inject_user_key(
+ self, ip_addr=None, user=None, key=None, ro_key=None, password=None
+ ):
if self.config.get("ssh_key"):
ro_key = self.config.get("ssh_key")
- return super().inject_user_key(ip_addr=ip_addr, user=user, key=key, ro_key=ro_key, password=password)
+
+ return super().inject_user_key(
+ ip_addr=ip_addr, user=user, key=key, ro_key=ro_key, password=password
+ )
def deprecated(message):
def deprecated_decorator(func):
def deprecated_func(*args, **kwargs):
- warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
- category=DeprecationWarning,
- stacklevel=2)
- warnings.simplefilter('default', DeprecationWarning)
+ warnings.warn(
+ "{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+ warnings.simplefilter("default", DeprecationWarning)
+
return func(*args, **kwargs)
+
return deprecated_func
+
return deprecated_decorator
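# Illustrative sketch (not part of this patch): applying the deprecated() decorator defined
# above to a throwaway function; the import path is assumed from this repository layout.
from osm_ro_plugin.vimconn import deprecated

@deprecated("this function is only an example")
def old_helper():
    return None

old_helper()  # emits a DeprecationWarning naming old_helper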
class VimConnException(Exception):
"""Common and base class Exception for all VimConnector exceptions"""
+
def __init__(self, message, http_code=HTTP_Bad_Request):
Exception.__init__(self, message)
self.http_code = http_code
class VimConnConnectionException(VimConnException):
"""Connectivity error with the VIM"""
+
def __init__(self, message, http_code=HTTP_Service_Unavailable):
VimConnException.__init__(self, message, http_code)
class VimConnUnexpectedResponse(VimConnException):
"""Get an wrong response from VIM"""
+
def __init__(self, message, http_code=HTTP_Service_Unavailable):
VimConnException.__init__(self, message, http_code)
class VimConnAuthException(VimConnException):
"""Invalid credentials or authorization to perform this action over the VIM"""
+
def __init__(self, message, http_code=HTTP_Unauthorized):
VimConnException.__init__(self, message, http_code)
class VimConnNotFoundException(VimConnException):
"""The item is not found at VIM"""
+
def __init__(self, message, http_code=HTTP_Not_Found):
VimConnException.__init__(self, message, http_code)
class VimConnConflictException(VimConnException):
"""There is a conflict, e.g. more item found than one"""
+
def __init__(self, message, http_code=HTTP_Conflict):
VimConnException.__init__(self, message, http_code)
class VimConnNotSupportedException(VimConnException):
"""The request is not supported by connector"""
+
def __init__(self, message, http_code=HTTP_Service_Unavailable):
VimConnException.__init__(self, message, http_code)
class VimConnNotImplemented(VimConnException):
"""The method is not implemented by the connected"""
+
def __init__(self, message, http_code=HTTP_Not_Implemented):
VimConnException.__init__(self, message, http_code)
-class VimConnector():
+class VimConnector:
"""Abstract base class for all the VIM connector plugins
These plugins must implement a VimConnector class derived from this
and all these private methods
"""
- def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
- config={}, persistent_info={}):
+
+ def __init__(
+ self,
+ uuid,
+ name,
+ tenant_id,
+ tenant_name,
+ url,
+ url_admin=None,
+ user=None,
+ passwd=None,
+ log_level=None,
+ config={},
+ persistent_info={},
+ ):
"""
Constructor of VIM. Raise an exception if some needed parameter is missing, but it must not do any connectivity
checking against the VIM
self.passwd = passwd
self.config = config or {}
self.availability_zone = None
- self.logger = logging.getLogger('ro.vim')
+ self.logger = logging.getLogger("ro.vim")
+
if log_level:
self.logger.setLevel(getattr(logging, log_level))
- if not self.url_admin: # try to use normal url
+
+ if not self.url_admin: # try to use normal url
self.url_admin = self.url
def __getitem__(self, index):
- if index == 'tenant_id':
+ if index == "tenant_id":
return self.tenant_id
- if index == 'tenant_name':
+
+ if index == "tenant_name":
return self.tenant_name
- elif index == 'id':
+ elif index == "id":
return self.id
- elif index == 'name':
+ elif index == "name":
return self.name
- elif index == 'user':
+ elif index == "user":
return self.user
- elif index == 'passwd':
+ elif index == "passwd":
return self.passwd
- elif index == 'url':
+ elif index == "url":
return self.url
- elif index == 'url_admin':
+ elif index == "url_admin":
return self.url_admin
elif index == "config":
return self.config
raise KeyError("Invalid key '{}'".format(index))
def __setitem__(self, index, value):
- if index == 'tenant_id':
+ if index == "tenant_id":
self.tenant_id = value
- if index == 'tenant_name':
+
+ if index == "tenant_name":
self.tenant_name = value
- elif index == 'id':
+ elif index == "id":
self.id = value
- elif index == 'name':
+ elif index == "name":
self.name = value
- elif index == 'user':
+ elif index == "user":
self.user = value
- elif index == 'passwd':
+ elif index == "passwd":
self.passwd = value
- elif index == 'url':
+ elif index == "url":
self.url = value
- elif index == 'url_admin':
+ elif index == "url_admin":
self.url_admin = value
else:
raise KeyError("Invalid key '{}'".format(index))
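# Illustrative sketch (not part of this patch): the dict-style access provided by
# __getitem__/__setitem__ above; the import path is assumed from this repository layout
# and the URL is a placeholder.
from osm_ro_plugin.vimconn import VimConnector

vim = VimConnector(uuid="1", name="myvim", tenant_id=None, tenant_name="osm", url="http://vim.example")
print(vim["name"], vim["url"])  # -> myvim http://vim.example
vim["tenant_name"] = "osm2"     # routed through __setitem__
print(vim["tenant_name"])       # -> osm2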
return None
elif len(content_list) == 1:
return content_list[0]
+
combined_message = MIMEMultipart()
+
for content in content_list:
- if content.startswith('#include'):
- mime_format = 'text/x-include-url'
- elif content.startswith('#include-once'):
- mime_format = 'text/x-include-once-url'
- elif content.startswith('#!'):
- mime_format = 'text/x-shellscript'
- elif content.startswith('#cloud-config'):
- mime_format = 'text/cloud-config'
- elif content.startswith('#cloud-config-archive'):
- mime_format = 'text/cloud-config-archive'
- elif content.startswith('#upstart-job'):
- mime_format = 'text/upstart-job'
- elif content.startswith('#part-handler'):
- mime_format = 'text/part-handler'
- elif content.startswith('#cloud-boothook'):
- mime_format = 'text/cloud-boothook'
+ if content.startswith("#include"):
+ mime_format = "text/x-include-url"
+ elif content.startswith("#include-once"):
+ mime_format = "text/x-include-once-url"
+ elif content.startswith("#!"):
+ mime_format = "text/x-shellscript"
+ elif content.startswith("#cloud-config"):
+ mime_format = "text/cloud-config"
+ elif content.startswith("#cloud-config-archive"):
+ mime_format = "text/cloud-config-archive"
+ elif content.startswith("#upstart-job"):
+ mime_format = "text/upstart-job"
+ elif content.startswith("#part-handler"):
+ mime_format = "text/part-handler"
+ elif content.startswith("#cloud-boothook"):
+ mime_format = "text/cloud-boothook"
else: # by default
- mime_format = 'text/x-shellscript'
+ mime_format = "text/x-shellscript"
+
sub_message = MIMEText(content, mime_format, sys.getdefaultencoding())
combined_message.attach(sub_message)
+
return combined_message.as_string()
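# Illustrative sketch (not part of this patch): the MIME assembly performed by
# _create_mimemultipart() above, run standalone on two hand-made user-data parts.
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

parts = ["#!/bin/sh\necho hello\n", "#cloud-config\nhostname: vnf1\n"]
combined = MIMEMultipart()

for content in parts:
    mime_format = "text/cloud-config" if content.startswith("#cloud-config") else "text/x-shellscript"
    combined.attach(MIMEText(content, mime_format, sys.getdefaultencoding()))

print(combined.as_string())  # the multipart string handed to the VIM as user-data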
def _create_user_data(self, cloud_config):
config_drive = None
userdata = None
userdata_list = []
+
if isinstance(cloud_config, dict):
if cloud_config.get("user-data"):
if isinstance(cloud_config["user-data"], str):
else:
for u in cloud_config["user-data"]:
userdata_list.append(u)
+
if cloud_config.get("boot-data-drive") is not None:
config_drive = cloud_config["boot-data-drive"]
- if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+
+ if (
+ cloud_config.get("config-files")
+ or cloud_config.get("users")
+ or cloud_config.get("key-pairs")
+ ):
userdata_dict = {}
+
# default user
if cloud_config.get("key-pairs"):
userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
- userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"]}]
+ userdata_dict["users"] = [
+ {
+ "default": None,
+ "ssh-authorized-keys": cloud_config["key-pairs"],
+ }
+ ]
+
if cloud_config.get("users"):
if "users" not in userdata_dict:
userdata_dict["users"] = ["default"]
+
for user in cloud_config["users"]:
user_info = {
"name": user["name"],
- "sudo": "ALL = (ALL)NOPASSWD:ALL"
+ "sudo": "ALL = (ALL)NOPASSWD:ALL",
}
+
if "user-info" in user:
user_info["gecos"] = user["user-info"]
+
if user.get("key-pairs"):
user_info["ssh-authorized-keys"] = user["key-pairs"]
+
userdata_dict["users"].append(user_info)
if cloud_config.get("config-files"):
userdata_dict["write_files"] = []
for file in cloud_config["config-files"]:
- file_info = {
- "path": file["dest"],
- "content": file["content"]
- }
+ file_info = {"path": file["dest"], "content": file["content"]}
+
if file.get("encoding"):
file_info["encoding"] = file["encoding"]
+
if file.get("permissions"):
file_info["permissions"] = file["permissions"]
+
if file.get("owner"):
file_info["owner"] = file["owner"]
+
userdata_dict["write_files"].append(file_info)
- userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
- default_flow_style=False))
+
+ userdata_list.append(
+ "#cloud-config\n"
+ + yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
+ )
userdata = self._create_mimemultipart(userdata_list)
self.logger.debug("userdata: %s", userdata)
elif isinstance(cloud_config, str):
userdata = cloud_config
+
return config_drive, userdata
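# Illustrative sketch (not part of this patch): the "#cloud-config" document that
# _create_user_data() above builds when cloud_config carries key-pairs; the key value
# is a placeholder.
import yaml

userdata_dict = {
    "ssh-authorized-keys": ["ssh-rsa AAAA... user@host"],
    "users": [{"default": None, "ssh-authorized-keys": ["ssh-rsa AAAA... user@host"]}],
}
print("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False))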
def check_vim_connectivity(self):
"""
raise VimConnNotImplemented("Should have implemented this")
- def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+ def new_network(
+ self,
+ net_name,
+ net_type,
+ ip_profile=None,
+ shared=False,
+ provider_network_profile=None,
+ ):
"""Adds a tenant network to VIM
Params:
'net_name': name of the network
disk: disk size
is_public:
#TODO to concrete
- Returns the flavor identifier"""
+ Returns the flavor identifier
+ """
raise VimConnNotImplemented("Should have implemented this")
def delete_flavor(self, flavor_id):
"""Deletes a tenant flavor from VIM identify by its id
- Returns the used id or raise an exception"""
+ Returns the used id or raise an exception
+ """
raise VimConnNotImplemented("Should have implemented this")
def new_image(self, image_dict):
- """ Adds a tenant image to VIM
+ """Adds a tenant image to VIM
Returns the image id or raises an exception if failed
"""
raise VimConnNotImplemented("Should have implemented this")
def delete_image(self, image_id):
"""Deletes a tenant image from VIM
- Returns the image_id if image is deleted or raises an exception on error"""
+ Returns the image_id if image is deleted or raises an exception on error
+ """
raise VimConnNotImplemented("Should have implemented this")
def get_image_id_from_path(self, path):
"""Get the image id from image path in the VIM database.
- Returns the image_id or raises a VimConnNotFoundException
+ Returns the image_id or raises a VimConnNotFoundException
"""
raise VimConnNotImplemented("Should have implemented this")
"""
raise VimConnNotImplemented("Should have implemented this")
- def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
- availability_zone_index=None, availability_zone_list=None):
+ def new_vminstance(
+ self,
+ name,
+ description,
+ start,
+ image_id,
+ flavor_id,
+ net_list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ):
"""Adds a VM instance to VIM
Params:
'start': (boolean) indicates if the VM must start or be created in pause mode.
'net_list': list of interfaces, each one is a dictionary with:
'name': (optional) name for the interface.
'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
- 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
+ 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
capabilities
'model': (optional and only makes sense for type==virtual) interface model: virtio, e1000, ...
'mac_address': (optional) mac address to assign to this interface
'ip_address': (optional) IP address to assign to this interface
#TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not
- provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
+ provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
for tagging VF
'type': (mandatory) can be one of:
'virtual', in this case always connected to a network of type 'net_type=bridge'
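As a sketch of the structure described above, one plausible net_list entry for a virtual interface, restricted to the keys documented in this docstring (all values are hypothetical):

# Hypothetical net_list entry for new_vminstance(); keys taken from the docstring above.
net_list = [
    {
        "name": "eth0",                      # optional interface name
        "net_id": "vim-network-uuid",        # mandatory for type == "virtual"
        "type": "virtual",                   # attached to a net_type=bridge network
        "model": "virtio",                   # optional, only meaningful for virtual
        "mac_address": "52:54:00:12:34:56",  # optional
        "ip_address": "10.0.0.10",           # optional
    }
]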
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # BUILD (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces: list with interface info. Each item a dictionary with:
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected, if provided at creation
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
- compute_node: #identification of compute node where PF,VF interface is allocated
- pci: #PCI address of the NIC that hosts the PF,VF
- vlan: #physical VLAN used for VF
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # BUILD (on building process), ERROR
+ # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces: list with interface info. Each item a dictionary with:
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected, if provided at creation
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
"""
raise VimConnNotImplemented("Should have implemented this")
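To make the documented structure concrete, a hedged sketch of what a concrete connector might return from refresh_vms_status(["vm-uuid-1"]); every value below is illustrative:

# Hypothetical return value shaped after the docstring above (values are made up).
vm_status = {
    "vm-uuid-1": {
        "status": "ACTIVE",
        "error_msg": None,
        "vim_info": "...",  # plain text obtained from the VIM, e.g. a yaml.safe_dump
        "interfaces": [
            {
                "vim_interface_id": "port-uuid",
                "vim_net_id": "net-uuid",
                "mac_address": "fa:16:3e:aa:bb:cc",
                "ip_address": "10.0.0.10",
                "compute_node": "compute-01",
                "pci": "0000:81:00.1",
                "vlan": 100,
                "vim_info": "...",
            }
        ],
    }
}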
"""
raise VimConnNotImplemented("Should have implemented this")
- def inject_user_key(self, ip_addr=None, user=None, key=None, ro_key=None, password=None):
+ def inject_user_key(
+ self, ip_addr=None, user=None, key=None, ro_key=None, password=None
+ ):
"""
Inject a ssh public key in a VM
Params:
The function doesn't return a value.
"""
if not ip_addr or not user:
- raise VimConnNotSupportedException("All parameters should be different from 'None'")
+ raise VimConnNotSupportedException(
+ "All parameters should be different from 'None'"
+ )
elif not ro_key and not password:
- raise VimConnNotSupportedException("All parameters should be different from 'None'")
+ raise VimConnNotSupportedException(
+ "All parameters should be different from 'None'"
+ )
else:
- commands = {'mkdir -p ~/.ssh/', 'echo "{}" >> ~/.ssh/authorized_keys'.format(key),
- 'chmod 644 ~/.ssh/authorized_keys', 'chmod 700 ~/.ssh/'}
+ commands = {
+ "mkdir -p ~/.ssh/",
+ 'echo "{}" >> ~/.ssh/authorized_keys'.format(key),
+ "chmod 644 ~/.ssh/authorized_keys",
+ "chmod 700 ~/.ssh/",
+ }
client = paramiko.SSHClient()
+
try:
if ro_key:
pkey = paramiko.RSAKey.from_private_key(StringIO(ro_key))
else:
pkey = None
+
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- client.connect(ip_addr, username=user, password=password, pkey=pkey, timeout=10)
+ client.connect(
+ ip_addr, username=user, password=password, pkey=pkey, timeout=10
+ )
+
for command in commands:
(i, o, e) = client.exec_command(command, timeout=10)
returncode = o.channel.recv_exit_status()
outerror = e.read()
+
if returncode != 0:
text = "run_command='{}' Error='{}'".format(command, outerror)
- raise VimConnUnexpectedResponse("Cannot inject ssh key in VM: '{}'".format(text))
+ raise VimConnUnexpectedResponse(
+ "Cannot inject ssh key in VM: '{}'".format(text)
+ )
+
return
- except (socket.error, paramiko.AuthenticationException, paramiko.SSHException) as message:
+ except (
+ socket.error,
+ paramiko.AuthenticationException,
+ paramiko.SSHException,
+ ) as message:
raise VimConnUnexpectedResponse(
- "Cannot inject ssh key in VM: '{}' - {}".format(ip_addr, str(message)))
- return
+ "Cannot inject ssh key in VM: '{}' - {}".format(
+ ip_addr, str(message)
+ )
+ )
-# Optional methods
+ return
+ # Optional methods
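A usage sketch for the inject_user_key() method above; my_connector stands for an instance of a concrete VimConnector subclass and the credentials are placeholders:

# Hypothetical call; either ro_key (RO private key) or password must be supplied.
public_key = "ssh-rsa AAAAB3Nza... operator@osm"
try:
    my_connector.inject_user_key(
        ip_addr="10.0.0.10",
        user="ubuntu",
        key=public_key,
        password="s3cret",  # alternatively ro_key="<private key PEM string>"
    )
except VimConnUnexpectedResponse as exc:
    print("ssh key injection failed:", exc)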
def new_tenant(self, tenant_name, tenant_description):
"""Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
"tenant_name": string max lenght 64
"""
raise VimConnNotImplemented("Should have implemented this")
- def delete_tenant(self, tenant_id,):
+ def delete_tenant(self, tenant_id):
"""Delete a tenant from VIM
tenant_id: returned VIM tenant_id on "new_tenant"
Returns None on success. Raises an exception on failure. If the tenant is not found, raises VimConnNotFoundException
raise VimConnNotImplemented("SFC support not implemented")
def refresh_classifications_status(self, classification_list):
- '''Get the status of the classifications
- Params: the list of classification identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this classifier
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- '''
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
raise VimConnNotImplemented("Should have implemented this")
def delete_classification(self, classification_id):
raise VimConnNotImplemented("SFC support not implemented")
def refresh_sfis_status(self, sfi_list):
- '''Get the status of the service function instances
- Params: the list of sfi identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function instance
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- '''
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
raise VimConnNotImplemented("Should have implemented this")
def new_sf(self, name, sfis, sfc_encap=True):
raise VimConnNotImplemented("SFC support not implemented")
def refresh_sfs_status(self, sf_list):
- '''Get the status of the service functions
- Params: the list of sf identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- '''
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
raise VimConnNotImplemented("Should have implemented this")
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
raise VimConnNotImplemented("SFC support not implemented")
def refresh_sfps_status(self, sfp_list):
- '''Get the status of the service function path
- Params: the list of sfp identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function path
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
- '''
+ """Get the status of the service function path
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
raise VimConnNotImplemented("Should have implemented this")
def delete_sfp(self, sfp_id):
"""
raise VimConnNotImplemented("SFC support not implemented")
-# NOT USED METHODS in current version. Deprecated
-
+ # NOT USED METHODS in current version. Deprecated
@deprecated
def host_vim2gui(self, host, server_dict):
"""Transform host dictionary from VIM format to GUI format,
setup(
name=_name,
- description='OSM ro base class for vim and SDN plugins',
+ description="OSM ro base class for vim and SDN plugins",
long_description=README,
- version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+ version_command=(
+ "git describe --match v* --tags --long --dirty",
+ "pep440-git-full",
+ ),
# version=VERSION,
# python_requires='>3.5.0',
- author='ETSI OSM',
- author_email='alfonso.tiernosepulveda@telefonica.com',
- maintainer='Alfonso Tierno',
- maintainer_email='alfonso.tiernosepulveda@telefonica.com',
- url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
- license='Apache 2.0',
-
+ author="ETSI OSM",
+ author_email="alfonso.tiernosepulveda@telefonica.com",
+ maintainer="Alfonso Tierno",
+ maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+ url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+ license="Apache 2.0",
packages=[_name],
include_package_data=True,
install_requires=[
- "requests", "paramiko", "PyYAML",
+ "requests",
+ "paramiko",
+ "PyYAML",
],
- setup_requires=['setuptools-version-command'],
+ setup_requires=["setuptools-version-command"],
entry_points={
- 'osm_ro.plugins': ['rovim_plugin = osm_ro_plugin.vimconn:VimConnector',
- 'rosdn_plugin = osm_ro_plugin.sdnconn:SdnConnectorBase'
- ],
+ "osm_ro.plugins": [
+ "rovim_plugin = osm_ro_plugin.vimconn:VimConnector",
+ "rosdn_plugin = osm_ro_plugin.sdnconn:SdnConnectorBase",
+ ],
},
)
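The entry_points block above registers the two base classes under the "osm_ro.plugins" group; as a sketch (loader code is not part of this change), they could be discovered with the pkg_resources entry-point API:

# Illustrative discovery of the "osm_ro.plugins" entry points declared above.
from pkg_resources import iter_entry_points

for entry_point in iter_entry_points(group="osm_ro.plugins"):
    plugin_class = entry_point.load()  # e.g. VimConnector or SdnConnectorBase
    print(entry_point.name, "->", plugin_class)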
basepython = python3
deps = flake8
commands = flake8 osm_ro_plugin --max-line-length 120 \
- --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+ --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
[testenv:unittest]
basepython = python3