# under the License.
##
-import base64
-from osm_ro_plugin import vimconn
import logging
-import time
+from os import getenv
import random
from random import choice as random_choice
-from os import getenv
+import time
-from google.api_core.exceptions import NotFound
-import googleapiclient.discovery
from google.oauth2 import service_account
-
-from cryptography.hazmat.primitives import serialization as crypto_serialization
-from cryptography.hazmat.primitives.asymmetric import rsa
-from cryptography.hazmat.backends import default_backend as crypto_default_backend
-
-import logging
+import googleapiclient.discovery
+from osm_ro_plugin import vimconn
__author__ = "Sergio Gallardo Ruiz"
__date__ = "$11-aug-2021 08:30:00$"
class vimconnector(vimconn.VimConnector):
-
# Translate Google Cloud provisioning state to OSM provision state
# The first three ones are the transitional status once a user initiated action has been requested
# Once the operation is complete, it will transition into the states Succeeded or Failed
self.logger.debug("Config: %s", config)
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
self.credentials = None
- if (
- "credentials" in config
- ):
+ if "credentials" in config:
self.logger.debug("Setting credentials")
# Settings Google Cloud credentials dict
credentials_body = config["credentials"]
self.logger.debug("create network name %s, ip_profile %s", net_name, ip_profile)
try:
-
self.logger.debug("creating network_name: {}".format(net_name))
network = "projects/{}/global/networks/default".format(self.project)
"description": net_name,
"network": network,
"ipCidrRange": subnet_address,
- # "autoCreateSubnetworks": True, # The network is created in AUTO mode (one subnet per region is created)
+ # The network is created in AUTO mode (one subnet per region is created)
+ # "autoCreateSubnetworks": True,
"autoCreateSubnetworks": False,
}
self.logger.debug("created network_name: {}".format(net_name))
# Adding firewall rules to allow the traffic in the network:
- rules_list = self._create_firewall_rules(net_name)
+ self._create_firewall_rules(net_name)
# create subnetwork, even if there is no profile
)
try:
-
self.logger.debug("creating subnet_name: {}".format(subnet_name))
subnetwork_body = {
)
try:
-
if self.reload_client:
self._reload_connection()
if not network_list:
return []
else:
- self.logger.debug(
- "get_network Return: network_list[0] %s", network_list[0]
- )
+ self.logger.debug("get_network Return: network_list[0] %s", network_list[0])
return network_list[0]
def delete_network(self, net_id, created_items=None):
self.logger.debug("Deleting network: {}".format(str(net_id)))
try:
-
net_name = self._get_resource_name_from_resource_id(net_id)
# Check associated VMs
- vms = (
- self.conn_compute.instances()
- .list(project=self.project, zone=self.zone)
- .execute()
- )
+ self.conn_compute.instances().list(
+ project=self.project, zone=self.zone
+ ).execute()
net_id = self.delete_subnet(net_name, created_items)
try:
# If the network has no more subnets, it will be deleted too
net_info = self.get_network(net_id)
- # If the subnet is in use by another resource, the deletion will be retried N times before abort the operation
+            # If the subnet is in use by another resource, the deletion will
+            # be retried N times before aborting the operation
created_items = created_items or {}
created_items[net_id] = False
try:
# Deletion of the associated firewall rules:
- rules_list = self._delete_firewall_rules(network_name)
+ self._delete_firewall_rules(network_name)
operation = (
self.conn_compute.networks()
return vm_name_aux.lower()
def get_flavor_id_from_data(self, flavor_dict):
- self.logger.debug(
- "get_flavor_id_from_data begin: flavor_dict %s", flavor_dict
- )
+ self.logger.debug("get_flavor_id_from_data begin: flavor_dict %s", flavor_dict)
filter_dict = flavor_dict or {}
try:
cpus = filter_dict.get("vcpus") or 0
memMB = filter_dict.get("ram") or 0
- numberInterfaces = len(filter_dict.get("interfaces", [])) or 4 # Workaround (it should be 0)
+ # Workaround (it should be 0)
+ numberInterfaces = len(filter_dict.get("interfaces", [])) or 4
# Filter
filtered_machines = []
except Exception as e:
self._format_vimconn_exception(e)
- def delete_inuse_nic(self, nic_name):
- raise vimconn.VimConnNotImplemented("Not necessary")
-
def delete_image(self, image_id):
raise vimconn.VimConnNotImplemented("Not implemented")
+ "-"
+ "".join(random_choice("0123456789abcdef") for _ in range(12))
)
- response = (
- self.conn_compute.instances()
- .get(project=self.project, zone=self.zone, instance=random_name)
- .execute()
- )
- # If no exception is arisen, the random name exists for an instance, so a new random name must be generated
+ self.conn_compute.instances().get(
+ project=self.project, zone=self.zone, instance=random_name
+ ).execute()
+                # If no exception is raised, the random name exists for an instance,
+                # so a new random name must be generated
except Exception as e:
if e.args[0]["status"] == "404":
self.logger.debug("New random name: %s", random_name)
break
else:
- self.logger.error("Exception generating random name (%s) for the instance", name)
+ self.logger.error(
+ "Exception generating random name (%s) for the instance", name
+ )
self._format_vimconn_exception(e)
return random_name
start,
image_id=None, # <image project>:(image|image-family):<image/family id>
flavor_id=None,
+ affinity_group_list=None,
net_list=None,
cloud_config=None,
disk_list=None,
net_iface["subnetwork"] = net.get("net_id")
# In order to get an external IP address, the key "accessConfigs" must be used
# in the interace. It has to be of type "ONE_TO_ONE_NAT" and name "External NAT"
- if net.get("floating_ip", False) or (net["use"] == "mgmt" and self.config.get("use_floating_ip")):
+ if net.get("floating_ip", False) or (
+ net["use"] == "mgmt" and self.config.get("use_floating_ip")
+ ):
net_iface["accessConfigs"] = [
{"type": "ONE_TO_ONE_NAT", "name": "External NAT"}
]
self.logger.error("new_vminstance rollback fail {}".format(e2))
else:
- self.logger.debug("Exception creating new vminstance: %s", e, exc_info=True)
+ self.logger.debug(
+ "Exception creating new vminstance: %s", e, exc_info=True
+ )
self._format_vimconn_exception(e)
-
def _build_metadata(self, vm_name, cloud_config):
-
# initial metadata
metadata = {}
metadata["items"] = []
- key_pairs = {}
# if there is a cloud-init load it
if cloud_config:
self.logger.debug("cloud config: %s", cloud_config)
_, userdata = self._create_user_data(cloud_config)
- metadata["items"].append(
- {"key": "user-data", "value": userdata}
- )
+ metadata["items"].append({"key": "user-data", "value": userdata})
# either password of ssh-keys are required
# we will always use ssh-keys, in case it is not available we will generate it
- """
- if cloud_config and cloud_config.get("key-pairs"):
- key_data = ""
- key_pairs = {}
- if cloud_config.get("key-pairs"):
- if isinstance(cloud_config["key-pairs"], list):
- # Transform the format "<key> <user@host>" into "<user>:<key>"
- key_data = ""
- for key in cloud_config.get("key-pairs"):
- key_data = key_data + key + "\n"
- key_pairs = {
- "key": "ssh-keys",
- "value": key_data
- }
- else:
- # If there is no ssh key in cloud config, a new key is generated:
- _, key_data = self._generate_keys()
- key_pairs = {
- "key": "ssh-keys",
- "value": "" + key_data
- }
- self.logger.debug("generated keys: %s", key_data)
-
- metadata["items"].append(key_pairs)
- """
self.logger.debug("metadata: %s", metadata)
return metadata
-
- def _generate_keys(self):
- """Method used to generate a pair of private/public keys.
- This method is used because to create a vm in Azure we always need a key or a password
- In some cases we may have a password in a cloud-init file but it may not be available
- """
- key = rsa.generate_private_key(
- backend=crypto_default_backend(), public_exponent=65537, key_size=2048
- )
- private_key = key.private_bytes(
- crypto_serialization.Encoding.PEM,
- crypto_serialization.PrivateFormat.PKCS8,
- crypto_serialization.NoEncryption(),
- )
- public_key = key.public_key().public_bytes(
- crypto_serialization.Encoding.OpenSSH,
- crypto_serialization.PublicFormat.OpenSSH,
- )
- private_key = private_key.decode("utf8")
- # Change first line because Paramiko needs a explicit start with 'BEGIN RSA PRIVATE KEY'
- i = private_key.find("\n")
- private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
- public_key = public_key.decode("utf8")
-
- return private_key, public_key
-
-
- def _get_unused_vm_name(self, vm_name):
- """
- Checks the vm name and in case it is used adds a suffix to the name to allow creation
- :return:
- """
- all_vms = (
- self.conn_compute.instances()
- .list(project=self.project, zone=self.zone)
- .execute()
- )
- # Filter to vms starting with the indicated name
- vms = list(filter(lambda vm: (vm.name.startswith(vm_name)), all_vms))
- vm_names = [str(vm.name) for vm in vms]
-
- # get the name with the first not used suffix
- name_suffix = 0
- # name = subnet_name + "-" + str(name_suffix)
- name = vm_name # first subnet created will have no prefix
-
- while name in vm_names:
- name_suffix += 1
- name = vm_name + "-" + str(name_suffix)
-
- return name
-
def get_vminstance(self, vm_id):
"""
Obtaing the vm instance data from v_id
self.logger.debug("get_vminstance Return: response %s", response)
return response
- def delete_vminstance(self, vm_id, created_items=None):
+ def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
"""Deletes a vm instance from the vim."""
self.logger.debug(
"delete_vminstance begin: vm_id %s created_items %s",
self._get_resource_name_from_resource_id(netIface["subnetwork"])
in self.nets_to_be_deleted
):
- net_id = self._get_resource_name_from_resource_id(
+ self._get_resource_name_from_resource_id(
self.delete_network(netIface["subnetwork"])
)
else:
self._format_vimconn_exception(e)
- def _get_net_name_from_resource_id(self, resource_id):
- try:
- net_name = str(resource_id.split("/")[-1])
-
- return net_name
- except Exception:
- raise vimconn.VimConnException(
- "Unable to get google cloud net_name from invalid resource_id format '{}'".format(
- resource_id
- )
- )
-
def _get_resource_name_from_resource_id(self, resource_id):
"""
Obtains resource_name from the google cloud complete identifier: resource_name will always be last item
for net_id in net_list:
try:
- netName = self._get_net_name_from_resource_id(net_id)
resName = self._get_resource_name_from_resource_id(net_id)
net = (
interface_list = []
for network_interface in interfaces:
interface_dict = {}
- nic_name = network_interface["name"]
interface_dict["vim_interface_id"] = network_interface["name"]
ips = []
)
self._format_vimconn_exception(e)
- def _get_default_admin_user(self, image_id):
- if "ubuntu" in image_id.lower():
- return "ubuntu"
- else:
- return self._default_admin_user
-
def _create_firewall_rules(self, network):
"""
Creates the necessary firewall rules to allow the traffic in the network
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "tcp", "ports": ["80"]}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow ssh:
self.logger.debug("creating firewall rule to allow ssh")
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "tcp", "ports": ["22"]}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow ping:
self.logger.debug("creating firewall rule to allow ping")
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "icmp"}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow internal:
self.logger.debug("creating firewall rule to allow internal")
{"IPProtocol": "icmp"},
],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow microk8s:
self.logger.debug("creating firewall rule to allow microk8s")
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "tcp", "ports": ["16443"]}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow rdp:
self.logger.debug("creating firewall rule to allow rdp")
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "tcp", "ports": ["3389"]}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
# Adding firewall rule to allow osm:
self.logger.debug("creating firewall rule to allow osm")
"network": "global/networks/" + network,
"allowed": [{"IPProtocol": "tcp", "ports": ["9001", "9999"]}],
}
- operation_firewall = (
- self.conn_compute.firewalls()
- .insert(project=self.project, body=firewall_rule_body)
- .execute()
- )
+ self.conn_compute.firewalls().insert(
+ project=self.project, body=firewall_rule_body
+ ).execute()
self.logger.debug(
"_create_firewall_rules Return: list_rules %s", rules_list
)
for item in rules_list["items"]:
if network == self._get_resource_name_from_resource_id(item["network"]):
- operation_firewall = (
- self.conn_compute.firewalls()
- .delete(project=self.project, firewall=item["name"])
- .execute()
- )
+ self.conn_compute.firewalls().delete(
+ project=self.project, firewall=item["name"]
+ ).execute()
self.logger.debug("_delete_firewall_rules Return: list_rules %s", 0)
return rules_list
)
self._format_vimconn_exception(e)
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ param:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Not implemented")
+
+    def resize_instance(self, vm_id, flavor_id=None):
+        """
+        Resize a vdu
+ param:
+ vm_id: ID of an instance
+ flavor_id: flavor_id to resize the vdu to
+ """
+ # TODO: Add support for resize
+ raise vimconn.VimConnNotImplemented("Not implemented")