import re
from os import getenv
-from azure.common.credentials import ServicePrincipalCredentials
+from azure.identity import ClientSecretCredential
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.models import DiskCreateOption
+from azure.core.exceptions import ResourceNotFoundError
+from azure.profiles import ProfileDefinition
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import AuthenticationError
import msrestazure.tools as azure_tools
from requests.exceptions import ConnectionError
-__author__ = "Isabel Lloret, Sergio Gonzalez, Alfonso Tierno"
+from cryptography.hazmat.primitives import serialization as crypto_serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.backends import default_backend as crypto_default_backend
+
+__author__ = "Isabel Lloret, Sergio Gonzalez, Alfonso Tierno, Gerardo Garcia"
__date__ = "$18-apr-2019 23:59:59$"
"deallocating": "BUILD",
}
+ # TODO - review availability zones
AZURE_ZONES = ["1", "2", "3"]
+ AZURE_COMPUTE_MGMT_CLIENT_API_VERSION = "2021-03-01"
+ AZURE_COMPUTE_MGMT_PROFILE_TAG = "azure.mgmt.compute.ComputeManagementClient"
+ AZURE_COMPUTE_MGMT_PROFILE = ProfileDefinition(
+ {
+ AZURE_COMPUTE_MGMT_PROFILE_TAG: {
+ None: AZURE_COMPUTE_MGMT_CLIENT_API_VERSION,
+ "availability_sets": "2020-12-01",
+ "dedicated_host_groups": "2020-12-01",
+ "dedicated_hosts": "2020-12-01",
+ "disk_accesses": "2020-12-01",
+ "disk_encryption_sets": "2020-12-01",
+ "disk_restore_point": "2020-12-01",
+ "disks": "2020-12-01",
+ "galleries": "2020-09-30",
+ "gallery_application_versions": "2020-09-30",
+ "gallery_applications": "2020-09-30",
+ "gallery_image_versions": "2020-09-30",
+ "gallery_images": "2020-09-30",
+ "gallery_sharing_profile": "2020-09-30",
+ "images": "2020-12-01",
+ "log_analytics": "2020-12-01",
+ "operations": "2020-12-01",
+ "proximity_placement_groups": "2020-12-01",
+ "resource_skus": "2019-04-01",
+ "shared_galleries": "2020-09-30",
+ "shared_gallery_image_versions": "2020-09-30",
+ "shared_gallery_images": "2020-09-30",
+ "snapshots": "2020-12-01",
+ "ssh_public_keys": "2020-12-01",
+ "usage": "2020-12-01",
+ "virtual_machine_extension_images": "2020-12-01",
+ "virtual_machine_extensions": "2020-12-01",
+ "virtual_machine_images": "2020-12-01",
+ "virtual_machine_images_edge_zone": "2020-12-01",
+ "virtual_machine_run_commands": "2020-12-01",
+ "virtual_machine_scale_set_extensions": "2020-12-01",
+ "virtual_machine_scale_set_rolling_upgrades": "2020-12-01",
+ "virtual_machine_scale_set_vm_extensions": "2020-12-01",
+ "virtual_machine_scale_set_vm_run_commands": "2020-12-01",
+ "virtual_machine_scale_set_vms": "2020-12-01",
+ "virtual_machine_scale_sets": "2020-12-01",
+ "virtual_machine_sizes": "2020-12-01",
+ "virtual_machines": "2020-12-01",
+ }
+ },
+ AZURE_COMPUTE_MGMT_PROFILE_TAG + " osm",
+ )
+
+ AZURE_RESOURCE_MGMT_CLIENT_API_VERSION = "2020-10-01"
+ AZURE_RESOURCE_MGMT_PROFILE_TAG = (
+ "azure.mgmt.resource.resources.ResourceManagementClient"
+ )
+ AZURE_RESOURCE_MGMT_PROFILE = ProfileDefinition(
+ {
+ AZURE_RESOURCE_MGMT_PROFILE_TAG: {
+ None: AZURE_RESOURCE_MGMT_CLIENT_API_VERSION,
+ }
+ },
+ AZURE_RESOURCE_MGMT_PROFILE_TAG + " osm",
+ )
+
+ AZURE_NETWORK_MGMT_CLIENT_API_VERSION = "2020-11-01"
+ AZURE_NETWORK_MGMT_PROFILE_TAG = "azure.mgmt.network.NetworkManagementClient"
+ AZURE_NETWORK_MGMT_PROFILE = ProfileDefinition(
+ {
+ AZURE_NETWORK_MGMT_PROFILE_TAG: {
+ None: AZURE_NETWORK_MGMT_CLIENT_API_VERSION,
+ "firewall_policy_rule_groups": "2020-04-01",
+ "interface_endpoints": "2019-02-01",
+ "p2_svpn_server_configurations": "2019-07-01",
+ }
+ },
+ AZURE_NETWORK_MGMT_PROFILE_TAG + " osm",
+ )
+
def __init__(
self,
uuid,
self.reload_client = True
self.vnet_address_space = None
+
# LOGGER
self.logger = logging.getLogger("ro.vim.azure")
-
if log_level:
- logging.basicConfig()
self.logger.setLevel(getattr(logging, log_level))
self.tenant = tenant_id or tenant_name
if "vnet_name" in config:
self.vnet_name = config["vnet_name"]
+ # TODO - not used, do anything about it?
# public ssh key
self.pub_key = config.get("pub_key")
+ # TODO - check default user for azure
+ # default admin user
+ self._default_admin_user = "azureuser"
+
# flavor pattern regex
if "flavors_pattern" in config:
self._config["flavors_pattern"] = config["flavors_pattern"]
self.logger.debug("reloading azure client")
try:
- self.credentials = ServicePrincipalCredentials(
+ self.credentials = ClientSecretCredential(
client_id=self._config["user"],
- secret=self._config["passwd"],
- tenant=self._config["tenant"],
+ client_secret=self._config["passwd"],
+ tenant_id=self._config["tenant"],
)
self.conn = ResourceManagementClient(
- self.credentials, self._config["subscription_id"]
+ self.credentials,
+ self._config["subscription_id"],
+ profile=self.AZURE_RESOURCE_MGMT_PROFILE,
)
self.conn_compute = ComputeManagementClient(
- self.credentials, self._config["subscription_id"]
+ self.credentials,
+ self._config["subscription_id"],
+ profile=self.AZURE_COMPUTE_MGMT_PROFILE,
)
self.conn_vnet = NetworkManagementClient(
- self.credentials, self._config["subscription_id"]
+ self.credentials,
+ self._config["subscription_id"],
+ profile=self.AZURE_NETWORK_MGMT_PROFILE,
)
self._check_or_create_resource_group()
self._check_or_create_vnet()
def _check_subnets_for_vm(self, net_list):
# All subnets must belong to the same resource group and vnet
+        # All subnets must belong to the same resource group and vnet
rg_vnet = set(
self._get_resource_group_name_from_resource_id(net["net_id"])
+ self._get_net_name_from_resource_id(net["net_id"])
"""
Transforms a generic or azure exception to a vimcommException
"""
+ self.logger.error("Azure plugin error: {}".format(e))
if isinstance(e, vimconn.VimConnException):
- raise
+ raise e
elif isinstance(e, AuthenticationError):
raise vimconn.VimConnAuthException(type(e).__name__ + ": " + str(e))
elif isinstance(e, ConnectionError):
self.vnet_address_space = "10.0.0.0/8"
self.logger.debug("create base vnet: %s", self.vnet_name)
- self.conn_vnet.virtual_networks.create_or_update(
+ self.conn_vnet.virtual_networks.begin_create_or_update(
self.resource_group, self.vnet_name, vnet_params
)
vnet = self.conn_vnet.virtual_networks.get(
subnet_name = self._get_unused_subnet_name(net_name)
self.logger.debug("creating subnet_name: {}".format(subnet_name))
- async_creation = self.conn_vnet.subnets.create_or_update(
+ async_creation = self.conn_vnet.subnets.begin_create_or_update(
self.resource_group, self.vnet_name, subnet_name, subnet_params
)
async_creation.wait()
+ # TODO - do not wait here, check where it is used
self.logger.debug("created subnet_name: {}".format(subnet_name))
return "{}/subnets/{}".format(self.vnet_id, subnet_name), None
if mac_address:
net_ifz["mac_address"] = mac_address
- async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
- self.resource_group, nic_name, net_ifz
+ async_nic_creation = (
+ self.conn_vnet.network_interfaces.begin_create_or_update(
+ self.resource_group, nic_name, net_ifz
+ )
)
nic_data = async_nic_creation.result()
created_items[nic_data.id] = True
"public_ip_allocation_method": "Dynamic",
}
public_ip_name = nic_name + "-public-ip"
- async_public_ip = self.conn_vnet.public_ip_addresses.create_or_update(
- self.resource_group, public_ip_name, public_ip_address_params
+ async_public_ip = (
+ self.conn_vnet.public_ip_addresses.begin_create_or_update(
+ self.resource_group, public_ip_name, public_ip_address_params
+ )
)
public_ip = async_public_ip.result()
self.logger.debug("created public IP: {}".format(public_ip))
nic_data.ip_configurations[0].public_ip_address = public_ip
created_items[public_ip.id] = True
- self.conn_vnet.network_interfaces.create_or_update(
+ self.conn_vnet.network_interfaces.begin_create_or_update(
self.resource_group, nic_name, nic_data
)
# if version is defined get directly version, else list images
if len(params) == 4 and params[3]:
version = params[3]
- image_list = self._get_version_image_list(
- publisher, offer, sku, version
- )
+ if version == "latest":
+ image_list = self._get_sku_image_list(
+ publisher, offer, sku
+ )
+ image_list = [image_list[-1]]
+ else:
+ image_list = self._get_version_image_list(
+ publisher, offer, sku, version
+ )
else:
image_list = self._get_sku_image_list(
publisher, offer, sku
vm_nics.append({"id": str(vm_nic.id)})
net["vim_id"] = vm_nic.id
- # cloud-init configuration
- # cloud config
- if cloud_config:
- config_drive, userdata = self._create_user_data(cloud_config)
- custom_data = base64.b64encode(userdata.encode("utf-8")).decode(
- "latin-1"
- )
- key_data = None
- key_pairs = cloud_config.get("key-pairs")
- if key_pairs:
- key_data = key_pairs[0]
-
- if cloud_config.get("users"):
- user_name = cloud_config.get("users")[0].get("name", "osm")
- else:
- user_name = "osm" # DEFAULT USER IS OSM
-
- os_profile = {
- "computer_name": vm_name,
- "admin_username": user_name,
- "linux_configuration": {
- "disable_password_authentication": True,
- "ssh": {
- "public_keys": [
- {
- "path": "/home/{}/.ssh/authorized_keys".format(
- user_name
- ),
- "key_data": key_data,
- }
- ]
- },
- },
- "custom_data": custom_data,
- }
- else:
- os_profile = {
- "computer_name": vm_name,
- "admin_username": "osm",
- "admin_password": "Osm4u!",
- }
-
vm_parameters = {
"location": self.region,
- "os_profile": os_profile,
+ "os_profile": self._build_os_profile(vm_name, cloud_config, image_id),
"hardware_profile": {"vm_size": flavor_id},
"storage_profile": {"image_reference": image_reference},
}
vm_parameters["zones"] = [vm_zone]
self.logger.debug("create vm name: %s", vm_name)
- creation_result = self.conn_compute.virtual_machines.create_or_update(
- self.resource_group, vm_name, vm_parameters
+ creation_result = self.conn_compute.virtual_machines.begin_create_or_update(
+ self.resource_group, vm_name, vm_parameters, polling=False
)
+ self.logger.debug("obtained creation result: %s", creation_result)
virtual_machine = creation_result.result()
self.logger.debug("created vm name: %s", vm_name)
+            """ For now, skip polling, to check whether creation takes less time
# Add disks if they are provided
if disk_list:
for disk_index, disk in enumerate(disk_list):
if start:
self.conn_compute.virtual_machines.start(self.resource_group, vm_name)
# start_result.wait()
+ """
return virtual_machine.id, created_items
self.logger.debug("Exception creating new vminstance: %s", e, exc_info=True)
self._format_vimconn_exception(e)
+ def _build_os_profile(self, vm_name, cloud_config, image_id):
+
+ # initial os_profile
+ os_profile = {"computer_name": vm_name}
+
+ # for azure os_profile admin_username is required
+ if cloud_config and cloud_config.get("users"):
+ admin_username = cloud_config.get("users")[0].get(
+ "name", self._get_default_admin_user(image_id)
+ )
+ else:
+ admin_username = self._get_default_admin_user(image_id)
+ os_profile["admin_username"] = admin_username
+
+ # if there is a cloud-init load it
+ if cloud_config:
+ _, userdata = self._create_user_data(cloud_config)
+ custom_data = base64.b64encode(userdata.encode("utf-8")).decode("latin-1")
+ os_profile["custom_data"] = custom_data
+
+        # either password or ssh-keys are required
+ # we will always use ssh-keys, in case it is not available we will generate it
+ if cloud_config and cloud_config.get("key-pairs"):
+ key_data = cloud_config.get("key-pairs")[0]
+ else:
+ _, key_data = self._generate_keys()
+
+ os_profile["linux_configuration"] = {
+ "ssh": {
+ "public_keys": [
+ {
+ "path": "/home/{}/.ssh/authorized_keys".format(admin_username),
+ "key_data": key_data,
+ }
+ ]
+ },
+ }
+
+ return os_profile
+
+ def _generate_keys(self):
+ """Method used to generate a pair of private/public keys.
+ This method is used because to create a vm in Azure we always need a key or a password
+ In some cases we may have a password in a cloud-init file but it may not be available
+ """
+ key = rsa.generate_private_key(
+ backend=crypto_default_backend(), public_exponent=65537, key_size=2048
+ )
+ private_key = key.private_bytes(
+ crypto_serialization.Encoding.PEM,
+ crypto_serialization.PrivateFormat.PKCS8,
+ crypto_serialization.NoEncryption(),
+ )
+ public_key = key.public_key().public_bytes(
+ crypto_serialization.Encoding.OpenSSH,
+ crypto_serialization.PublicFormat.OpenSSH,
+ )
+ private_key = private_key.decode("utf8")
+        # Change first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
+ i = private_key.find("\n")
+ private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
+ public_key = public_key.decode("utf8")
+
+ return private_key, public_key
+
def _get_unused_vm_name(self, vm_name):
"""
Checks the vm name and in case it is used adds a suffix to the name to allow creation
disk_name = vm_name + "_DataDisk_" + str(disk_index)
if not disk.get("image_id"):
self.logger.debug("create new data disk name: %s", disk_name)
- async_disk_creation = self.conn_compute.disks.create_or_update(
+ async_disk_creation = self.conn_compute.disks.begin_create_or_update(
self.resource_group,
disk_name,
{
if type == "snapshots" or type == "disks":
self.logger.debug("create disk from copy name: %s", image_name)
# ¿Should check that snapshot exists?
- async_disk_creation = self.conn_compute.disks.create_or_update(
- self.resource_group,
- disk_name,
- {
- "location": self.region,
- "creation_data": {
- "create_option": "Copy",
- "source_uri": image_id,
+ async_disk_creation = (
+ self.conn_compute.disks.begin_create_or_update(
+ self.resource_group,
+ disk_name,
+ {
+ "location": self.region,
+ "creation_data": {
+ "create_option": "Copy",
+ "source_uri": image_id,
+ },
},
- },
+ )
)
data_disk = async_disk_creation.result()
created_items[data_disk.id] = True
}
)
self.logger.debug("attach disk name: %s", disk_name)
- self.conn_compute.virtual_machines.create_or_update(
+ self.conn_compute.virtual_machines.begin_create_or_update(
self.resource_group, virtual_machine.name, virtual_machine
)
self._reload_connection()
vm_sizes_list = [
vm_size.serialize()
- for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)
+ for vm_size in self.conn_compute.resource_skus.list(
+ "location={}".format(self.region)
+ )
]
cpus = filter_dict.get("vcpus") or 0
memMB = filter_dict.get("ram") or 0
+ numberInterfaces = len(filter_dict.get("interfaces", [])) or 0
# Filter
if self._config.get("flavors_pattern"):
filtered_sizes = [
size
for size in vm_sizes_list
- if size["numberOfCores"] >= cpus
- and size["memoryInMB"] >= memMB
+ if size["capabilities"]["vCPUs"] >= cpus
+ and size["capabilities"]["MemoryGB"] >= memMB / 1024
+ and size["capabilities"]["MaxNetworkInterfaces"] >= numberInterfaces
and re.search(self._config.get("flavors_pattern"), size["name"])
]
else:
filtered_sizes = [
size
for size in vm_sizes_list
- if size["numberOfCores"] >= cpus and size["memoryInMB"] >= memMB
+ if size["capabilities"]["vCPUs"] >= cpus
+ and size["capabilities"]["MemoryGB"] >= memMB / 1024
+ and size["capabilities"]["MaxNetworkInterfaces"] >= numberInterfaces
]
# Sort
self._reload_connection()
vm_sizes_list = [
vm_size.serialize()
- for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)
+ for vm_size in self.conn_compute.resource_skus.list(
+ "location={}".format(self.region)
+ )
]
output_flavor = None
self._reload_connection()
res_name = self._get_resource_name_from_resource_id(net_id)
- filter_dict = {"name": res_name}
- network_list = self.get_network_list(filter_dict)
- if not network_list:
- raise vimconn.VimConnNotFoundException(
- "network '{}' not found".format(net_id)
- )
try:
+            # Obtain subnets and try to delete nic first
+ subnet = self.conn_vnet.subnets.get(
+ self.resource_group, self.vnet_name, res_name
+ )
+ if not subnet:
+ raise vimconn.VimConnNotFoundException(
+ "network '{}' not found".format(net_id)
+ )
+
+ # TODO - for a quick-fix delete nics sequentially but should not wait
+ # for each in turn
+ if subnet.ip_configurations:
+ for ip_configuration in subnet.ip_configurations:
+ # obtain nic_name from ip_configuration
+ parsed_id = azure_tools.parse_resource_id(ip_configuration.id)
+ nic_name = parsed_id["name"]
+ self.delete_inuse_nic(nic_name)
+
# Subnet API fails (CloudError: Azure Error: ResourceNotFound)
# Put the initial virtual_network API
- async_delete = self.conn_vnet.subnets.delete(
+ async_delete = self.conn_vnet.subnets.begin_delete(
self.resource_group, self.vnet_name, res_name
)
async_delete.wait()
+
return net_id
+ except ResourceNotFoundError:
+ raise vimconn.VimConnNotFoundException(
+ "network '{}' not found".format(net_id)
+ )
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
raise vimconn.VimConnNotFoundException(
except Exception as e:
self._format_vimconn_exception(e)
+ def delete_inuse_nic(self, nic_name):
+
+ # Obtain nic data
+ nic_data = self.conn_vnet.network_interfaces.get(self.resource_group, nic_name)
+
+ # Obtain vm associated to nic in case it exists
+ if nic_data.virtual_machine:
+ vm_name = azure_tools.parse_resource_id(nic_data.virtual_machine.id)["name"]
+ self.logger.debug("vm_name: {}".format(vm_name))
+ virtual_machine = self.conn_compute.virtual_machines.get(
+ self.resource_group, vm_name
+ )
+ self.logger.debug("obtained vm")
+
+            # Detach nic from vm if it has network interfaces attached
+ network_interfaces = virtual_machine.network_profile.network_interfaces
+ network_interfaces[:] = [
+ interface
+ for interface in network_interfaces
+ if self._get_resource_name_from_resource_id(interface.id) != nic_name
+ ]
+
+ # TODO - check if there is a public ip to delete and delete it
+ if network_interfaces:
+
+ # Deallocate the vm
+ async_vm_deallocate = (
+ self.conn_compute.virtual_machines.begin_deallocate(
+ self.resource_group, vm_name
+ )
+ )
+ self.logger.debug("deallocating vm")
+ async_vm_deallocate.wait()
+ self.logger.debug("vm deallocated")
+
+ async_vm_update = (
+ self.conn_compute.virtual_machines.begin_create_or_update(
+ self.resource_group, vm_name, virtual_machine
+ )
+ )
+ virtual_machine = async_vm_update.result()
+ self.logger.debug("nic removed from interface")
+
+ else:
+ self.logger.debug("There are no interfaces left, delete vm")
+ self.delete_vminstance(virtual_machine.id)
+ self.logger.debug("Delete vm")
+
+ # Delete nic
+ self.logger.debug("delete NIC name: %s", nic_name)
+ nic_delete = self.conn_vnet.network_interfaces.begin_delete(
+ self.resource_group, nic_name
+ )
+ nic_delete.wait()
+ self.logger.debug("deleted NIC name: %s", nic_name)
+
def delete_vminstance(self, vm_id, created_items=None):
"""Deletes a vm instance from the vim."""
self.logger.debug(
# vm_stop = self.conn_compute.virtual_machines.power_off(self.resource_group, resName)
# vm_stop.wait()
- vm_delete = self.conn_compute.virtual_machines.delete(
+ vm_delete = self.conn_compute.virtual_machines.begin_delete(
self.resource_group, res_name
)
vm_delete.wait()
self.logger.debug("deleted VM name: %s", res_name)
- # Delete OS Disk
- os_disk_name = vm.storage_profile.os_disk.name
- self.logger.debug("delete OS DISK: %s", os_disk_name)
- async_disk_delete = self.conn_compute.disks.delete(
- self.resource_group, os_disk_name
- )
- async_disk_delete.wait()
- # os disks are created always with the machine
- self.logger.debug("deleted OS DISK name: %s", os_disk_name)
+ # Delete OS Disk, check if exists, in case of error creating
+ # it may not be fully created
+ if vm.storage_profile.os_disk:
+ os_disk_name = vm.storage_profile.os_disk.name
+ self.logger.debug("delete OS DISK: %s", os_disk_name)
+ async_disk_delete = self.conn_compute.disks.begin_delete(
+ self.resource_group, os_disk_name
+ )
+ async_disk_delete.wait()
+ # os disks are created always with the machine
+ self.logger.debug("deleted OS DISK name: %s", os_disk_name)
for data_disk in vm.storage_profile.data_disks:
self.logger.debug("delete data_disk: %s", data_disk.name)
- async_disk_delete = self.conn_compute.disks.delete(
+ async_disk_delete = self.conn_compute.disks.begin_delete(
self.resource_group, data_disk.name
)
async_disk_delete.wait()
# Public ip must be deleted afterwards of nic that is attached
self.logger.debug("delete NIC name: %s", nic_name)
- nic_delete = self.conn_vnet.network_interfaces.delete(
+ nic_delete = self.conn_vnet.network_interfaces.begin_delete(
self.resource_group, nic_name
)
nic_delete.wait()
# Delete list of public ips
if public_ip_name:
self.logger.debug("delete PUBLIC IP - " + public_ip_name)
- ip_delete = self.conn_vnet.public_ip_addresses.delete(
+ ip_delete = self.conn_vnet.public_ip_addresses.begin_delete(
self.resource_group, public_ip_name
)
ip_delete.wait()
# Delete created items
self._delete_created_items(created_items)
+ except ResourceNotFoundError:
+ raise vimconn.VimConnNotFoundException(
+ "No vm instance found '{}'".format(vm_id)
+ )
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
raise vimconn.VimConnNotFoundException(
virtual machine fails creating or in other cases of error
"""
self.logger.debug("Created items: %s", created_items)
+ # TODO - optimize - should not wait until it is deleted
# Must delete in order first nics, then public_ips
# As dictionaries don't preserve order, first get items to be deleted then delete them
nics_to_delete = []
for item_name in nics_to_delete:
try:
self.logger.debug("deleting nic name %s:", item_name)
- nic_delete = self.conn_vnet.network_interfaces.delete(
+ nic_delete = self.conn_vnet.network_interfaces.begin_delete(
self.resource_group, item_name
)
nic_delete.wait()
for item_name in publics_ip_to_delete:
try:
self.logger.debug("deleting public ip name %s:", item_name)
- ip_delete = self.conn_vnet.public_ip_addresses.delete(
+ ip_delete = self.conn_vnet.public_ip_addresses.begin_delete(
self.resource_group, name
)
ip_delete.wait()
for item_name in disks_to_delete:
try:
self.logger.debug("deleting data disk name %s:", name)
- async_disk_delete = self.conn_compute.disks.delete(
+ async_disk_delete = self.conn_compute.disks.begin_delete(
self.resource_group, item_name
)
async_disk_delete.wait()
resName = self._get_resource_name_from_resource_id(vm_id)
if "start" in action_dict:
- self.conn_compute.virtual_machines.start(self.resource_group, resName)
+ self.conn_compute.virtual_machines.begin_start(
+ self.resource_group, resName
+ )
elif (
"stop" in action_dict
or "shutdown" in action_dict
or "shutoff" in action_dict
):
- self.conn_compute.virtual_machines.power_off(
+ self.conn_compute.virtual_machines.begin_power_off(
self.resource_group, resName
)
elif "terminate" in action_dict:
- self.conn_compute.virtual_machines.delete(self.resource_group, resName)
+ self.conn_compute.virtual_machines.begin_delete(
+ self.resource_group, resName
+ )
elif "reboot" in action_dict:
- self.conn_compute.virtual_machines.restart(self.resource_group, resName)
+ self.conn_compute.virtual_machines.begin_restart(
+ self.resource_group, resName
+ )
return None
+ except ResourceNotFoundError:
+ raise vimconn.VimConnNotFoundException("No vm found '{}'".format(vm_id))
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
raise vimconn.VimConnNotFoundException("No vm found '{}'".format(vm_id))
try:
resName = self._get_resource_name_from_resource_id(vm_id)
vm = self.conn_compute.virtual_machines.get(self.resource_group, resName)
+ except ResourceNotFoundError:
+ raise vimconn.VimConnNotFoundException(
+ "No vminstance found '{}'".format(vm_id)
+ )
except CloudError as e:
if e.error.error and "notfound" in e.error.error.lower():
raise vimconn.VimConnNotFoundException(
return interface_list
except Exception as e:
self.logger.error(
- "Exception %s obtaining interface data for vm: %s, error: %s",
- vm_id,
+ "Exception %s obtaining interface data for vm: %s",
e,
+ vm_id,
exc_info=True,
)
self._format_vimconn_exception(e)
+ def _get_default_admin_user(self, image_id):
+ if "ubuntu" in image_id.lower():
+ return "ubuntu"
+ else:
+ return self._default_admin_user
+
if __name__ == "__main__":
+ # Init logger
+ log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"
+ log_formatter = logging.Formatter(log_format, datefmt="%Y-%m-%dT%H:%M:%S")
+ handler = logging.StreamHandler()
+ handler.setFormatter(log_formatter)
+ logger = logging.getLogger("ro.vim.azure")
+ # logger.setLevel(level=logging.ERROR)
+ # logger.setLevel(level=logging.INFO)
+ logger.setLevel(level=logging.DEBUG)
+ logger.addHandler(handler)
+
# Making some basic test
vim_id = "azure"
vim_name = "azure"
test_params[param] = value
config = {
- "region_name": getenv("AZURE_REGION_NAME", "westeurope"),
+ "region_name": getenv("AZURE_REGION_NAME", "northeurope"),
"resource_group": getenv("AZURE_RESOURCE_GROUP"),
"subscription_id": getenv("AZURE_SUBSCRIPTION_ID"),
"pub_key": getenv("AZURE_PUB_KEY", None),
- "vnet_name": getenv("AZURE_VNET_NAME", "myNetwork"),
+ "vnet_name": getenv("AZURE_VNET_NAME", "osm_vnet"),
}
- virtualMachine = {
- "name": "sergio",
- "description": "new VM",
- "status": "running",
- "image": {
- "publisher": "Canonical",
- "offer": "UbuntuServer",
- "sku": "16.04.0-LTS",
- "version": "latest",
- },
- "hardware_profile": {"vm_size": "Standard_DS1_v2"},
- "networks": ["sergio"],
- }
-
- vnet_config = {
- "subnet_address": "10.1.2.0/24",
- # "subnet_name": "subnet-oam"
- }
- ###########################
-
azure = vimconnector(
vim_id,
vim_name,
config=config,
)
- # azure.get_flavor_id_from_data("here")
- # subnets=azure.get_network_list()
- # azure.new_vminstance(virtualMachine["name"], virtualMachine["description"], virtualMachine["status"],
- # virtualMachine["image"], virtualMachine["hardware_profile"]["vm_size"], subnets)
-
- azure.new_network("mynet", None)
- net_id = (
- "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."
- "Network/virtualNetworks/test"
- )
- net_id_not_found = (
- "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"
- "Microsoft.Network/virtualNetworks/testALF"
- )
- azure.refresh_nets_status([net_id, net_id_not_found])
+ """
+ logger.debug("List images")
+ image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS:18.04.201809110"})
+ logger.debug("image: {}".format(image))
+
+ logger.debug("List networks")
+ network_list = azure.get_network_list({"name": "internal"})
+ logger.debug("Network_list: {}".format(network_list))
+
+ logger.debug("List flavors")
+ flavors = azure.get_flavor_id_from_data({"vcpu": "2"})
+ logger.debug("flavors: {}".format(flavors))
+ """
+
+ """
+ # Create network and test machine
+ #new_network_id, _ = azure.new_network("testnet1", "data")
+ new_network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers")
+ "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/testnet1"
+ ).format(test_params["resource_group"])
+ logger.debug("new_network_id: {}".format(new_network_id))
+
+ logger.debug("Delete network")
+ new_network_id = azure.delete_network(new_network_id)
+ logger.debug("deleted network_id: {}".format(new_network_id))
+ """
+
+ """
+ logger.debug("List networks")
+ network_list = azure.get_network_list({"name": "internal"})
+ logger.debug("Network_list: {}".format(network_list))
+
+ logger.debug("Show machine isabelvm")
+ vmachine = azure.get_vminstance( ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}"
+ "/providers/Microsoft.Compute/virtualMachines/isabelVM"
+ ).format(test_params["resource_group"])
+ )
+ logger.debug("Vmachine: {}".format(vmachine))
+ """
+
+ logger.debug("List images")
+ image = azure.get_image_list({"name": "Canonical:UbuntuServer:16.04"})
+ # image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS"})
+ logger.debug("image: {}".format(image))
+
+ """
+ # Create network and test machine
+ new_network_id, _ = azure.new_network("testnet1", "data")
+ image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute"
+ "/Locations/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer"
+ "/Skus/18.04-LTS/Versions/18.04.201809110")
+ """
+ """
+
+ network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}
+ "/providers/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
+ ).format(test_params["resource_group"])
+ """
+
+ """
+ logger.debug("Create machine")
+ image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute/Locations"
+ "/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer/Skus/18.04-LTS"
+ "/Versions/18.04.202103151")
+ cloud_config = {"user-data": (
+ "#cloud-config\n"
+ "password: osm4u\n"
+ "chpasswd: { expire: False }\n"
+ "ssh_pwauth: True\n\n"
+ "write_files:\n"
+ "- content: |\n"
+ " # My new helloworld file\n\n"
+ " owner: root:root\n"
+ " permissions: '0644'\n"
+ " path: /root/helloworld.txt",
+ "key-pairs": [
+ ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/p7fuw/W0+6uhx9XNPY4dN/K2cXZweDfjJN8W/sQ1AhKvn"
+ "j0MF+dbBdsd2tfq6XUhx5LiKoGTunRpRonOw249ivH7pSyNN7FYpdLaij7Krn3K+QRNEOahMI4eoqdglVftA3"
+ "vlw4Oe/aZOU9BXPdRLxfr9hRKzg5zkK91/LBkEViAijpCwK6ODPZLDDUwY4iihYK9R5eZ3fmM4+3k3Jd0hPRk"
+ "B5YbtDQOu8ASWRZ9iTAWqr1OwQmvNc6ohSVg1tbq3wSxj/5bbz0J24A7TTpY0giWctne8Qkl/F2e0ZSErvbBB"
+ "GXKxfnq7sc23OK1hPxMAuS+ufzyXsnL1+fB4t2iF azureuser@osm-test-client\n"
+ )]
+ }
+ network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers"
+ "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
+ ).format(test_params["resource_group"])
+ vm = azure.new_vminstance(name="isabelvm",
+ description="testvm",
+ start=True,
+ image_id=image_id,
+ flavor_id="Standard_B1ls",
+ net_list = [{"net_id": network_id, "name": "internal", "use": "mgmt", "floating_ip":True}],
+ cloud_config = cloud_config)
+ logger.debug("vm: {}".format(vm))
+ """
+
+ """
+ # Delete nonexistent vm
+ try:
+ logger.debug("Delete machine")
+ vm_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Compute/"
+ "virtualMachines/isabelvm"
+ ).format(test_params["resource_group"])
+ created_items = {
+ ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/networkInterfaces/isabelvm-nic-0"
+ ).format(test_params["resource_group"]): True,
+ ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/publicIPAddresses/isabelvm-nic-0-public-ip"
+ ).format(test_params["resource_group"]): True
+ }
+ azure.delete_vminstance(vm_id, created_items)
+ except vimconn.VimConnNotFoundException as e:
+ print("Ok: excepcion no encontrada")
+ """
+
+ """
+ network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/virtualNetworks/osm_vnet/subnets/hfcloudinit-internal-1"
+ ).format(test_params["resource_group"])
+ azure.delete_network(network_id)
+ """
# limitations under the License.
#######################################################################################
adal==1.2.7
- # via
- # azure-datalake-store
- # msrestazure
-aenum==3.0.0
+ # via msrestazure
+aenum==3.1.0
# via pyone
alabaster==0.7.12
# via sphinx
-alembic==1.5.8
+alembic==1.6.5
# via
# neutron
# oslo.db
# via openstacksdk
argparse==1.4.0
# via unittest2
-attrs==20.3.0
+attrs==21.2.0
# via
# cmd2
# jsonschema
-azure-applicationinsights==0.1.0
- # via azure
-azure-batch==4.1.3
- # via azure
azure-common==1.1.27
# via
- # azure-applicationinsights
- # azure-batch
- # azure-cosmosdb-table
- # azure-eventgrid
- # azure-graphrbac
- # azure-keyvault
- # azure-loganalytics
- # azure-mgmt-advisor
- # azure-mgmt-applicationinsights
- # azure-mgmt-authorization
- # azure-mgmt-batch
- # azure-mgmt-batchai
- # azure-mgmt-billing
- # azure-mgmt-cdn
- # azure-mgmt-cognitiveservices
- # azure-mgmt-commerce
+ # -r RO-VIM-azure/requirements.in
# azure-mgmt-compute
- # azure-mgmt-consumption
- # azure-mgmt-containerinstance
- # azure-mgmt-containerregistry
- # azure-mgmt-containerservice
- # azure-mgmt-cosmosdb
- # azure-mgmt-datafactory
- # azure-mgmt-datalake-analytics
- # azure-mgmt-datalake-store
- # azure-mgmt-datamigration
- # azure-mgmt-devspaces
- # azure-mgmt-devtestlabs
- # azure-mgmt-dns
- # azure-mgmt-eventgrid
- # azure-mgmt-eventhub
- # azure-mgmt-hanaonazure
- # azure-mgmt-iotcentral
- # azure-mgmt-iothub
- # azure-mgmt-iothubprovisioningservices
- # azure-mgmt-keyvault
- # azure-mgmt-loganalytics
- # azure-mgmt-logic
- # azure-mgmt-machinelearningcompute
- # azure-mgmt-managementgroups
- # azure-mgmt-managementpartner
- # azure-mgmt-maps
- # azure-mgmt-marketplaceordering
- # azure-mgmt-media
- # azure-mgmt-monitor
- # azure-mgmt-msi
# azure-mgmt-network
- # azure-mgmt-notificationhubs
- # azure-mgmt-policyinsights
- # azure-mgmt-powerbiembedded
- # azure-mgmt-rdbms
- # azure-mgmt-recoveryservices
- # azure-mgmt-recoveryservicesbackup
- # azure-mgmt-redis
- # azure-mgmt-relay
- # azure-mgmt-reservations
# azure-mgmt-resource
- # azure-mgmt-scheduler
- # azure-mgmt-search
- # azure-mgmt-servicebus
- # azure-mgmt-servicefabric
- # azure-mgmt-signalr
- # azure-mgmt-sql
- # azure-mgmt-storage
- # azure-mgmt-subscription
- # azure-mgmt-trafficmanager
- # azure-mgmt-web
- # azure-servicebus
- # azure-servicefabric
- # azure-servicemanagement-legacy
- # azure-storage-blob
- # azure-storage-common
- # azure-storage-file
- # azure-storage-queue
-azure-cosmosdb-nspkg==2.0.2
- # via azure-cosmosdb-table
-azure-cosmosdb-table==1.0.6
- # via azure
-azure-datalake-store==0.0.52
- # via azure
-azure-eventgrid==1.3.0
- # via azure
-azure-graphrbac==0.40.0
- # via azure
-azure-keyvault==1.1.0
- # via azure
-azure-loganalytics==0.1.0
- # via azure
-azure-mgmt-advisor==1.0.1
- # via azure-mgmt
-azure-mgmt-applicationinsights==0.1.1
- # via azure-mgmt
-azure-mgmt-authorization==0.50.0
- # via azure-mgmt
-azure-mgmt-batch==5.0.1
- # via azure-mgmt
-azure-mgmt-batchai==2.0.0
- # via azure-mgmt
-azure-mgmt-billing==0.2.0
- # via azure-mgmt
-azure-mgmt-cdn==3.1.0
- # via azure-mgmt
-azure-mgmt-cognitiveservices==3.0.0
- # via azure-mgmt
-azure-mgmt-commerce==1.0.1
- # via azure-mgmt
-azure-mgmt-compute==4.6.2
- # via azure-mgmt
-azure-mgmt-consumption==2.0.0
- # via azure-mgmt
-azure-mgmt-containerinstance==1.5.0
- # via azure-mgmt
-azure-mgmt-containerregistry==2.8.0
- # via azure-mgmt
-azure-mgmt-containerservice==4.4.0
- # via azure-mgmt
-azure-mgmt-cosmosdb==0.4.1
- # via azure-mgmt
-azure-mgmt-datafactory==0.6.0
- # via azure-mgmt
-azure-mgmt-datalake-analytics==0.6.0
- # via azure-mgmt
-azure-mgmt-datalake-nspkg==3.0.1
- # via
- # azure-mgmt-datalake-analytics
- # azure-mgmt-datalake-store
-azure-mgmt-datalake-store==0.5.0
- # via azure-mgmt
-azure-mgmt-datamigration==1.0.0
- # via azure-mgmt
-azure-mgmt-devspaces==0.1.0
- # via azure-mgmt
-azure-mgmt-devtestlabs==2.2.0
- # via azure-mgmt
-azure-mgmt-dns==2.1.0
- # via azure-mgmt
-azure-mgmt-eventgrid==1.0.0
- # via azure-mgmt
-azure-mgmt-eventhub==2.6.0
- # via azure-mgmt
-azure-mgmt-hanaonazure==0.1.1
- # via azure-mgmt
-azure-mgmt-iotcentral==0.1.0
- # via azure-mgmt
-azure-mgmt-iothub==0.5.0
- # via azure-mgmt
-azure-mgmt-iothubprovisioningservices==0.2.0
- # via azure-mgmt
-azure-mgmt-keyvault==1.1.0
- # via azure-mgmt
-azure-mgmt-loganalytics==0.2.0
- # via azure-mgmt
-azure-mgmt-logic==3.0.0
- # via azure-mgmt
-azure-mgmt-machinelearningcompute==0.4.1
- # via azure-mgmt
-azure-mgmt-managementgroups==0.1.0
- # via azure-mgmt
-azure-mgmt-managementpartner==0.1.1
- # via azure-mgmt
-azure-mgmt-maps==0.1.0
- # via azure-mgmt
-azure-mgmt-marketplaceordering==0.1.0
- # via azure-mgmt
-azure-mgmt-media==1.0.0
- # via azure-mgmt
-azure-mgmt-monitor==0.5.2
- # via azure-mgmt
-azure-mgmt-msi==0.2.0
- # via azure-mgmt
-azure-mgmt-network==2.7.0
- # via azure-mgmt
-azure-mgmt-notificationhubs==2.1.0
- # via azure-mgmt
-azure-mgmt-nspkg==3.0.2
- # via
- # azure-mgmt-advisor
- # azure-mgmt-applicationinsights
- # azure-mgmt-authorization
- # azure-mgmt-batch
- # azure-mgmt-batchai
- # azure-mgmt-billing
- # azure-mgmt-cognitiveservices
- # azure-mgmt-commerce
- # azure-mgmt-consumption
- # azure-mgmt-cosmosdb
- # azure-mgmt-datafactory
- # azure-mgmt-datalake-nspkg
- # azure-mgmt-datamigration
- # azure-mgmt-devspaces
- # azure-mgmt-devtestlabs
- # azure-mgmt-dns
- # azure-mgmt-eventgrid
- # azure-mgmt-hanaonazure
- # azure-mgmt-iotcentral
- # azure-mgmt-iothub
- # azure-mgmt-iothubprovisioningservices
- # azure-mgmt-keyvault
- # azure-mgmt-loganalytics
- # azure-mgmt-logic
- # azure-mgmt-machinelearningcompute
- # azure-mgmt-managementgroups
- # azure-mgmt-maps
- # azure-mgmt-marketplaceordering
- # azure-mgmt-monitor
- # azure-mgmt-msi
- # azure-mgmt-policyinsights
- # azure-mgmt-powerbiembedded
- # azure-mgmt-recoveryservices
- # azure-mgmt-recoveryservicesbackup
- # azure-mgmt-redis
- # azure-mgmt-relay
- # azure-mgmt-reservations
- # azure-mgmt-scheduler
- # azure-mgmt-servicefabric
- # azure-mgmt-signalr
- # azure-mgmt-sql
- # azure-mgmt-storage
- # azure-mgmt-subscription
- # azure-mgmt-trafficmanager
- # azure-mgmt-web
-azure-mgmt-policyinsights==0.1.0
- # via azure-mgmt
-azure-mgmt-powerbiembedded==2.0.0
- # via azure-mgmt
-azure-mgmt-rdbms==1.9.0
- # via azure-mgmt
-azure-mgmt-recoveryservices==0.3.0
- # via azure-mgmt
-azure-mgmt-recoveryservicesbackup==0.3.0
- # via azure-mgmt
-azure-mgmt-redis==5.0.0
- # via azure-mgmt
-azure-mgmt-relay==0.1.0
- # via azure-mgmt
-azure-mgmt-reservations==0.2.1
- # via azure-mgmt
-azure-mgmt-resource==2.2.0
- # via azure-mgmt
-azure-mgmt-scheduler==2.0.0
- # via azure-mgmt
-azure-mgmt-search==2.1.0
- # via azure-mgmt
-azure-mgmt-servicebus==0.5.3
- # via azure-mgmt
-azure-mgmt-servicefabric==0.2.0
- # via azure-mgmt
-azure-mgmt-signalr==0.1.1
- # via azure-mgmt
-azure-mgmt-sql==0.9.1
- # via azure-mgmt
-azure-mgmt-storage==2.0.0
- # via azure-mgmt
-azure-mgmt-subscription==0.2.0
- # via azure-mgmt
-azure-mgmt-trafficmanager==0.50.0
- # via azure-mgmt
-azure-mgmt-web==0.35.0
- # via azure-mgmt
-azure-mgmt==4.0.0
- # via azure
-azure-nspkg==3.0.2
- # via
- # azure-applicationinsights
- # azure-batch
- # azure-cosmosdb-nspkg
- # azure-graphrbac
- # azure-keyvault
- # azure-loganalytics
- # azure-mgmt-nspkg
- # azure-servicebus
- # azure-servicefabric
- # azure-servicemanagement-legacy
-azure-servicebus==0.21.1
- # via azure
-azure-servicefabric==6.3.0.0
- # via azure
-azure-servicemanagement-legacy==0.20.7
- # via azure
-azure-storage-blob==1.5.0
- # via azure
-azure-storage-common==1.4.2
- # via
- # azure-storage-blob
- # azure-storage-file
- # azure-storage-queue
-azure-storage-file==1.4.0
- # via azure
-azure-storage-queue==1.4.0
- # via azure
-azure==4.0.0
+azure-core==1.14.0
+ # via
+ # azure-identity
+ # azure-mgmt-core
+azure-identity==1.6.0
# via -r RO-VIM-azure/requirements.in
-babel==2.9.0
+azure-mgmt-compute==21.0.0
+ # via -r RO-VIM-azure/requirements.in
+azure-mgmt-core==1.2.2
# via
- # os-xenapi
- # sphinx
+ # azure-mgmt-compute
+ # azure-mgmt-network
+ # azure-mgmt-resource
+azure-mgmt-network==19.0.0
+ # via -r RO-VIM-azure/requirements.in
+azure-mgmt-resource==18.0.0
+ # via -r RO-VIM-azure/requirements.in
+babel==2.9.1
+ # via sphinx
bcrypt==3.2.0
# via paramiko
beautifulsoup4==4.9.3
# via webtest
-bitarray==1.9.2
+bitarray==2.1.0
# via pyangbind
boto==2.49.0
# via -r RO-VIM-aws/requirements.in
-cachetools==4.2.1
+cachetools==4.2.2
# via oslo.messaging
-certifi==2020.12.5
+certifi==2021.5.30
# via
# msrest
# requests
cffi==1.14.5
# via
- # azure-datalake-store
# bcrypt
# cryptography
# oslo.privsep
# via cherrypy
cherrypy==18.1.2
# via -r NG-RO/requirements.in
-cliff==3.7.0
+cliff==3.8.0
# via
# osc-lib
# python-designateclient
# via
# -r NG-RO/requirements.in
# adal
- # azure-cosmosdb-table
- # azure-keyvault
- # azure-storage-common
+ # azure-identity
+ # msal
# openstacksdk
# paramiko
+ # pyjwt
# pyopenssl
-cvprac==1.0.5
+cvprac==1.0.6
# via -r RO-SDN-arista_cloudvision/requirements.in
debtcollector==2.2.0
# via
# python-designateclient
# python-keystoneclient
# python-neutronclient
-decorator==5.0.6
+decorator==5.0.9
# via
# dogpile.cache
# neutron
# via pyone
dnspython==1.16.0
# via eventlet
-docutils==0.16
+docutils==0.17.1
# via sphinx
-dogpile.cache==1.1.2
+dogpile.cache==1.1.3
# via
# openstacksdk
# oslo.cache
enum34==1.1.10
# via pyangbind
-eventlet==0.30.2
+eventlet==0.31.0
# via
# neutron
# os-ken
- # os-xenapi
# oslo.privsep
# oslo.service
extras==1.0.0
# neutron
# oslo.messaging
# tooz
-greenlet==1.0.0
+greenlet==1.1.0
# via
# eventlet
# oslo.privsep
# via requests
imagesize==1.2.0
# via sphinx
-importlib-metadata==3.10.1
+importlib-metadata==4.4.0
# via -r NG-RO/requirements.in
iso8601==0.1.14
# via
# via
# cheroot
# tempora
-jinja2==2.11.3
+jinja2==3.0.1
# via
# neutron
# oslo.middleware
# python-keystoneclient
# python-neutronclient
# python-novaclient
-keystonemiddleware==9.2.0
+keystonemiddleware==9.3.0
# via neutron
-kombu==5.0.2
+kombu==5.1.0
# via oslo.messaging
linecache2==1.0.0
# via traceback2
# via
# alembic
# pecan
-markupsafe==1.1.1
+markupsafe==2.0.1
# via
# jinja2
# mako
-more-itertools==8.7.0
+more-itertools==8.8.0
# via
# cheroot
# cherrypy
# jaraco.functools
+msal-extensions==0.3.0
+ # via azure-identity
+msal==1.12.0
+ # via
+ # azure-identity
+ # msal-extensions
msgpack==1.0.2
# via
# os-ken
# tooz
msrest==0.6.21
# via
- # azure-applicationinsights
- # azure-eventgrid
- # azure-keyvault
- # azure-loganalytics
- # azure-mgmt-cdn
+ # -r RO-VIM-azure/requirements.in
# azure-mgmt-compute
- # azure-mgmt-containerinstance
- # azure-mgmt-containerregistry
- # azure-mgmt-containerservice
- # azure-mgmt-dns
- # azure-mgmt-eventhub
- # azure-mgmt-keyvault
- # azure-mgmt-managementpartner
- # azure-mgmt-media
# azure-mgmt-network
- # azure-mgmt-notificationhubs
- # azure-mgmt-rdbms
# azure-mgmt-resource
- # azure-mgmt-search
- # azure-mgmt-servicebus
- # azure-mgmt-servicefabric
- # azure-mgmt-signalr
- # azure-servicefabric
# msrestazure
msrestazure==0.6.4
- # via
- # azure-batch
- # azure-eventgrid
- # azure-graphrbac
- # azure-keyvault
- # azure-mgmt-advisor
- # azure-mgmt-applicationinsights
- # azure-mgmt-authorization
- # azure-mgmt-batch
- # azure-mgmt-batchai
- # azure-mgmt-billing
- # azure-mgmt-cdn
- # azure-mgmt-cognitiveservices
- # azure-mgmt-commerce
- # azure-mgmt-compute
- # azure-mgmt-consumption
- # azure-mgmt-containerinstance
- # azure-mgmt-containerregistry
- # azure-mgmt-containerservice
- # azure-mgmt-cosmosdb
- # azure-mgmt-datafactory
- # azure-mgmt-datalake-analytics
- # azure-mgmt-datalake-store
- # azure-mgmt-datamigration
- # azure-mgmt-devspaces
- # azure-mgmt-devtestlabs
- # azure-mgmt-dns
- # azure-mgmt-eventgrid
- # azure-mgmt-eventhub
- # azure-mgmt-hanaonazure
- # azure-mgmt-iotcentral
- # azure-mgmt-iothub
- # azure-mgmt-iothubprovisioningservices
- # azure-mgmt-keyvault
- # azure-mgmt-loganalytics
- # azure-mgmt-logic
- # azure-mgmt-machinelearningcompute
- # azure-mgmt-managementgroups
- # azure-mgmt-managementpartner
- # azure-mgmt-maps
- # azure-mgmt-marketplaceordering
- # azure-mgmt-media
- # azure-mgmt-monitor
- # azure-mgmt-msi
- # azure-mgmt-network
- # azure-mgmt-notificationhubs
- # azure-mgmt-policyinsights
- # azure-mgmt-powerbiembedded
- # azure-mgmt-rdbms
- # azure-mgmt-recoveryservices
- # azure-mgmt-recoveryservicesbackup
- # azure-mgmt-redis
- # azure-mgmt-relay
- # azure-mgmt-reservations
- # azure-mgmt-resource
- # azure-mgmt-scheduler
- # azure-mgmt-search
- # azure-mgmt-servicebus
- # azure-mgmt-servicefabric
- # azure-mgmt-signalr
- # azure-mgmt-sql
- # azure-mgmt-storage
- # azure-mgmt-subscription
- # azure-mgmt-trafficmanager
- # azure-mgmt-web
+ # via -r RO-VIM-azure/requirements.in
munch==2.5.0
# via openstacksdk
mvar==0.0.1
# osprofiler
# ovsdbapp
# python-neutronclient
-netifaces==0.10.9
+netifaces==0.11.0
# via
# neutron
# openstacksdk
# oslo.utils
networking-l2gw==18.0.0
# via -r RO-VIM-openstack/requirements.in
-neutron-lib==2.10.1
+neutron-lib==2.12.0
# via
# networking-l2gw
# neutron
-neutron==17.1.1
+neutron==18.0.0
# via networking-l2gw
-oauthlib==3.1.0
+oauthlib==3.1.1
# via requests-oauthlib
oca==4.10.0
# via -r RO-VIM-opennebula/requirements.in
-openstacksdk==0.55.0
+openstacksdk==0.57.0
# via
# neutron
# os-client-config
# python-openstackclient
os-client-config==2.1.0
# via python-neutronclient
-os-ken==1.4.0
+os-ken==2.0.0
# via
# neutron
# neutron-lib
# openstacksdk
os-traits==2.5.0
# via neutron-lib
-os-vif==2.4.0
- # via neutron
-os-xenapi==0.3.4
+os-vif==2.5.0
# via neutron
-osc-lib==2.3.1
+osc-lib==2.4.0
# via
# python-designateclient
# python-neutronclient
# python-openstackclient
-oslo.cache==2.7.0
+oslo.cache==2.8.0
# via
# keystonemiddleware
# neutron
# neutron
# neutron-lib
# os-vif
- # os-xenapi
# oslo.service
# oslo.versionedobjects
# osprofiler
-oslo.config==8.5.0
+oslo.config==8.7.0
# via
# keystonemiddleware
# neutron
# oslo.versionedobjects
# pycadf
# python-keystoneclient
-oslo.context==3.2.0
+oslo.context==3.3.0
# via
# keystonemiddleware
# neutron
# oslo.middleware
# oslo.policy
# oslo.versionedobjects
-oslo.db==8.5.0
+oslo.db==9.0.0
# via
# neutron
# neutron-lib
# neutron
# neutron-lib
# os-vif
- # os-xenapi
# osc-lib
# oslo.cache
# oslo.concurrency
# python-neutronclient
# python-novaclient
# python-openstackclient
-oslo.log==4.4.0
+oslo.log==4.5.0
# via
# keystonemiddleware
# neutron
# neutron-lib
# os-vif
- # os-xenapi
# oslo.cache
# oslo.messaging
# oslo.privsep
# oslo.service
# oslo.versionedobjects
# python-neutronclient
-oslo.messaging==12.7.1
+oslo.messaging==12.8.0
# via
# neutron
# neutron-lib
# via
# neutron
# oslo.messaging
-oslo.policy==3.7.0
+oslo.policy==3.8.0
# via
# neutron
# neutron-lib
# neutron
# neutron-lib
# oslo.messaging
-oslo.upgradecheck==1.3.0
+oslo.upgradecheck==1.4.0
# via neutron
-oslo.utils==4.8.0
+oslo.utils==4.9.0
# via
# keystonemiddleware
# neutron
# neutron-lib
- # os-xenapi
# osc-lib
# oslo.cache
# oslo.concurrency
# neutron
# os-ken
# ovsdbapp
-ovsdbapp==1.9.0
+ovsdbapp==1.10.0
# via
# networking-l2gw
# neutron
# os-vif
packaging==20.9
# via
+ # neutron
# oslo.utils
# sphinx
papero==0.2.7
# via
# -r RO-SDN-dpb/requirements.in
# -r RO-plugin/requirements.in
- # os-xenapi
paste==3.5.0
# via
# neutron
# via
# neutron
# oslo.service
-pbr==5.5.1
+pbr==5.6.0
# via
# cliff
# debtcollector
# os-service-types
# os-traits
# os-vif
- # os-xenapi
# osc-lib
# oslo.concurrency
# oslo.context
# via
# neutron
# neutron-lib
+portalocker==1.7.1
+ # via msal-extensions
portend==2.7.1
# via cherrypy
prettytable==0.7.2
# via keystonemiddleware
pycparser==2.20
# via cffi
-pygments==2.8.1
+pygments==2.9.0
# via
# pyvcloud
# sphinx
pyinotify==0.9.6
# via oslo.log
-pyjwt==2.0.1
- # via adal
+pyjwt[crypto]==2.1.0
+ # via
+ # adal
+ # msal
pynacl==1.4.0
# via paramiko
-pyone==6.0.0
+pyone==6.0.2
# via -r RO-VIM-opennebula/requirements.in
pyopenssl==20.0.1
# via
# via cmd2
pyroute2==0.5.18
# via
+ # -r RO-VIM-openstack/requirements.in
# neutron
# os-vif
pyrsistent==0.17.3
# via
# adal
# alembic
- # azure-cosmosdb-table
- # azure-storage-common
# oslo.log
-python-designateclient==4.2.0
+python-designateclient==4.3.0
# via neutron
python-editor==1.0.4
# via alembic
# python-openstackclient
python-mimeparse==1.6.0
# via testtools
-python-neutronclient==7.3.0
+python-neutronclient==7.4.0
# via
# -r RO-VIM-openstack/requirements.in
# networking-l2gw
# neutron
-python-novaclient==17.4.0
+python-novaclient==17.5.0
# via
# -r RO-VIM-openstack/requirements.in
# neutron
# -r RO-VIM-vmware/requirements.in
# -r RO-plugin/requirements.in
# adal
- # azure-cosmosdb-table
- # azure-datalake-store
- # azure-keyvault
- # azure-servicebus
- # azure-servicemanagement-legacy
- # azure-storage-common
+ # azure-core
# cvprac
# keystoneauth1
# keystonemiddleware
+ # msal
# msrest
# neutron
# oslo.config
# sphinx
requestsexceptions==1.4.0
# via openstacksdk
-rfc3986==1.4.0
+rfc3986==1.5.0
# via oslo.config
routes==2.5.1
# via
# osc-lib
# python-cinderclient
# python-neutronclient
-six==1.15.0
+six==1.16.0
# via
+ # azure-core
+ # azure-identity
# bcrypt
# cheroot
# debtcollector
# msrestazure
# munch
# os-ken
- # os-xenapi
# oslo.i18n
# oslo.reports
# oslo.rootwrap
# pyone
# pyopenssl
# python-dateutil
- # python-designateclient
# python-keystoneclient
# pyvmomi
# routes
# sqlalchemy-migrate
# tenacity
# testtools
- # tinyrpc
# unittest2
# warlock
# webtest
snowballstemmer==2.1.0
# via sphinx
-sortedcontainers==2.3.0
+sortedcontainers==2.4.0
# via ovs
soupsieve==2.2.1
# via beautifulsoup4
-sphinx==3.5.4
+sphinx==4.0.2
# via -r RO-VIM-fos/requirements.in
sphinxcontrib-applehelp==1.0.2
# via sphinx
sphinxcontrib-devhelp==1.0.2
# via sphinx
-sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-htmlhelp==2.0.0
# via sphinx
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==1.0.3
# via sphinx
-sphinxcontrib-serializinghtml==1.1.4
+sphinxcontrib-serializinghtml==1.1.5
# via sphinx
sqlalchemy-migrate==0.13.0
# via oslo.db
-sqlalchemy==1.4.7
+sqlalchemy==1.4.17
# via
# alembic
# neutron
# via sqlalchemy-migrate
tempora==4.0.2
# via portend
-tenacity==7.0.0
+tenacity==6.3.1
# via
# neutron
# tooz
# via
# fixtures
# testscenarios
-tinyrpc==1.0.4
- # via os-ken
-tooz==2.8.0
+tooz==2.9.0
# via neutron
traceback2==1.4.0
# via
# unittest2
unittest2==1.1.0
# via testtools
-urllib3==1.26.4
+urllib3==1.26.5
# via requests
uuid==1.30
# via -r RO-SDN-arista_cloudvision/requirements.in
vine==5.0.0
- # via amqp
+ # via
+ # amqp
+ # kombu
voluptuous==0.12.1
# via tooz
waitress==2.0.0