if [[ $PLATFORM == ub16 ]]; then
PLATFORM_REPOSITORY=${1:-OSM}
- PLATFORM_VERSION=${2:-4.3.1.0.49556}
+ PLATFORM_VERSION=${2:-4.3.1.0.50309}
elif [[ $PLATFORM == fc20 ]]; then
PLATFORM_REPOSITORY=${1:-OSM} # change to OSM when published
- PLATFORM_VERSION=${2:-4.3.1.0.49553-1}
+ PLATFORM_VERSION=${2:-4.3.1.0.50310}
else
echo "Internal error: unknown platform $PLATFORM"
exit 1
# Subscribers
from .subscriber.core import AbstractOpdataSubscriber, AbstractConfigSubscriber
from .subscriber.vnf_subscriber import VnfdCatalogSubscriber, VnfrCatalogSubscriber
-from .subscriber.ns_subscriber import NsrCatalogSubscriber, NsdCatalogSubscriber
+from .subscriber.ns_subscriber import (
+ NsrCatalogSubscriber,
+ NsdCatalogSubscriber,
+ NsInstanceConfigSubscriber)
from .subscriber.store import SubscriberStore
from .subscriber.ro_account import ROAccountConfigSubscriber
\ No newline at end of file
def get_xpath(self):
return "C,/nsd:nsd-catalog/nsd:nsd"
+
+
+class NsInstanceConfigSubscriber(core.AbstractConfigSubscriber):
+ """ The ns-instance-config DTS handler """
+
+ def key_name(self):
+ return "id"
+
+ def get_xpath(self):
+ return "C,/nsr:ns-instance-config/nsr:nsr"
#!/usr/bin/env python3
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
node = vdu.guest_epa.numa_node_policy.node.add()
node.id = 0
node.memory_mb = 512
- node.vcpu = [0, 1]
+ vcpu = node.vcpu.add()
+ vcpu.id = 0
+ vcpu = node.vcpu.add()
+ vcpu.id = 1
node = vdu.guest_epa.numa_node_policy.node.add()
node.id = 1
node.memory_mb = 512
- node.vcpu = [2, 3]
+ vcpu = node.vcpu.add()
+ vcpu.id = 2
+ vcpu = node.vcpu.add()
+ vcpu.id = 3
# specify the vswitch EPA
vdu.vswitch_epa.ovs_acceleration = 'DISABLED'
internal_cp.name = cp_name + "/icp{}".format(i)
internal_cp.id = cp_name + "/icp{}".format(i)
internal_cp.type_yang = 'VPORT'
- internal_vlds[i].internal_connection_point_ref.append(internal_cp.id)
+ ivld_cp = internal_vlds[i].internal_connection_point_ref.add()
+ ivld_cp.id_ref = internal_cp.id
internal_interface = vdu.internal_interface.add()
internal_interface.name = 'fab%d' % i
ip_profile.ip_profile_params.ip_version = "ipv4"
ip_profile.ip_profile_params.subnet_address = "31.31.31.0/24"
ip_profile.ip_profile_params.gateway_address = "31.31.31.210"
-
+
vld_id = 1
for cpgroup in cpgroup_list:
vld = nsd.vld.add()
return uuid
+
+ def ns_vim_network_create(self, net_create_yaml_str,datacenter_name):
+ """ Create an Openmano VIM network from input YAML string """
+
+ self._log.debug("Creating VIM network instance: %s, DC %s", net_create_yaml_str,datacenter_name)
+
+ with tempfile.NamedTemporaryFile() as net_create_file_hdl:
+ net_create_file_hdl.write(net_create_yaml_str.encode())
+ net_create_file_hdl.flush()
+
+ try:
+ output_lines = self._openmano_cmd(
+ ["vim-net-create","--datacenter", datacenter_name, net_create_file_hdl.name],
+ expected_lines=1
+ )
+ except OpenmanoCommandFailed as e:
+ raise
+
+ uuid, _ = output_lines[0].split(" ", 1)
+
+ self._log.info("VIM Networks created in DC %s with ID: %s", datacenter_name, uuid)
+
+ return uuid
+
+ def ns_vim_network_delete(self, network_name,datacenter_name):
+ """ Delete an Openmano VIM network with the given name """
+
+ self._log.debug("Deleting VIM network instance: %s, DC %s", network_name,datacenter_name)
+ try:
+ output_lines = self._openmano_cmd(
+ ["vim-net-delete","--datacenter", datacenter_name, network_name],
+ expected_lines=1
+ )
+ except OpenmanoCommandFailed as e:
+ raise
+ self._log.info("VIM Network deleted in DC %s with name: %s", datacenter_name, network_name)
+
+
def ns_instantiate(self, scenario_name, instance_name, datacenter_name=None):
self._log.info(
"Instantiating NS %s using instance name %s",
}
# Add the specific VDU connection points
- for int_cp_ref in vld.internal_connection_point_ref:
- vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp_ref)
+ for int_cp in vld.internal_connection_point:
+ vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp.id_ref)
connection["elements"].append({
"VNFC": vdu.name,
"local_iface_name": int_if.name,
vnfc = {
"name": vdu.name,
"description": vdu.name,
- "numas": [{
- "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1),
- "interfaces":[],
- }],
"bridge-ifaces": [],
}
+ if vdu.vm_flavor.has_field("storage_gb") and vdu.vm_flavor.storage_gb:
+ vnfc["disk"] = vdu.vm_flavor.storage_gb
+
if os.path.isabs(vdu.image):
vnfc["VNFC image"] = vdu.image
else:
if vdu.has_field("image_checksum"):
vnfc["image checksum"] = vdu.image_checksum
- numa_node_policy = vdu.guest_epa.numa_node_policy
- if numa_node_policy.has_field("node"):
- numa_node = numa_node_policy.node[0]
+ if vdu.guest_epa.has_field("numa_node_policy"):
+ vnfc["numas"] = [{
+ "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1),
+ "interfaces":[],
+ }]
+ numa_node_policy = vdu.guest_epa.numa_node_policy
+ if numa_node_policy.has_field("node"):
+ numa_node = numa_node_policy.node[0]
+
+ if numa_node.has_field("paired_threads"):
+ if numa_node.paired_threads.has_field("num_paired_threads"):
+ vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads
+ if len(numa_node.paired_threads.paired_thread_ids) > 0:
+ vnfc["numas"][0]["paired-threads-id"] = []
+ for pair in numa_node.paired_threads.paired_thread_ids:
+ vnfc["numas"][0]["paired-threads-id"].append(
+ [pair.thread_a, pair.thread_b]
+ )
- if numa_node.has_field("paired_threads"):
- if numa_node.paired_threads.has_field("num_paired_threads"):
- vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads
- if len(numa_node.paired_threads.paired_thread_ids) > 0:
- vnfc["numas"][0]["paired-threads-id"] = []
- for pair in numa_node.paired_threads.paired_thread_ids:
- vnfc["numas"][0]["paired-threads-id"].append(
- [pair.thread_a, pair.thread_b]
- )
+ else:
+ if vdu.vm_flavor.has_field("vcpu_count"):
+ vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1)
else:
- if vdu.vm_flavor.has_field("vcpu_count"):
- vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1)
+ if vdu.vm_flavor.has_field("vcpu_count") and vdu.vm_flavor.vcpu_count:
+ vnfc["vcpus"] = vdu.vm_flavor.vcpu_count
+
+ if vdu.vm_flavor.has_field("memory_mb") and vdu.vm_flavor.memory_mb:
+ vnfc["ram"] = vdu.vm_flavor.memory_mb
+
if vdu.has_field("hypervisor_epa"):
vnfc["hypervisor"] = {}
if vdu.host_epa.has_field("om_cpu_feature"):
vnfc["processor"]["features"] = []
for feature in vdu.host_epa.om_cpu_feature:
- vnfc["processor"]["features"].append(feature)
-
+ vnfc["processor"]["features"].append(feature.feature)
- if vdu.vm_flavor.has_field("storage_gb"):
- vnfc["disk"] = vdu.vm_flavor.storage_gb
vnf["VNFC"].append(vnfc)
type uint64;
}
- leaf-list cpu-feature {
- description
- "List of CPU features.";
- type cpu-feature-type;
+ list cpu-feature {
+ key "feature";
+ description "List of CPU features.";
+ leaf feature {
+ description "CPU feature.";
+ type cpu-feature-type;
+ }
}
type string;
}
- leaf-list om-cpu-feature {
- description "Openmano CPU features";
- type string;
+ list om-cpu-feature {
+ key "feature";
+ description "List of openmano CPU features";
+ leaf feature {
+ description "CPU feature";
+ type string;
+ }
}
}
}
type uint64;
}
- leaf-list vcpu {
+ list vcpu {
+ key "id";
description
"List of vcpus to allocate on
this numa node.";
- type uint64;
+ leaf id {
+ type uint64;
+ description "List of vcpus ids to allocate on
+ this numa node";
+ }
}
leaf memory-mb {
type uint64;
}
- leaf-list internal-connection-point-ref {
- type leafref {
- path "../../vdu/internal-connection-point/id";
+ list internal-connection-point {
+ key "id-ref";
+ description "List of internal connection points in this VLD";
+ leaf id-ref {
+ description "reference to the internal connection point id";
+ type leafref {
+ path "../../../vdu/internal-connection-point/id";
+ }
}
}
-
uses manotypes:provider-network;
}
if guest_epa.numa_node_policy.has_field('node'):
for node in guest_epa.numa_node_policy.node:
if node.has_field('vcpu') and node.vcpu:
- epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j) for j in node.vcpu])
+ epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j.id) for j in node.vcpu])
if node.memory_mb:
epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
cpu_features = []
espec_cpu_features = []
for feature in host_epa.cpu_feature:
- cpu_features.append(feature)
+ cpu_features.append(feature.feature)
espec_cpu_features = espec_utils.host.mano_to_extra_spec_cpu_features(cpu_features)
if espec_cpu_features is not None:
epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
numa_node.id = int(node_id)
- numa_node.vcpu = [ int(x) for x in flavor_info['extra_specs'][attr].split(',') ]
+ for x in flavor_info['extra_specs'][attr].split(','):
+ numa_node_vcpu = numa_node.vcpu.add()
+ numa_node_vcpu.id = int(x)
elif attr.startswith('hw:numa_mem.'):
node_id = attr.split('.')[1]
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
logger = logging.getLogger('rwcal-openstack')
#
-# Important information about openstack installation. This needs to be manually verified
+# Important information about openstack installation. This needs to be manually verified
#
openstack_info = {
'username' : 'pluto',
logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
else:
logger.info("Openstack Cal plugin successfully instantiated")
- return cal
+ return cal
class OpenStackTest(unittest.TestCase):
HostTrust = "trusted"
PCIPassThroughAlias = "PCI_10G_ALIAS"
SEG_ID = openstack_info['segmentation_id']
-
+
def setUp(self):
"""
Assumption:
logger.info("Openstack-CAL-Test: setUp")
self.cal = get_cal_plugin()
logger.info("Openstack-CAL-Test: setUpEND")
-
+
# First check for VM Flavor and Image and get the corresponding IDs
rc, rs = self.cal.get_flavor_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
for network in networks:
self.cal.delete_virtual_link(self._acct, network.network_id)
-
+
def tearDown(self):
logger.info("Openstack-CAL-Test: tearDown")
-
+
def _md5(fname, blksize=1048576):
hash_md5 = hashlib.md5()
hash_md5.update(chunk)
return hash_md5.hexdigest()
- @unittest.skip("Skipping test_list_flavors")
+ @unittest.skip("Skipping test_list_flavors")
def test_list_flavor(self):
"""
List existing flavors from openstack installation
rc, flv = self.cal.get_flavor(self._acct, flavor.id)
self.assertEqual(rc, RwStatus.SUCCESS)
self.assertEqual(flavor.id, flv.id)
-
- @unittest.skip("Skipping test_list_images")
+
+ @unittest.skip("Skipping test_list_images")
def test_list_images(self):
"""
List existing images from openstack installation
# rc, img = self.cal.get_image(self._acct, image.id)
# self.assertEqual(rc, RwStatus.SUCCESS)
# self.assertEqual(image.id, img.id)
-
- @unittest.skip("Skipping test_list_vms")
+
+ @unittest.skip("Skipping test_list_vms")
def test_list_vms(self):
"""
List existing VMs from openstack installation
for vm in rsp.vminfo_list:
rc, server = self.cal.get_vm(self._acct, vm.vm_id)
self.assertEqual(vm.vm_id, server.vm_id)
-
- @unittest.skip("Skipping test_list_networks")
+
+ @unittest.skip("Skipping test_list_networks")
def test_list_networks(self):
"""
List existing Network from openstack installation
for network in rsp.networkinfo_list:
rc, net = self.cal.get_network(self._acct, network.network_id)
self.assertEqual(network.network_id, net.network_id)
-
- @unittest.skip("Skipping test_list_ports")
+
+ @unittest.skip("Skipping test_list_ports")
def test_list_ports(self):
"""
List existing Ports from openstack installation
else:
time.sleep(2) # Sleep for a second
return rs
-
- @unittest.skip("Skipping test_create_delete_image")
+
+ @unittest.skip("Skipping test_create_delete_image")
def test_create_delete_image(self):
"""
Create/Query/Delete a new image in openstack installation
flavor = RwcalYang.FlavorInfoItem()
flavor.name = 'rift.cal.unittest.flavor'
flavor.vm_flavor.memory_mb = 16384 # 16GB
- flavor.vm_flavor.vcpu_count = 4
+ flavor.vm_flavor.vcpu_count = 4
flavor.vm_flavor.storage_gb = 40 # 40GB
flavor.guest_epa.mempage_size = OpenStackTest.MemoryPageSize
flavor.guest_epa.cpu_pinning_policy = OpenStackTest.CpuPolicy
node = flavor.guest_epa.numa_node_policy.node.add()
node.id = i
if i == 0:
- node.vcpu = [0,1]
+ vcpu = node.vcpu.add()
+ vcpu.id = 0
+ vcpu = node.vcpu.add()
+ vcpu.id = 1
elif i == 1:
- node.vcpu = [2,3]
+ vcpu = node.vcpu.add()
+ vcpu.id = 2
+ vcpu = node.vcpu.add()
+ vcpu.id = 3
node.memory_mb = 8196
dev = flavor.guest_epa.pcie_device.add()
dev.device_id = OpenStackTest.PCIPassThroughAlias
dev.count = 1
return flavor
-
- @unittest.skip("Skipping test_create_delete_flavor")
+
+ @unittest.skip("Skipping test_create_delete_flavor")
def test_create_delete_flavor(self):
"""
Create/Query/Delete a new flavor in openstack installation
if flavor_list:
rc = self.cal.delete_flavor(self._acct, flavor_list[0].id)
self.assertEqual(rc, RwStatus.SUCCESS)
-
+
flavor = self._get_flavor_info_request()
rc, flavor_id = self.cal.create_flavor(self._acct, flavor)
self.assertEqual(rc, RwStatus.SUCCESS)
-
+
logger.info("Openstack-CAL-Test: Created new flavor with flavor_id : %s" %(flavor_id))
rc, rs = self.cal.get_flavor(self._acct, flavor_id)
self.assertEqual(rc, RwStatus.SUCCESS)
def _check_vm_state(self, vm_id, expected_state):
"""
- Wait until VM reaches particular state (expected_state).
+ Wait until VM reaches particular state (expected_state).
"""
# Wait while VM goes to required state
if port_list:
for port_id in port_list:
port = vm.port_list.add()
- port.port_id = port_id
+ port.port_id = port_id
rc, vm_id = self.cal.create_vm(self._acct, vm)
self.assertEqual(rc, RwStatus.SUCCESS)
vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
if not len(vm_list):
break
-
+
rc, rs = self.cal.get_vm_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
self.assertEqual(rc, RwStatus.SUCCESS)
### Ensure that VM state is SHUTOFF
self._check_vm_state(vm_id, 'SHUTOFF')
-
-
+
+
def _start_vm(self, vm_id):
"""
Starts VM and performs validity checks
### Ensure that VM state is ACTIVE
self._check_vm_state(vm_id, 'ACTIVE')
-
+
def _reboot_vm(self, vm_id):
"""
Reboot VM and perform validity checks
logger.info("Openstack-CAL-Test: Starting VM(EPA) create/delete test")
flavor = self._get_flavor_info_request()
-
+
rc, flavor_id = self.cal.do_create_flavor(self._acct, flavor)
self.assertEqual(rc, RwStatus.SUCCESS)
flavor.id = flavor_id
flavors = nova.flavor_list()
self.assertTrue(len(flavors) > 1)
- @unittest.skip("Skipping test_vm_operations")
+ @unittest.skip("Skipping test_vm_operations")
def test_vm_operations(self):
"""
Primary goal: Create/Query/Delete VM in openstack installation.
### Delete the VM
self._delete_vm(vm_id)
-
+
def _get_network_info_request(self):
"""
Returns request object of type RwcalYang.NetworkInfoItem
logger.info("Openstack-CAL-Test: Deleting a network with id : %s. " %(net_id))
rc = self.cal.delete_network(self._acct, net_id)
self.assertEqual(rc, RwStatus.SUCCESS)
-
+
# Verify that network is no longer available via get_network_list API
rc, rs = self.cal.get_network_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
network_info = [ network for network in rs.networkinfo_list if network.network_id == net_id ]
self.assertEqual(len(network_info), 0)
logger.info("Openstack-CAL-Test: Successfully deleted Network with id : %s" %(net_id))
-
-
- @unittest.skip("Skipping test_network_operations")
+
+
+ @unittest.skip("Skipping test_network_operations")
def test_network_operations(self):
"""
Create/Delete Networks
### Delete Port
self.cal.delete_port(self._acct, port_id)
-
+
rc, rs = self.cal.get_port_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
port_list = [ port for port in rs.portinfo_list if port.port_id == port_id ]
self.assertEqual(rc, RwStatus.SUCCESS)
self.assertEqual(rs.port_state, expected_state)
logger.info("Openstack-CAL-Test: Port with port_id : %s reached expected state : %s" %(port_id, rs.port_state))
-
+
@unittest.skip("Skipping test_port_operations_with_vm")
def test_port_operations_with_vm(self):
"""
### Delete VM
self._delete_vm(vm_id)
-
+
### Delete Port
self._delete_port(port_id)
### Delete VM
self._delete_vm(vm_id)
-
+
### Delete Port
self._delete_port(port_id)
vlink.provider_network.segmentation_id = OpenStackTest.SEG_ID
OpenStackTest.SEG_ID += 1
return vlink
-
+
def _get_vdu_request_info(self, virtual_link_id):
"""
Returns object of type RwcalYang.VDUInitParams
c1 = vdu.connection_points_add.add()
c1.name = "c_modify1"
c1.virtual_link_id = virtual_link_id
-
- return vdu
-
+
+ return vdu
+
#@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
def test_create_delete_virtual_link_and_vdu(self):
"""
self.assertEqual(rc, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
vlink_id = rsp
-
+
#Check if virtual_link create is successful
rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
self.assertEqual(rc, RwStatus.SUCCESS)
self.assertEqual(rs.state, 'active')
logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state : %s" %(vdu_id, rs.state))
logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rs))
-
+
vlink_req = self._get_virtual_link_request_info()
### Create another virtual_link
self._loop,
callback=self.on_ro_account_change
)
+ self._nsr_sub = mano_dts.NsrCatalogSubscriber(
+ self._log,
+ self._dts,
+ self._loop,
+ self.handle_nsr)
# The default plugin will be RwNsPlugin
- self._plugin_instances = {}
self._ro_plugin = self._create_plugin(self.DEFAULT_PLUGIN, None)
+ self.live_instances = 0
@property
def ro_plugin(self):
return self._ro_plugin
- def on_ro_account_change(self, ro_account, action):
+ def handle_nsr(self, nsr, action):
if action == rwdts.QueryAction.CREATE:
- self._on_ro_account_added(ro_account)
+ self.live_instances += 1
+ elif action == rwdts.QueryAction.DELETE:
+ self.live_instances -= 1
+
+ def on_ro_account_change(self, ro_account, action):
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ self._on_ro_account_change(ro_account)
elif action == rwdts.QueryAction.DELETE:
self._on_ro_account_deleted(ro_account)
- def _on_ro_account_added(self, ro_account):
+ def _on_ro_account_change(self, ro_account):
self._log.debug("Got nsm plugin RO account: %s", ro_account)
try:
nsm_cls = self._nsm_plugins.class_by_plugin_name(
)
nsm_cls = self.DEFAULT_PLUGIN
- self._ro_plugin = self._create_plugin(nsm_cls, ro_account)
+ ro_plugin = self._create_plugin(nsm_cls, ro_account)
+ if self.live_instances == 0:
+ self._ro_plugin = ro_plugin
+ else:
+ raise ValueError("Unable to change the plugin when live NS instances exists!")
def _on_ro_account_deleted(self, ro_account):
self._ro_plugin = None
def _create_plugin(self, nsm_cls, ro_account):
- # Check to see if the plugin was already instantiated
- if nsm_cls in self._plugin_instances:
- self._log.debug("RO account nsm plugin already instantiated. Using existing.")
- return self._plugin_instances[nsm_cls]
- # Otherwise, instantiate a new plugin using the cloud account
self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
nsm_instance = nsm_cls(self._dts, self._log, self._loop,
self._records_publisher, ro_account)
- self._plugin_instances[nsm_cls] = nsm_instance
return nsm_instance
@asyncio.coroutine
def register(self):
yield from self._ro_sub.register()
+ yield from self._nsr_sub.register()
import rift.openmano.rift2openmano as rift2openmano
import rift.openmano.openmano_client as openmano_client
from . import rwnsmplugin
+from enum import Enum
+
import rift.tasklets
)
+class OpenmanoNSRecordState(Enum):
+ """ Network Service Record State """
+ INIT = 101
+ INSTANTIATION_PENDING = 102
+ RUNNING = 103
+ SCALING_OUT = 104
+ SCALING_IN = 105
+ TERMINATE = 106
+ TERMINATE_RCVD = 107
+ TERMINATED = 108
+ FAILED = 109
+ VL_INSTANTIATE = 110
+ VL_TERMINATE = 111
+
+
class OpenmanoNsr(object):
TIMEOUT_SECS = 300
self._created = False
self._monitor_task = None
+ self._state = OpenmanoNSRecordState.INIT
@property
def nsd(self):
self._vlrs.append(vlr)
yield from asyncio.sleep(1, loop=self._loop)
+ @asyncio.coroutine
+ def remove_vlr(self, vlr):
+ if vlr in self._vlrs:
+ self._vlrs.remove(vlr)
+ if not vlr.vld_msg.vim_network_name:
+ yield from self._loop.run_in_executor(
+ None,
+ self._cli_api.ns_vim_network_delete,
+ vlr.name,
+ vlr.om_datacenter_name)
+ yield from asyncio.sleep(1, loop=self._loop)
+
@asyncio.coroutine
def add_vnfr(self, vnfr):
vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr)
self._log.debug("Found VNF status: %s", vnf_status)
if vnf_status is None:
self._log.error("Could not find VNF status from openmano")
+ self._state = OpenmanoNSRecordState.FAILED
vnfr_msg.operational_status = "failed"
yield from self._publisher.publish_vnfr(None, vnfr_msg)
return
# If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring.
if any_vms_error(vnf_status):
self._log.debug("VM was found to be in error state. Marking as failed.")
+ self._state = OpenmanoNSRecordState.FAILED
vnfr_msg.operational_status = "failed"
yield from self._publisher.publish_vnfr(None, vnfr_msg)
return
if (time.time() - start_time) > OpenmanoNsr.TIMEOUT_SECS:
self._log.error("NSR timed out before reaching running state")
+ self._state = OpenmanoNSRecordState.FAILED
vnfr_msg.operational_status = "failed"
yield from self._publisher.publish_vnfr(None, vnfr_msg)
return
except Exception as e:
vnfr_msg.operational_status = "failed"
+ self._state = OpenmanoNSRecordState.FAILED
yield from self._publisher.publish_vnfr(None, vnfr_msg)
self._log.exception("Caught exception publishing vnfr info: %s", str(e))
return
if len(active_vnfs) == len(self._vnfrs):
+ self._state = OpenmanoNSRecordState.RUNNING
self._log.info("All VNF's are active. Exiting NSR monitoring task")
return
self._cli_api.ns_instance_scenario_create,
self.openmano_instance_create_yaml)
+ self._state = OpenmanoNSRecordState.INSTANTIATION_PENDING
self._monitor_task = asyncio.ensure_future(
self.instance_monitor_task(), loop=self._loop
self._nsr_uuid,
)
+ @asyncio.coroutine
+ def create_vlr(self,vlr):
+ self._log.debug("Creating openmano vim network VLR name %s, VLR DC: %s",vlr.vld_msg.name,
+ vlr.om_datacenter_name)
+ net_create = {}
+ net = {}
+ net['name'] = vlr.name
+ net['shared'] = True
+ net['type'] = 'bridge'
+ self._log.debug("Received ip profile is %s",vlr._ip_profile)
+ if vlr._ip_profile and vlr._ip_profile.has_field("ip_profile_params"):
+ ip_profile_params = vlr._ip_profile.ip_profile_params
+ ip_profile = {}
+ if ip_profile_params.ip_version == "ipv6":
+ ip_profile['ip_version'] = "IPv6"
+ else:
+ ip_profile['ip_version'] = "IPv4"
+ if ip_profile_params.has_field('subnet_address'):
+ ip_profile['subnet_address'] = ip_profile_params.subnet_address
+ if ip_profile_params.has_field('gateway_address'):
+ ip_profile['gateway_address'] = ip_profile_params.gateway_address
+ if ip_profile_params.has_field('dns_server') and len(ip_profile_params.dns_server) > 0:
+ ip_profile['dns_address'] = ip_profile_params.dns_server[0].address
+ if ip_profile_params.has_field('dhcp_params'):
+ ip_profile['dhcp_enabled'] = ip_profile_params.dhcp_params.enabled
+ ip_profile['dhcp_start_address'] = ip_profile_params.dhcp_params.start_address
+ ip_profile['dhcp_count'] = ip_profile_params.dhcp_params.count
+ net['ip_profile'] = ip_profile
+ net_create["network"]= net
+
+ net_create_msg = yaml.safe_dump(net_create,default_flow_style=False)
+ fpath = dump_openmano_descriptor(
+ "{}_vim_net_create_{}".format(self._nsr_config_msg.name,vlr.name),
+ net_create_msg)
+ self._log.debug("Dumped Openmano VIM Net create to: %s", fpath)
+
+ vim_network_uuid = yield from self._loop.run_in_executor(
+ None,
+ self._cli_api.ns_vim_network_create,
+ net_create_msg,
+ vlr.om_datacenter_name)
+ self._vlrs.append(vlr)
+
+
class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase):
"""
"""
self._log.debug("Received instantiate VL for NSR {}; VLR {}".format(nsr.id,vlr))
openmano_nsr = self._openmano_nsrs[nsr.id]
- yield from openmano_nsr.add_vlr(vlr)
+ if openmano_nsr._state == OpenmanoNSRecordState.RUNNING:
+ yield from openmano_nsr.create_vlr(vlr)
+ else:
+ yield from openmano_nsr.add_vlr(vlr)
@asyncio.coroutine
def terminate_ns(self, nsr):
"""
Terminate the virtual link
"""
- pass
+ self._log.debug("Received terminate VL for VLR {}".format(vlr))
+ openmano_nsr = self._openmano_nsrs[vlr._nsr_id]
+ yield from openmano_nsr.remove_vlr(vlr)
+
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
self._vlr_id = str(uuid.uuid4())
self._state = VlRecordState.INIT
self._prev_state = None
-
+
@property
def xpath(self):
""" path for this object """
self._vnffgrs[vnffgr.id] = vnffgr
def resolve_vld_ip_profile(self, nsd_msg, vld):
+ self._log.debug("Receieved ip profile ref is %s",vld.ip_profile_ref)
if not vld.has_field('ip_profile_ref'):
return None
- profile = [ profile for profile in nsd_msg.ip_profiles if profile.name == vld.ip_profile_ref ]
+ profile = [profile for profile in nsd_msg.ip_profiles if profile.name == vld.ip_profile_ref]
return profile[0] if profile else None
@asyncio.coroutine
def _create_vls(self, vld, cloud_account,om_datacenter):
"""Create a VLR in the cloud account specified using the given VLD
-
+
Args:
vld : VLD yang obj
cloud_account : Cloud account name
-
+
Returns:
VirtualLinkRecord
"""
if vlr is None:
cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
- for account in cloud_account_list:
- vlr = yield from self._create_vls(vld, account)
+ for account,om_datacenter in cloud_account_list:
+ vlr = yield from self._create_vls(vld, account,om_datacenter)
self._vlrs.append(vlr)
vlr.state = VlRecordState.INSTANTIATION_PENDING
rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
"nsr_id":str(uuid.uuid4())
})
-
+
if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
self._log.error("Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".format(rpc_ip))
-
+
self._log.debug("start-network-service RPC input: {}".format(rpc_ip))
try:
# Add used value to the pool
self._log.debug("RPC output: {}".format(rpc_op))
-
+
nsd_copy = self.nsm.get_nsd(rpc_ip.nsd_ref)
#if not self._manager:
# self._manager = yield from self._connect()
-
+
self._log.debug("Configuring ns-instance-config with name %s nsd-ref: %s",
rpc_ip.name, rpc_ip.nsd_ref)
def get_nsr_key_pairs(dts_member_reg, xact):
key_pairs = {}
for instance_cfg, keyspec in dts_member_reg.get_xact_elements(xact, include_keyspec=True):
- self._log.debug("Key pair received is {} KS: {}".format(instance_cfg, keyspec))
+ self._log.debug("Key pair received is {} KS: {}".format(instance_cfg, keyspec))
xpath = keyspec.to_xpath(RwNsrYang.get_schema())
key_pairs[instance_cfg.name] = instance_cfg
- return key_pairs
+ return key_pairs
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
# msg.nsr_id_ref,
# msg.scaling_group_name_ref,
# msg.instance_id)
-
+
def nsr_update_cfg(self, nsr_id, msg):
nsr = self._nsrs[nsr_id]
nsr.nsr_cfg_msg= msg
self.create_nsd(nsd)
else:
self._log.debug("Updating NSD id = %s, nsd = %s", nsd.id, nsd)
- self._nsds[nsd.id].update(nsd)
+ self._nsds[nsd.id].update(nsd)
def delete_nsd(self, nsd_id):
""" Delete the Network service descriptor with the passed id """
""" Update the virtual network function descriptor """
self._log.debug("Update virtual network function descriptor- %s", vnfd)
- # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
- for ivld in vnfd.internal_vld:
- ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
if vnfd.id not in self._vnfds:
self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
if (vlr.has_field('ip_profile_params')) and (vlr.ip_profile_params.has_field('security_group')):
cp_info['security_group'] = vlr.ip_profile_params.security_group
-
+
cp_list.append(cp_info)
for intf, cp, vlr in self._int_intf:
for ivld_msg in self.vnfd.msg.internal_vld:
self._log.debug("Creating internal vld:"
" %s, int_cp_ref = %s",
- ivld_msg, ivld_msg.internal_connection_point_ref
+ ivld_msg, ivld_msg.internal_connection_point
)
vlr = InternalVirtualLinkRecord(dts=self._dts,
log=self._log,
)
self._vlrs.append(vlr)
- for int_cp in ivld_msg.internal_connection_point_ref:
- if int_cp in self._vlr_by_cp:
+ for int_cp in ivld_msg.internal_connection_point:
+ if int_cp.id_ref in self._vlr_by_cp:
msg = ("Connection point %s already "
- " bound %s" % (int_cp, self._vlr_by_cp[int_cp]))
+ " bound %s" % (int_cp.id_ref, self._vlr_by_cp[int_cp.id_ref]))
raise InternalVirtualLinkRecordError(msg)
self._log.debug("Setting vlr %s to internal cp = %s",
- vlr, int_cp)
- self._vlr_by_cp[int_cp] = vlr
+ vlr, int_cp.id_ref)
+ self._vlr_by_cp[int_cp.id_ref] = vlr
@asyncio.coroutine
def instantiate_vls(self, xact, restart_mode=False):
if not vdur._state == VDURecordState.READY:
self._log.debug("VDUR state is not READY. current state is {}".format(vdur._state))
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
- return
+ return
with self._dts.transaction() as new_xact:
resp = yield from vdur.read_resource(new_xact)
vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
-
+
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
xpath=self.vnfr_vdu_console_xpath,
msg=vdur_console)
#raise VnfRecordError("Not supported operation %s" % action)
self._log.error("Not supported operation %s" % action)
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
- return
-
+ return
+
self._log.debug("Registering for VNFR VDU using xpath: %s",
self.vnfr_vdu_console_xpath)
""" update the Virtual Network Function descriptor """
self._log.debug("Update virtual network function descriptor - %s", vnfd)
- # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
- for ivld in vnfd.internal_vld:
- ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
-
if vnfd.id not in self._vnfds:
self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
self.create_vnfd(vnfd)
${CMAKE_CURRENT_SOURCE_DIR}/utest_rwnsm.py
)
+rift_py3test(utest_ro_account
+ TEST_ARGS
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_ro_account.py
+ )
+
rift_py3test(tosca_ut
TEST_ARGS
${CMAKE_CURRENT_SOURCE_DIR}/tosca_ut.py
mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
{'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
- # Test rift-ro plugin
+ # Test rift-ro plugin CREATE
w_xpath = "C,/rw-launchpad:resource-orchestrator"
xpath = w_xpath
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
assert type(orch.ro_plugin) is cloud.RwNsPlugin
- # Test Openmano plugin
+ # Test Openmano plugin CREATE
mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
{'name': 'openmano',
'account_type': 'openmano',
- 'openmano': {'tenant_id': "abc"}})
+ 'openmano': {'tenant_id': "abc",
+ "port": 9999,
+ "host": "10.64.11.77"}})
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
yield from asyncio.sleep(5, loop=self.loop)
- print (type(orch.ro_plugin))
assert type(orch.ro_plugin) is openmano_nsm.OpenmanoNsPlugin
+ assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
+ assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+
+ # Test update
+ mock_orch_acc.openmano.port = 9789
+ mock_orch_acc.openmano.host = "10.64.11.78"
+ yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ rwdts.XactFlag.ADVISE, mock_orch_acc)
+ assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
+ assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+
+ # Test update when a live instance exists
+ # Exception should be thrown
+ orch.handle_nsr(None, rwdts.QueryAction.CREATE)
+ mock_orch_acc.openmano.port = 9788
+
+ with self.assertRaises(Exception):
+ yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ rwdts.XactFlag.ADVISE, mock_orch_acc)
# Test delete
yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",