VNFFG support
This brings VNFFG support to OSM, together with making use of the VIM
connector's SFC interface (see https://osm.etsi.org/gerrit/#/c/2065).
Change-Id: Ia064a9da4aaf62f9f81d3b7f6ac62ea4667864d6
Signed-off-by: Igor D.C <igor.duarte.cardoso@intel.com>
diff --git a/osm_ro/nfvo.py b/osm_ro/nfvo.py
index 5c0fefe..f5ffe09 100644
--- a/osm_ro/nfvo.py
+++ b/osm_ro/nfvo.py
@@ -2107,7 +2107,7 @@
:param mydb:
:param tenant_id:
:param nsd_descriptor:
- :return: The list of cretated NSD ids
+ :return: The list of created NSD ids
"""
try:
mynsd = nsd_catalog.nsd()
@@ -2119,6 +2119,11 @@
db_sce_nets = []
db_sce_vnfs = []
db_sce_interfaces = []
+ db_sce_vnffgs = []
+ db_sce_rsps = []
+ db_sce_rsp_hops = []
+ db_sce_classifiers = []
+ db_sce_classifier_matches = []
db_ip_profiles = []
db_ip_profiles_index = 0
uuid_list = []
@@ -2126,7 +2131,7 @@
for nsd_yang in mynsd.nsd_catalog.nsd.itervalues():
nsd = nsd_yang.get()
- # table sceanrios
+ # table scenarios
scenario_uuid = str(uuid4())
uuid_list.append(scenario_uuid)
nsd_uuid_list.append(scenario_uuid)
@@ -2261,15 +2266,141 @@
if not db_sce_net["type"]:
db_sce_net["type"] = "bridge"
+ # table sce_vnffgs (vnffgd)
+ for vnffg in nsd.get("vnffgd").itervalues():
+ sce_vnffg_uuid = str(uuid4())
+ uuid_list.append(sce_vnffg_uuid)
+ db_sce_vnffg = {
+ "uuid": sce_vnffg_uuid,
+ "name": get_str(vnffg, "name", 255),
+ "scenario_id": scenario_uuid,
+ "vendor": get_str(vnffg, "vendor", 255),
+                "description": get_str(vnffg, "description", 255),
+ }
+ db_sce_vnffgs.append(db_sce_vnffg)
+
+ # deal with rsps
+ db_sce_rsps = []
+ for rsp in vnffg.get("rsp").itervalues():
+ sce_rsp_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_uuid)
+ db_sce_rsp = {
+ "uuid": sce_rsp_uuid,
+ "name": get_str(rsp, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
+ }
+ db_sce_rsps.append(db_sce_rsp)
+ db_sce_rsp_hops = []
+ for iface in rsp.get("vnfd-connection-point-ref").itervalues():
+ vnf_index = int(iface['member-vnf-index-ref'])
+ if_order = int(iface['order'])
+ # check correct parameters
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
+ HTTP_Bad_Request)
+
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]),
+ HTTP_Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+ sce_rsp_hop_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_hop_uuid)
+ db_sce_rsp_hop = {
+ "uuid": sce_rsp_hop_uuid,
+ "if_order": if_order,
+ "interface_id": interface_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "sce_rsp_id": sce_rsp_uuid,
+ }
+ db_sce_rsp_hops.append(db_sce_rsp_hop)
+
+ # deal with classifiers
+ db_sce_classifiers = []
+ for classifier in vnffg.get("classifier").itervalues():
+ sce_classifier_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_uuid)
+
+ # source VNF
+ vnf_index = int(classifier['member-vnf-index-ref'])
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
+ HTTP_Bad_Request)
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(classifier, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+                    raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+                                        "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+                                        "connection-point name at VNFD '{}'".format(
+                        str(nsd["id"]), str(classifier["id"]), str(classifier["vnfd-connection-point-ref"]),
+                        str(classifier.get("vnfd-id-ref"))[:255]),
+                        HTTP_Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+
+ db_sce_classifier = {
+ "uuid": sce_classifier_uuid,
+ "name": get_str(classifier, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "interface_id": interface_uuid,
+ }
+ rsp_id = get_str(classifier, "rsp-id-ref", 255)
+ rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
+ db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
+ db_sce_classifiers.append(db_sce_classifier)
+
+ db_sce_classifier_matches = []
+ for match in classifier.get("match-attributes").itervalues():
+ sce_classifier_match_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_match_uuid)
+ db_sce_classifier_match = {
+ "uuid": sce_classifier_match_uuid,
+ "ip_proto": get_str(match, "ip-proto", 2),
+ "source_ip": get_str(match, "source-ip-address", 16),
+ "destination_ip": get_str(match, "destination-ip-address", 16),
+ "source_port": get_str(match, "source-port", 5),
+ "destination_port": get_str(match, "destination-port", 5),
+ "sce_classifier_id": sce_classifier_uuid,
+ }
+ db_sce_classifier_matches.append(db_sce_classifier_match)
+ # TODO: vnf/cp keys
+
+ # remove unneeded id's in sce_rsps
+ for rsp in db_sce_rsps:
+ rsp.pop('id')
+
db_tables = [
{"scenarios": db_scenarios},
{"sce_nets": db_sce_nets},
{"ip_profiles": db_ip_profiles},
{"sce_vnfs": db_sce_vnfs},
{"sce_interfaces": db_sce_interfaces},
+ {"sce_vnffgs": db_sce_vnffgs},
+ {"sce_rsps": db_sce_rsps},
+ {"sce_rsp_hops": db_sce_rsp_hops},
+ {"sce_classifiers": db_sce_classifiers},
+ {"sce_classifier_matches": db_sce_classifier_matches},
]
- logger.debug("create_vnf Deployment done vnfDict: %s",
+ logger.debug("new_nsd_v3 done: %s",
yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
mydb.new_rows(db_tables, uuid_list)
return nsd_uuid_list
@@ -2694,7 +2825,6 @@
myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
# myvim_tenant = myvim['tenant_id']
-
rollbackList=[]
# print "Checking that the scenario exists and getting the scenario dictionary"
@@ -2708,6 +2838,10 @@
db_instance_vnfs = []
db_instance_vms = []
db_instance_interfaces = []
+ db_instance_sfis = []
+ db_instance_sfs = []
+ db_instance_classifications = []
+ db_instance_sfps = []
db_ip_profiles = []
db_vim_actions = []
uuid_list = []
@@ -3244,6 +3378,157 @@
task_index += 1
db_vim_actions.append(db_vim_action)
+ task_depends_on = []
+ for vnffg in scenarioDict['vnffgs']:
+ for rsp in vnffg['rsps']:
+ sfs_created = []
+ for cp in rsp['connection_points']:
+ count = mydb.get_rows(
+                        SELECT=('vms.count',),
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h on interfaces.uuid=h.interface_id",
+ WHERE={'h.uuid': cp['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ sfis_created = []
+ for i in range(count):
+ # create sfis
+ sfi_uuid = str(uuid4())
+ uuid_list.append(sfi_uuid)
+ db_sfi = {
+ "uuid": sfi_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfi_id": None, # vim thread will populate
+ }
+ db_instance_sfis.append(db_sfi)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfis",
+ "item_id": sfi_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ sfis_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ # create sfs
+ sf_uuid = str(uuid4())
+ uuid_list.append(sf_uuid)
+ db_sf = {
+ "uuid": sf_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sf_id": None, # vim thread will populate
+ }
+ db_instance_sfs.append(db_sf)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfs",
+ "item_id": sf_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+ default_flow_style=True, width=256)
+ }
+ sfs_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ classifier = rsp['classifier']
+
+ # TODO the following ~13 lines can be reused for the sfi case
+ count = mydb.get_rows(
+                    SELECT=('vms.count',),
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+ WHERE={'c.uuid': classifier['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ classifications_created = []
+ for i in range(count):
+ for match in classifier['matches']:
+ # create classifications
+ classification_uuid = str(uuid4())
+ uuid_list.append(classification_uuid)
+ db_classification = {
+ "uuid": classification_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_classifier_match_id': match['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_classification_id": None, # vim thread will populate
+ }
+ db_instance_classifications.append(db_classification)
+ classification_params = {
+ "ip_proto": match["ip_proto"],
+ "source_ip": match["source_ip"],
+ "destination_ip": match["destination_ip"],
+ "source_port": match["source_port"],
+ "destination_port": match["destination_port"]
+ }
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_classifications",
+ "item_id": classification_uuid,
+ "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ classifications_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ # create sfps
+ sfp_uuid = str(uuid4())
+ uuid_list.append(sfp_uuid)
+ db_sfp = {
+ "uuid": sfp_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_id': rsp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfp_id": None, # vim thread will populate
+ }
+ db_instance_sfps.append(db_sfp)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfps",
+ "item_id": sfp_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+ default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
scenarioDict["datacenter2tenant"] = myvim_threads_id
db_instance_action["number_tasks"] = task_index
@@ -3257,6 +3542,10 @@
{"instance_vms": db_instance_vms},
{"instance_interfaces": db_instance_interfaces},
{"instance_actions": db_instance_action},
+ {"instance_sfis": db_instance_sfis},
+ {"instance_sfs": db_instance_sfs},
+ {"instance_classifications": db_instance_classifications},
+ {"instance_sfps": db_instance_sfps},
{"vim_actions": db_vim_actions}
]
@@ -3288,7 +3577,6 @@
# print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
tenant_id = instanceDict["tenant_id"]
# print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-
# 1. Delete from Database
message = mydb.delete_instance_scenario(instance_id, tenant_id)
@@ -3397,6 +3685,156 @@
task_index += 1
db_vim_actions.append(db_vim_action)
+ # 2.3 deleting VNFFGs
+
+ for sfp in instanceDict['sfps']:
+ vimthread_affected[sfp["datacenter_tenant_id"]] = None
+ datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
+ datacenter_tenant_id=sfp["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
+ continue
+ extra = {"params": (sfp['vim_sfp_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sfp["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfps",
+ "item_id": sfp["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for sf in instanceDict['sfs']:
+ vimthread_affected[sf["datacenter_tenant_id"]] = None
+ datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
+ datacenter_tenant_id=sf["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
+ continue
+ extra = {"params": (sf['vim_sf_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sf["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfs",
+ "item_id": sf["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for sfi in instanceDict['sfis']:
+ vimthread_affected[sfi["datacenter_tenant_id"]] = None
+ datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
+ datacenter_tenant_id=sfi["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
+ continue
+ extra = {"params": (sfi['vim_sfi_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sfi["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfis",
+ "item_id": sfi["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for classification in instanceDict['classifications']:
+ vimthread_affected[classification["datacenter_tenant_id"]] = None
+ datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
+ datacenter_tenant_id=classification["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"], classification["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'], classification["datacenter_id"])
+ continue
+ extra = {"params": (classification['vim_classification_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": classification["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_classifications",
+ "item_id": classification["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
db_instance_action["number_tasks"] = task_index
db_tables = [
{"instance_actions": db_instance_action},
diff --git a/osm_ro/nfvo_db.py b/osm_ro/nfvo_db.py
index 8b72e14..e9db479 100644
--- a/osm_ro/nfvo_db.py
+++ b/osm_ro/nfvo_db.py
@@ -37,7 +37,9 @@
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
"interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
"sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
- "instance_actions", "vim_actions"]
+ "instance_actions", "vim_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
+ "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
+ "instance_classifications", "instance_sfps"]
class nfvo_db(db_base.db_base):
@@ -695,6 +697,36 @@
db_base._convert_datetime2str(scenario_dict)
db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+ #forwarding graphs
+ cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+ "ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnffgs'] = self.cur.fetchall()
+ for vnffg in scenario_dict['vnffgs']:
+ cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+ "ORDER BY created_at".format(vnffg['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnffg['rsps'] = self.cur.fetchall()
+ for rsp in vnffg['rsps']:
+ cmd = "SELECT uuid,if_order,interface_id,sce_vnf_id FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+ "ORDER BY created_at".format(rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['connection_points'] = self.cur.fetchall();
+ cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+ "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier'] = self.cur.fetchone();
+ cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+ "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier']['matches'] = self.cur.fetchall()
+
return scenario_dict
except (mdb.Error, AttributeError) as e:
self._format_error(e, tries)
@@ -983,7 +1015,47 @@
self.logger.debug(cmd)
self.cur.execute(cmd)
instance_dict['nets'] = self.cur.fetchall()
-
+
+ #instance_sfps
+ cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info"\
+ " FROM instance_sfps" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfps'] = self.cur.fetchall()
+
+ for sfp in instance_dict['sfps']:
+ #instance_sfs
+ cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info"\
+ " FROM instance_sfs" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfs'] = self.cur.fetchall()
+
+ for sf in instance_dict['sfs']:
+ #instance_sfis
+ cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info"\
+ " FROM instance_sfis" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfis'] = self.cur.fetchall()
+# for sfi in instance_dict['sfi']:
+
+ #instance_classifications
+ cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info"\
+ " FROM instance_classifications" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['classifications'] = self.cur.fetchall()
+# for classification in instance_dict['classifications']
+
db_base._convert_datetime2str(instance_dict)
db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
return instance_dict
diff --git a/osm_ro/vim_thread.py b/osm_ro/vim_thread.py
index ce0fc7f..d5574b4 100644
--- a/osm_ro/vim_thread.py
+++ b/osm_ro/vim_thread.py
@@ -530,6 +530,38 @@
result, database_update = self.get_net(task)
else:
raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfis':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sfi(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sfi(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfs':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sf(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sf(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_classifications':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_classification(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_classification(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfps':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sfp(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sfp(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
else:
raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
# TODO
@@ -543,11 +575,14 @@
elif task["item"] == 'instance_nets':
database_update["vim_net_id"] = None
+ no_refresh_tasks = ['instance_sfis', 'instance_sfs',
+ 'instance_classifications', 'instance_sfps']
if task["action"] == "DELETE":
action_key = task["item"] + task["item_id"]
del self.grouped_tasks[action_key]
elif task["action"] in ("CREATE", "FIND") and task["status"] in ("DONE", "BUILD"):
- self._insert_refresh(task)
+ if task["item"] not in no_refresh_tasks:
+ self._insert_refresh(task)
task_id = task["instance_action_id"] + "." + str(task["task_index"])
self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
@@ -941,3 +976,234 @@
return True, None
task["status"] = "FAILED"
return False, None
+
+ ## Service Function Instances
+
+ def new_sfi(self, task):
+ vim_sfi_id = None
+ try:
+ params = task["params"]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ depends = task.get("depends")
+ error_text = ""
+ interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+ # At the moment, every port associated with the VM will be used both as ingress and egress ports.
+ # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
+ # first ingress and first egress ports will be used to create the SFI (Port Pair).
+ port_id_list = [interfaces[0].get("vim_id")]
+ name = "sfi-%s" % task["item_id"][:8]
+ # By default no form of IETF SFC Encapsulation will be used
+ vim_sfi_id = self.vim.new_sfi(name, port_id_list, port_id_list, sfc_encap=False)
+
+ task["extra"]["created"] = True
+ task["error_msg"] = None
+ task["status"] = "DONE"
+ task["vim_id"] = vim_sfi_id
+ instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
+ return True, instance_element_update
+
+ except (vimconn.vimconnException, VimThreadException) as e:
+ self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
+ error_text = self._format_vim_error_msg(str(e))
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
+ return False, instance_element_update
+
+ def del_sfi(self, task):
+ sfi_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sfi(sfi_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
+
+ def new_sf(self, task):
+ vim_sf_id = None
+ try:
+ params = task["params"]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ depends = task.get("depends")
+ error_text = ""
+ #sfis = task.get("depends").values()[0].get("extra").get("params")[5]
+ sfis = task.get("depends").values()
+ sfi_id_list = []
+ for sfi in sfis:
+ sfi_id_list.append(sfi.get("vim_id"))
+ name = "sf-%s" % task["item_id"][:8]
+ # By default no form of IETF SFC Encapsulation will be used
+ vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
+
+ task["extra"]["created"] = True
+ task["error_msg"] = None
+ task["status"] = "DONE"
+ task["vim_id"] = vim_sf_id
+ instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
+ return True, instance_element_update
+
+ except (vimconn.vimconnException, VimThreadException) as e:
+ self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+ error_text = self._format_vim_error_msg(str(e))
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
+ return False, instance_element_update
+
+ def del_sf(self, task):
+ sf_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sf(sf_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
+
+ def new_classification(self, task):
+ vim_classification_id = None
+ try:
+ params = task["params"]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ depends = task.get("depends")
+ error_text = ""
+ interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+ # Bear in mind that different VIM connectors might support Classifications differently.
+ # In the case of OpenStack, only the first VNF attached to the classifier will be used
+ # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+ # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+ # using the IPv4 flow classifier.
+ name = "c-%s" % task["item_id"][:8]
+ # if not CIDR is given for the IP addresses, add /32:
+ ip_proto = int(params.get("ip_proto"))
+ source_ip = params.get("source_ip")
+ destination_ip = params.get("destination_ip")
+ if ip_proto == 1:
+ ip_proto = 'icmp'
+ elif ip_proto == 6:
+ ip_proto = 'tcp'
+ elif ip_proto == 17:
+ ip_proto = 'udp'
+ if '/' not in source_ip:
+ source_ip += '/32'
+ if '/' not in destination_ip:
+ destination_ip += '/32'
+ definition = {
+ "logical_source_port": interfaces[0].get("vim_id"),
+ "protocol": ip_proto,
+ "source_ip_prefix": source_ip,
+ "destination_ip_prefix": destination_ip,
+ "source_port_range_min": params.get("source_port"),
+ "source_port_range_max": params.get("source_port"),
+ "destination_port_range_min": params.get("destination_port"),
+ "destination_port_range_max": params.get("destination_port"),
+ }
+
+ vim_classification_id = self.vim.new_classification(
+ name, 'legacy_flow_classifier', definition)
+
+ task["extra"]["created"] = True
+ task["error_msg"] = None
+ task["status"] = "DONE"
+ task["vim_id"] = vim_classification_id
+ instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id, "error_msg": None}
+ return True, instance_element_update
+
+ except (vimconn.vimconnException, VimThreadException) as e:
+ self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+ error_text = self._format_vim_error_msg(str(e))
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+ return False, instance_element_update
+
+ def del_classification(self, task):
+ classification_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_classification(classification_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
+
+ def new_sfp(self, task):
+ vim_sfp_id = None
+ try:
+ params = task["params"]
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ depends = task.get("depends")
+ error_text = ""
+ deps = task.get("depends").values()
+ sf_id_list = []
+ classification_id_list = []
+ for dep in deps:
+ vim_id = dep.get("vim_id")
+ resource = dep.get("item")
+ if resource == "instance_sfs":
+ sf_id_list.append(vim_id)
+ elif resource == "instance_classifications":
+ classification_id_list.append(vim_id)
+
+ name = "sfp-%s" % task["item_id"][:8]
+ # By default no form of IETF SFC Encapsulation will be used
+ vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
+
+ task["extra"]["created"] = True
+ task["error_msg"] = None
+ task["status"] = "DONE"
+ task["vim_id"] = vim_sfp_id
+ instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
+ return True, instance_element_update
+
+ except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function Path, task=%s: %s", task_id, str(e))
+ error_text = self._format_vim_error_msg(str(e))
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+ def del_sfp(self, task):
+ sfp_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sfp(sfp_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
diff --git a/osm_ro/vimconn_openstack.py b/osm_ro/vimconn_openstack.py
index 85b8dc8..e8263ad 100644
--- a/osm_ro/vimconn_openstack.py
+++ b/osm_ro/vimconn_openstack.py
@@ -1691,7 +1691,7 @@
classification_dict = definition
classification_dict['name'] = name
- new_class = self.neutron.create_flow_classifier(
+ new_class = self.neutron.create_sfc_flow_classifier(
{'flow_classifier': classification_dict})
return new_class['flow_classifier']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
@@ -1720,9 +1720,9 @@
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- classification_dict = self.neutron.list_flow_classifier(
+ classification_dict = self.neutron.list_sfc_flow_classifiers(
**filter_dict)
- classification_list = classification_dict["flow_classifiers"]
+            classification_list = classification_dict["flow_classifiers"]
self.__classification_os2mano(classification_list)
return classification_list
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
@@ -1733,7 +1733,7 @@
self.logger.debug("Deleting Classification '%s' from VIM", class_id)
try:
self._reload_connection()
- self.neutron.delete_flow_classifier(class_id)
+ self.neutron.delete_sfc_flow_classifier(class_id)
return class_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1748,9 +1748,7 @@
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
if len(ingress_ports) != 1:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector can only have "
@@ -1764,13 +1762,13 @@
'egress': egress_ports[0],
'service_function_parameters': {
'correlation': correlation}}
- new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict})
+ new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
return new_sfi['port_pair']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfi:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair(
new_sfi['port_pair']['id'])
except Exception:
self.logger.error(
@@ -1800,7 +1798,7 @@
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfi_dict = self.neutron.list_port_pair(**filter_dict)
+ sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict)
sfi_list = sfi_dict["port_pairs"]
self.__sfi_os2mano(sfi_list)
return sfi_list
@@ -1813,7 +1811,7 @@
"from VIM", sfi_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair(sfi_id)
+ self.neutron.delete_sfc_port_pair(sfi_id)
return sfi_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1828,25 +1826,23 @@
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
for instance in sfis:
sfi = self.get_sfi(instance)
- if sfi.get('sfc_encap') != correlation:
+ if sfi.get('sfc_encap') != sfc_encap:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector requires all SFIs of the "
"same SF to share the same SFC Encapsulation")
sf_dict = {'name': name,
'port_pairs': sfis}
- new_sf = self.neutron.create_port_pair_group({
+ new_sf = self.neutron.create_sfc_port_pair_group({
'port_pair_group': sf_dict})
return new_sf['port_pair_group']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sf:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair_group(
new_sf['port_pair_group']['id'])
except Exception:
self.logger.error(
@@ -1874,7 +1870,7 @@
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sf_dict = self.neutron.list_port_pair_group(**filter_dict)
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict)
sf_list = sf_dict["port_pair_groups"]
self.__sf_os2mano(sf_list)
return sf_list
@@ -1886,7 +1882,7 @@
self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair_group(sf_id)
+ self.neutron.delete_sfc_port_pair_group(sf_id)
return sf_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1899,26 +1895,24 @@
try:
new_sfp = None
self._reload_connection()
- if not sfc_encap:
- raise vimconn.vimconnNotSupportedException(
- "OpenStack VIM connector only supports "
- "SFC-Encapsulated chains")
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+            # In networking-sfc the MPLS encapsulation is legacy and is
+            # what gets used when no full SFC Encapsulation is intended
+            correlation = 'mpls'
+            if sfc_encap:
+                correlation = 'nsh'
sfp_dict = {'name': name,
'flow_classifiers': classifications,
'port_pair_groups': sfs,
'chain_parameters': {'correlation': correlation}}
if spi:
sfp_dict['chain_id'] = spi
- new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict})
+ new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
return new_sfp["port_chain"]["id"]
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfp:
try:
- self.neutron.delete_port_chain(new_sfp['port_chain']['id'])
+ self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
except Exception:
self.logger.error(
'Creation of Service Function Path failed, with '
@@ -1945,7 +1939,7 @@
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfp_dict = self.neutron.list_port_chain(**filter_dict)
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict)
sfp_list = sfp_dict["port_chains"]
self.__sfp_os2mano(sfp_list)
return sfp_list
@@ -1958,7 +1952,7 @@
"Deleting Service Function Path '%s' from VIM", sfp_id)
try:
self._reload_connection()
- self.neutron.delete_port_chain(sfp_id)
+ self.neutron.delete_sfc_port_chain(sfp_id)
return sfp_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,