DBNAME="mano_db"
QUIET_MODE=""
#TODO: update this to the latest database schema version
-LAST_DB_VERSION=27
-
+LAST_DB_VERSION=28
+
# Detect paths
MYSQL=$(which mysql)
AWK=$(which awk)
#[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25 #0.5.22 => 25
#[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26 #0.5.24 => 26
#[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27 #0.5.25 => 27
+#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28 #0.5.52 => 28
#TODO ... put next versions here
# NOTE(review): despite the name upgrade_to_1, this body performs a
# *downgrade* from schema version 27 — it drops the column introduced by
# that version and removes its schema_version bookkeeping row. Presumably
# this hunk was mislabeled when the patch was assembled; confirm against
# the full migration script (expected name: downgrade_from_27).
function upgrade_to_1(){
    sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
    sql "DELETE FROM schema_version WHERE version_int='27';"
}
+# Schema v28: adds the scenario-level VNFFG/SFC tables (sce_vnffgs, sce_rsps,
+# sce_rsp_hops, sce_classifiers, sce_classifier_matches), the per-instance
+# mapping tables (instance_sfis, instance_sfs, instance_classifications,
+# instance_sfps), and extends the vim_actions 'item' enum so scheduled tasks
+# can reference the new instance tables. Reverted by downgrade_from_28.
+function upgrade_to_28(){
+    echo "    [Adding necessary tables for VNFFG]"
+    echo "      Adding sce_vnffgs"
+    # One row per VNF forwarding graph descriptor, owned by a scenario.
+    # FIX(review): the FK must be on scenario_id (the indexed column), not
+    # tenant_id — referencing scenarios(uuid) from tenant_id was a bug.
+    sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
+        uuid VARCHAR(36) NOT NULL,
+        tenant_id VARCHAR(36) NULL DEFAULT NULL,
+        name VARCHAR(255) NOT NULL,
+        description VARCHAR(255) NULL DEFAULT NULL,
+        vendor VARCHAR(255) NULL DEFAULT NULL,
+        scenario_id VARCHAR(36) NOT NULL,
+        created_at DOUBLE NOT NULL,
+        modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_scenarios_sce_vnffg (scenario_id),
+        CONSTRAINT FK_scenarios_sce_vnffg FOREIGN KEY (scenario_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsps"
+    # Rendered service paths belonging to a VNFFG.
+    sql "CREATE TABLE IF NOT EXISTS sce_rsps (
+        uuid VARCHAR(36) NOT NULL,
+        tenant_id VARCHAR(36) NULL DEFAULT NULL,
+        name VARCHAR(255) NOT NULL,
+        sce_vnffg_id VARCHAR(36) NOT NULL,
+        created_at DOUBLE NOT NULL,
+        modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
+        CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsp_hops"
+    # Ordered hops of a service path: each hop points at a VNF interface.
+    sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
+        uuid VARCHAR(36) NOT NULL,
+        if_order INT DEFAULT 0 NOT NULL,
+        interface_id VARCHAR(36) NOT NULL,
+        sce_vnf_id VARCHAR(36) NOT NULL,
+        sce_rsp_id VARCHAR(36) NOT NULL,
+        created_at DOUBLE NOT NULL,
+        modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_interfaces_rsp_hop (interface_id),
+        INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
+        INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
+        CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifiers"
+    # Traffic classifiers steering flows into a service path.
+    sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
+        uuid VARCHAR(36) NOT NULL,
+        tenant_id VARCHAR(36) NULL DEFAULT NULL,
+        name VARCHAR(255) NOT NULL,
+        sce_vnffg_id VARCHAR(36) NOT NULL,
+        sce_rsp_id VARCHAR(36) NOT NULL,
+        sce_vnf_id VARCHAR(36) NOT NULL,
+        interface_id VARCHAR(36) NOT NULL,
+        created_at DOUBLE NOT NULL,
+        modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
+        INDEX FK_sce_rsps_classifier (sce_rsp_id),
+        INDEX FK_sce_vnfs_classifier (sce_vnf_id),
+        INDEX FK_interfaces_classifier (interface_id),
+        CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifier_matches"
+    # Match attributes (5-tuple) of a classifier.
+    # NOTE(review): index renamed to FK_sce_classifiers_classifier_match to
+    # match its constraint name, consistent with every other table here.
+    sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
+        uuid VARCHAR(36) NOT NULL,
+        ip_proto VARCHAR(2) NOT NULL,
+        source_ip VARCHAR(16) NOT NULL,
+        destination_ip VARCHAR(16) NOT NULL,
+        source_port VARCHAR(5) NOT NULL,
+        destination_port VARCHAR(5) NOT NULL,
+        sce_classifier_id VARCHAR(36) NOT NULL,
+        created_at DOUBLE NOT NULL,
+        modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_classifiers_classifier_match (sce_classifier_id),
+        CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+
+    echo "    [Adding necessary tables for VNFFG-SFC instance mapping]"
+    echo "      Adding instance_sfis"
+    # Deployed service function instances; vim_sfi_id filled by the VIM thread.
+    sql "CREATE TABLE IF NOT EXISTS instance_sfis (
+        uuid varchar(36) NOT NULL,
+        instance_scenario_id varchar(36) NOT NULL,
+        vim_sfi_id varchar(36) DEFAULT NULL,
+        sce_rsp_hop_id varchar(36) DEFAULT NULL,
+        datacenter_id varchar(36) DEFAULT NULL,
+        datacenter_tenant_id varchar(36) DEFAULT NULL,
+        status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+        error_msg varchar(1024) DEFAULT NULL,
+        vim_info text,
+        created_at double NOT NULL,
+        modified_at double DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
+        KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
+        KEY FK_instance_sfis_datacenters (datacenter_id),
+        KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
+        CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+        CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+        CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+        CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding instance_sfs"
+    # Deployed service functions (one per hop, grouping its SFIs).
+    sql "CREATE TABLE IF NOT EXISTS instance_sfs (
+        uuid varchar(36) NOT NULL,
+        instance_scenario_id varchar(36) NOT NULL,
+        vim_sf_id varchar(36) DEFAULT NULL,
+        sce_rsp_hop_id varchar(36) DEFAULT NULL,
+        datacenter_id varchar(36) DEFAULT NULL,
+        datacenter_tenant_id varchar(36) DEFAULT NULL,
+        status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+        error_msg varchar(1024) DEFAULT NULL,
+        vim_info text,
+        created_at double NOT NULL,
+        modified_at double DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
+        KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
+        KEY FK_instance_sfs_datacenters (datacenter_id),
+        KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
+        CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+        CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+        CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+        CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding instance_classifications"
+    # Deployed classifications, one per classifier match attribute set.
+    sql "CREATE TABLE IF NOT EXISTS instance_classifications (
+        uuid varchar(36) NOT NULL,
+        instance_scenario_id varchar(36) NOT NULL,
+        vim_classification_id varchar(36) DEFAULT NULL,
+        sce_classifier_match_id varchar(36) DEFAULT NULL,
+        datacenter_id varchar(36) DEFAULT NULL,
+        datacenter_tenant_id varchar(36) DEFAULT NULL,
+        status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+        error_msg varchar(1024) DEFAULT NULL,
+        vim_info text,
+        created_at double NOT NULL,
+        modified_at double DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
+        KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
+        KEY FK_instance_classifications_datacenters (datacenter_id),
+        KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
+        CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+        CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+        CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+        CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding instance_sfps"
+    # Deployed service function paths (one per rendered service path).
+    sql "CREATE TABLE IF NOT EXISTS instance_sfps (
+        uuid varchar(36) NOT NULL,
+        instance_scenario_id varchar(36) NOT NULL,
+        vim_sfp_id varchar(36) DEFAULT NULL,
+        sce_rsp_id varchar(36) DEFAULT NULL,
+        datacenter_id varchar(36) DEFAULT NULL,
+        datacenter_tenant_id varchar(36) DEFAULT NULL,
+        status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+        error_msg varchar(1024) DEFAULT NULL,
+        vim_info text,
+        created_at double NOT NULL,
+        modified_at double DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
+        KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
+        KEY FK_instance_sfps_datacenters (datacenter_id),
+        KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
+        CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+        CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+        CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+        CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+
+
+    echo "    [Altering vim_actions table]"
+    # Extend the task-item enum so vim_actions rows can point at the new
+    # instance_* tables.
+    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
+
+    # FIX(review): openmano_ver corrected from '0.5.28' to '0.5.52' — the
+    # version map above declares 0.5.52 => DB version 28.
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+        "VALUES (28, '0.28', '0.5.52', 'Adding VNFFG-related tables', '2017-11-20');"
+}
+# Reverts upgrade_to_28: drops the instance-mapping tables first (they hold
+# FKs into the scenario-level tables), then the scenario-level VNFFG tables,
+# restores the original vim_actions 'item' enum, and removes the v28 row.
+function downgrade_from_28(){
+    # FIX(review): section headers were swapped and one echo named the
+    # wrong table (sce_classifications vs instance_classifications); the
+    # instance_* tables are the SFC instance-mapping ones.
+    echo "    [Undo adding the VNFFG-SFC instance mapping tables]"
+    echo "      Dropping instance_sfps"
+    sql "DROP TABLE instance_sfps;"
+    echo "      Dropping instance_classifications"
+    sql "DROP TABLE instance_classifications;"
+    echo "      Dropping instance_sfs"
+    sql "DROP TABLE instance_sfs;"
+    echo "      Dropping instance_sfis"
+    sql "DROP TABLE instance_sfis;"
+    echo "    [Undo adding the VNFFG tables]"
+    echo "      Dropping sce_classifier_matches"
+    sql "DROP TABLE sce_classifier_matches;"
+    echo "      Dropping sce_classifiers"
+    sql "DROP TABLE sce_classifiers;"
+    echo "      Dropping sce_rsp_hops"
+    sql "DROP TABLE sce_rsp_hops;"
+    echo "      Dropping sce_rsps"
+    sql "DROP TABLE sce_rsps;"
+    echo "      Dropping sce_vnffgs"
+    sql "DROP TABLE sce_vnffgs;"
+    echo "    [Altering vim_actions table]"
+    # Restore the pre-v28 enum values.
+    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
+    sql "DELETE FROM schema_version WHERE version_int='28';"
+}
function upgrade_to_X(){
echo " change 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.5.51-r561"
-version_date = "Jan 2018"
-database_version = 27 # expected database schema version
+__version__ = "0.5.52-r562"
+version_date = "Mar 2018"
+database_version = 28 # expected database schema version
global global_config
:param mydb:
:param tenant_id:
:param nsd_descriptor:
- :return: The list of cretated NSD ids
+ :return: The list of created NSD ids
"""
try:
mynsd = nsd_catalog.nsd()
db_sce_nets = []
db_sce_vnfs = []
db_sce_interfaces = []
+ db_sce_vnffgs = []
+ db_sce_rsps = []
+ db_sce_rsp_hops = []
+ db_sce_classifiers = []
+ db_sce_classifier_matches = []
db_ip_profiles = []
db_ip_profiles_index = 0
uuid_list = []
for nsd_yang in mynsd.nsd_catalog.nsd.itervalues():
nsd = nsd_yang.get()
- # table sceanrios
+ # table scenarios
scenario_uuid = str(uuid4())
uuid_list.append(scenario_uuid)
nsd_uuid_list.append(scenario_uuid)
if not db_sce_net["type"]:
db_sce_net["type"] = "bridge"
+ # table sce_vnffgs (vnffgd)
+ for vnffg in nsd.get("vnffgd").itervalues():
+ sce_vnffg_uuid = str(uuid4())
+ uuid_list.append(sce_vnffg_uuid)
+ db_sce_vnffg = {
+ "uuid": sce_vnffg_uuid,
+ "name": get_str(vnffg, "name", 255),
+ "scenario_id": scenario_uuid,
+ "vendor": get_str(vnffg, "vendor", 255),
+ "description": get_str(vld, "description", 255),
+ }
+ db_sce_vnffgs.append(db_sce_vnffg)
+
+ # deal with rsps
+ db_sce_rsps = []
+ for rsp in vnffg.get("rsp").itervalues():
+ sce_rsp_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_uuid)
+ db_sce_rsp = {
+ "uuid": sce_rsp_uuid,
+ "name": get_str(rsp, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
+ }
+ db_sce_rsps.append(db_sce_rsp)
+ db_sce_rsp_hops = []
+ for iface in rsp.get("vnfd-connection-point-ref").itervalues():
+ vnf_index = int(iface['member-vnf-index-ref'])
+ if_order = int(iface['order'])
+ # check correct parameters
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
+ HTTP_Bad_Request)
+
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]),
+ HTTP_Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+ sce_rsp_hop_uuid = str(uuid4())
+ uuid_list.append(sce_rsp_hop_uuid)
+ db_sce_rsp_hop = {
+ "uuid": sce_rsp_hop_uuid,
+ "if_order": if_order,
+ "interface_id": interface_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "sce_rsp_id": sce_rsp_uuid,
+ }
+ db_sce_rsp_hops.append(db_sce_rsp_hop)
+
+ # deal with classifiers
+ db_sce_classifiers = []
+ for classifier in vnffg.get("classifier").itervalues():
+ sce_classifier_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_uuid)
+
+ # source VNF
+ vnf_index = int(classifier['member-vnf-index-ref'])
+ if vnf_index not in vnf_index2vnf_uuid:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+ "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+ "'nsd':'constituent-vnfd'".format(
+ str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
+ HTTP_Bad_Request)
+ existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(classifier, "vnfd-connection-point-ref",
+ 255)})
+ if not existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]),
+ HTTP_Bad_Request)
+ interface_uuid = existing_ifaces[0]["uuid"]
+
+ db_sce_classifier = {
+ "uuid": sce_classifier_uuid,
+ "name": get_str(classifier, "name", 255),
+ "sce_vnffg_id": sce_vnffg_uuid,
+ "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+ "interface_id": interface_uuid,
+ }
+ rsp_id = get_str(classifier, "rsp-id-ref", 255)
+ rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
+ db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
+ db_sce_classifiers.append(db_sce_classifier)
+
+ db_sce_classifier_matches = []
+ for match in classifier.get("match-attributes").itervalues():
+ sce_classifier_match_uuid = str(uuid4())
+ uuid_list.append(sce_classifier_match_uuid)
+ db_sce_classifier_match = {
+ "uuid": sce_classifier_match_uuid,
+ "ip_proto": get_str(match, "ip-proto", 2),
+ "source_ip": get_str(match, "source-ip-address", 16),
+ "destination_ip": get_str(match, "destination-ip-address", 16),
+ "source_port": get_str(match, "source-port", 5),
+ "destination_port": get_str(match, "destination-port", 5),
+ "sce_classifier_id": sce_classifier_uuid,
+ }
+ db_sce_classifier_matches.append(db_sce_classifier_match)
+ # TODO: vnf/cp keys
+
+ # remove unneeded id's in sce_rsps
+ for rsp in db_sce_rsps:
+ rsp.pop('id')
+
db_tables = [
{"scenarios": db_scenarios},
{"sce_nets": db_sce_nets},
{"ip_profiles": db_ip_profiles},
{"sce_vnfs": db_sce_vnfs},
{"sce_interfaces": db_sce_interfaces},
+ {"sce_vnffgs": db_sce_vnffgs},
+ {"sce_rsps": db_sce_rsps},
+ {"sce_rsp_hops": db_sce_rsp_hops},
+ {"sce_classifiers": db_sce_classifiers},
+ {"sce_classifier_matches": db_sce_classifier_matches},
]
- logger.debug("create_vnf Deployment done vnfDict: %s",
+ logger.debug("new_nsd_v3 done: %s",
yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
mydb.new_rows(db_tables, uuid_list)
return nsd_uuid_list
myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
# myvim_tenant = myvim['tenant_id']
-
rollbackList=[]
# print "Checking that the scenario exists and getting the scenario dictionary"
db_instance_vnfs = []
db_instance_vms = []
db_instance_interfaces = []
+ db_instance_sfis = []
+ db_instance_sfs = []
+ db_instance_classifications = []
+ db_instance_sfps = []
db_ip_profiles = []
db_vim_actions = []
uuid_list = []
task_index += 1
db_vim_actions.append(db_vim_action)
+ task_depends_on = []
+ for vnffg in scenarioDict['vnffgs']:
+ for rsp in vnffg['rsps']:
+ sfs_created = []
+ for cp in rsp['connection_points']:
+ count = mydb.get_rows(
+ SELECT=('vms.count'),
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h on interfaces.uuid=h.interface_id",
+ WHERE={'h.uuid': cp['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ sfis_created = []
+ for i in range(count):
+ # create sfis
+ sfi_uuid = str(uuid4())
+ uuid_list.append(sfi_uuid)
+ db_sfi = {
+ "uuid": sfi_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfi_id": None, # vim thread will populate
+ }
+ db_instance_sfis.append(db_sfi)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfis",
+ "item_id": sfi_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ sfis_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ # create sfs
+ sf_uuid = str(uuid4())
+ uuid_list.append(sf_uuid)
+ db_sf = {
+ "uuid": sf_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_hop_id': cp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sf_id": None, # vim thread will populate
+ }
+ db_instance_sfs.append(db_sf)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfs",
+ "item_id": sf_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+ default_flow_style=True, width=256)
+ }
+ sfs_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+ classifier = rsp['classifier']
+
+ # TODO the following ~13 lines can be reused for the sfi case
+ count = mydb.get_rows(
+ SELECT=('vms.count'),
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+ WHERE={'c.uuid': classifier['uuid']})[0]['count']
+ instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+ instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+ dependencies = []
+ for instance_vm in instance_vms:
+ action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+ if action:
+ dependencies.append(action['task_index'])
+ # TODO: throw exception if count != len(instance_vms)
+ # TODO: and action shouldn't ever be None
+ classifications_created = []
+ for i in range(count):
+ for match in classifier['matches']:
+ # create classifications
+ classification_uuid = str(uuid4())
+ uuid_list.append(classification_uuid)
+ db_classification = {
+ "uuid": classification_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_classifier_match_id': match['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_classification_id": None, # vim thread will populate
+ }
+ db_instance_classifications.append(db_classification)
+ classification_params = {
+ "ip_proto": match["ip_proto"],
+ "source_ip": match["source_ip"],
+ "destination_ip": match["destination_ip"],
+ "source_port": match["source_port"],
+ "destination_port": match["destination_port"]
+ }
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_classifications",
+ "item_id": classification_uuid,
+ "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+ default_flow_style=True, width=256)
+ }
+ classifications_created.append(task_index)
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ # create sfps
+ sfp_uuid = str(uuid4())
+ uuid_list.append(sfp_uuid)
+ db_sfp = {
+ "uuid": sfp_uuid,
+ "instance_scenario_id": instance_uuid,
+ 'sce_rsp_id': rsp['uuid'],
+ 'datacenter_id': datacenter_id,
+ 'datacenter_tenant_id': myvim_thread_id,
+ "vim_sfp_id": None, # vim thread will populate
+ }
+ db_instance_sfps.append(db_sfp)
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": myvim_thread_id,
+ "action": "CREATE",
+ "status": "SCHEDULED",
+ "item": "instance_sfps",
+ "item_id": sfp_uuid,
+ "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+ default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
scenarioDict["datacenter2tenant"] = myvim_threads_id
db_instance_action["number_tasks"] = task_index
{"instance_vms": db_instance_vms},
{"instance_interfaces": db_instance_interfaces},
{"instance_actions": db_instance_action},
+ {"instance_sfis": db_instance_sfis},
+ {"instance_sfs": db_instance_sfs},
+ {"instance_classifications": db_instance_classifications},
+ {"instance_sfps": db_instance_sfps},
{"vim_actions": db_vim_actions}
]
# print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
tenant_id = instanceDict["tenant_id"]
# print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-
# 1. Delete from Database
message = mydb.delete_instance_scenario(instance_id, tenant_id)
task_index += 1
db_vim_actions.append(db_vim_action)
+ # 2.3 deleting VNFFGs
+
+ for sfp in instanceDict['sfps']:
+ vimthread_affected[sfp["datacenter_tenant_id"]] = None
+ datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
+ datacenter_tenant_id=sfp["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
+ continue
+ extra = {"params": (sfp['vim_sfp_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sfp["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfps",
+ "item_id": sfp["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for sf in instanceDict['sfs']:
+ vimthread_affected[sf["datacenter_tenant_id"]] = None
+ datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
+ datacenter_tenant_id=sf["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
+ continue
+ extra = {"params": (sf['vim_sf_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sf["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfs",
+ "item_id": sf["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for sfi in instanceDict['sfis']:
+ vimthread_affected[sfi["datacenter_tenant_id"]] = None
+ datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
+ datacenter_tenant_id=sfi["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
+ continue
+ extra = {"params": (sfi['vim_sfi_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": sfi["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_sfis",
+ "item_id": sfi["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
+ for classification in instanceDict['classifications']:
+ vimthread_affected[classification["datacenter_tenant_id"]] = None
+ datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
+ if datacenter_key not in myvims:
+ try:
+ _,myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
+ except NfvoException as e:
+ logger.error(str(e))
+ myvim_thread = None
+ myvim_threads[datacenter_key] = myvim_thread
+ vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
+ datacenter_tenant_id=classification["datacenter_tenant_id"])
+ if len(vims) == 0:
+ logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"], classification["datacenter_tenant_id"]))
+ myvims[datacenter_key] = None
+ else:
+ myvims[datacenter_key] = vims.values()[0]
+ myvim = myvims[datacenter_key]
+ myvim_thread = myvim_threads[datacenter_key]
+
+ if not myvim:
+ error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'], classification["datacenter_id"])
+ continue
+ extra = {"params": (classification['vim_classification_id'])}
+ db_vim_action = {
+ "instance_action_id": instance_action_id,
+ "task_index": task_index,
+ "datacenter_vim_id": classification["datacenter_tenant_id"],
+ "action": "DELETE",
+ "status": "SCHEDULED",
+ "item": "instance_classifications",
+ "item_id": classification["uuid"],
+ "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+ }
+ task_index += 1
+ db_vim_actions.append(db_vim_action)
+
db_instance_action["number_tasks"] = task_index
db_tables = [
{"instance_actions": db_instance_action},
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
"interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
"sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
- "instance_actions", "vim_actions"]
+ "instance_actions", "vim_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
+ "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
+ "instance_classifications", "instance_sfps"]
class nfvo_db(db_base.db_base):
db_base._convert_datetime2str(scenario_dict)
db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+                #forwarding graphs
+                cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+                      "ORDER BY created_at".format(scenario_dict['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                scenario_dict['vnffgs'] = self.cur.fetchall()
+                for vnffg in scenario_dict['vnffgs']:
+                    # rendered service paths belonging to this forwarding graph
+                    cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+                          "ORDER BY created_at".format(vnffg['uuid'])
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    vnffg['rsps'] = self.cur.fetchall()
+                    for rsp in vnffg['rsps']:
+                        cmd = "SELECT uuid,if_order,interface_id,sce_vnf_id FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+                              "ORDER BY created_at".format(rsp['uuid'])
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        rsp['connection_points'] = self.cur.fetchall()
+                        cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+                              "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        rsp['classifier'] = self.cur.fetchone()
+                        # fetchone() returns None when the RSP has no classifier;
+                        # guard before dereferencing (the original raised TypeError)
+                        if rsp['classifier']:
+                            cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+                                  "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+                            rsp['classifier']['matches'] = self.cur.fetchall()
+
return scenario_dict
except (mdb.Error, AttributeError) as e:
self._format_error(e, tries)
self.logger.debug(cmd)
self.cur.execute(cmd)
instance_dict['nets'] = self.cur.fetchall()
-
+
+                #instance_sfps
+                cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+                      "datacenter_tenant_id,status,error_msg,vim_info"\
+                      " FROM instance_sfps" \
+                      " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                instance_dict['sfps'] = self.cur.fetchall()
+
+                # instance_sfs and instance_sfis are still keyed by the scenario, not
+                # by their parent sfp/sf (TODO: switch the WHERE clause to
+                # instance_sfp_id / instance_sf_id once those FK columns exist).
+                # The queries therefore do not depend on any loop variable, so run
+                # each exactly once instead of re-running the identical query (and
+                # re-assigning the same dict key) once per sfp/sf as before.  This
+                # also guarantees 'sfs'/'sfis' are always present, as empty lists,
+                # even when the scenario has no sfps.
+                #instance_sfs
+                cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+                      "datacenter_tenant_id,status,error_msg,vim_info"\
+                      " FROM instance_sfs" \
+                      " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                instance_dict['sfs'] = self.cur.fetchall()
+
+                #instance_sfis
+                cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+                      "datacenter_tenant_id,status,error_msg,vim_info"\
+                      " FROM instance_sfis" \
+                      " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                instance_dict['sfis'] = self.cur.fetchall()
+
+                #instance_classifications
+                cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+                      "datacenter_tenant_id,status,error_msg,vim_info"\
+                      " FROM instance_classifications" \
+                      " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                instance_dict['classifications'] = self.cur.fetchall()
+
db_base._convert_datetime2str(instance_dict)
db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
return instance_dict
result, database_update = self.get_net(task)
else:
raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ # SFC task items: each maps to a networking-sfc resource in the VIM
+ # (sfi = port pair, sf = port pair group, classification = flow
+ # classifier, sfp = port chain); only CREATE and DELETE are supported.
+ elif task["item"] == 'instance_sfis':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sfi(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sfi(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfs':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sf(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sf(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_classifications':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_classification(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_classification(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfps':
+ if task["action"] == "CREATE":
+ result, database_update = self.new_sfp(task)
+ nb_created += 1
+ elif task["action"] == "DELETE":
+ result, database_update = self.del_sfp(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
else:
raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
# TODO
elif task["item"] == 'instance_nets':
database_update["vim_net_id"] = None
+ # SFC elements have no periodic refresh/monitor implemented yet, so
+ # never schedule them into the refresh loop after creation.
+ no_refresh_tasks = ['instance_sfis', 'instance_sfs',
+ 'instance_classifications', 'instance_sfps']
if task["action"] == "DELETE":
action_key = task["item"] + task["item_id"]
del self.grouped_tasks[action_key]
elif task["action"] in ("CREATE", "FIND") and task["status"] in ("DONE", "BUILD"):
- self._insert_refresh(task)
+ if task["item"] not in no_refresh_tasks:
+ self._insert_refresh(task)
task_id = task["instance_action_id"] + "." + str(task["task_index"])
self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
return True, None
task["status"] = "FAILED"
return False, None
+
+    ## Service Function Instances
+
+    def new_sfi(self, task):
+        """Create a Service Function Instance (an SFC "port pair") in the VIM.
+
+        Takes the VIM port of the depended-on VM interface and uses it as both
+        the ingress and the egress port of the pair.  Updates task
+        status/vim_id in place and returns (ok, instance_element_update).
+        """
+        vim_sfi_id = None
+        try:
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            # the single dependency is the VM-creation task; extra.params[5]
+            # holds its interface list — assumed layout, TODO confirm at caller
+            depends = task.get("depends")
+            interfaces = depends.values()[0].get("extra").get("params")[5]
+            # At the moment, every port associated with the VM will be used both as ingress and egress ports.
+            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
+            # first ingress and first egress ports will be used to create the SFI (Port Pair).
+            port_id_list = [interfaces[0].get("vim_id")]
+            name = "sfi-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfi_id = self.vim.new_sfi(name, port_id_list, port_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sfi_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+    def del_sfi(self, task):
+        """Delete a Service Function Instance (port pair) from the VIM.
+
+        A not-found answer from the VIM is treated as already deleted: the
+        task ends up DONE, but the VIM message is kept in task["error_msg"].
+        """
+        try:
+            self.vim.delete_sfi(task["vim_id"])
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if not isinstance(e, vimconn.vimconnNotFoundException):
+                task["status"] = "FAILED"
+                return False, None
+            # not found: mark as Done, keeping error_msg as set above
+            task["status"] = "DONE"
+            return True, None
+        task["status"] = "DONE"
+        task["error_msg"] = None
+        return True, None
+
+    def new_sf(self, task):
+        """Create a Service Function (an SFC "port pair group") in the VIM.
+
+        Groups the VIM ids of the depended-on SFI tasks into one port pair
+        group.  Updates task status/vim_id in place and returns
+        (ok, instance_element_update).
+        """
+        vim_sf_id = None
+        try:
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            # every dependency of this task is an SFI-creation task
+            sfis = task.get("depends").values()
+            sfi_id_list = [sfi.get("vim_id") for sfi in sfis]
+            name = "sf-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sf_id
+            instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+ def del_sf(self, task):
+ """Delete a Service Function (port pair group) from the VIM.
+
+ A not-found answer from the VIM is treated as already deleted: the
+ task is marked DONE but task["error_msg"] keeps the VIM message.
+ """
+ sf_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sf(sf_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
+
+    def new_classification(self, task):
+        """Create a Classification (an SFC "flow classifier") in the VIM.
+
+        Builds an IPv4 5-tuple flow-classifier definition from the
+        sce_classifier_match params and the VIM port of the depended-on VM
+        interface.  Updates task status/vim_id in place and returns
+        (ok, instance_element_update).
+        """
+        vim_classification_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depends = task.get("depends")
+            interfaces = depends.values()[0].get("extra").get("params")[5]
+            # Bear in mind that different VIM connectors might support Classifications differently.
+            # In the case of OpenStack, only the first VNF attached to the classifier will be used
+            # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+            # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+            # using the IPv4 flow classifier.
+            name = "c-%s" % task["item_id"][:8]
+            # assumes ip_proto/source_ip/destination_ip are always present in
+            # params (they come from sce_classifier_matches) — TODO confirm
+            ip_proto = int(params.get("ip_proto"))
+            source_ip = params.get("source_ip")
+            destination_ip = params.get("destination_ip")
+            # map the numeric IANA protocol number to the name networking-sfc expects
+            if ip_proto == 1:
+                ip_proto = 'icmp'
+            elif ip_proto == 6:
+                ip_proto = 'tcp'
+            elif ip_proto == 17:
+                ip_proto = 'udp'
+            # if no CIDR is given for the IP addresses, add /32:
+            if '/' not in source_ip:
+                source_ip += '/32'
+            if '/' not in destination_ip:
+                destination_ip += '/32'
+            definition = {
+                "logical_source_port": interfaces[0].get("vim_id"),
+                "protocol": ip_proto,
+                "source_ip_prefix": source_ip,
+                "destination_ip_prefix": destination_ip,
+                "source_port_range_min": params.get("source_port"),
+                "source_port_range_max": params.get("source_port"),
+                "destination_port_range_min": params.get("destination_port"),
+                "destination_port_range_max": params.get("destination_port"),
+            }
+
+            vim_classification_id = self.vim.new_classification(
+                name, 'legacy_flow_classifier', definition)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_classification_id
+            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+ def del_classification(self, task):
+ """Delete a Classification (flow classifier) from the VIM.
+
+ A not-found answer from the VIM is treated as already deleted: the
+ task is marked DONE but task["error_msg"] keeps the VIM message.
+ """
+ classification_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_classification(classification_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
+
+    def new_sfp(self, task):
+        """Create a Service Function Path (an SFC "port chain") in the VIM.
+
+        Splits the task dependencies into Service Functions (port pair
+        groups) and Classifications (flow classifiers) and chains them.
+        Updates task status/vim_id in place and returns
+        (ok, instance_element_update).
+        """
+        vim_sfp_id = None
+        try:
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            deps = task.get("depends").values()
+            sf_id_list = []
+            classification_id_list = []
+            for dep in deps:
+                vim_id = dep.get("vim_id")
+                resource = dep.get("item")
+                if resource == "instance_sfs":
+                    sf_id_list.append(vim_id)
+                elif resource == "instance_classifications":
+                    classification_id_list.append(vim_id)
+
+            name = "sfp-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sfp_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            # message fixed: this is the Service Function Path creation
+            self.logger.error("Error creating Service Function Path, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+ def del_sfp(self, task):
+ """Delete a Service Function Path (port chain) from the VIM.
+
+ A not-found answer from the VIM is treated as already deleted: the
+ task is marked DONE but task["error_msg"] keeps the VIM message.
+ """
+ sfp_vim_id = task["vim_id"]
+ try:
+ self.vim.delete_sfp(sfp_vim_id)
+ task["status"] = "DONE"
+ task["error_msg"] = None
+ return True, None
+
+ except vimconn.vimconnException as e:
+ task["error_msg"] = self._format_vim_error_msg(str(e))
+ if isinstance(e, vimconn.vimconnNotFoundException):
+ # If not found mark as Done and fill error_msg
+ task["status"] = "DONE"
+ return True, None
+ task["status"] = "FAILED"
+ return False, None
classification_dict = definition
classification_dict['name'] = name
- new_class = self.neutron.create_flow_classifier(
+ new_class = self.neutron.create_sfc_flow_classifier(
{'flow_classifier': classification_dict})
return new_class['flow_classifier']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- classification_dict = self.neutron.list_flow_classifier(
+ classification_dict = self.neutron.list_sfc_flow_classifiers(
**filter_dict)
classification_list = classification_dict["flow_classifiers"]
self.__classification_os2mano(classification_list)
return classification_list
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
self.logger.debug("Deleting Classification '%s' from VIM", class_id)
try:
self._reload_connection()
- self.neutron.delete_flow_classifier(class_id)
+ self.neutron.delete_sfc_flow_classifier(class_id)
return class_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
if len(ingress_ports) != 1:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector can only have "
'egress': egress_ports[0],
'service_function_parameters': {
'correlation': correlation}}
- new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict})
+ new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
return new_sfi['port_pair']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfi:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair(
new_sfi['port_pair']['id'])
except Exception:
self.logger.error(
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfi_dict = self.neutron.list_port_pair(**filter_dict)
+ sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict)
sfi_list = sfi_dict["port_pairs"]
self.__sfi_os2mano(sfi_list)
return sfi_list
"from VIM", sfi_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair(sfi_id)
+ self.neutron.delete_sfc_port_pair(sfi_id)
return sfi_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
for instance in sfis:
sfi = self.get_sfi(instance)
- if sfi.get('sfc_encap') != correlation:
+ if sfi.get('sfc_encap') != sfc_encap:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector requires all SFIs of the "
"same SF to share the same SFC Encapsulation")
sf_dict = {'name': name,
'port_pairs': sfis}
- new_sf = self.neutron.create_port_pair_group({
+ new_sf = self.neutron.create_sfc_port_pair_group({
'port_pair_group': sf_dict})
return new_sf['port_pair_group']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sf:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair_group(
new_sf['port_pair_group']['id'])
except Exception:
self.logger.error(
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sf_dict = self.neutron.list_port_pair_group(**filter_dict)
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict)
sf_list = sf_dict["port_pair_groups"]
self.__sf_os2mano(sf_list)
return sf_list
self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair_group(sf_id)
+ self.neutron.delete_sfc_port_pair_group(sf_id)
return sf_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
try:
new_sfp = None
self._reload_connection()
- if not sfc_encap:
- raise vimconn.vimconnNotSupportedException(
- "OpenStack VIM connector only supports "
- "SFC-Encapsulated chains")
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ # In networking-sfc the legacy MPLS correlation is used when no full
+ # SFC Encapsulation is intended; NSH is used when it is requested.
+ # Note: do NOT overwrite the sfc_encap parameter itself (assigning
+ # 'mpls' to it made the following test always true).
+ correlation = 'mpls'
+ if sfc_encap:
+ correlation = 'nsh'
sfp_dict = {'name': name,
'flow_classifiers': classifications,
'port_pair_groups': sfs,
'chain_parameters': {'correlation': correlation}}
if spi:
sfp_dict['chain_id'] = spi
- new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict})
+ new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
return new_sfp["port_chain"]["id"]
return new_sfp["port_chain"]["id"]
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfp:
try:
- self.neutron.delete_port_chain(new_sfp['port_chain']['id'])
+ self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
except Exception:
self.logger.error(
'Creation of Service Function Path failed, with '
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfp_dict = self.neutron.list_port_chain(**filter_dict)
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict)
sfp_list = sfp_dict["port_chains"]
self.__sfp_os2mano(sfp_list)
return sfp_list
"Deleting Service Function Path '%s' from VIM", sfp_id)
try:
self._reload_connection()
- self.neutron.delete_port_chain(sfp_id)
+ self.neutron.delete_sfc_port_chain(sfp_id)
return sfp_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
--- /dev/null
+nsd:nsd-catalog:
+ nsd:
+ - id: 3vdu_2vnf_1vnffg_nsd
+ name: 3vdu_2vnf_1vnffg_ns-name
+ short-name: 3vdu_2vnf_1vnffg-sname
+ description: 3 vnfs, each one with 2 cirros vdu, with 1 vnffg connecting the vnfs
+ vendor: OSM
+ version: '1.0'
+
+ logo: osm_2x.png
+
+ constituent-vnfd:
+ # The member-vnf-index needs to be unique, starting from 1
+ # vnfd-id-ref is the id of the VNFD
+ # Multiple constituent VNFDs can be specified
+ - member-vnf-index: 1
+ vnfd-id-ref: 2vdu_vnfd
+ - member-vnf-index: 2
+ vnfd-id-ref: 2vdu_vnfd
+ - member-vnf-index: 3
+ vnfd-id-ref: 2vdu_vnfd
+
+ ip-profiles:
+ - description: Inter VNF Link
+ ip-profile-params:
+ gateway-address: 31.31.31.210
+ ip-version: ipv4
+ subnet-address: 31.31.31.0/24
+ dns-server:
+ - address: 8.8.8.8
+ - address: 8.8.8.9
+ dhcp-params:
+ count: 200
+ start-address: 31.31.31.2
+ name: ipprofileA
+
+
+ vld:
+ # Networks for the VNFs
+ - id: vld1
+ name: vld1-name
+ short-name: vld1-sname
+ type: ELAN
+ # vim-network-name: <update>
+ # provider-network:
+ # overlay-type: VLAN
+ # segmentation_id: <update>
+ ip-profile-ref: ipprofileA
+ vnfd-connection-point-ref:
+ # Specify the constituent VNFs
+ # member-vnf-index-ref - entry from constituent vnf
+ # vnfd-id-ref - VNFD id
+ # vnfd-connection-point-ref - connection point name in the VNFD
+ - member-vnf-index-ref: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 2
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 3
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+
+
+ vnffgd:
+ # VNF Forwarding Graph Descriptors
+ - id: vnffg1
+ name: vnffg1-name
+ short-name: vnffg1-sname
+ description: vnffg1-description
+ vendor: vnffg1-vendor
+ version: '1.0'
+ rsp:
+ - id: rsp1
+ name: rsp1-name
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: 2
+ order: 0
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ - member-vnf-index-ref: 3
+ order: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ classifier:
+ - id: class1
+ name: class1-name
+ rsp-id-ref: rsp1
+ member-vnf-index-ref: 1
+ vnfd-id-ref: 2vdu_vnfd
+ vnfd-connection-point-ref: eth0
+ match-attributes:
+ - id: match1
+ ip-proto: 6 # TCP
+ source-ip-address: 10.0.0.1
+ destination-ip-address: 10.0.0.2
+ source-port: 0
+ destination-port: 80
+ - id: match2
+ ip-proto: 6 # TCP
+ source-ip-address: 10.0.0.1
+ destination-ip-address: 10.0.0.3
+ source-port: 0
+ destination-port: 80
--- /dev/null
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: 2vdu_vnfd
+ name: 2vdu_vnfd-name
+ short-name: 2vdu-sname
+ description: Simple VNF example with a cirros and 2 vdu count
+ vendor: OSM
+ version: '1.0'
+
+ # Place the logo as png in icons directory and provide the name here
+ logo: cirros-64.png
+
+ # Management interface
+ mgmt-interface:
+ vdu-id: 2vduVM
+
+    # At least one VDU needs to be specified
+ vdu:
+ - id: 2vduVM
+ name: 2vduVM-name
+ description: 2vduVM-description
+ count: 2
+
+ # Flavour of the VM to be instantiated for the VDU
+ # flavor below can fit into m1.micro
+ vm-flavor:
+ vcpu-count: 1
+ memory-mb: 96
+ storage-gb: 0
+
+ # Image/checksum or image including the full path
+ image: 'cirros-0.3.5-x86_64-disk'
+ #checksum:
+
+ interface:
+ # Specify the external interfaces
+ # There can be multiple interfaces defined
+ - name: eth0
+ type: EXTERNAL
+ position: 0
+ virtual-interface:
+ type: OM-MGMT
+ bandwidth: '0'
+ # vnfd-connection-point-ref: eth0
+ external-connection-point-ref: eth0
+
+ # Replace the ssh-rsa public key to use your own public key
+ cloud-init: |
+ #cloud-config
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+ users:
+ - name: osm
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+
+ connection-point:
+ - name: eth0
+ type: VPORT