RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils && \
DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \
DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \
DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \
DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \
DEBIAN_FRONTEND=noninteractive apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \
DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \
DBPORT_="-P$DBPORT"
echo " loading ${DIRNAME}/mano_db_structure.sql"
-sed -e "s/{{mano_db}}/$DBNAME/" ${DIRNAME}/mano_db_structure.sql | mysql $DEF_EXTRA_FILE_PARAM
+sed -e "s/{{mano_db}}/$DBNAME/" ${DIRNAME}/mano_db_structure.sql | mysql $DEF_EXTRA_FILE_PARAM ||
+ ! echo "ERROR at init $DBNAME" || exit 1
echo " migrage database version"
# echo "${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION"
* contact with: nfvlabs@tid.es
**/
--- MySQL dump 10.13 Distrib 5.7.20, for Linux (x86_64)
+-- MySQL dump 10.13 Distrib 5.7.24, for Linux (x86_64)
--
-- Host: localhost Database: {{mano_db}}
-- ------------------------------------------------------
--- Server version 5.7.20-0ubuntu0.16.04.1
+-- Server version 5.7.24
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40000 DROP DATABASE IF EXISTS `{{mano_db}}`*/;
-CREATE DATABASE /*!32312 IF NOT EXISTS*/ `{{mano_db}}` /*!40100 DEFAULT CHARACTER SET latin1 */;
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `{{mano_db}}` /*!40100 DEFAULT CHARACTER SET utf8 */;
USE `{{mano_db}}`;
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contains client actions over instances';
/*!40101 SET character_set_client = @saved_cs_client */;
+--
+-- Table structure for table `instance_classifications`
+--
+
+DROP TABLE IF EXISTS `instance_classifications`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_classifications` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_classification_id` varchar(36) DEFAULT NULL,
+ `sce_classifier_match_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_classifications_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_classifications_sce_classifier_matches` (`sce_classifier_match_id`),
+ KEY `FK_instance_classifications_datacenters` (`datacenter_id`),
+ KEY `FK_instance_classifications_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_classifications_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_classifications_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_classifications_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_classifications_sce_classifier_matches` FOREIGN KEY (`sce_classifier_match_id`) REFERENCES `sce_classifier_matches` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `instance_interfaces`
--
CREATE TABLE `instance_nets` (
`uuid` varchar(36) NOT NULL,
`vim_net_id` varchar(128) DEFAULT NULL,
+ `vim_name` varchar(255) DEFAULT NULL,
`instance_scenario_id` varchar(36) DEFAULT NULL,
`sce_net_id` varchar(36) DEFAULT NULL,
`net_id` varchar(36) DEFAULT NULL,
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of scenarios defined by the user';
/*!40101 SET character_set_client = @saved_cs_client */;
+--
+-- Table structure for table `instance_sfis`
+--
+
+DROP TABLE IF EXISTS `instance_sfis`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfis` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sfi_id` varchar(36) DEFAULT NULL,
+ `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfis_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfis_sce_rsp_hops` (`sce_rsp_hop_id`),
+ KEY `FK_instance_sfis_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfis_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfis_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfis_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfis_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfis_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfps`
+--
+
+DROP TABLE IF EXISTS `instance_sfps`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfps` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sfp_id` varchar(36) DEFAULT NULL,
+ `sce_rsp_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfps_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfps_sce_rsps` (`sce_rsp_id`),
+ KEY `FK_instance_sfps_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfps_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfps_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfps_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfps_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfps_sce_rsps` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfs`
+--
+
+DROP TABLE IF EXISTS `instance_sfs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfs` (
+ `uuid` varchar(36) NOT NULL,
+ `instance_scenario_id` varchar(36) NOT NULL,
+ `vim_sf_id` varchar(36) DEFAULT NULL,
+ `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+ `datacenter_id` varchar(36) DEFAULT NULL,
+ `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+ `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `vim_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_sfs_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_sfs_sce_rsp_hops` (`sce_rsp_hop_id`),
+ KEY `FK_instance_sfs_datacenters` (`datacenter_id`),
+ KEY `FK_instance_sfs_datacenter_tenants` (`datacenter_tenant_id`),
+ CONSTRAINT `FK_instance_sfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+ CONSTRAINT `FK_instance_sfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+ CONSTRAINT `FK_instance_sfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_sfs_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `instance_vms`
--
`instance_vnf_id` varchar(36) NOT NULL,
`vm_id` varchar(36) DEFAULT NULL,
`vim_vm_id` varchar(128) DEFAULT NULL,
+ `vim_name` varchar(255) DEFAULT NULL,
`status` enum('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
`error_msg` varchar(1024) DEFAULT NULL,
`vim_info` text,
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VNFs as part of a scenario';
/*!40101 SET character_set_client = @saved_cs_client */;
+--
+-- Table structure for table `instance_wim_nets`
+--
+
+DROP TABLE IF EXISTS `instance_wim_nets`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_wim_nets` (
+ `uuid` varchar(36) NOT NULL,
+ `wim_internal_id` varchar(128) DEFAULT NULL COMMENT 'Internal ID used by the WIM to refer to the network',
+ `instance_scenario_id` varchar(36) DEFAULT NULL,
+ `sce_net_id` varchar(36) DEFAULT NULL,
+ `wim_id` varchar(36) DEFAULT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','WIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+ `error_msg` varchar(1024) DEFAULT NULL,
+ `wim_info` text,
+ `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+ `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at WIM',
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
+ KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
+ KEY `FK_instance_wim_nets_wims` (`wim_id`),
+ KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
+ CONSTRAINT `FK_instance_wim_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_wim_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+ CONSTRAINT `FK_instance_wim_nets_wim_accounts` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
+ CONSTRAINT `FK_instance_wim_nets_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of wim networks';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `interfaces`
--
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `nets` (
`uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
`vnf_id` varchar(36) NOT NULL,
`name` varchar(255) NOT NULL,
`type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
/*!40101 SET character_set_client = @saved_cs_client */;
+--
+-- Table structure for table `sce_classifier_matches`
+--
+
+DROP TABLE IF EXISTS `sce_classifier_matches`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_classifier_matches` (
+ `uuid` varchar(36) NOT NULL,
+ `ip_proto` varchar(2) NOT NULL,
+ `source_ip` varchar(16) NOT NULL,
+ `destination_ip` varchar(16) NOT NULL,
+ `source_port` varchar(5) NOT NULL,
+ `destination_port` varchar(5) NOT NULL,
+ `sce_classifier_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_classifiers_classifier_match` (`sce_classifier_id`),
+ CONSTRAINT `FK_sce_classifiers_classifier_match` FOREIGN KEY (`sce_classifier_id`) REFERENCES `sce_classifiers` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_classifiers`
+--
+
+DROP TABLE IF EXISTS `sce_classifiers`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_classifiers` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `sce_vnffg_id` varchar(36) NOT NULL,
+ `sce_rsp_id` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) NOT NULL,
+ `interface_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_vnffgs_classifier` (`sce_vnffg_id`),
+ KEY `FK_sce_rsps_classifier` (`sce_rsp_id`),
+ KEY `FK_sce_vnfs_classifier` (`sce_vnf_id`),
+ KEY `FK_interfaces_classifier` (`interface_id`),
+ CONSTRAINT `FK_interfaces_classifier` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_rsps_classifier` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnffgs_classifier` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnfs_classifier` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `sce_interfaces`
--
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `sce_nets` (
`uuid` varchar(36) NOT NULL,
+ `osm_id` varchar(255) DEFAULT NULL,
`name` varchar(255) NOT NULL,
`scenario_id` varchar(36) DEFAULT NULL COMMENT 'NULL if net is matched to several scenarios',
`type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
`multipoint` enum('true','false') NOT NULL DEFAULT 'true',
`external` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, net is already present at VIM',
`description` varchar(255) DEFAULT NULL,
+ `vim_network_name` varchar(255) DEFAULT NULL,
`created_at` double NOT NULL,
`modified_at` double DEFAULT NULL,
`graph` varchar(2000) DEFAULT NULL,
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a scenario definition. It only considers networks among VNFs. Networks among internal VMs are only considered in tble ''nets''.';
/*!40101 SET character_set_client = @saved_cs_client */;
+--
+-- Table structure for table `sce_rsp_hops`
+--
+
+DROP TABLE IF EXISTS `sce_rsp_hops`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_rsp_hops` (
+ `uuid` varchar(36) NOT NULL,
+ `if_order` int(11) NOT NULL DEFAULT '0',
+ `ingress_interface_id` varchar(36) NOT NULL,
+ `egress_interface_id` varchar(36) NOT NULL,
+ `sce_vnf_id` varchar(36) NOT NULL,
+ `sce_rsp_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_interfaces_rsp_hop` (`ingress_interface_id`),
+ KEY `FK_sce_vnfs_rsp_hop` (`sce_vnf_id`),
+ KEY `FK_sce_rsps_rsp_hop` (`sce_rsp_id`),
+ KEY `FK_interfaces_rsp_hop_egress` (`egress_interface_id`),
+ CONSTRAINT `FK_interfaces_rsp_hop_egress` FOREIGN KEY (`egress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_interfaces_rsp_hop_ingress` FOREIGN KEY (`ingress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_rsps_rsp_hop` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_sce_vnfs_rsp_hop` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_rsps`
+--
+
+DROP TABLE IF EXISTS `sce_rsps`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_rsps` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `sce_vnffg_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_sce_vnffgs_rsp` (`sce_vnffg_id`),
+ CONSTRAINT `FK_sce_vnffgs_rsp` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_vnffgs`
+--
+
+DROP TABLE IF EXISTS `sce_vnffgs`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_vnffgs` (
+ `uuid` varchar(36) NOT NULL,
+ `tenant_id` varchar(36) DEFAULT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `vendor` varchar(255) DEFAULT NULL,
+ `scenario_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ KEY `FK_scenarios_sce_vnffg` (`scenario_id`),
+ KEY `FK_scenarios_vnffg` (`tenant_id`),
+ CONSTRAINT `FK_scenarios_vnffg` FOREIGN KEY (`tenant_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
--
-- Table structure for table `sce_vnfs`
--
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `sce_vnfs` (
`uuid` varchar(36) NOT NULL,
- `member_vnf_index` smallint(6) DEFAULT NULL,
+ `member_vnf_index` varchar(255) DEFAULT NULL,
`name` varchar(255) NOT NULL,
`scenario_id` varchar(36) NOT NULL,
`vnf_id` varchar(36) NOT NULL,
/*!40101 SET character_set_client = @saved_cs_client */;
--
--- Table structure for table `vim_actions`
+-- Table structure for table `vim_wim_actions`
--
-DROP TABLE IF EXISTS `vim_actions`;
+DROP TABLE IF EXISTS `vim_wim_actions`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `vim_actions` (
+CREATE TABLE `vim_wim_actions` (
`instance_action_id` varchar(36) NOT NULL,
`task_index` int(6) NOT NULL,
- `datacenter_vim_id` varchar(36) NOT NULL,
+ `datacenter_vim_id` varchar(36) DEFAULT NULL,
`vim_id` varchar(64) DEFAULT NULL,
+ `wim_account_id` varchar(36) DEFAULT NULL,
+ `wim_internal_id` varchar(64) DEFAULT NULL,
`action` varchar(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
- `item` enum('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
+ `item` enum('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_wim_nets') NOT NULL COMMENT 'table where the item is stored',
`item_id` varchar(36) DEFAULT NULL COMMENT 'uuid of the entry in the table',
`status` enum('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
`extra` text COMMENT 'json with params:, depends_on: for the task',
PRIMARY KEY (`task_index`,`instance_action_id`),
KEY `FK_actions_instance_actions` (`instance_action_id`),
KEY `FK_actions_vims` (`datacenter_vim_id`),
+ KEY `item_type_id` (`item`,`item_id`),
+ KEY `FK_actions_wims` (`wim_account_id`),
CONSTRAINT `FK_actions_instance_actions` FOREIGN KEY (`instance_action_id`) REFERENCES `instance_actions` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
- CONSTRAINT `FK_actions_vims` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+ CONSTRAINT `FK_actions_vims` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with the individual VIM actions.';
/*!40101 SET character_set_client = @saved_cs_client */;
CREATE TABLE `vms` (
`uuid` varchar(36) NOT NULL,
`osm_id` varchar(255) DEFAULT NULL,
+ `pdu_type` varchar(255) DEFAULT NULL,
`name` varchar(255) NOT NULL,
`vnf_id` varchar(36) NOT NULL,
`count` smallint(6) NOT NULL DEFAULT '1',
`flavor_id` varchar(36) NOT NULL COMMENT 'Link to flavor table',
- `image_id` varchar(36) NOT NULL COMMENT 'Link to image table',
+ `image_id` varchar(36) DEFAULT NULL COMMENT 'Link to image table',
+ `image_list` text COMMENT 'Alternative images',
`image_path` varchar(100) DEFAULT NULL COMMENT 'Path where the image of the VM is located',
`boot_data` text,
`description` varchar(255) DEFAULT NULL,
/*!40101 SET character_set_client = @saved_cs_client */;
--
--- Dumping routines for database '{{mano_db}}'
+-- Table structure for table `wim_accounts`
+--
+
+DROP TABLE IF EXISTS `wim_accounts`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_accounts` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) DEFAULT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `created` enum('true','false') NOT NULL DEFAULT 'false',
+ `user` varchar(64) DEFAULT NULL,
+ `password` varchar(64) DEFAULT NULL,
+ `config` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `wim_name` (`wim_id`,`name`),
+ KEY `FK_wim_accounts_wims` (`wim_id`),
+ CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_nfvo_tenants`
+--
+
+DROP TABLE IF EXISTS `wim_nfvo_tenants`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_nfvo_tenants` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `nfvo_tenant_id` varchar(36) NOT NULL,
+ `wim_id` varchar(36) NOT NULL,
+ `wim_account_id` varchar(36) NOT NULL,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
+ KEY `FK_wims_nfvo_tenants` (`wim_id`),
+ KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
+ KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
+ CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts mapping to NFVO tenants';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_port_mappings`
+--
+
+DROP TABLE IF EXISTS `wim_port_mappings`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wim_port_mappings` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `wim_id` varchar(36) NOT NULL,
+ `datacenter_id` varchar(36) NOT NULL,
+ `pop_switch_dpid` varchar(64) NOT NULL,
+ `pop_switch_port` varchar(64) NOT NULL,
+ `wan_service_endpoint_id` varchar(256) NOT NULL COMMENT 'this field contains a unique identifier used to check the mapping_info consistency',
+ `wan_service_mapping_info` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `unique_datacenter_port_mapping` (`datacenter_id`,`pop_switch_dpid`,`pop_switch_port`),
+ UNIQUE KEY `unique_wim_port_mapping` (`wim_id`,`wan_service_endpoint_id`),
+ KEY `FK_wims_wim_physical_connections` (`wim_id`),
+ KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
+ CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='WIM port mappings managed by the WIM.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wims`
+--
+
+DROP TABLE IF EXISTS `wims`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `wims` (
+ `uuid` varchar(36) NOT NULL,
+ `name` varchar(255) NOT NULL,
+ `description` varchar(255) DEFAULT NULL,
+ `type` varchar(36) NOT NULL DEFAULT 'odl',
+ `wim_url` varchar(150) NOT NULL,
+ `config` text,
+ `created_at` double NOT NULL,
+ `modified_at` double DEFAULT NULL,
+ PRIMARY KEY (`uuid`),
+ UNIQUE KEY `name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIMs managed by the NFVO.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping routines for database 'mano_db'
--
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
--- Dump completed on 2017-11-23 10:24:39
+-- Dump completed on 2018-12-10 9:58:03
--- MySQL dump 10.13 Distrib 5.7.20, for Linux (x86_64)
+-- MySQL dump 10.13 Distrib 5.7.24, for Linux (x86_64)
--
-- Host: localhost Database: {{mano_db}}
-- ------------------------------------------------------
--- Server version 5.7.20-0ubuntu0.16.04.1
+-- Server version 5.7.24
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
LOCK TABLES `schema_version` WRITE;
/*!40000 ALTER TABLE `schema_version` DISABLE KEYS */;
-INSERT INTO `schema_version` VALUES (1,'0.1','0.2.2','insert schema_version','2015-05-08'),(2,'0.2','0.2.5','new tables images,flavors','2015-07-13'),(3,'0.3','0.3.3','alter vim_tenant tables','2015-07-28'),(4,'0.4','0.3.5','enlarge graph field at sce_vnfs/nets','2015-10-20'),(5,'0.5','0.4.1','Add mac address for bridge interfaces','2015-12-14'),(6,'0.6','0.4.2','Adding VIM status info','2015-12-22'),(7,'0.7','0.4.3','Changing created_at time at database','2016-01-25'),(8,'0.8','0.4.32','Enlarging name at database','2016-02-01'),(9,'0.9','0.4.33','Add ACTIVE:NoMgmtIP to instance_vms table','2016-02-05'),(10,'0.10','0.4.36','tenant management of vnfs,scenarios','2016-03-08'),(11,'0.11','0.4.43','remove unique name at scenarios,instance_scenarios','2016-07-18'),(12,'0.12','0.4.46','create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces','2016-08-29'),(13,'0.13','0.4.47','insert cloud-config at scenarios,instance_scenarios','2016-08-30'),(14,'0.14','0.4.57','remove unique index vim_net_id, instance_scenario_id','2016-09-26'),(15,'0.15','0.4.59','add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL','2016-09-27'),(16,'0.16','0.5.2','enlarge vim_tenant_name and id. 
New config at datacenter_tenants','2016-10-11'),(17,'0.17','0.5.3','Extra description json format of additional devices in datacenter_flavors','2016-12-20'),(18,'0.18','0.5.4','Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'','2017-01-09'),(19,'0.19','0.5.5','Extra Boot-data content at VNFC (vms)','2017-01-11'),(20,'0.20','0.5.9','Added columns to store dataplane connectivity info','2017-03-13'),(21,'0.21','0.5.15','Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles','2017-06-02'),(22,'0.22','0.5.16','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-06-02'),(23,'0.23','0.5.20','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-08-29'),(24,'0.24','0.5.21','Added vnfd fields','2017-08-29'),(25,'0.25','0.5.22','Added osm_id to vnfs,scenarios','2017-09-01'),(26,'0.26','0.5.23','Several changes','2017-09-09'),(27,'0.27','0.5.25','Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants','2017-09-29');
+INSERT INTO `schema_version` VALUES
+(0,'0.0','0.0.0','Database in init process','2015-05-08'),
+(1,'0.1','0.2.2','insert schema_version','2015-05-08'),
+(2,'0.2','0.2.5','new tables images,flavors','2015-07-13'),
+(3,'0.3','0.3.3','alter vim_tenant tables','2015-07-28'),
+(4,'0.4','0.3.5','enlarge graph field at sce_vnfs/nets','2015-10-20'),
+(5,'0.5','0.4.1','Add mac address for bridge interfaces','2015-12-14'),
+(6,'0.6','0.4.2','Adding VIM status info','2015-12-22'),
+(7,'0.7','0.4.3','Changing created_at time at database','2016-01-25'),
+(8,'0.8','0.4.32','Enlarging name at database','2016-02-01'),
+(9,'0.9','0.4.33','Add ACTIVE:NoMgmtIP to instance_vms table','2016-02-05'),
+(10,'0.10','0.4.36','tenant management of vnfs,scenarios','2016-03-08'),
+(11,'0.11','0.4.43','remove unique name at scenarios,instance_scenarios','2016-07-18'),
+(12,'0.12','0.4.46','create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces','2016-08-29'),
+(13,'0.13','0.4.47','insert cloud-config at scenarios,instance_scenarios','2016-08-30'),
+(14,'0.14','0.4.57','remove unique index vim_net_id, instance_scenario_id','2016-09-26'),
+(15,'0.15','0.4.59','add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL','2016-09-27'),
+(16,'0.16','0.5.2','enlarge vim_tenant_name and id. New config at datacenter_tenants','2016-10-11'),
+(17,'0.17','0.5.3','Extra description json format of additional devices in datacenter_flavors','2016-12-20'),
+(18,'0.18','0.5.4','Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'','2017-01-09'),
+(19,'0.19','0.5.5','Extra Boot-data content at VNFC (vms)','2017-01-11'),
+(20,'0.20','0.5.9','Added columns to store dataplane connectivity info','2017-03-13'),
+(21,'0.21','0.5.15','Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles','2017-06-02'),
+(22,'0.22','0.5.16','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-06-02'),
+(23,'0.23','0.5.20','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-08-29'),
+(24,'0.24','0.5.21','Added vnfd fields','2017-08-29'),
+(25,'0.25','0.5.22','Added osm_id to vnfs,scenarios','2017-09-01'),
+(26,'0.26','0.5.23','Several changes','2017-09-09'),
+(27,'0.27','0.5.25','Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants','2017-09-29'),
+(28,'0.28','0.5.28','Adding VNFFG-related tables','2017-11-20'),
+(29,'0.29','0.5.59','Change member_vnf_index to str accordingly to the model','2018-04-11'),
+(30,'0.30','0.5.60','Add image_list to vms','2018-04-24'),
+(31,'0.31','0.5.61','Add vim_network_name to sce_nets','2018-05-03'),
+(32,'0.32','0.5.70','Add vim_name to instance vms','2018-06-28'),
+(33,'0.33','0.5.82','Add pdu information to vms','2018-11-13'),
+(34,'0.34','0.6.00','Added WIM tables','2018-09-10'),
+(35,'0.35','0.6.02','Adding ingress and egress ports for RSPs','2018-12-11'),
+(36,'0.36','0.6.03','Allow vm without image_id for PDUs','2018-12-19');
/*!40000 ALTER TABLE `schema_version` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
--- Dump completed on 2017-11-23 10:24:39
+-- Dump completed on 2018-12-10 9:58:03
DBPORT="3306"
DBNAME="mano_db"
QUIET_MODE=""
+BACKUP_DIR=""
+BACKUP_FILE=""
#TODO update it with the last database version
-LAST_DB_VERSION=34
+LAST_DB_VERSION=38
# Detect paths
MYSQL=$(which mysql)
echo -e " -P PORT database port. '$DBPORT' by default"
echo -e " -h HOST database host. 'localhost' by default"
echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails"
+ echo -e " -b DIR backup folder where to create rollback backup file"
echo -e " -q --quiet: Do not prompt for credentials and exit if cannot access to database"
echo -e " --help shows this help"
}
-while getopts ":u:p:P:h:d:q-:" o; do
+while getopts ":u:p:b:P:h:d:q-:" o; do
case "${o}" in
u)
DBUSER="$OPTARG"
h)
DBHOST="$OPTARG"
;;
+ b)
+ BACKUP_DIR="$OPTARG"
+ ;;
q)
export QUIET_MODE=yes
;;
DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
#echo DBCMD $DBCMD
-#GET DATABASE VERSION
#check that the database seems a openmano database
if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
then
exit 1;
fi
-if ! echo 'show create table schema_version;' | $DBCMD >/dev/null 2>&1
-then
- DATABASE_VER="0.0"
- DATABASE_VER_NUM=0
-else
- DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2`
- DATABASE_VER=`echo "select version from schema_version where version_int='$DATABASE_VER_NUM';" | $DBCMD | tail -n+2`
- [ "$DATABASE_VER_NUM" -lt 0 -o "$DATABASE_VER_NUM" -gt 100 ] &&
- echo " Error can not get database version ($DATABASE_VER?)" >&2 && exit 1
- #echo "_${DATABASE_VER_NUM}_${DATABASE_VER}"
-fi
-
-[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ] &&
- echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
- exit 1
-
#GET DATABASE TARGET VERSION
#DB_VERSION=0
#[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1 #0.2.2 => 1
#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32 #0.5.70 => 32
#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33 #0.5.82 => 33
#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34 #0.6.00 => 34
+#[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35 #0.6.01 => 35
+#[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36 #0.6.03 => 36
+#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37 #0.6.09 => 37
+#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38 #0.6.11 => 38
#TODO ... put next versions here
function upgrade_to_1(){
}
function upgrade_to_33(){
- echo " Add PDU information to 'vms"
+ echo " Add PDU information to 'vms'"
sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
}
function downgrade_from_33(){
- echo " Remove back PDU information from' vms'"
+ echo " Remove back PDU information from 'vms'"
sql "ALTER TABLE vms DROP COLUMN pdu_type;"
sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
sql "DELETE FROM schema_version WHERE version_int='33';"
}
-
-
function upgrade_to_X(){
echo " change 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
echo " Change back 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
}
-
# Upgrade step 33 -> 34: apply the newest "34*.sql" script found under
# ${DBUTILS}/migrations/up (creates the WIM-related tables).
# Relies on the sql() helper, which rolls back and aborts on error.
function upgrade_to_34() {
    echo "      Create databases required for WIM features"
    # use the last matching file in case several revisions exist
    script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
    sql "source ${script}"
}
-
# Downgrade step 34 -> 33: apply the newest "34*.sql" script found under
# ${DBUTILS}/migrations/down (drops the WIM-related tables).
function downgrade_from_34() {
    echo "      Drop databases required for WIM features"
    script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
    sql "source ${script}"
}
+# Upgrade step 34 -> 35: apply the newest "35*.sql" script found under
+# ${DBUTILS}/migrations/up (adds ingress and egress ports for RSPs, matching
+# the schema_version comment for version 35).
+function upgrade_to_35(){
+    # message fixed: it was copy-pasted from upgrade_to_34 (WIM tables)
+    echo "      Add ingress and egress ports for RSPs"
+    script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
+    sql "source ${script}"
+}
+# Downgrade step 35 -> 34: apply the newest "35*.sql" script found under
+# ${DBUTILS}/migrations/down (removes the ingress/egress ports for RSPs).
+function downgrade_from_35(){
+    # message fixed: it was copy-pasted from downgrade_from_34 (WIM tables)
+    echo "      Remove ingress and egress ports for RSPs"
+    script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
+    sql "source ${script}"
+}
+# Upgrade step 35 -> 36: allow vms.image_id to be NULL (PDUs carry no image)
+# and widen the config columns of wims/wim_accounts to TEXT.
+function upgrade_to_36(){
+    echo "      Allow null for image_id at 'vms'"
+    # drop the column default first so the CHANGE COLUMN below starts clean
+    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
+        "flavor_id;"
+    echo "      Enlarge config at 'wims' and 'wim_accounts'"
+    sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
+    sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+        "VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
+}
+# Downgrade step 36 -> 35: restore NOT NULL on vms.image_id.
+# Deliberately leaves the widened wims/wim_accounts config columns as TEXT.
+function downgrade_from_36(){
+    echo "      Force back not null for image_id at 'vms'"
+    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
+        "flavor_id;"
+    # For downgrade do not restore wims/wim_accounts config to varchar 4000
+    sql "DELETE FROM schema_version WHERE version_int='36';"
+}
+# Upgrade step 36 -> 37: extend the vim_wim_actions.item ENUM with the
+# SFC-related tables so SFC items can be tracked by the action engine.
+function upgrade_to_37(){
+    echo "      Adding the enum tags for SFC"
+    sql "ALTER TABLE vim_wim_actions " \
+        "MODIFY COLUMN item " \
+        "ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
+        "'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
+        "NOT NULL COMMENT 'table where the item is stored';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+        "VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
+}
+# Downgrade step 37 -> 36: intentionally does NOT shrink the item ENUM back
+# (that would reintroduce the pre-37 bug); only the schema_version row goes.
+function downgrade_from_37(){
+    echo "      Adding the enum tags for SFC isn't going to be reversed"
+    # It doesn't make sense to reverse to a bug state.
+    sql "DELETE FROM schema_version WHERE version_int='37';"
+}
+# Upgrade step 37 -> 38: add worker/related tracking columns to
+# vim_wim_actions (plus a new FINISHED status), add osm_id to instance_nets,
+# and add/backfill a 'related' column on every instance_* table.
+function upgrade_to_38(){
+    echo "      Change vim_wim_actions, add worker, related"
+    sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
+        "ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
+        "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
+        "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+    # backfill: existing actions relate to their own item
+    sql "UPDATE vim_wim_actions set related=item_id;"
+    echo "      Change DONE to FINISHED when DELETE has been completed"
+    # pairs each CREATE/FIND with a completed DELETE of the same item and
+    # marks both as FINISHED (terminal state introduced in this version)
+    sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
+        "v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
+        "SET v1.status='FINISHED', v2.status='FINISHED';"
+    echo "      Add osm_id to instance_nets"
+    sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+    echo "      Add related to instance_xxxx"
+    for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
+        instance_vms
+    do
+        sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
+        sql "UPDATE $table set related=uuid;"
+    done
+    # instance_wim_nets handled separately: its info column is wim_info
+    sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
+    sql "UPDATE instance_wim_nets set related=uuid;"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+        "VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
+
+}
+# Downgrade step 38 -> 37: drop worker/related from vim_wim_actions, restore
+# the previous status ENUM, and remove the related/osm_id columns added by
+# upgrade_to_38 from every instance_* table.
+function downgrade_from_38(){
+    echo "      Change vim_wim_actions, delete worker, related"
+    # FINISHED does not exist in the old ENUM; fold it back into DONE first
+    sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
+    sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
+        "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
+        "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+    echo "      Remove related from instance_xxxx"
+    # FIX: 'instance_wim_nets' and 'instance_sfis' were fused into the single
+    # token 'instance_wim_netsinstance_sfis' (missing space), so the 'related'
+    # column was dropped from neither table and the ALTER failed on a
+    # nonexistent table. List now matches the seven tables touched by
+    # upgrade_to_38.
+    for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps \
+        instance_sfs instance_vms
+    do
+        sql "ALTER TABLE $table DROP COLUMN related;"
+    done
+    echo "      Remove osm_id from instance_nets"
+    sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
+    sql "DELETE FROM schema_version WHERE version_int='38';"
+}
#TODO ... put functions here
-# echo "db version = "${DATABASE_VER_NUM}
-[ $DB_VERSION -eq $DATABASE_VER_NUM ] && echo " current database version '$DATABASE_VER_NUM' is ok" && exit 0
-# Create a backup database content
-TEMPFILE2="$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")"
-trap 'rm -f "$TEMPFILE2"' EXIT
-mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $TEMPFILE2
+# Remove the sentinel row (version_int=0) that marks a migration in progress.
+# Called when a migration finishes, or after a successful rollback/recovery.
+function del_schema_version_process()
+{
+    echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD ||
+        ! echo "    ERROR writing on schema_version" >&2 || exit 1
+}
+
+# Insert a sentinel row (version_int=0) recording that a migration from
+# $DATABASE_VER_NUM to $DB_VERSION is in progress, embedding the backup file
+# path in the comments column. The recovery path later parses this comment
+# ("...backup: <path>") to locate $BACKUP_FILE after a killed migration.
+function set_schema_version_process()
+{
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
+        "(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
+        "'$(date +%Y-%m-%d)');" | $DBCMD ||
+        ! echo "    Cannot set database at migration process writing into schema_version" >&2 || exit 1
+
+}
# Restore the database from $BACKUP_FILE after a failed migration step, then
# exit 1 in every case (the run was aborted). For an initially-empty database
# (DATABASE_PROCESS contains "init") there is nothing to restore.
function rollback_db()
{
-    cat $TEMPFILE2 | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" ||
-        echo "    Aborted! Rollback database FAIL"
-    exit 1
+    if echo $DATABASE_PROCESS | grep -q init ; then # Empty database. No backup needed
+        echo "    Aborted! Rollback database not needed" && exit 1
+    else # migration a non empty database or Recovering a migration process
+        # on successful restore also clear the sentinel row and the backup;
+        # still exit 1 because the migration itself was aborted
+        cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" &&
+            del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
+        echo "    Aborted! Rollback database FAIL" && exit 1
+    fi
}
function sql() # send a sql command
return 0
}
-#UPGRADE DATABASE step by step
-while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
-do
- echo " upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
- DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
- upgrade_to_${DATABASE_VER_NUM}
- #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
- #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
- #$FILE_ || exit -1 # if fail return
-done
+# Walk the schema one version at a time, calling upgrade_to_N / downgrade_from_N
+# until DATABASE_VER_NUM reaches the target DB_VERSION. Each step function is
+# responsible for its own schema_version bookkeeping.
+function migrate()
+{
+    #UPGRADE DATABASE step by step
+    while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
+    do
+        echo "    upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
+        DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
+        upgrade_to_${DATABASE_VER_NUM}
+        #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
+        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
+        #$FILE_ || exit -1  # if fail return
    done
-#DOWNGRADE DATABASE step by step
-while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
-do
-    echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
-    #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
-    #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
-    #$FILE_ || exit -1  # if fail return
-    downgrade_from_${DATABASE_VER_NUM}
-    DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
-done
+    #DOWNGRADE DATABASE step by step
+    while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
+    do
+        echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
+        #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
+        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
+        #$FILE_ || exit -1  # if fail return
+        downgrade_from_${DATABASE_VER_NUM}
+        DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
+    done
+}
+
+
+# check if current database is ok
+# Reads the highest schema_version.version_int into DATABASE_VER_NUM.
+# Returns 0 when a migration is required, 1 when the database is already at
+# the target DB_VERSION. Exits with 1 on an unreadable or implausible version,
+# or when the database is newer than this script (downgrade must use the
+# newer script).
+function check_migration_needed()
+{
+    DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` ||
+        ! echo "    ERROR cannot read from schema_version" || exit 1
+
+    # sanity window: versions are small positive integers (0..100)
+    if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
+        echo "    Error can not get database version ($DATABASE_VER_NUM?)" >&2
+        exit 1
+    fi
+
+    [[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo "    current database version '$DATABASE_VER_NUM' is ok" && return 1
+    [[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
+        echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
+        exit 1
+    return 0
+}
+
+DATABASE_PROCESS=`echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2` ||
+ ! echo " ERROR cannot read from schema_version" || exit 1
+if [[ -z "$DATABASE_PROCESS" ]] ; then # migration a non empty database
+ check_migration_needed || exit 0
+ # Create a backup database content
+ [[ -n "$BACKUP_DIR" ]] && BACKUP_FILE="$(mktemp -q "${BACKUP_DIR}/backupdb.XXXXXX.sql")"
+ [[ -z "$BACKUP_DIR" ]] && BACKUP_FILE="$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")"
+ mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
+ ! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
+ echo " Backup file '$BACKUP_FILE' created"
+ # Set schema version
+ set_schema_version_process
+ migrate
+ del_schema_version_process
+ rm -f "$BACKUP_FILE"
+elif echo $DATABASE_PROCESS | grep -q init ; then # Empty database. No backup needed
+ echo " Migrating an empty database"
+ if check_migration_needed ; then
+ migrate
+ fi
+ del_schema_version_process
+
+else # Recover Migration process
+ BACKUP_FILE=${DATABASE_PROCESS##*backup: }
+ [[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
+ exit 1
+ echo " Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
+ cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo " Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
+ if check_migration_needed ; then
+ set_schema_version_process
+ migrate
+ fi
+ del_schema_version_process
+ rm -f "$BACKUP_FILE"
+fi
+exit 0
#echo done
'datacenter_images',
'instance_nets',
'instance_vms',
- 'instance_interfaces') NOT NULL
+ 'instance_interfaces',
+ 'instance_sfis',
+ 'instance_sfs',
+ 'instance_classifications',
+ 'instance_sfps') NOT NULL
COMMENT 'table where the item is stored';
ALTER TABLE `vim_wim_actions` MODIFY `datacenter_vim_id` varchar(36) NOT NULL;
ALTER TABLE `vim_wim_actions` DROP `wim_internal_id`, DROP `wim_account_id`;
--- /dev/null
+--
+-- Removing ingress and egress ports for SFC purposes.
+-- Inserting only one port for ingress and egress.
+--
+
+ALTER TABLE sce_rsp_hops
+ DROP FOREIGN KEY FK_interfaces_rsp_hop_ingress,
+ CHANGE COLUMN ingress_interface_id interface_id VARCHAR(36) NOT NULL
+ AFTER if_order,
+ ADD CONSTRAINT FK_interfaces_rsp_hop
+ FOREIGN KEY (interface_id)
+ REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ DROP FOREIGN KEY FK_interfaces_rsp_hop_egress,
+ DROP COLUMN egress_interface_id;
+
+DELETE FROM schema_version WHERE version_int='35';
'instance_nets',
'instance_vms',
'instance_interfaces',
+ 'instance_sfis',
+ 'instance_sfs',
+ 'instance_classifications',
+ 'instance_sfps',
'instance_wim_nets') NOT NULL
COMMENT 'table where the item is stored';
ALTER TABLE `vim_wim_actions`
--- /dev/null
+--
+-- Adding different ingress and egress ports for SFC.
+--
+
+ALTER TABLE sce_rsp_hops
+ DROP FOREIGN KEY FK_interfaces_rsp_hop,
+ CHANGE COLUMN interface_id ingress_interface_id VARCHAR(36) NOT NULL
+ AFTER if_order,
+ ADD CONSTRAINT FK_interfaces_rsp_hop_ingress
+ FOREIGN KEY (ingress_interface_id)
+ REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+ ADD COLUMN egress_interface_id VARCHAR(36) NULL DEFAULT NULL
+ AFTER ingress_interface_id;
+
+UPDATE sce_rsp_hops
+ SET egress_interface_id = ingress_interface_id;
+
+ALTER TABLE sce_rsp_hops
+ ALTER COLUMN egress_interface_id DROP DEFAULT;
+
+ALTER TABLE sce_rsp_hops
+ MODIFY COLUMN egress_interface_id VARCHAR(36) NOT NULL
+ AFTER ingress_interface_id,
+ ADD CONSTRAINT FK_interfaces_rsp_hop_egress
+ FOREIGN KEY (egress_interface_id)
+ REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE;
+
+INSERT INTO schema_version (version_int, version, openmano_ver, comments, date)
+ VALUES (35, '0.35', '0.6.02', 'Adding ingress and egress ports for RSPs', '2018-12-11');
DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
DEBIAN_FRONTEND=noninteractive pip2 install pip==9.0.3 && \
DEBIAN_FRONTEND=noninteractive pip2 install -U progressbar pyvmomi pyvcloud==19.1.1 && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
DEBIAN_FRONTEND=noninteractive apt-get -y install python-networkx && \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install genisoimage && \
DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client
RO_LOG_LEVEL=DEBUG
CMD RO-start.sh
+
+# HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=12 \
+# CMD curl --silent --fail localhost:9090/openmano/tenants || exit 1
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.6.00"
-version_date = "Nov 2018"
-database_version = 34 # expected database schema version
+__version__ = "0.6.12"
+version_date = "Apr 2019"
+database_version = 38 # expected database schema version
global global_config
global logger
import time
import logging
import datetime
+from contextlib import contextmanager
+from functools import wraps, partial
+from threading import Lock
from jsonschema import validate as js_v, exceptions as js_e
from .http_tools import errors as httperrors
+from .utils import Attempt, get_arg, inject_args
+
+
+RECOVERY_TIME = 3
+
+_ATTEMPT = Attempt()
+
+
+def with_transaction(fn=None, cursor=None):
+    """Decorator that can be used together with instances of the ``db_base``
+    class, to perform database actions wrapped in a commit/rollback fashion
+
+    This decorator basically executes the function inside the context object
+    given by the ``transaction`` method in ``db_base``
+
+    Arguments:
+        cursor: [Optional] cursor class. Only the string ``'dict'`` is
+            recognized here (mapped to ``mdb.cursors.DictCursor``); any other
+            value results in the connection's default cursor.
+    """
+    if fn is None:  # Allows calling the decorator directly or with parameters
+        return partial(with_transaction, cursor=cursor)
+
+    @wraps(fn)
+    def _wrapper(self, *args, **kwargs):
+        cursor_type = None
+        if cursor == 'dict':
+            # MySQLdB define the "cursors" module attribute lazily,
+            # so we have to defer references to mdb.cursors.DictCursor
+            cursor_type = mdb.cursors.DictCursor
+
+        with self.transaction(cursor_type):
+            return fn(self, *args, **kwargs)
+
+    return _wrapper
+
+
+def retry(fn=None, max_attempts=Attempt.MAX, **info):
+    """Decorator that can be used together with instances of the ``db_base``
+    class, to replay a method again after a unexpected error.
+
+    The function being decorated needs to either be a method of ``db_base``
+    subclasses or accept an ``db_base`` instance as the first parameter.
+
+    All the extra keyword arguments will be passed to the ``_format_error``
+    method
+
+    Retrying stops when the decorated function returns normally, when
+    ``_format_error`` raises a non-retryable ``db_base_Exception`` (anything
+    other than Internal_Server_Error), or when the attempt budget is spent.
+    NOTE(review): loop-termination semantics depend on the ``Attempt`` class
+    (``countdown``/``count``), which is defined in .utils -- not visible here.
+    """
+    if fn is None:  # Allows calling the decorator directly or with parameters
+        return partial(retry, max_attempts=max_attempts, **info)
+
+    @wraps(fn)
+    def _wrapper(*args, **kwargs):
+        self = args[0]
+        # record the 'table' argument (if any) for better error messages
+        info.setdefault('table', get_arg('table', fn, args, kwargs))
+        attempt = Attempt(max_attempts=max_attempts, info=info)
+        while attempt.countdown >= 0:
+            try:
+                # expose the current attempt to the wrapped fn if it accepts it
+                return inject_args(fn, attempt=attempt)(*args, **kwargs)
+            except (mdb.Error, AttributeError) as ex:
+                self.logger.debug("Attempt #%d", attempt.number)
+                try:
+                    # The format error will throw exceptions, however it can
+                    # tolerate a certain amount of retries if it judges that
+                    # the error can be solved with retrying
+                    self._format_error(ex, attempt.countdown, **attempt.info)
+                    # Anyway, unexpected/unknown errors can still be retried
+                except db_base_Exception as db_ex:
+                    if (attempt.countdown < 0 or db_ex.http_code !=
+                            httperrors.Internal_Server_Error):
+                        raise
+
+                attempt.count += 1
+
+    return _wrapper
+
def _check_valid_uuid(uuid):
id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
class db_base():
tables_with_created_field=()
- def __init__(self, host=None, user=None, passwd=None, database=None, log_name='db', log_level=None):
+ def __init__(self, host=None, user=None, passwd=None, database=None,
+ log_name='db', log_level=None, lock=None):
self.host = host
self.user = user
self.passwd = passwd
self.logger = logging.getLogger(log_name)
if self.log_level:
self.logger.setLevel( getattr(logging, log_level) )
+ self.lock = lock or Lock()
def connect(self, host=None, user=None, passwd=None, database=None):
'''Connect to specific data base.
def escape(self, value):
return self.con.escape(value)
-
def escape_string(self, value):
+ if isinstance(value, unicode):
+ value = value.encode("utf8")
return self.con.escape_string(value)
-
+ @retry
+ @with_transaction
def get_db_version(self):
''' Obtain the database schema version.
Return: (negative, text) if error or version 0.0 where schema_version table is missing
(version_int, version_text) if ok
'''
cmd = "SELECT version_int,version FROM schema_version"
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor()
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
- highest_version_int=0
- highest_version=""
- for row in rows: #look for the latest version
- if row[0]>highest_version_int:
- highest_version_int, highest_version = row[0:2]
- return highest_version_int, highest_version
- except (mdb.Error, AttributeError) as e:
- self.logger.error("Exception '{}' with command '{}'".format(e, cmd))
- #self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
- self._format_error(e, tries)
- tries -= 1
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ highest_version_int=0
+ highest_version=""
+ for row in rows: #look for the latest version
+ if row[0]>highest_version_int:
+ highest_version_int, highest_version = row[0:2]
+ return highest_version_int, highest_version
def disconnect(self):
'''disconnect from specific data base'''
else:
raise
- def _format_error(self, e, tries=1, command=None, extra=None, table=None):
+    def reconnect(self):
+        """Try to gracefully reconnect to the database in case of error.
+
+        First pings the server with auto-reconnect enabled; if that fails,
+        waits ``RECOVERY_TIME`` seconds and opens a fresh connection.
+        """
+        try:
+            self.con.ping(True)  # auto-reconnect if the server is available
+        except:
+            # The server is probably not available...
+            # Let's wait a bit
+            time.sleep(RECOVERY_TIME)
+            self.con = None
+            self.connect()
+
+    def fork_connection(self):
+        """Return a new database object, with a separated connection to the
+        database (and lock), so it can act independently
+
+        The clone copies host/user/password/database and logger settings from
+        ``self`` and is returned already connected.
+        """
+        obj = self.__class__(
+            host=self.host,
+            user=self.user,
+            passwd=self.passwd,
+            database=self.database,
+            log_name=self.logger.name,
+            log_level=self.log_level,
+            lock=Lock()  # fresh lock: the fork must not serialize with self
+        )
+
+        obj.connect()
+
+        return obj
+
+    @contextmanager
+    def transaction(self, cursor_type=None):
+        """DB changes that are executed inside this context will be
+        automatically rolled back in case of error.
+
+        This implementation also adds a lock, so threads sharing the same
+        connection object are synchronized.
+
+        Arguments:
+            cursor_type: optional cursor class passed to ``con.cursor()``;
+                ``None`` (the default) selects the connection's standard
+                cursor (e.g. pass ``mdb.cursors.DictCursor`` for dict rows)
+
+        Yields:
+            Cursor object
+
+        References:
+            https://www.oreilly.com/library/view/mysql-cookbook-2nd/059652708X/ch15s08.html
+            https://github.com/PyMySQL/mysqlclient-python/commit/c64915b1e5c705f4fb10e86db5dcfed0b58552cc
+        """
+        # Previously MySQLdb had built-in support for that using the context
+        # API for the connection object.
+        # This support was removed in version 1.40
+        # https://github.com/PyMySQL/mysqlclient-python/blob/master/HISTORY.rst#whats-new-in-140
+        with self.lock:
+            try:
+                # open an explicit transaction only if autocommit is on
+                if self.con.get_autocommit():
+                    self.con.query("BEGIN")
+
+                self.cur = self.con.cursor(cursor_type)
+                yield self.cur
+            except:  # noqa
+                self.con.rollback()
+                raise
+            else:
+                self.con.commit()
+
+
+ def _format_error(self, e, tries=1, command=None,
+ extra=None, table=None, cmd=None, **_):
'''Creates a text error base on the produced exception
Params:
e: mdb exception
extra: extra information to add to some commands
Return
HTTP error in negative, formatted error text
- '''
+ ''' # the **_ ignores extra kwargs
+ table_info = ' (table `{}`)'.format(table) if table else ''
+ if cmd:
+ self.logger.debug("Exception '%s' with command '%s'%s",
+ e, cmd, table_info)
+
if isinstance(e,AttributeError ):
self.logger.debug(str(e), exc_info=True)
raise db_base_Exception("DB Exception " + str(e), httperrors.Internal_Server_Error)
if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query
- if tries>1:
+ # Let's aways reconnect if the connection is lost
+ # so future calls are not affected.
+ self.reconnect()
+
+ if tries > 1:
self.logger.warn("DB Exception '%s'. Retry", str(e))
- #reconnect
- self.connect()
return
else:
raise db_base_Exception("Database connection timeout Try Again", httperrors.Request_Timeout)
wc = e.args[1].find("in 'where clause'")
fl = e.args[1].find("in 'field list'")
#print de, fk, uk, wc,fl
- table_info = ' (table `{}`)'.format(table) if table else ''
if de>=0:
if fk>=0: #error 1062
raise db_base_Exception(
httperrors.Internal_Server_Error)
def __str2db_format(self, data):
- '''Convert string data to database format.
+ """Convert string data to database format.
If data is None it returns the 'Null' text,
otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
- '''
- if data==None:
+ """
+ if data is None:
return 'Null'
- elif isinstance(data[1], str):
+ elif isinstance(data[1], (str, unicode)):
return json.dumps(data)
else:
return json.dumps(str(data))
B can be also a dict with special keys:
{"INCREMENT": NUMBER}, then it produce "A=A+NUMBER"
"""
- if data[1] == None:
+ if data[1] is None:
return str(data[0]) + "=Null"
- elif isinstance(data[1], str):
+ elif isinstance(data[1], (str, unicode)):
return str(data[0]) + '=' + json.dumps(data[1])
elif isinstance(data[1], dict):
if "INCREMENT" in data[1]:
for v2 in v:
if v2 is None:
cmd2.append(k.replace("=", " is").replace("<>", " is not") + " Null")
+ elif isinstance(v2, (str, unicode)):
+ cmd2.append(k + json.dumps(v2))
else:
cmd2.append(k + json.dumps(str(v2)))
cmd.append("(" + " OR ".join(cmd2) + ")")
+ elif isinstance(v, (str, unicode)):
+ cmd.append(k + json.dumps(v))
else:
cmd.append(k + json.dumps(str(v)))
elif isinstance(data, (tuple, list)):
INSERT: dictionary with the key:value to insert
table: table where to insert
add_uuid: if True, it will create an uuid key entry at INSERT if not provided
- created_time: time to add to the created_time column
+ created_time: time to add to the created_at column
It checks presence of uuid and add one automatically otherwise
Return: uuid
'''
cmd= "INSERT INTO " + table +" SET " + \
",".join(map(self.__tuple2db_format_set, INSERT.iteritems() ))
if created_time:
- cmd += ",created_at=%f" % created_time
+ cmd += ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
if confidential_data:
index = cmd.find("SET")
subcmd = cmd[:index] + 'SET...'
rows = self.cur.fetchall()
return rows
+ @retry
+ @with_transaction
def new_row(self, table, INSERT, add_uuid=False, created_time=0, confidential_data=False):
''' Add one row into a table.
Attribute
'''
if table in self.tables_with_created_field and created_time==0:
created_time=time.time()
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor()
- return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
+ return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table)
- tries -= 1
-
- def update_rows(self, table, UPDATE, WHERE, modified_time=0):
+ @retry
+ @with_transaction
+ def update_rows(self, table, UPDATE, WHERE, modified_time=None, attempt=_ATTEMPT):
""" Update one or several rows of a table.
:param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
:param table: database table to update
keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
The special keys "OR", "AND" with a dict value is used to create a nested WHERE
If a list, each item will be a dictionary that will be concatenated with OR
- :param modified_time: Can contain the time to be set to the table row
+ :param modified_time: Can contain the time to be set to the table row.
+ None to set automatically, 0 to do not modify it
:return: the number of updated rows, raises exception upon error
"""
- if table in self.tables_with_created_field and modified_time==0:
- modified_time=time.time()
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor()
- return self._update_rows(
- table, UPDATE, WHERE, modified_time)
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table)
- tries -= 1
+ if table in self.tables_with_created_field and modified_time is None:
+ modified_time = time.time()
+
+ return self._update_rows(table, UPDATE, WHERE, modified_time)
def _delete_row_by_id_internal(self, table, uuid):
cmd = "DELETE FROM {} WHERE uuid = '{}'".format(table, uuid)
self.cur.execute(cmd)
return deleted
+ @retry(command='delete', extra='dependencies')
+ @with_transaction
def delete_row_by_id(self, table, uuid):
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor()
- return self._delete_row_by_id_internal(table, uuid)
- except (mdb.Error, AttributeError) as e:
- self._format_error(
- e, tries, "delete", "dependencies", table=table)
- tries -= 1
-
- def delete_row(self, **sql_dict):
+ return self._delete_row_by_id_internal(table, uuid)
+
+ @retry
+ def delete_row(self, attempt=_ATTEMPT, **sql_dict):
""" Deletes rows from a table.
:param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
:param FROM: string with table name (Mandatory)
cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
if sql_dict.get('LIMIT'):
cmd += " LIMIT " + str(sql_dict['LIMIT'])
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor()
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- deleted = self.cur.rowcount
- return deleted
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
- def get_rows_by_id(self, table, uuid):
+
+ attempt.info['cmd'] = cmd
+
+ with self.transaction():
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ return deleted
+
+ @retry
+ @with_transaction(cursor='dict')
+ def get_rows_by_id(self, table, uuid, attempt=_ATTEMPT):
'''get row from a table based on uuid'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
- return rows
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table)
- tries -= 1
-
- def get_rows(self, **sql_dict):
+ cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
+ attempt.info['cmd'] = cmd
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ @retry
+ def get_rows(self, attempt=_ATTEMPT, **sql_dict):
""" Obtain rows from a table.
:param SELECT: list or tuple of fields to retrieve) (by default all)
:param FROM: string with table name (Mandatory)
if 'LIMIT' in sql_dict:
cmd += " LIMIT " + str(sql_dict['LIMIT'])
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
- return rows
- except (mdb.Error, AttributeError) as e:
- self.logger.error("Exception '{}' with command '{}'".format(e, cmd))
- self._format_error(e, tries)
- tries -= 1
-
- def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_serveral=False, WHERE_OR={}, WHERE_AND_OR="OR"):
+ attempt.info['cmd'] = cmd
+
+ with self.transaction(mdb.cursors.DictCursor):
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ return rows
+
+ @retry
+ def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_several=False, WHERE_OR={}, WHERE_AND_OR="OR", attempt=_ATTEMPT):
''' Obtain One row from a table based on name or uuid.
Attribute:
table: string of table name
uuid_name: name or uuid. If not uuid format is found, it is considered a name
- allow_severeral: if False return ERROR if more than one row are founded
+ allow_several: if False return ERROR if more than one row are found
error_item_text: in case of error it identifies the 'item' name for a proper output text
'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional
else:
cmd += " OR " + where_or
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- number = self.cur.rowcount
- if number == 0:
- raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
- elif number > 1 and not allow_serveral:
- raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
- if allow_serveral:
- rows = self.cur.fetchall()
- else:
- rows = self.cur.fetchone()
- return rows
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table)
- tries -= 1
+ attempt.info['cmd'] = cmd
+
+ with self.transaction(mdb.cursors.DictCursor):
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ number = self.cur.rowcount
+ if number == 0:
+ raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
+ elif number > 1 and not allow_several:
+ raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
+ if allow_several:
+ rows = self.cur.fetchall()
+ else:
+ rows = self.cur.fetchone()
+ return rows
+ @retry(table='uuids')
+ @with_transaction(cursor='dict')
def get_uuid(self, uuid):
'''check in the database if this uuid is already present'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
- rows = self.cur.fetchall()
- return self.cur.rowcount, rows
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
+ self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
+ rows = self.cur.fetchall()
+ return self.cur.rowcount, rows
+ @retry
+ @with_transaction(cursor='dict')
def get_uuid_from_name(self, table, name):
'''Searchs in table the name and returns the uuid
'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- where_text = "name='" + name +"'"
- self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
- rows = self.cur.fetchall()
- if self.cur.rowcount==0:
- return 0, "Name %s not found in table %s" %(name, table)
- elif self.cur.rowcount>1:
- return self.cur.rowcount, "More than one VNF with name %s found in table %s" %(name, table)
- return self.cur.rowcount, rows[0]["uuid"]
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table)
- tries -= 1
-
+ where_text = "name='" + name +"'"
+ self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ return 0, "Name %s not found in table %s" %(name, table)
+ elif self.cur.rowcount>1:
+ return self.cur.rowcount, "More than one VNF with name %s found in table %s" %(name, table)
+ return self.cur.rowcount, rows[0]["uuid"]
def start_service(mydb, persistence=None, wim=None):
global db, global_config
- db = nfvo_db.nfvo_db()
+ db = nfvo_db.nfvo_db(lock=db_lock)
+ mydb.lock = db_lock
db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
global ovim
- if persistence:
- persistence.lock = db_lock
- else:
- persistence = WimPersistence(db, lock=db_lock)
+ persistence = persistence or WimPersistence(db)
# Initialize openvim for SDN control
# TODO: Avoid static configuration by adding new parameters to openmanod.cfg
nb_deleted += len(actions_to_delete)
if len(actions_to_delete) < 100:
break
+ # clean locks
+ mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
+
if nb_deleted:
logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
image_list = []
vms = mydb.get_rows(SELECT=('image_id','image_list'), FROM='vms', WHERE={'vnf_id': vnf_id})
for vm in vms:
- if vm["image_id"] not in image_list:
+ if vm["image_id"] and vm["image_id"] not in image_list:
image_list.append(vm["image_id"])
if vm["image_list"]:
vm_image_list = yaml.load(vm["image_list"])
try:
myvnfd = vnfd_catalog.vnfd()
try:
- pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True)
+ pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
+ skip_unknown=True)
except Exception as e:
raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
db_vnfs = []
devices.append(device)
+ if not db_vm.get("image_id"):
+ if not db_vm["pdu_type"]:
+ raise NfvoException("Not defined image for VDU", httperrors.Bad_Request)
+ # create a fake image
+
# cloud-init
boot_data = {}
if vdu.get("cloud-init"):
"'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
httperrors.Bad_Request)
- if vdu_id2db_table_index[vdu_id]:
- db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
+ db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
# TODO consider the case of isolation and not colocation
# if pg.get("strategy") == "ISOLATION":
try:
mynsd = nsd_catalog.nsd()
try:
- pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd)
+ pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
except Exception as e:
raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
db_scenarios = []
str(iface.get("vnfd-id-ref"))[:255]),
httperrors.Bad_Request)
interface_uuid = existing_ifaces[0]["uuid"]
- if existing_ifaces[0]["iface_type"] == "data" and not db_sce_net["type"]:
+ if existing_ifaces[0]["iface_type"] == "data":
db_sce_net["type"] = "data"
sce_interface_uuid = str(uuid4())
uuid_list.append(sce_net_uuid)
db_sce_vnffgs.append(db_sce_vnffg)
# deal with rsps
- db_sce_rsps = []
for rsp in vnffg.get("rsp").itervalues():
sce_rsp_uuid = str(uuid4())
uuid_list.append(sce_rsp_uuid)
"id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
}
db_sce_rsps.append(db_sce_rsp)
- db_sce_rsp_hops = []
for iface in rsp.get("vnfd-connection-point-ref").itervalues():
vnf_index = str(iface['member-vnf-index-ref'])
if_order = int(iface['order'])
str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
httperrors.Bad_Request)
- existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
- FROM="interfaces as i join vms on i.vm_id=vms.uuid",
- WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
- 'external_name': get_str(iface, "vnfd-connection-point-ref",
- 255)})
- if not existing_ifaces:
+ ingress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={
+ 'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-ingress-connection-point-ref",
+ 255)})
+ if not ingress_existing_ifaces:
raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
- "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+ "-ref':'vnfd-ingress-connection-point-ref':'{}'. Reference to a non-existing "
"connection-point name at VNFD '{}'".format(
- str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
- str(iface.get("vnfd-id-ref"))[:255]),
- httperrors.Bad_Request)
- interface_uuid = existing_ifaces[0]["uuid"]
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-ingress-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)
+
+ egress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+ FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+ WHERE={
+ 'vnf_id': vnf_index2vnf_uuid[vnf_index],
+ 'external_name': get_str(iface, "vnfd-egress-connection-point-ref",
+ 255)})
+ if not egress_existing_ifaces:
+ raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+ "-ref':'vnfd-egress-connection-point-ref':'{}'. Reference to a non-existing "
+ "connection-point name at VNFD '{}'".format(
+ str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-egress-connection-point-ref"]),
+ str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)
+
+ ingress_interface_uuid = ingress_existing_ifaces[0]["uuid"]
+ egress_interface_uuid = egress_existing_ifaces[0]["uuid"]
sce_rsp_hop_uuid = str(uuid4())
uuid_list.append(sce_rsp_hop_uuid)
db_sce_rsp_hop = {
"uuid": sce_rsp_hop_uuid,
"if_order": if_order,
- "interface_id": interface_uuid,
+ "ingress_interface_id": ingress_interface_uuid,
+ "egress_interface_id": egress_interface_uuid,
"sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
"sce_rsp_id": sce_rsp_uuid,
}
db_sce_rsp_hops.append(db_sce_rsp_hop)
# deal with classifiers
- db_sce_classifiers = []
for classifier in vnffg.get("classifier").itervalues():
sce_classifier_uuid = str(uuid4())
uuid_list.append(sce_classifier_uuid)
db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
db_sce_classifiers.append(db_sce_classifier)
- db_sce_classifier_matches = []
for match in classifier.get("match-attributes").itervalues():
sce_classifier_match_uuid = str(uuid4())
uuid_list.append(sce_classifier_match_uuid)
#We should use the dictionary as input parameter for new_network
#print myNetDict
if not sce_net["external"]:
- network_id = myvim.new_network(myNetName, myNetType, myNetIPProfile)
+ network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile)
#print "New VIM network created for scenario %s. Network id: %s" % (scenarioDict['name'],network_id)
sce_net['vim_id'] = network_id
auxNetDict['scenario'][sce_net['uuid']] = network_id
#print myNetDict
#TODO:
#We should use the dictionary as input parameter for new_network
- network_id = myvim.new_network(myNetName, myNetType, myNetIPProfile)
+ network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile)
#print "VIM network id for scenario %s: %s" % (scenarioDict['name'],network_id)
net['vim_id'] = network_id
if sce_vnf['uuid'] not in auxNetDict:
def update(d, u):
- '''Takes dict d and updates it with the values in dict u.'''
- '''It merges all depth levels'''
+ """Takes dict d and updates it with the values in dict u.
+ It merges all depth levels"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
myvims = {}
myvim_threads_id = {}
datacenter = instance_dict.get("datacenter")
+ default_wim_account = instance_dict.get("wim_account")
default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
myvims[default_datacenter_id] = vim
myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
if net_instance_desc.get("vim-network-name"):
scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
+ if net_instance_desc.get("vim-network-id"):
+ scenario_net["vim-network-id"] = net_instance_desc["vim-network-id"]
if net_instance_desc.get("name"):
scenario_net["name"] = net_instance_desc["name"]
if 'ip-profile' in net_instance_desc:
# However, this is not possible yet.
for net_name, net_instance_desc in instance_dict.get("networks", {}).iteritems():
for scenario_net in scenarioDict['nets']:
- if net_name == scenario_net["name"]:
+ if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+ if "wim_account" in net_instance_desc and net_instance_desc["wim_account"] is not None:
+ scenario_net["wim_account"] = net_instance_desc["wim_account"]
if 'ip-profile' in net_instance_desc:
ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
if 'ip_profile' not in scenario_net:
break
if not involved_datacenters:
involved_datacenters.append(default_datacenter_id)
+ target_wim_account = sce_net.get("wim_account", default_wim_account)
# --> WIM
# TODO: use this information during network creation
- wim_account_id = None
+ wim_account_id = wim_account_name = None
if len(involved_datacenters) > 1 and 'uuid' in sce_net:
- # OBS: sce_net without uuid are used internally to VNFs
- # and the assumption is that VNFs will not be split among
- # different datacenters
- wim_account_id = wim_engine.find_suitable_wim_account(
- involved_datacenters, tenant_id)
- wim_usage[sce_net['uuid']] = wim_account_id
+ if target_wim_account is None or target_wim_account is True: # automatic selection of WIM
+ # OBS: sce_net without uuid are used internally to VNFs
+ # and the assumption is that VNFs will not be split among
+ # different datacenters
+ wim_account = wim_engine.find_suitable_wim_account(
+ involved_datacenters, tenant_id)
+ wim_account_id = wim_account['uuid']
+ wim_account_name = wim_account['name']
+ wim_usage[sce_net['uuid']] = wim_account_id
+ elif isinstance(target_wim_account, str): # manual selection of WIM
+ wim_account = wim_engine.persist.get_wim_account_by(target_wim_account, tenant_id)
+ wim_account_id = wim_account['uuid']
+ wim_account_name = wim_account['name']
+ wim_usage[sce_net['uuid']] = wim_account_id
+ else: # not WIM usage
+ wim_usage[sce_net['uuid']] = False
# <-- WIM
descriptor_net = {}
- if instance_dict.get("networks") and instance_dict["networks"].get(sce_net["name"]):
- descriptor_net = instance_dict["networks"][sce_net["name"]]
+ if instance_dict.get("networks"):
+ if sce_net.get("uuid") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+ descriptor_net_name = sce_net["uuid"]
+ elif sce_net.get("osm_id") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+ descriptor_net_name = sce_net["osm_id"]
+ elif sce_net["name"] in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["name"]]
+ descriptor_net_name = sce_net["name"]
net_name = descriptor_net.get("vim-network-name")
# add datacenters from instantiation parameters
if descriptor_net.get("sites"):
sce_net2instance[sce_net_uuid] = {}
net2task_id['scenario'][sce_net_uuid] = {}
+ use_network = None
+ related_network = None
+ if descriptor_net.get("use-network"):
+ target_instance_nets = mydb.get_rows(
+ SELECT="related",
+ FROM="instance_nets",
+ WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+ "osm_id": descriptor_net["use-network"]["osm_id"]},
+ )
+ if not target_instance_nets:
+ raise NfvoException(
+ "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
+ httperrors.Bad_Request)
+ else:
+ use_network = target_instance_nets[0]["related"]
+
if sce_net["external"]:
number_mgmt_networks += 1
lookfor_network = True
lookfor_filter["name"] = sce_net.get("vim_network_name")
elif sce_net["external"]:
- if sce_net['vim_id'] is not None:
+ if sce_net.get('vim_id'):
# there is a netmap at datacenter_nets database # TODO REVISE!!!!
create_network = False
lookfor_network = True
task_extra = {}
if create_network:
task_action = "CREATE"
- task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None))
+ task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None), wim_account_name)
if lookfor_network:
task_extra["find"] = (lookfor_filter,)
elif lookfor_network:
net_uuid = str(uuid4())
uuid_list.append(net_uuid)
sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+ if not related_network: # all db_instance_nets will have same related
+ related_network = use_network or net_uuid
db_net = {
"uuid": net_uuid,
+ "osm_id": sce_net.get("osm_id") or sce_net["name"],
+ "related": related_network,
'vim_net_id': None,
"vim_name": net_vim_name,
"instance_scenario_id": instance_uuid,
"action": task_action,
"item": "instance_nets",
"item_id": net_uuid,
+ "related": related_network,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
sfs_created = []
for cp in rsp['connection_points']:
count = mydb.get_rows(
- SELECT=('vms.count'),
- FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h on interfaces.uuid=h.interface_id",
+ SELECT='vms.count',
+ FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h "
+ "on interfaces.uuid=h.ingress_interface_id",
WHERE={'h.uuid': cp['uuid']})[0]['count']
instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
for i in range(count):
# create sfis
sfi_uuid = str(uuid4())
+ extra_params = {
+ "ingress_interface_id": cp["ingress_interface_id"],
+ "egress_interface_id": cp["egress_interface_id"]
+ }
uuid_list.append(sfi_uuid)
db_sfi = {
"uuid": sfi_uuid,
+ "related": sfi_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_hop_id': cp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfis",
"item_id": sfi_uuid,
- "extra": yaml.safe_dump({"params": "", "depends_on": [dependencies[i]]},
+ "related": sfi_uuid,
+ "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
default_flow_style=True, width=256)
}
sfis_created.append(task_index)
uuid_list.append(sf_uuid)
db_sf = {
"uuid": sf_uuid,
+ "related": sf_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_hop_id': cp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfs",
"item_id": sf_uuid,
+ "related": sf_uuid,
"extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
default_flow_style=True, width=256)
}
uuid_list.append(classification_uuid)
db_classification = {
"uuid": classification_uuid,
+ "related": classification_uuid,
"instance_scenario_id": instance_uuid,
'sce_classifier_match_id': match['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_classifications",
"item_id": classification_uuid,
+ "related": classification_uuid,
"extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
default_flow_style=True, width=256)
}
uuid_list.append(sfp_uuid)
db_sfp = {
"uuid": sfp_uuid,
+ "related": sfp_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_id': rsp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfps",
"item_id": sfp_uuid,
+ "related": sfp_uuid,
"extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
default_flow_style=True, width=256)
}
returned_instance = mydb.get_instance_scenario(instance_uuid)
returned_instance["action_id"] = instance_action_id
return returned_instance
- except (NfvoException, vimconn.vimconnException, db_base_Exception) as e:
+ except (NfvoException, vimconn.vimconnException, wimconn.WimConnectorError, db_base_Exception) as e:
message = rollback(mydb, myvims, rollbackList)
if isinstance(e, db_base_Exception):
error_text = "database Exception"
elif isinstance(e, vimconn.vimconnException):
error_text = "VIM Exception"
+ elif isinstance(e, wimconn.WimConnectorError):
+ error_text = "WIM Exception"
else:
error_text = "Exception"
error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
db_net = {
"uuid": net_uuid,
+ "related": net_uuid,
'vim_net_id': None,
"vim_name": net_name,
"instance_scenario_id": instance_uuid,
}
db_instance_nets.append(db_net)
+ lookfor_filter = {}
if net.get("vim-network-name"):
- lookfor_filter = {"name": net["vim-network-name"]}
+ lookfor_filter["name"] = net["vim-network-name"]
+ if net.get("vim-network-id"):
+ lookfor_filter["id"] = net["vim-network-id"]
+ if lookfor_filter:
task_action = "FIND"
task_extra = {"params": (lookfor_filter,)}
else:
"action": task_action,
"item": "instance_nets",
"item_id": net_uuid,
+ "related": net_uuid,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
task_index += 1
uuid_list.append(vm_uuid)
db_vm = {
"uuid": vm_uuid,
+ "related": vm_uuid,
'instance_vnf_id': vnf_uuid,
# TODO delete "vim_vm_id": vm_id,
"vm_id": vm["uuid"],
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm_uuid,
+ "related": vm_uuid,
"extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
default_flow_style=True, width=256)
}
"status": "SCHEDULED",
"item": "instance_sfps",
"item_id": sfp["uuid"],
+ "related": sfp["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_classifications",
"item_id": classification["uuid"],
+ "related": classification["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_sfs",
"item_id": sf["uuid"],
+ "related": sf["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_sfis",
"item_id": sfi["uuid"],
+ "related": sfi["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm["uuid"],
+ "related": vm["related"],
"extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
default_flow_style=True, width=256)
}
"status": "SCHEDULED",
"item": "instance_nets",
"item_id": net["uuid"],
+ "related": net["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vdu_id,
+ "related": vm["related"],
"extra": yaml.safe_dump({"params": vm_interfaces},
default_flow_style=True, width=256)
}
iface2iface = {}
where = {"item": "instance_vms", "item_id": target_vm["uuid"], "action": "CREATE"}
- vim_action_to_clone = mydb.get_rows(FROM="vim_actions", WHERE=where)
+ vim_action_to_clone = mydb.get_rows(FROM="vim_wim_actions", WHERE=where)
if not vim_action_to_clone:
raise NfvoException("Cannot find the vim_action at database with {}".format(where), httperrors.Internal_Server_Error)
vim_action_to_clone = vim_action_to_clone[0]
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm_uuid,
+ "related": target_vm["related"],
# ALF
# ALF
# TODO examinar parametros, quitar MAC o incrementar. Incrementar IP y colocar las dependencias con ACTION-asdfasd.
# {"instance_sfs": db_instance_sfs},
# {"instance_classifications": db_instance_classifications},
# {"instance_sfps": db_instance_sfps},
- {"vim_actions": db_vim_actions}
+ {"vim_wim_actions": db_vim_actions}
]
logger.debug("create_vdu done DB tables: %s",
yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
net_public = net.pop("shared", False)
net_ipprofile = net.pop("ip_profile", None)
net_vlan = net.pop("vlan", None)
- content = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)
+ content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)
#If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network
if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'):
sdn_network['type'] = net_type
sdn_network['name'] = net_name
sdn_network['region'] = datacenter_tenant_id
- ovim_content = ovim.new_network(sdn_network)
+ ovim_content = ovim.new_network(sdn_network)
except ovimException as e:
logger.error("ovimException creating SDN network={} ".format(
sdn_network) + str(e), exc_info=True)
pci = port.get("pci")
element["switch_port"] = port.get("switch_port")
element["switch_mac"] = port.get("switch_mac")
- if not pci or not (element["switch_port"] or element["switch_mac"]):
- raise NfvoException ("The mapping must contain the 'pci' and at least one of the elements 'switch_port'"
- " or 'switch_mac'", httperrors.Bad_Request)
+ if not element["switch_port"] and not element["switch_mac"]:
+ raise NfvoException ("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
for pci_expanded in utils.expand_brackets(pci):
element["pci"] = pci_expanded
maps.append(dict(element))
# contact with: nfvlabs@tid.es
##
-'''
+"""
NFVO DB engine. It implements all the methods to interact with the Openmano Database
-'''
+"""
__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ ="$28-aug-2014 10:05:01$"
import time
#import sys, os
+from .db_base import retry, with_transaction
from .http_tools import errors as httperrors
+from .utils import Attempt
+
+
+_ATTEMPT = Attempt()
+
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
"interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
class nfvo_db(db_base.db_base):
- def __init__(self, host=None, user=None, passwd=None, database=None, log_name='openmano.db', log_level=None):
- db_base.db_base.__init__(self, host, user, passwd, database, log_name, log_level)
+ def __init__(self, host=None, user=None, passwd=None, database=None,
+ log_name='openmano.db', log_level=None, lock=None):
+ db_base.db_base.__init__(self, host, user, passwd, database,
+ log_name, log_level, lock)
db_base.db_base.tables_with_created_field=tables_with_createdat_field
return
+ @retry
+ @with_transaction
def new_vnf_as_a_whole(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
self.logger.debug("Adding new vnf to the NFVO database")
- tries = 2
- while tries:
- created_time = time.time()
- try:
- with self.con:
-
- myVNFDict = {}
- myVNFDict["name"] = vnf_name
- myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
- myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
- myVNFDict["description"] = vnf_descriptor['vnf']['description']
- myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
- myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
-
- vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
- #print "Adding new vms to the NFVO database"
- #For each vm, we must create the appropriate vm in the NFVO database.
- vmDict = {}
- for _,vm in VNFCDict.iteritems():
- #This code could make the name of the vms grow and grow.
- #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
- #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
- #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
- vm["vnf_id"] = vnf_id
- created_time += 0.00001
- vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
- #print "Internal vm id in NFVO DB: %s" % vm_id
- vmDict[vm['name']] = vm_id
-
- #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
- bridgeInterfacesDict = {}
- for vm in vnf_descriptor['vnf']['VNFC']:
- if 'bridge-ifaces' in vm:
- bridgeInterfacesDict[vm['name']] = {}
- for bridgeiface in vm['bridge-ifaces']:
- created_time += 0.00001
- if 'port-security' in bridgeiface:
- bridgeiface['port_security'] = bridgeiface.pop('port-security')
- if 'floating-ip' in bridgeiface:
- bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
- db_base._convert_bandwidth(bridgeiface, logger=self.logger)
- bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
- int(bridgeiface.get('port_security', True))
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
- int(bridgeiface.get('floating_ip', False))
- bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time
-
- # Collect the data interfaces of each VM/VNFC under the 'numas' field
- dataifacesDict = {}
- for vm in vnf_descriptor['vnf']['VNFC']:
- dataifacesDict[vm['name']] = {}
- for numa in vm.get('numas', []):
- for dataiface in numa.get('interfaces', []):
- created_time += 0.00001
- db_base._convert_bandwidth(dataiface, logger=self.logger)
- dataifacesDict[vm['name']][dataiface['name']] = {}
- dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface.get('vpci')
- dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
- dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
- 'dedicated'] == "yes" else (
- "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
- dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time
-
- #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
- #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
- internalconnList = []
- if 'internal-connections' in vnf_descriptor['vnf']:
- for net in vnf_descriptor['vnf']['internal-connections']:
- #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-
- myNetDict = {}
- myNetDict["name"] = net['name']
- myNetDict["description"] = net['description']
- myNetDict["type"] = net['type']
- myNetDict["vnf_id"] = vnf_id
-
- created_time += 0.00001
- net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-
- for element in net['elements']:
- ifaceItem = {}
- #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
- ifaceItem["internal_name"] = element['local_iface_name']
- #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
- ifaceItem["vm_id"] = vmDict[element['VNFC']]
- ifaceItem["net_id"] = net_id
- ifaceItem["type"] = net['type']
- if ifaceItem ["type"] == "data":
- dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
- ifaceItem["vpci"] = dataiface['vpci']
- ifaceItem["bw"] = dataiface['bw']
- ifaceItem["model"] = dataiface['model']
- created_time_iface = dataiface['created_time']
- else:
- bridgeiface = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
- ifaceItem["vpci"] = bridgeiface['vpci']
- ifaceItem["mac"] = bridgeiface['mac']
- ifaceItem["bw"] = bridgeiface['bw']
- ifaceItem["model"] = bridgeiface['model']
- ifaceItem["port_security"] = bridgeiface['port_security']
- ifaceItem["floating_ip"] = bridgeiface['floating_ip']
- created_time_iface = bridgeiface['created_time']
- internalconnList.append(ifaceItem)
- #print "Internal net id in NFVO DB: %s" % net_id
-
- #print "Adding internal interfaces to the NFVO database (if any)"
- for iface in internalconnList:
- #print "Iface name: %s" % iface['internal_name']
- iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
- #print "Iface id in NFVO DB: %s" % iface_id
-
- #print "Adding external interfaces to the NFVO database"
- for iface in vnf_descriptor['vnf']['external-connections']:
- myIfaceDict = {}
- #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
- myIfaceDict["internal_name"] = iface['local_iface_name']
- #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
- myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
- myIfaceDict["external_name"] = iface['name']
- myIfaceDict["type"] = iface['type']
- if iface["type"] == "data":
- dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
- myIfaceDict["vpci"] = dataiface['vpci']
- myIfaceDict["bw"] = dataiface['bw']
- myIfaceDict["model"] = dataiface['model']
- created_time_iface = dataiface['created_time']
- else:
- bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
- myIfaceDict["vpci"] = bridgeiface['vpci']
- myIfaceDict["bw"] = bridgeiface['bw']
- myIfaceDict["model"] = bridgeiface['model']
- myIfaceDict["mac"] = bridgeiface['mac']
- myIfaceDict["port_security"]= bridgeiface['port_security']
- myIfaceDict["floating_ip"] = bridgeiface['floating_ip']
- created_time_iface = bridgeiface['created_time']
- #print "Iface name: %s" % iface['name']
- iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
- #print "Iface id in NFVO DB: %s" % iface_id
-
- return vnf_id
-
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
+ created_time = time.time()
+ myVNFDict = {}
+ myVNFDict["name"] = vnf_name
+ myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
+ myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
+ myVNFDict["description"] = vnf_descriptor['vnf']['description']
+ myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
+ myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
+
+ vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
+ #print "Adding new vms to the NFVO database"
+ #For each vm, we must create the appropriate vm in the NFVO database.
+ vmDict = {}
+ for _,vm in VNFCDict.iteritems():
+ #This code could make the name of the vms grow and grow.
+            #If we agree to follow this convention, we should check with a regex that the vnfc name does not already include the vnf name
+ #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
+ #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
+ vm["vnf_id"] = vnf_id
+ created_time += 0.00001
+ vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+ #print "Internal vm id in NFVO DB: %s" % vm_id
+ vmDict[vm['name']] = vm_id
+
+ #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
+ bridgeInterfacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ if 'bridge-ifaces' in vm:
+ bridgeInterfacesDict[vm['name']] = {}
+ for bridgeiface in vm['bridge-ifaces']:
+ created_time += 0.00001
+ if 'port-security' in bridgeiface:
+ bridgeiface['port_security'] = bridgeiface.pop('port-security')
+ if 'floating-ip' in bridgeiface:
+ bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
+ db_base._convert_bandwidth(bridgeiface, logger=self.logger)
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
+ int(bridgeiface.get('port_security', True))
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
+ int(bridgeiface.get('floating_ip', False))
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time
+
+ # Collect the data interfaces of each VM/VNFC under the 'numas' field
+ dataifacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ dataifacesDict[vm['name']] = {}
+ for numa in vm.get('numas', []):
+ for dataiface in numa.get('interfaces', []):
+ created_time += 0.00001
+ db_base._convert_bandwidth(dataiface, logger=self.logger)
+ dataifacesDict[vm['name']][dataiface['name']] = {}
+ dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface.get('vpci')
+ dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
+ dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
+ 'dedicated'] == "yes" else (
+ "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
+ dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time
+
+ #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
+ #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
+ internalconnList = []
+ if 'internal-connections' in vnf_descriptor['vnf']:
+ for net in vnf_descriptor['vnf']['internal-connections']:
+ #print "Net name: %s. Description: %s" % (net['name'], net['description'])
+
+ myNetDict = {}
+ myNetDict["name"] = net['name']
+ myNetDict["description"] = net['description']
+ myNetDict["type"] = net['type']
+ myNetDict["vnf_id"] = vnf_id
+
+ created_time += 0.00001
+ net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+
+ for element in net['elements']:
+ ifaceItem = {}
+ #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
+ ifaceItem["internal_name"] = element['local_iface_name']
+ #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
+ ifaceItem["vm_id"] = vmDict[element['VNFC']]
+ ifaceItem["net_id"] = net_id
+ ifaceItem["type"] = net['type']
+ if ifaceItem ["type"] == "data":
+ dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = dataiface['vpci']
+ ifaceItem["bw"] = dataiface['bw']
+ ifaceItem["model"] = dataiface['model']
+ created_time_iface = dataiface['created_time']
+ else:
+ bridgeiface = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = bridgeiface['vpci']
+ ifaceItem["mac"] = bridgeiface['mac']
+ ifaceItem["bw"] = bridgeiface['bw']
+ ifaceItem["model"] = bridgeiface['model']
+ ifaceItem["port_security"] = bridgeiface['port_security']
+ ifaceItem["floating_ip"] = bridgeiface['floating_ip']
+ created_time_iface = bridgeiface['created_time']
+ internalconnList.append(ifaceItem)
+ #print "Internal net id in NFVO DB: %s" % net_id
+
+ #print "Adding internal interfaces to the NFVO database (if any)"
+ for iface in internalconnList:
+ #print "Iface name: %s" % iface['internal_name']
+ iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ #print "Adding external interfaces to the NFVO database"
+ for iface in vnf_descriptor['vnf']['external-connections']:
+ myIfaceDict = {}
+ #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
+ myIfaceDict["internal_name"] = iface['local_iface_name']
+ #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
+ myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
+ myIfaceDict["external_name"] = iface['name']
+ myIfaceDict["type"] = iface['type']
+ if iface["type"] == "data":
+ dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
+ myIfaceDict["vpci"] = dataiface['vpci']
+ myIfaceDict["bw"] = dataiface['bw']
+ myIfaceDict["model"] = dataiface['model']
+ created_time_iface = dataiface['created_time']
+ else:
+ bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
+ myIfaceDict["vpci"] = bridgeiface['vpci']
+ myIfaceDict["bw"] = bridgeiface['bw']
+ myIfaceDict["model"] = bridgeiface['model']
+ myIfaceDict["mac"] = bridgeiface['mac']
+ myIfaceDict["port_security"]= bridgeiface['port_security']
+ myIfaceDict["floating_ip"] = bridgeiface['floating_ip']
+ created_time_iface = bridgeiface['created_time']
+ #print "Iface name: %s" % iface['name']
+ iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ return vnf_id
+
+ @retry
+ @with_transaction
def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
self.logger.debug("Adding new vnf to the NFVO database")
- tries = 2
- while tries:
- created_time = time.time()
- try:
- with self.con:
-
- myVNFDict = {}
- myVNFDict["name"] = vnf_name
- myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
- myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
- myVNFDict["description"] = vnf_descriptor['vnf']['description']
- myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
- myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
-
- vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
- #print "Adding new vms to the NFVO database"
- #For each vm, we must create the appropriate vm in the NFVO database.
- vmDict = {}
- for _,vm in VNFCDict.iteritems():
- #This code could make the name of the vms grow and grow.
- #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
- #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
- #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
- vm["vnf_id"] = vnf_id
- created_time += 0.00001
- vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
- #print "Internal vm id in NFVO DB: %s" % vm_id
- vmDict[vm['name']] = vm_id
-
- #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
- bridgeInterfacesDict = {}
- for vm in vnf_descriptor['vnf']['VNFC']:
- if 'bridge-ifaces' in vm:
- bridgeInterfacesDict[vm['name']] = {}
- for bridgeiface in vm['bridge-ifaces']:
- created_time += 0.00001
- db_base._convert_bandwidth(bridgeiface, logger=self.logger)
- if 'port-security' in bridgeiface:
- bridgeiface['port_security'] = bridgeiface.pop('port-security')
- if 'floating-ip' in bridgeiface:
- bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
- ifaceDict = {}
- ifaceDict['vpci'] = bridgeiface.get('vpci',None)
- ifaceDict['mac'] = bridgeiface.get('mac_address',None)
- ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
- ifaceDict['model'] = bridgeiface.get('model', None)
- ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
- ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
- ifaceDict['created_time'] = created_time
- bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict
-
- # Collect the data interfaces of each VM/VNFC under the 'numas' field
- dataifacesDict = {}
- for vm in vnf_descriptor['vnf']['VNFC']:
- dataifacesDict[vm['name']] = {}
- for numa in vm.get('numas', []):
- for dataiface in numa.get('interfaces', []):
- created_time += 0.00001
- db_base._convert_bandwidth(dataiface, logger=self.logger)
- ifaceDict = {}
- ifaceDict['vpci'] = dataiface.get('vpci')
- ifaceDict['bw'] = dataiface['bandwidth']
- ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
- ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
- ifaceDict['created_time'] = created_time
- dataifacesDict[vm['name']][dataiface['name']] = ifaceDict
-
- #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
- #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
- if 'internal-connections' in vnf_descriptor['vnf']:
- for net in vnf_descriptor['vnf']['internal-connections']:
- #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-
- myNetDict = {}
- myNetDict["name"] = net['name']
- myNetDict["description"] = net['description']
- if (net["implementation"] == "overlay"):
- net["type"] = "bridge"
- #It should give an error if the type is e-line. For the moment, we consider it as a bridge
- elif (net["implementation"] == "underlay"):
- if (net["type"] == "e-line"):
- net["type"] = "ptp"
- elif (net["type"] == "e-lan"):
- net["type"] = "data"
- net.pop("implementation")
- myNetDict["type"] = net['type']
- myNetDict["vnf_id"] = vnf_id
-
- created_time += 0.00001
- net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-
- if "ip-profile" in net:
- ip_profile = net["ip-profile"]
- myIPProfileDict = {}
- myIPProfileDict["net_id"] = net_id
- myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
- myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
- myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
- myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
- if ("dhcp" in ip_profile):
- myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
- myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
- myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)
-
- created_time += 0.00001
- ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)
-
- for element in net['elements']:
- ifaceItem = {}
- #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
- ifaceItem["internal_name"] = element['local_iface_name']
- #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
- ifaceItem["vm_id"] = vmDict[element['VNFC']]
- ifaceItem["net_id"] = net_id
- ifaceItem["type"] = net['type']
- ifaceItem["ip_address"] = element.get('ip_address',None)
- if ifaceItem ["type"] == "data":
- ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
- ifaceItem["vpci"] = ifaceDict['vpci']
- ifaceItem["bw"] = ifaceDict['bw']
- ifaceItem["model"] = ifaceDict['model']
- else:
- ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
- ifaceItem["vpci"] = ifaceDict['vpci']
- ifaceItem["mac"] = ifaceDict['mac']
- ifaceItem["bw"] = ifaceDict['bw']
- ifaceItem["model"] = ifaceDict['model']
- ifaceItem["port_security"] = ifaceDict['port_security']
- ifaceItem["floating_ip"] = ifaceDict['floating_ip']
- created_time_iface = ifaceDict["created_time"]
- #print "Iface name: %s" % iface['internal_name']
- iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
- #print "Iface id in NFVO DB: %s" % iface_id
-
- #print "Adding external interfaces to the NFVO database"
- for iface in vnf_descriptor['vnf']['external-connections']:
- myIfaceDict = {}
- #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
- myIfaceDict["internal_name"] = iface['local_iface_name']
- #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
- myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
- myIfaceDict["external_name"] = iface['name']
- myIfaceDict["type"] = iface['type']
- if iface["type"] == "data":
- myIfaceDict["vpci"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
- myIfaceDict["bw"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
- myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
- created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
- else:
- myIfaceDict["vpci"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
- myIfaceDict["bw"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
- myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
- myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
- myIfaceDict["port_security"] = \
- bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
- myIfaceDict["floating_ip"] = \
- bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
- created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
- #print "Iface name: %s" % iface['name']
- iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
- #print "Iface id in NFVO DB: %s" % iface_id
-
- return vnf_id
-
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
+ created_time = time.time()
+ myVNFDict = {}
+ myVNFDict["name"] = vnf_name
+ myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
+ myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
+ myVNFDict["description"] = vnf_descriptor['vnf']['description']
+ myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
+ myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
+
+ vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
+ #print "Adding new vms to the NFVO database"
+ #For each vm, we must create the appropriate vm in the NFVO database.
+ vmDict = {}
+ for _,vm in VNFCDict.iteritems():
+ #This code could make the name of the vms grow and grow.
+            #If we agree to follow this convention, we should check with a regex that the vnfc name does not already include the vnf name
+ #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
+ #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
+ vm["vnf_id"] = vnf_id
+ created_time += 0.00001
+ vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+ #print "Internal vm id in NFVO DB: %s" % vm_id
+ vmDict[vm['name']] = vm_id
+
+ #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
+ bridgeInterfacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ if 'bridge-ifaces' in vm:
+ bridgeInterfacesDict[vm['name']] = {}
+ for bridgeiface in vm['bridge-ifaces']:
+ created_time += 0.00001
+ db_base._convert_bandwidth(bridgeiface, logger=self.logger)
+ if 'port-security' in bridgeiface:
+ bridgeiface['port_security'] = bridgeiface.pop('port-security')
+ if 'floating-ip' in bridgeiface:
+ bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
+ ifaceDict = {}
+ ifaceDict['vpci'] = bridgeiface.get('vpci',None)
+ ifaceDict['mac'] = bridgeiface.get('mac_address',None)
+ ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
+ ifaceDict['model'] = bridgeiface.get('model', None)
+ ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
+ ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
+ ifaceDict['created_time'] = created_time
+ bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict
+
+ # Collect the data interfaces of each VM/VNFC under the 'numas' field
+ dataifacesDict = {}
+ for vm in vnf_descriptor['vnf']['VNFC']:
+ dataifacesDict[vm['name']] = {}
+ for numa in vm.get('numas', []):
+ for dataiface in numa.get('interfaces', []):
+ created_time += 0.00001
+ db_base._convert_bandwidth(dataiface, logger=self.logger)
+ ifaceDict = {}
+ ifaceDict['vpci'] = dataiface.get('vpci')
+ ifaceDict['bw'] = dataiface['bandwidth']
+ ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
+ ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
+ ifaceDict['created_time'] = created_time
+ dataifacesDict[vm['name']][dataiface['name']] = ifaceDict
+
+ #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
+ #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
+ if 'internal-connections' in vnf_descriptor['vnf']:
+ for net in vnf_descriptor['vnf']['internal-connections']:
+ #print "Net name: %s. Description: %s" % (net['name'], net['description'])
+
+ myNetDict = {}
+ myNetDict["name"] = net['name']
+ myNetDict["description"] = net['description']
+ if (net["implementation"] == "overlay"):
+ net["type"] = "bridge"
+ #It should give an error if the type is e-line. For the moment, we consider it as a bridge
+ elif (net["implementation"] == "underlay"):
+ if (net["type"] == "e-line"):
+ net["type"] = "ptp"
+ elif (net["type"] == "e-lan"):
+ net["type"] = "data"
+ net.pop("implementation")
+ myNetDict["type"] = net['type']
+ myNetDict["vnf_id"] = vnf_id
+
+ created_time += 0.00001
+ net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
+
+ if "ip-profile" in net:
+ ip_profile = net["ip-profile"]
+ myIPProfileDict = {}
+ myIPProfileDict["net_id"] = net_id
+ myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
+ myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
+ myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
+ myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
+ if ("dhcp" in ip_profile):
+ myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
+ myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
+ myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)
+
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)
+
+ for element in net['elements']:
+ ifaceItem = {}
+ #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
+ ifaceItem["internal_name"] = element['local_iface_name']
+ #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
+ ifaceItem["vm_id"] = vmDict[element['VNFC']]
+ ifaceItem["net_id"] = net_id
+ ifaceItem["type"] = net['type']
+ ifaceItem["ip_address"] = element.get('ip_address',None)
+ if ifaceItem ["type"] == "data":
+ ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = ifaceDict['vpci']
+ ifaceItem["bw"] = ifaceDict['bw']
+ ifaceItem["model"] = ifaceDict['model']
+ else:
+ ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
+ ifaceItem["vpci"] = ifaceDict['vpci']
+ ifaceItem["mac"] = ifaceDict['mac']
+ ifaceItem["bw"] = ifaceDict['bw']
+ ifaceItem["model"] = ifaceDict['model']
+ ifaceItem["port_security"] = ifaceDict['port_security']
+ ifaceItem["floating_ip"] = ifaceDict['floating_ip']
+ created_time_iface = ifaceDict["created_time"]
+ #print "Iface name: %s" % iface['internal_name']
+ iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ #print "Adding external interfaces to the NFVO database"
+ for iface in vnf_descriptor['vnf']['external-connections']:
+ myIfaceDict = {}
+ #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
+ myIfaceDict["internal_name"] = iface['local_iface_name']
+ #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
+ myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
+ myIfaceDict["external_name"] = iface['name']
+ myIfaceDict["type"] = iface['type']
+ if iface["type"] == "data":
+ myIfaceDict["vpci"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
+ myIfaceDict["bw"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
+ myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
+ created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
+ else:
+ myIfaceDict["vpci"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
+ myIfaceDict["bw"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
+ myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
+ myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
+ myIfaceDict["port_security"] = \
+ bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
+ myIfaceDict["floating_ip"] = \
+ bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
+ created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
+ #print "Iface name: %s" % iface['name']
+ iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
+ #print "Iface id in NFVO DB: %s" % iface_id
+
+ return vnf_id
+
# except KeyError as e2:
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# self.logger.debug("Exception type: %s; Filename: %s; Line number: %s", exc_type, fname, exc_tb.tb_lineno)
# raise KeyError
- tries -= 1
+ @retry
+ @with_transaction
def new_scenario(self, scenario_dict):
- tries = 2
- while tries:
- created_time = time.time()
- try:
- with self.con:
- self.cur = self.con.cursor()
- tenant_id = scenario_dict.get('tenant_id')
- #scenario
- INSERT_={'tenant_id': tenant_id,
- 'name': scenario_dict['name'],
- 'description': scenario_dict['description'],
- 'public': scenario_dict.get('public', "false")}
-
- scenario_uuid = self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
- #sce_nets
- for net in scenario_dict['nets'].values():
- net_dict={'scenario_id': scenario_uuid}
- net_dict["name"] = net["name"]
- net_dict["type"] = net["type"]
- net_dict["description"] = net.get("description")
- net_dict["external"] = net.get("external", False)
- if "graph" in net:
- #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
- #TODO, must be json because of the GUI, change to yaml
- net_dict["graph"]=json.dumps(net["graph"])
- created_time += 0.00001
- net_uuid = self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
- net['uuid']=net_uuid
-
- if net.get("ip-profile"):
- ip_profile = net["ip-profile"]
- myIPProfileDict = {
- "sce_net_id": net_uuid,
- "ip_version": ip_profile.get('ip-version', "IPv4"),
- "subnet_address": ip_profile.get('subnet-address'),
- "gateway_address": ip_profile.get('gateway-address'),
- "dns_address": ip_profile.get('dns-address')}
- if "dhcp" in ip_profile:
- myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
- myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
- myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
- self._new_row_internal('ip_profiles', myIPProfileDict)
-
- # sce_vnfs
- for k, vnf in scenario_dict['vnfs'].items():
- INSERT_ = {'scenario_id': scenario_uuid,
- 'name': k,
- 'vnf_id': vnf['uuid'],
- # 'description': scenario_dict['name']
- 'description': vnf['description']}
- if "graph" in vnf:
- #I NSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
- # TODO, must be json because of the GUI, change to yaml
- INSERT_["graph"] = json.dumps(vnf["graph"])
- created_time += 0.00001
- scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
- root_uuid=scenario_uuid, created_time=created_time)
- vnf['scn_vnf_uuid']=scn_vnf_uuid
- # sce_interfaces
- for iface in vnf['ifaces'].values():
- # print 'iface', iface
- if 'net_key' not in iface:
- continue
- iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
- INSERT_={'sce_vnf_id': scn_vnf_uuid,
- 'sce_net_id': iface['net_id'],
- 'interface_id': iface['uuid'],
- 'ip_address': iface.get('ip_address')}
- created_time += 0.00001
- iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
- root_uuid=scenario_uuid, created_time=created_time)
-
- return scenario_uuid
-
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
+ created_time = time.time()
+ tenant_id = scenario_dict.get('tenant_id')
+ #scenario
+ INSERT_={'tenant_id': tenant_id,
+ 'name': scenario_dict['name'],
+ 'description': scenario_dict['description'],
+ 'public': scenario_dict.get('public', "false")}
+
+ scenario_uuid = self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
+ #sce_nets
+ for net in scenario_dict['nets'].values():
+ net_dict={'scenario_id': scenario_uuid}
+ net_dict["name"] = net["name"]
+ net_dict["type"] = net["type"]
+ net_dict["description"] = net.get("description")
+ net_dict["external"] = net.get("external", False)
+ if "graph" in net:
+ #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
+ #TODO, must be json because of the GUI, change to yaml
+ net_dict["graph"]=json.dumps(net["graph"])
+ created_time += 0.00001
+ net_uuid = self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
+ net['uuid']=net_uuid
+
+ if net.get("ip-profile"):
+ ip_profile = net["ip-profile"]
+ myIPProfileDict = {
+ "sce_net_id": net_uuid,
+ "ip_version": ip_profile.get('ip-version', "IPv4"),
+ "subnet_address": ip_profile.get('subnet-address'),
+ "gateway_address": ip_profile.get('gateway-address'),
+ "dns_address": ip_profile.get('dns-address')}
+ if "dhcp" in ip_profile:
+ myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
+ myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
+ myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
+ self._new_row_internal('ip_profiles', myIPProfileDict)
+
+ # sce_vnfs
+ for k, vnf in scenario_dict['vnfs'].items():
+ INSERT_ = {'scenario_id': scenario_uuid,
+ 'name': k,
+ 'vnf_id': vnf['uuid'],
+ # 'description': scenario_dict['name']
+ 'description': vnf['description']}
+ if "graph" in vnf:
+                # INSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
+ # TODO, must be json because of the GUI, change to yaml
+ INSERT_["graph"] = json.dumps(vnf["graph"])
+ created_time += 0.00001
+ scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
+ root_uuid=scenario_uuid, created_time=created_time)
+ vnf['scn_vnf_uuid']=scn_vnf_uuid
+ # sce_interfaces
+ for iface in vnf['ifaces'].values():
+ # print 'iface', iface
+ if 'net_key' not in iface:
+ continue
+ iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
+ INSERT_={'sce_vnf_id': scn_vnf_uuid,
+ 'sce_net_id': iface['net_id'],
+ 'interface_id': iface['uuid'],
+ 'ip_address': iface.get('ip_address')}
+ created_time += 0.00001
+ iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
+ root_uuid=scenario_uuid, created_time=created_time)
+
+ return scenario_uuid
+
+ @retry
+ @with_transaction
def edit_scenario(self, scenario_dict):
- tries = 2
- while tries:
- modified_time = time.time()
- item_changed=0
- try:
- with self.con:
- self.cur = self.con.cursor()
- #check that scenario exist
- tenant_id = scenario_dict.get('tenant_id')
- scenario_uuid = scenario_dict['uuid']
-
- where_text = "uuid='{}'".format(scenario_uuid)
- if not tenant_id and tenant_id != "any":
- where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
- cmd = "SELECT * FROM scenarios WHERE "+ where_text
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- self.cur.fetchall()
- if self.cur.rowcount==0:
- raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
-
- #scenario
- nodes = {}
- topology = scenario_dict.pop("topology", None)
- if topology != None and "nodes" in topology:
- nodes = topology.get("nodes",{})
- UPDATE_ = {}
- if "name" in scenario_dict: UPDATE_["name"] = scenario_dict["name"]
- if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
- if len(UPDATE_)>0:
- WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
- item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
- #sce_nets
- for node_id, node in nodes.items():
- if "graph" in node:
- #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
- #TODO, must be json because of the GUI, change to yaml
- node["graph"] = json.dumps(node["graph"])
- WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
- #Try to change at sce_nets(version 0 API backward compatibility and sce_vnfs)
- item_changed += self._update_rows('sce_nets', node, WHERE_)
- item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
- return item_changed
-
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
+ modified_time = time.time()
+ item_changed=0
+ #check that scenario exist
+ tenant_id = scenario_dict.get('tenant_id')
+ scenario_uuid = scenario_dict['uuid']
+
+ where_text = "uuid='{}'".format(scenario_uuid)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
+
+ #scenario
+ nodes = {}
+ topology = scenario_dict.pop("topology", None)
+ if topology != None and "nodes" in topology:
+ nodes = topology.get("nodes",{})
+ UPDATE_ = {}
+ if "name" in scenario_dict: UPDATE_["name"] = scenario_dict["name"]
+ if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
+ if len(UPDATE_)>0:
+ WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
+ item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
+ #sce_nets
+ for node_id, node in nodes.items():
+ if "graph" in node:
+ #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
+ #TODO, must be json because of the GUI, change to yaml
+ node["graph"] = json.dumps(node["graph"])
+ WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
+            # Try to change at sce_nets (version 0 API backward compatibility) and at sce_vnfs
+ item_changed += self._update_rows('sce_nets', node, WHERE_)
+ item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
+ return item_changed
# def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
-# '''Obtain the scenario instance information, filtering by one or serveral of the tenant, uuid or name
+# '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
# instance_scenario_id is the uuid or the name if it is not a valid uuid format
# Only one scenario isntance must mutch the filtering or an error is returned
# '''
# print "1******************************************************************"
# try:
-# with self.con:
-# self.cur = self.con.cursor(mdb.cursors.DictCursor)
+# with self.transaction(mdb.cursors.DictCursor):
# #scenario table
# where_list=[]
# if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
# print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
# return self._format_error(e)
+ @retry
+ @with_transaction(cursor='dict')
def get_scenario(self, scenario_id, tenant_id=None, datacenter_vim_id=None, datacenter_id=None):
- '''Obtain the scenario information, filtering by one or serveral of the tenant, uuid or name
+ '''Obtain the scenario information, filtering by one or several of the tenant, uuid or name
scenario_id is the uuid or the name if it is not a valid uuid format
if datacenter_vim_id,d datacenter_id is provided, it supply aditional vim_id fields with the matching vim uuid
Only one scenario must mutch the filtering or an error is returned
'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- where_text = "uuid='{}'".format(scenario_id)
- if not tenant_id and tenant_id != "any":
- where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
- cmd = "SELECT * FROM scenarios WHERE " + where_text
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
- if self.cur.rowcount==0:
- raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
- scenario_dict = rows[0]
- if scenario_dict["cloud_config"]:
- scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"])
- del scenario_dict["cloud_config"]
- # sce_vnfs
- cmd = "SELECT uuid,name,member_vnf_index,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' "\
- "ORDER BY created_at".format(scenario_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- scenario_dict['vnfs'] = self.cur.fetchall()
-
- for vnf in scenario_dict['vnfs']:
- cmd = "SELECT mgmt_access FROM vnfs WHERE uuid='{}'".format(scenario_dict['vnfs'][0]['vnf_id'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- mgmt_access_dict = self.cur.fetchall()
- if mgmt_access_dict[0].get('mgmt_access'):
- vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'])
- else:
- vnf['mgmt_access'] = None
- # sce_interfaces
- cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
- " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
- " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- vnf['interfaces'] = self.cur.fetchall()
- # vms
- cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
- " vms.description as description, vms.boot_data as boot_data, count," \
- " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
- " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
- " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'" \
- " ORDER BY vms.created_at"
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
+ scenario_dict = rows[0]
+ if scenario_dict["cloud_config"]:
+ scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"])
+ del scenario_dict["cloud_config"]
+ # sce_vnfs
+ cmd = "SELECT uuid,name,member_vnf_index,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' "\
+ "ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnfs'] = self.cur.fetchall()
+
+ for vnf in scenario_dict['vnfs']:
+ cmd = "SELECT mgmt_access FROM vnfs WHERE uuid='{}'".format(scenario_dict['vnfs'][0]['vnf_id'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ mgmt_access_dict = self.cur.fetchall()
+ if mgmt_access_dict[0].get('mgmt_access'):
+ vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'])
+ else:
+ vnf['mgmt_access'] = None
+ # sce_interfaces
+ cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
+ " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
+ " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['interfaces'] = self.cur.fetchall()
+ # vms
+ cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
+ " vms.description as description, vms.boot_data as boot_data, count," \
+ " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
+ " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
+ " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'" \
+ " ORDER BY vms.created_at"
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ if vm["boot_data"]:
+ vm["boot_data"] = yaml.safe_load(vm["boot_data"])
+ else:
+ del vm["boot_data"]
+ if vm["image_list"]:
+ vm["image_list"] = yaml.safe_load(vm["image_list"])
+ else:
+ del vm["image_list"]
+ if datacenter_vim_id!=None:
+ if vm['image_id']:
+ cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND " \
+ "datacenter_vim_id='{}'".format(vm['image_id'], datacenter_vim_id)
self.logger.debug(cmd)
self.cur.execute(cmd)
- vnf['vms'] = self.cur.fetchall()
- for vm in vnf['vms']:
- if vm["boot_data"]:
- vm["boot_data"] = yaml.safe_load(vm["boot_data"])
- else:
- del vm["boot_data"]
- if vm["image_list"]:
- vm["image_list"] = yaml.safe_load(vm["image_list"])
- else:
- del vm["image_list"]
- if datacenter_vim_id!=None:
- cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND datacenter_vim_id='{}'".format(vm['image_id'],datacenter_vim_id)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- if self.cur.rowcount==1:
- vim_image_dict = self.cur.fetchone()
- vm['vim_image_id']=vim_image_dict['vim_id']
- cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND datacenter_vim_id='{}'".format(vm['flavor_id'],datacenter_vim_id)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- if self.cur.rowcount==1:
- vim_flavor_dict = self.cur.fetchone()
- vm['vim_flavor_id']=vim_flavor_dict['vim_id']
-
- #interfaces
- cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
- "floating_ip, port_security" \
- " FROM interfaces" \
- " WHERE vm_id='{}'" \
- " ORDER BY created_at".format(vm['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- vm['interfaces'] = self.cur.fetchall()
- for iface in vm['interfaces']:
- iface['port-security'] = iface.pop("port_security")
- iface['floating-ip'] = iface.pop("floating_ip")
- for sce_interface in vnf["interfaces"]:
- if sce_interface["interface_id"] == iface["uuid"]:
- if sce_interface["ip_address"]:
- iface["ip_address"] = sce_interface["ip_address"]
- break
- #nets every net of a vms
- cmd = "SELECT uuid,name,type,description, osm_id FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
+ if self.cur.rowcount==1:
+ vim_image_dict = self.cur.fetchone()
+ vm['vim_image_id']=vim_image_dict['vim_id']
+ if vm['flavor_id']:
+ cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND " \
+ "datacenter_vim_id='{}'".format(vm['flavor_id'], datacenter_vim_id)
self.logger.debug(cmd)
self.cur.execute(cmd)
- vnf['nets'] = self.cur.fetchall()
- for vnf_net in vnf['nets']:
- SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
- cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- ipprofiles = self.cur.fetchall()
- if self.cur.rowcount==1:
- vnf_net["ip_profile"] = ipprofiles[0]
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
-
- #sce_nets
- cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
- " FROM sce_nets WHERE scenario_id='{}'" \
- " ORDER BY created_at ".format(scenario_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- scenario_dict['nets'] = self.cur.fetchall()
- #datacenter_nets
- for net in scenario_dict['nets']:
- if str(net['external']) == 'false':
- SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
- cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- ipprofiles = self.cur.fetchall()
- if self.cur.rowcount==1:
- net["ip_profile"] = ipprofiles[0]
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
- continue
- WHERE_=" WHERE name='{}'".format(net['name'])
- if datacenter_id!=None:
- WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
- cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- d_net = self.cur.fetchone()
- if d_net==None or datacenter_vim_id==None:
- #print "nfvo_db.get_scenario() WARNING external net %s not found" % net['name']
- net['vim_id']=None
- else:
- net['vim_id']=d_net['vim_net_id']
-
- db_base._convert_datetime2str(scenario_dict)
- db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
-
- #forwarding graphs
- cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
- "ORDER BY created_at".format(scenario_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- scenario_dict['vnffgs'] = self.cur.fetchall()
- for vnffg in scenario_dict['vnffgs']:
- cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
- "ORDER BY created_at".format(vnffg['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- vnffg['rsps'] = self.cur.fetchall()
- for rsp in vnffg['rsps']:
- cmd = "SELECT uuid,if_order,interface_id,sce_vnf_id FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
- "ORDER BY created_at".format(rsp['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rsp['connection_points'] = self.cur.fetchall();
- cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
- "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rsp['classifier'] = self.cur.fetchone();
- cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
- "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rsp['classifier']['matches'] = self.cur.fetchall()
-
- return scenario_dict
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
+ if self.cur.rowcount==1:
+ vim_flavor_dict = self.cur.fetchone()
+ vm['vim_flavor_id']=vim_flavor_dict['vim_id']
+
+ #interfaces
+ cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
+ "floating_ip, port_security" \
+ " FROM interfaces" \
+ " WHERE vm_id='{}'" \
+ " ORDER BY created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vm['interfaces'] = self.cur.fetchall()
+ for iface in vm['interfaces']:
+ iface['port-security'] = iface.pop("port_security")
+ iface['floating-ip'] = iface.pop("floating_ip")
+ for sce_interface in vnf["interfaces"]:
+ if sce_interface["interface_id"] == iface["uuid"]:
+ if sce_interface["ip_address"]:
+ iface["ip_address"] = sce_interface["ip_address"]
+ break
+                # nets: every internal net of this VNF
+ cmd = "SELECT uuid,name,type,description, osm_id FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['nets'] = self.cur.fetchall()
+ for vnf_net in vnf['nets']:
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ vnf_net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
+
+ #sce_nets
+ cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
+ " FROM sce_nets WHERE scenario_id='{}'" \
+ " ORDER BY created_at ".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['nets'] = self.cur.fetchall()
+ #datacenter_nets
+ for net in scenario_dict['nets']:
+ if str(net['external']) == 'false':
+ SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+ cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ ipprofiles = self.cur.fetchall()
+ if self.cur.rowcount==1:
+ net["ip_profile"] = ipprofiles[0]
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
+ continue
+ WHERE_=" WHERE name='{}'".format(net['name'])
+ if datacenter_id!=None:
+ WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
+ cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ d_net = self.cur.fetchone()
+ if d_net==None or datacenter_vim_id==None:
+ #print "nfvo_db.get_scenario() WARNING external net %s not found" % net['name']
+ net['vim_id']=None
+ else:
+ net['vim_id']=d_net['vim_net_id']
+
+ db_base._convert_datetime2str(scenario_dict)
+ db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+ #forwarding graphs
+ cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+ "ORDER BY created_at".format(scenario_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ scenario_dict['vnffgs'] = self.cur.fetchall()
+ for vnffg in scenario_dict['vnffgs']:
+ cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+ "ORDER BY created_at".format(vnffg['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnffg['rsps'] = self.cur.fetchall()
+ for rsp in vnffg['rsps']:
+ cmd = "SELECT uuid,if_order,ingress_interface_id,egress_interface_id,sce_vnf_id " \
+ "FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+ "ORDER BY created_at".format(rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['connection_points'] = self.cur.fetchall();
+ cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+ "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier'] = self.cur.fetchone();
+ cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+ "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rsp['classifier']['matches'] = self.cur.fetchall()
+
+ return scenario_dict
+
+ @retry(command="delete", extra="instances running")
+ @with_transaction(cursor='dict')
def delete_scenario(self, scenario_id, tenant_id=None):
'''Deletes a scenario, filtering by one or several of the tenant, uuid or name
scenario_id is the uuid or the name if it is not a valid uuid format
Only one scenario must mutch the filtering or an error is returned
'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
-
- #scenario table
- where_text = "uuid='{}'".format(scenario_id)
- if not tenant_id and tenant_id != "any":
- where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
- cmd = "SELECT * FROM scenarios WHERE "+ where_text
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
- if self.cur.rowcount==0:
- raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
- scenario_uuid = rows[0]["uuid"]
- scenario_name = rows[0]["name"]
-
- #sce_vnfs
- cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
-
- return scenario_uuid + " " + scenario_name
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, "delete", "instances running")
- tries -= 1
-
- def new_rows(self, tables, uuid_list=None, confidential_data=False):
+ #scenario table
+ where_text = "uuid='{}'".format(scenario_id)
+ if not tenant_id and tenant_id != "any":
+ where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+ cmd = "SELECT * FROM scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
+ scenario_uuid = rows[0]["uuid"]
+ scenario_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return scenario_uuid + " " + scenario_name
+
+ @retry
+ @with_transaction
+ def new_rows(self, tables, uuid_list=None, confidential_data=False, attempt=_ATTEMPT):
"""
Make a transactional insertion of rows at several tables. Can be also a deletion
:param tables: list with dictionary where the keys are the table names and the values are a row or row list
:param uuid_list: list of created uuids, first one is the root (#TODO to store at uuid table)
:return: None if success, raise exception otherwise
"""
- tries = 2
table_name = None
- while tries:
- created_time = time.time()
- try:
- with self.con:
- self.cur = self.con.cursor()
- for table in tables:
- for table_name, row_list in table.items():
- index = 0
- if isinstance(row_list, dict):
- row_list = (row_list, ) #create a list with the single value
- for row in row_list:
- if "TO-DELETE" in row:
- self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
- continue
-
- if table_name in self.tables_with_created_field:
- if "created_at" in row:
- created_time_param = created_time + (index + row.pop("created_at"))*0.00001
- else:
- created_time_param = created_time + index*0.00001
- index += 1
- else:
- created_time_param = 0
- self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
- confidential_data=confidential_data,
- created_time=created_time_param)
- return
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, table=table_name)
- tries -= 1
+ created_time = time.time()
+ for table in tables:
+ for table_name, row_list in table.items():
+ index = 0
+ attempt.info['table'] = table_name
+ if isinstance(row_list, dict):
+ row_list = (row_list, ) #create a list with the single value
+ for row in row_list:
+ if "TO-DELETE" in row:
+ self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
+ continue
+ if table_name in self.tables_with_created_field:
+ if "created_at" in row:
+ created_time_param = created_time + (index + row.pop("created_at"))*0.00001
+ else:
+ created_time_param = created_time + index*0.00001
+ index += 1
+ else:
+ created_time_param = 0
+ self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
+ confidential_data=confidential_data,
+ created_time=created_time_param)
+ @retry
+ @with_transaction
def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
- tries = 2
- while tries:
- created_time = time.time()
- try:
- with self.con:
- self.cur = self.con.cursor()
- #instance_scenarios
- datacenter_id = scenarioDict['datacenter_id']
- INSERT_={'tenant_id': tenant_id,
- 'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
- 'name': instance_scenario_name,
- 'description': instance_scenario_description,
- 'scenario_id' : scenarioDict['uuid'],
- 'datacenter_id': datacenter_id
- }
- if scenarioDict.get("cloud-config"):
- INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
-
- instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
-
- net_scene2instance={}
- #instance_nets #nets interVNF
- for net in scenarioDict['nets']:
- net_scene2instance[ net['uuid'] ] ={}
- datacenter_site_id = net.get('datacenter_id', datacenter_id)
- if not "vim_id_sites" in net:
- net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
- net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
- sce_net_id = net.get("uuid")
-
- for datacenter_site_id,vim_id in net["vim_id_sites"].iteritems():
- INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
- INSERT_['datacenter_id'] = datacenter_site_id
- INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
- if not net.get('created', False):
- INSERT_['status'] = "ACTIVE"
- if sce_net_id:
- INSERT_['sce_net_id'] = sce_net_id
- created_time += 0.00001
- instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
- net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
- net['uuid'] = instance_net_uuid #overwrite scnario uuid by instance uuid
-
- if 'ip_profile' in net:
- net['ip_profile']['net_id'] = None
- net['ip_profile']['sce_net_id'] = None
- net['ip_profile']['instance_net_id'] = instance_net_uuid
- created_time += 0.00001
- ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
-
- #instance_vnfs
- for vnf in scenarioDict['vnfs']:
- datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
- INSERT_={'instance_scenario_id': instance_uuid, 'vnf_id': vnf['vnf_id'] }
- INSERT_['datacenter_id'] = datacenter_site_id
- INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
- if vnf.get("uuid"):
- INSERT_['sce_vnf_id'] = vnf['uuid']
- created_time += 0.00001
- instance_vnf_uuid = self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
- vnf['uuid'] = instance_vnf_uuid #overwrite scnario uuid by instance uuid
-
- #instance_nets #nets intraVNF
- for net in vnf['nets']:
- net_scene2instance[ net['uuid'] ] = {}
- INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
- INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
- INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
- if net.get("uuid"):
- INSERT_['net_id'] = net['uuid']
- created_time += 0.00001
- instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
- net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
- net['uuid'] = instance_net_uuid #overwrite scnario uuid by instance uuid
-
- if 'ip_profile' in net:
- net['ip_profile']['net_id'] = None
- net['ip_profile']['sce_net_id'] = None
- net['ip_profile']['instance_net_id'] = instance_net_uuid
- created_time += 0.00001
- ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
-
- #instance_vms
- for vm in vnf['vms']:
- INSERT_={'instance_vnf_id': instance_vnf_uuid, 'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id'] }
- created_time += 0.00001
- instance_vm_uuid = self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
- vm['uuid'] = instance_vm_uuid #overwrite scnario uuid by instance uuid
-
- #instance_interfaces
- for interface in vm['interfaces']:
- net_id = interface.get('net_id', None)
- if net_id is None:
- #check if is connected to a inter VNFs net
- for iface in vnf['interfaces']:
- if iface['interface_id'] == interface['uuid']:
- if 'ip_address' in iface:
- interface['ip_address'] = iface['ip_address']
- net_id = iface.get('sce_net_id', None)
- break
- if net_id is None:
- continue
- interface_type='external' if interface['external_name'] is not None else 'internal'
- INSERT_={'instance_vm_id': instance_vm_uuid, 'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
- 'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type': interface_type,
- 'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
- 'port_security': int(interface.get('port-security',True))}
- #created_time += 0.00001
- interface_uuid = self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
- interface['uuid'] = interface_uuid #overwrite scnario uuid by instance uuid
- return instance_uuid
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
+ created_time = time.time()
+ #instance_scenarios
+ datacenter_id = scenarioDict['datacenter_id']
+ INSERT_={'tenant_id': tenant_id,
+ 'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
+ 'name': instance_scenario_name,
+ 'description': instance_scenario_description,
+ 'scenario_id' : scenarioDict['uuid'],
+ 'datacenter_id': datacenter_id
+ }
+ if scenarioDict.get("cloud-config"):
+ INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
+
+ instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
+
+ net_scene2instance={}
+ #instance_nets #nets interVNF
+ for net in scenarioDict['nets']:
+ net_scene2instance[ net['uuid'] ] ={}
+ datacenter_site_id = net.get('datacenter_id', datacenter_id)
+ if not "vim_id_sites" in net:
+ net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
+ net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
+ sce_net_id = net.get("uuid")
+
+ for datacenter_site_id,vim_id in net["vim_id_sites"].iteritems():
+ INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
+ INSERT_['datacenter_id'] = datacenter_site_id
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+ if not net.get('created', False):
+ INSERT_['status'] = "ACTIVE"
+ if sce_net_id:
+ INSERT_['sce_net_id'] = sce_net_id
+ created_time += 0.00001
+ instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+ net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
+            net['uuid'] = instance_net_uuid #overwrite scenario uuid by instance uuid
+
+ if 'ip_profile' in net:
+ net['ip_profile']['net_id'] = None
+ net['ip_profile']['sce_net_id'] = None
+ net['ip_profile']['instance_net_id'] = instance_net_uuid
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+ #instance_vnfs
+ for vnf in scenarioDict['vnfs']:
+ datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
+ INSERT_={'instance_scenario_id': instance_uuid, 'vnf_id': vnf['vnf_id'] }
+ INSERT_['datacenter_id'] = datacenter_site_id
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+ if vnf.get("uuid"):
+ INSERT_['sce_vnf_id'] = vnf['uuid']
+ created_time += 0.00001
+ instance_vnf_uuid = self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
+        vnf['uuid'] = instance_vnf_uuid #overwrite scenario uuid by instance uuid
+
+ #instance_nets #nets intraVNF
+ for net in vnf['nets']:
+ net_scene2instance[ net['uuid'] ] = {}
+ INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #, 'type': net['type']
+ INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
+ INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
+ if net.get("uuid"):
+ INSERT_['net_id'] = net['uuid']
+ created_time += 0.00001
+ instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+ net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
+                net['uuid'] = instance_net_uuid #overwrite scenario uuid by instance uuid
+
+ if 'ip_profile' in net:
+ net['ip_profile']['net_id'] = None
+ net['ip_profile']['sce_net_id'] = None
+ net['ip_profile']['instance_net_id'] = instance_net_uuid
+ created_time += 0.00001
+ ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+ #instance_vms
+ for vm in vnf['vms']:
+ INSERT_={'instance_vnf_id': instance_vnf_uuid, 'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id'] }
+ created_time += 0.00001
+ instance_vm_uuid = self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
+            vm['uuid'] = instance_vm_uuid #overwrite scenario uuid by instance uuid
+
+ #instance_interfaces
+ for interface in vm['interfaces']:
+ net_id = interface.get('net_id', None)
+ if net_id is None:
+                    #check if it is connected to an inter-VNF net
+ for iface in vnf['interfaces']:
+ if iface['interface_id'] == interface['uuid']:
+ if 'ip_address' in iface:
+ interface['ip_address'] = iface['ip_address']
+ net_id = iface.get('sce_net_id', None)
+ break
+ if net_id is None:
+ continue
+ interface_type='external' if interface['external_name'] is not None else 'internal'
+ INSERT_={'instance_vm_id': instance_vm_uuid, 'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
+ 'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type': interface_type,
+ 'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
+ 'port_security': int(interface.get('port-security',True))}
+ #created_time += 0.00001
+ interface_uuid = self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
+                    interface['uuid'] = interface_uuid #overwrite scenario uuid by instance uuid
+ return instance_uuid
+
+ @retry
+ @with_transaction(cursor='dict')
def get_instance_scenario(self, instance_id, tenant_id=None, verbose=False):
'''Obtain the instance information, filtering by one or several of the tenant, uuid or name
instance_id is the uuid or the name if it is not a valid uuid format
Only one instance must mutch the filtering or an error is returned
'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
- # instance table
- where_list = []
- if tenant_id:
- where_list.append("inst.tenant_id='{}'".format(tenant_id))
- if db_base._check_valid_uuid(instance_id):
- where_list.append("inst.uuid='{}'".format(instance_id))
- else:
- where_list.append("inst.name='{}'".format(instance_id))
- where_text = " AND ".join(where_list)
- cmd = "SELECT inst.uuid as uuid, inst.name as name, inst.scenario_id as scenario_id, datacenter_id"\
- " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" \
- " ,inst.description as description, inst.created_at as created_at" \
- " ,inst.cloud_config as cloud_config, s.osm_id as nsd_osm_id" \
- " FROM instance_scenarios as inst left join scenarios as s on inst.scenario_id=s.uuid" \
- " WHERE " + where_text
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
-
- if self.cur.rowcount == 0:
- raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
- elif self.cur.rowcount > 1:
- raise db_base.db_base_Exception("More than one instance found where " + where_text,
- httperrors.Bad_Request)
- instance_dict = rows[0]
- if instance_dict["cloud_config"]:
- instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"])
- del instance_dict["cloud_config"]
-
- # instance_vnfs
- cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
- ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
- "FROM instance_vnfs as iv left join sce_vnfs as sv "\
- " on iv.sce_vnf_id=sv.uuid join vnfs as v on iv.vnf_id=v.uuid " \
- "WHERE iv.instance_scenario_id='{}' " \
- "ORDER BY iv.created_at ".format(instance_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['vnfs'] = self.cur.fetchall()
- for vnf in instance_dict['vnfs']:
- vnf["ip_address"] = None
- vnf_mgmt_access_iface = None
- vnf_mgmt_access_vm = None
- if vnf["mgmt_access"]:
- vnf_mgmt_access = yaml.load(vnf["mgmt_access"])
- vnf_mgmt_access_iface = vnf_mgmt_access.get("interface_id")
- vnf_mgmt_access_vm = vnf_mgmt_access.get("vm_id")
- vnf["ip_address"] = vnf_mgmt_access.get("ip-address")
-
- # instance vms
- cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
- "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid"\
- " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
- " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- vnf['vms'] = self.cur.fetchall()
- for vm in vnf['vms']:
- vm_manage_iface_list=[]
- # instance_interfaces
- cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
- " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
- " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
- " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd )
- vm['interfaces'] = self.cur.fetchall()
- for iface in vm['interfaces']:
- if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
- if not vnf["ip_address"]:
- vnf["ip_address"] = iface["ip_address"]
- if iface["type"] == "mgmt" and iface["ip_address"]:
- vm_manage_iface_list.append(iface["ip_address"])
- if not verbose:
- del iface["type"]
- del iface["uuid"]
- if vm_manage_iface_list:
- vm["ip_address"] = ",".join(vm_manage_iface_list)
- if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
- vnf["ip_address"] = vm["ip_address"]
- del vm["vm_uuid"]
-
- #instance_nets
- #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
- #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
- # "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
- #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
- cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
- "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
- "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name " \
- "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
- "left join nets on inets.net_id=nets.uuid " \
- "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['nets'] = self.cur.fetchall()
-
- #instance_sfps
- cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
- "datacenter_tenant_id,status,error_msg,vim_info"\
- " FROM instance_sfps" \
- " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['sfps'] = self.cur.fetchall()
-
- # for sfp in instance_dict['sfps']:
- #instance_sfs
- cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
- "datacenter_tenant_id,status,error_msg,vim_info"\
- " FROM instance_sfs" \
- " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['sfs'] = self.cur.fetchall()
-
- #for sf in instance_dict['sfs']:
- #instance_sfis
- cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
- "datacenter_tenant_id,status,error_msg,vim_info"\
- " FROM instance_sfis" \
- " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['sfis'] = self.cur.fetchall()
+ # instance table
+ where_list = []
+ if tenant_id:
+ where_list.append("inst.tenant_id='{}'".format(tenant_id))
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append("inst.uuid='{}'".format(instance_id))
+ else:
+ where_list.append("inst.name='{}'".format(instance_id))
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT inst.uuid as uuid, inst.name as name, inst.scenario_id as scenario_id, datacenter_id"\
+ " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" \
+ " ,inst.description as description, inst.created_at as created_at" \
+ " ,inst.cloud_config as cloud_config, s.osm_id as nsd_osm_id" \
+ " FROM instance_scenarios as inst left join scenarios as s on inst.scenario_id=s.uuid" \
+ " WHERE " + where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount == 0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
+ elif self.cur.rowcount > 1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text,
+ httperrors.Bad_Request)
+ instance_dict = rows[0]
+ if instance_dict["cloud_config"]:
+ instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"])
+ del instance_dict["cloud_config"]
+
+ # instance_vnfs
+ cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
+ ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
+ "FROM instance_vnfs as iv left join sce_vnfs as sv "\
+ " on iv.sce_vnf_id=sv.uuid join vnfs as v on iv.vnf_id=v.uuid " \
+ "WHERE iv.instance_scenario_id='{}' " \
+ "ORDER BY iv.created_at ".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['vnfs'] = self.cur.fetchall()
+ for vnf in instance_dict['vnfs']:
+ vnf["ip_address"] = None
+ vnf_mgmt_access_iface = None
+ vnf_mgmt_access_vm = None
+ if vnf["mgmt_access"]:
+ vnf_mgmt_access = yaml.load(vnf["mgmt_access"])
+ vnf_mgmt_access_iface = vnf_mgmt_access.get("interface_id")
+ vnf_mgmt_access_vm = vnf_mgmt_access.get("vm_id")
+ vnf["ip_address"] = vnf_mgmt_access.get("ip-address")
+
+ # instance vms
+ cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
+ "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid, related"\
+ " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
+ " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ vnf['vms'] = self.cur.fetchall()
+ for vm in vnf['vms']:
+ vm_manage_iface_list=[]
+ # instance_interfaces
+ cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
+ " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
+ " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
+ " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd )
+ vm['interfaces'] = self.cur.fetchall()
+ for iface in vm['interfaces']:
+ if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
+ if not vnf["ip_address"]:
+ vnf["ip_address"] = iface["ip_address"]
+ if iface["type"] == "mgmt" and iface["ip_address"]:
+ vm_manage_iface_list.append(iface["ip_address"])
+ if not verbose:
+ del iface["type"]
+ del iface["uuid"]
+ if vm_manage_iface_list:
+ vm["ip_address"] = ",".join(vm_manage_iface_list)
+ if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
+ vnf["ip_address"] = vm["ip_address"]
+ del vm["vm_uuid"]
+
+ #instance_nets
+ #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
+ #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
+ # "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
+ #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
+ cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
+ "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
+ "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name, related " \
+ "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
+ "left join nets on inets.net_id=nets.uuid " \
+ "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['nets'] = self.cur.fetchall()
+
+ #instance_sfps
+ cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfps" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfps'] = self.cur.fetchall()
+
+ # for sfp in instance_dict['sfps']:
+ #instance_sfs
+ cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfs" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfs'] = self.cur.fetchall()
+
+ #for sf in instance_dict['sfs']:
+ #instance_sfis
+ cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_sfis" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['sfis'] = self.cur.fetchall()
# for sfi in instance_dict['sfi']:
- #instance_classifications
- cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
- "datacenter_tenant_id,status,error_msg,vim_info"\
- " FROM instance_classifications" \
- " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- instance_dict['classifications'] = self.cur.fetchall()
+ #instance_classifications
+ cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+ "datacenter_tenant_id,status,error_msg,vim_info, related"\
+ " FROM instance_classifications" \
+ " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ instance_dict['classifications'] = self.cur.fetchall()
# for classification in instance_dict['classifications']
- db_base._convert_datetime2str(instance_dict)
- db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
- return instance_dict
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
+ db_base._convert_datetime2str(instance_dict)
+ db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
+ return instance_dict
+ @retry(command='delete', extra='No dependences can avoid deleting!!!!')
+ @with_transaction(cursor='dict')
def delete_instance_scenario(self, instance_id, tenant_id=None):
- '''Deletes a instance_Scenario, filtering by one or serveral of the tenant, uuid or name
+ '''Deletes a instance_Scenario, filtering by one or several of the tenant, uuid or name
instance_id is the uuid or the name if it is not a valid uuid format
Only one instance_scenario must mutch the filtering or an error is returned
'''
- tries = 2
- while tries:
- try:
- with self.con:
- self.cur = self.con.cursor(mdb.cursors.DictCursor)
-
- #instance table
- where_list=[]
- if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
- if db_base._check_valid_uuid(instance_id):
- where_list.append( "uuid='" + instance_id +"'" )
- else:
- where_list.append( "name='" + instance_id +"'" )
- where_text = " AND ".join(where_list)
- cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- rows = self.cur.fetchall()
-
- if self.cur.rowcount==0:
- raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
- elif self.cur.rowcount>1:
- raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
- instance_uuid = rows[0]["uuid"]
- instance_name = rows[0]["name"]
-
- #sce_vnfs
- cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
-
- return instance_uuid + " " + instance_name
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries, "delete", "No dependences can avoid deleting!!!!")
- tries -= 1
-
+ #instance table
+ where_list=[]
+ if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+ if db_base._check_valid_uuid(instance_id):
+ where_list.append( "uuid='" + instance_id +"'" )
+ else:
+ where_list.append( "name='" + instance_id +"'" )
+ where_text = " AND ".join(where_list)
+ cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ rows = self.cur.fetchall()
+
+ if self.cur.rowcount==0:
+ raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
+ elif self.cur.rowcount>1:
+ raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
+ instance_uuid = rows[0]["uuid"]
+ instance_name = rows[0]["name"]
+
+ #sce_vnfs
+ cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+
+ return instance_uuid + " " + instance_name
+
+ @retry(table='instance_scenarios')
+ @with_transaction
def new_instance_scenario(self, instance_scenario_dict, tenant_id):
#return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
#TODO:
return
+ @retry(table='instance_vnfs')
+ @with_transaction
def new_instance_vnf(self, instance_vnf_dict, tenant_id, instance_scenario_id = None):
#return self.new_row('vms', vm_dict, tenant_id, True, True)
return self._new_row_internal('instance_vnfs', instance_vnf_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
#TODO:
return
+ @retry(table='instance_vms')
+ @with_transaction
def new_instance_vm(self, instance_vm_dict, tenant_id, instance_scenario_id = None):
#return self.new_row('vms', vm_dict, tenant_id, True, True)
return self._new_row_internal('instance_vms', instance_vm_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
#TODO:
return
+ @retry(table='instance_nets')
+ @with_transaction
def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
#TODO:
return
+ @retry(table='instance_interfaces')
+ @with_transaction
def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
#TODO:
return
+ @retry(table='datacenter_nets')
+ @with_transaction
def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
''' Removes the old and adds the new net list at datacenter list for one datacenter.
Attribute
new_net_list: the new values to be inserted. If empty it only deletes the existing nets
Return: (Inserted items, Deleted items) if OK, (-Error, text) if error
'''
- tries = 2
- while tries:
- created_time = time.time()
- try:
- with self.con:
- self.cur = self.con.cursor()
- cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
- self.logger.debug(cmd)
- self.cur.execute(cmd)
- deleted = self.cur.rowcount
- inserted = 0
- for new_net in new_net_list:
- created_time += 0.00001
- self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
- inserted += 1
- return inserted, deleted
- except (mdb.Error, AttributeError) as e:
- self._format_error(e, tries)
- tries -= 1
-
-
+ created_time = time.time()
+ cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
+ self.logger.debug(cmd)
+ self.cur.execute(cmd)
+ deleted = self.cur.rowcount
+ inserted = 0
+ for new_net in new_net_list:
+ created_time += 0.00001
+ self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
+ inserted += 1
+ return inserted, deleted
log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
checksum_schema={"type":"string", "pattern":"^[0-9a-fA-F]{32}$"}
size_schema={"type":"integer","minimum":1,"maximum":100}
+boolean_schema = {"type": "boolean"}
+null_schema = {"type": "null"}
metadata_schema={
"type":"object",
"http_port": port_schema,
"http_admin_port": port_schema,
"http_host": nameshort_schema,
- "auto_push_VNF_to_VIMs": {"type":"boolean"},
+ "auto_push_VNF_to_VIMs": boolean_schema,
"vnf_repository": path_schema,
"db_host": nameshort_schema,
"db_user": nameshort_schema,
"vim_tenant_name": nameshort_schema,
"mano_tenant_name": nameshort_schema,
"mano_tenant_id": id_schema,
- "http_console_proxy": {"type":"boolean"},
+ "http_console_proxy": boolean_schema,
"http_console_host": nameshort_schema,
"http_console_ports": {
"type": "array",
"net": name_schema, #name or uuid of net to change
"name": name_schema,
"description": description_schema,
- "shared": {"type": "boolean"}
+ "shared": boolean_schema
},
"minProperties": 1,
"additionalProperties": False
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties":{
- "enabled": {"type": "boolean"},
- "start-address": {"OneOf": [{"type": "null"}, ip_schema]},
+ "enabled": boolean_schema,
+        "start-address": {"oneOf": [null_schema, ip_schema]},
"count": integer0_schema
},
# "required": ["start-address", "count"],
"type":"object",
"properties":{
"name": name_schema,
- "mgmt": {"type":"boolean"},
+ "mgmt": boolean_schema,
"type": {"type": "string", "enum":["e-line", "e-lan"]},
"implementation": {"type": "string", "enum":["overlay", "underlay"]},
"VNFC": name_schema,
"vpci":pci_schema,
"mac_address": mac_schema,
"model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139", "paravirt"]},
- "port-security": {"type" : "boolean"},
- "floating-ip": {"type" : "boolean"}
+ "port-security": boolean_schema,
+ "floating-ip": boolean_schema,
},
"additionalProperties": False,
"required": ["name"]
"user-data": {"type" : "string"}, # scrip to run
"config-files": {"type": "array", "items": config_files_schema},
# NOTE: “user-data” are mutually exclusive with users and config-files because user/files are injected using user-data
- "boot-data-drive": {"type": "boolean"},
+ "boot-data-drive": boolean_schema,
},
"additionalProperties": False,
}
"description": description_schema,
"class": nameshort_schema,
- "public": {"type" : "boolean"},
- "physical": {"type" : "boolean"},
+ "public": boolean_schema,
+ "physical": boolean_schema,
"default_user": name_schema,
"tenant_id": id_schema, #only valid for admin
"external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
"name": name_schema,
"description": description_schema,
"class": nameshort_schema,
- "public": {"type" : "boolean"},
- "physical": {"type" : "boolean"},
+ "public": boolean_schema,
+ "physical": boolean_schema,
"tenant_id": id_schema, #only valid for admin
"external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
"internal-connections": {"type" : "array", "items": internal_connection_schema_v02, "minItems":1},
"name":name_schema,
"description": description_schema,
"tenant_id": id_schema, #only valid for admin
- "public": {"type": "boolean"},
+ "public": boolean_schema,
"topology":{
"type":"object",
"properties":{
"name": name_schema,
"description": description_schema,
"tenant_id": id_schema, #only valid for admin
- "public": {"type": "boolean"},
+ "public": boolean_schema,
"vnfs": {
"type":"object",
"patternProperties":{
"properties":{
"interfaces":{"type":"array", "minLength":1},
"type": {"type": "string", "enum":["dataplane", "bridge"]},
- "external" : {"type": "boolean"},
+ "external" : boolean_schema,
"graph": graph_schema
},
"required": ["interfaces"]
"name": name_schema,
"description": description_schema,
"tenant_id": id_schema, #only valid for admin
- "public": {"type": "boolean"},
+ "public": boolean_schema,
"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
#"datacenter": name_schema,
"vnfs": {
},
"type": {"type": "string", "enum":["e-line", "e-lan"]},
"implementation": {"type": "string", "enum":["overlay", "underlay"]},
- "external" : {"type": "boolean"},
+ "external" : boolean_schema,
"graph": graph_schema,
"ip-profile": ip_profile_schema
},
"type": "object",
"properties": {
"name": name_schema,
- "external": {"type": "boolean"},
+ "external": boolean_schema,
"type": {"enum": ["bridge", "ptp", "data"]}, # for overlay, underlay E-LINE, underlay E-LAN
},
"additionalProperties": False,
"name": name_schema,
"description":description_schema,
"datacenter": name_schema,
+ "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
"scenario" : {"oneOff": [name_schema, instance_scenario_object]}, # can be an UUID or name or a dict
"action":{"enum": ["deploy","reserve","verify" ]},
- "connect_mgmt_interfaces": {"oneOf": [{"type":"boolean"}, {"type":"object"}]},# can be true or a dict with datacenter: net_name
+ "connect_mgmt_interfaces": {"oneOf": [boolean_schema, {"type":"object"}]},# can be true or a dict with datacenter: net_name
"cloud-config": cloud_config_schema, #common to all vnfs in the instance scenario
"vnfs":{ #mapping from scenario to datacenter
"type": "object",
".": {
"ip_address": ip_schema,
"mac_address": mac_schema,
- "floating-ip": {"type": "boolean"},
+ "floating-ip": boolean_schema,
}
}
}
"type": "object",
"properties": {
"vim-network-name": name_schema,
+ "vim-network-id": name_schema,
"ip-profile": ip_profile_schema,
"name": name_schema,
}
"properties":{
"ip_address": ip_schema,
"datacenter": name_schema,
- "vim-network-name": name_schema
+ "vim-network-name": name_schema,
+ "vim-network-id": name_schema
},
"patternProperties":{
".": {"type": "string"}
}
}
},
+ "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
"ip-profile": ip_profile_schema,
+ "use-network": {
+ "type": "object",
+ "properties": {
+ "instance_scenario_id": id_schema,
+ # "member_vnf_index": name_schema, # if not null, network inside VNF
+ "osm_id": name_schema, # sce_network osm_id or name
+ },
+ "additionalProperties": False,
+ "required": ["instance_scenario_id", "osm_id"]
+ },
#if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connect to
"sites": {
"type":"array",
# By default for an scenario 'external' network openmano looks for an existing VIM network to map this external scenario network,
# for other networks openamno creates at VIM
# Use netmap-create to force to create an external scenario network
- "netmap-create": {"oneOf":[name_schema,{"type": "null"}]}, #datacenter network to use. Null if must be created as an internal net
+ "netmap-create": {"oneOf":[name_schema,null_schema]}, #datacenter network to use. Null if must be created as an internal net
#netmap-use: Indicates an existing VIM network that must be used for this scenario network.
#Can use both the VIM network name (if it is not ambiguous) or the VIM net UUID
#If both 'netmap-create' and 'netmap-use'are supplied, netmap-use precedes, but if fails openmano follows the netmap-create
#In oder words, it is the same as 'try to map to the VIM network (netmap-use) if exist, and if not create the network (netmap-create)
"netmap-use": name_schema, #
"vim-network-name": name_schema, #override network name
+ "vim-network-id": name_schema,
#"ip-profile": ip_profile_schema,
"datacenter": name_schema,
}
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
- "start": {"type": "null"},
- "pause": {"type": "null"},
- "resume": {"type": "null"},
- "shutoff": {"type": "null"},
- "shutdown": {"type": "null"},
- "forceOff": {"type": "null"},
- "rebuild": {"type": "null"},
+ "start": null_schema,
+ "pause": null_schema,
+ "resume": null_schema,
+ "shutoff": null_schema,
+ "shutdown": null_schema,
+ "forceOff": null_schema,
+ "rebuild": null_schema,
"reboot": {
"type": ["object", "null"],
},
"items": {
"type": "object",
"properties": {
- "pci": pci_extended_schema, # pci_schema,
+                "pci": {"oneOf": [null_schema, pci_extended_schema]},  # pci_schema,
"switch_port": nameshort_schema,
"switch_mac": mac_schema
},
--- /dev/null
+# -*- coding: utf-8 -*-
+# pylint: disable=E1101
+import unittest
+
+from MySQLdb import connect, cursors, DatabaseError, IntegrityError
+import mock
+from mock import Mock
+
+from ..db_base import retry, with_transaction
+from ..nfvo_db import nfvo_db
+from .db_helpers import TestCaseWithDatabase
+
+
+class TestDbDecorators(TestCaseWithDatabase):
+ @classmethod
+ def setUpClass(cls):
+ connection = connect(cls.host, cls.user, cls.password)
+ cursor = connection.cursor()
+ cursor.execute(
+ "CREATE DATABASE IF NOT EXISTS {};".format(
+ connection.escape_string(cls.database)))
+ cursor.execute("use {};".format(cls.database))
+ cursor.execute("""\
+ CREATE TABLE IF NOT EXISTS `test_table` (\
+ `id` int(11) NOT NULL,
+ PRIMARY KEY (`id`)\
+ );\
+ """)
+ cursor.close()
+ connection.close()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.empty_database()
+
+ def setUp(self):
+ self.maxDiff = None
+ self.db = nfvo_db(self.host, self.user, self.password, self.database)
+ self.db.connect()
+ self.addCleanup(lambda: self.db.disconnect())
+
+ def db_run(self, query, cursor=None):
+ cursor = cursor or self.db.con.cursor()
+ cursor.execute(query)
+ return cursor.fetchone()
+
+ def test_retry_inject_attempt(self):
+ @retry
+ def _fn(db, attempt=None):
+ self.assertIsNotNone(attempt)
+ self.assertEqual(attempt.number, 1)
+
+ _fn(self.db)
+
+ def test_retry_accept_max_attempts(self):
+ success = []
+ failures = []
+
+ @retry(max_attempts=5)
+ def _fn(db, attempt=None):
+ if attempt.count < 4:
+ failures.append(attempt.count)
+ raise DatabaseError("Emulate DB error", "msg")
+ success.append(attempt.count)
+
+ _fn(self.db)
+ self.assertEqual(failures, [0, 1, 2, 3])
+ self.assertEqual(success, [4])
+
+    def test_retry_reconnect_automatically(self):
+ success = []
+ failures = []
+
+ @retry(max_attempts=3)
+ def _fn(db, attempt=None):
+ if attempt.count < 2:
+ failures.append(attempt.count)
+ db.con.close() # Simulate connection failure
+ result = self.db_run('select 1+1, 2+2;')
+ success.append(attempt.count)
+ return result
+
+ result = _fn(self.db)
+ self.assertEqual(failures, [0, 1])
+ self.assertEqual(success, [2])
+ self.assertEqual(result, (2, 4))
+
+ def test_retry_reraise_non_db_errors(self):
+ failures = []
+
+ @retry
+ def _fn(db, attempt=None):
+ failures.append(attempt.count)
+ raise SystemError("Non Correlated Error")
+
+ with self.assertRaises(SystemError):
+ _fn(self.db)
+
+ self.assertEqual(failures, [0])
+
+ def test_transaction_rollback(self):
+ with self.assertRaises(IntegrityError), \
+ self.db.transaction() as cursor:
+ # The first row is created normally
+ self.db_run('insert into test_table (id) values (1)', cursor)
+ # The second row fails due to repeated id
+ self.db_run('insert into test_table (id) values (1)', cursor)
+ # The entire transaction will rollback then, and therefore the
+ # first operation will be undone
+
+ count = self.db_run('select count(*) FROM test_table')
+ self.assertEqual(count, (0,))
+
+ def test_transaction_cursor(self):
+ with self.db.transaction(cursors.DictCursor) as cursor:
+ count = self.db_run('select count(*) as counter FROM test_table',
+ cursor)
+
+ self.assertEqual(count, {'counter': 0})
+
+
+if __name__ == '__main__':
+ unittest.main()
--- /dev/null
+# -*- coding: utf-8 -*-
+# pylint: disable=E1101
+
+import unittest
+
+from ..utils import get_arg, inject_args
+
+
+class TestUtils(unittest.TestCase):
+ def test_inject_args_curries_arguments(self):
+ fn = inject_args(lambda a=None, b=None: a+b, a=3, b=5)
+ self.assertEqual(fn(), 8)
+
+ def test_inject_args_doesnt_add_arg_if_not_needed(self):
+ fn = inject_args(lambda: 7, a=1, b=2)
+ self.assertEqual(fn(), 7)
+ fn = inject_args(lambda a=None: a, b=2)
+ self.assertEqual(fn(1), 1)
+
+ def test_inject_args_knows_how_to_handle_arg_order(self):
+ fn = inject_args(lambda a=None, b=None: b - a, a=3)
+ self.assertEqual(fn(b=4), 1)
+ fn = inject_args(lambda b=None, a=None: b - a, a=3)
+ self.assertEqual(fn(b=4), 1)
+
+ def test_inject_args_works_as_decorator(self):
+ fn = inject_args(x=1)(lambda x=None: x)
+ self.assertEqual(fn(), 1)
+
+ def test_get_arg__positional(self):
+ def _fn(x, y, z):
+ return x + y + z
+
+ x = get_arg('x', _fn, (1, 3, 4), {})
+ self.assertEqual(x, 1)
+ y = get_arg('y', _fn, (1, 3, 4), {})
+ self.assertEqual(y, 3)
+ z = get_arg('z', _fn, (1, 3, 4), {})
+ self.assertEqual(z, 4)
+
+ def test_get_arg__keyword(self):
+ def _fn(x, y, z=5):
+ return x + y + z
+
+ z = get_arg('z', _fn, (1, 2), {'z': 3})
+ self.assertEqual(z, 3)
+
+
+if __name__ == '__main__':
+ unittest.main()
import datetime
import time
import warnings
-from functools import reduce
+from functools import reduce, partial, wraps
from itertools import tee
+import six
from six.moves import filter, filterfalse
from jsonschema import exceptions as js_e
from jsonschema import validate as js_v
+if six.PY3:
+ from inspect import getfullargspec as getspec
+else:
+ from inspect import getargspec as getspec
+
#from bs4 import BeautifulSoup
def read_file(file_to_read):
:param text:
:return:
"""
+ if text is None:
+ return (None, )
start = text.find("[")
end = text.find("]")
if start < 0 or end < 0:
keys = key_path.split('.')
target = reduce(lambda acc, key: acc.get(key) or {}, keys[:-1], target)
return target.get(keys[-1], default)
+
+
+class Attempt(object):
+ """Auxiliary class to be used in an attempt to retry executing a failing
+ procedure
+
+ Attributes:
+ count (int): 0-based "retries" counter
+        max (int): maximum number of "retries" allowed
+ info (dict): extra information about the specific attempt
+ (can be used to produce more meaningful error messages)
+ """
+ __slots__ = ('count', 'max', 'info')
+
+ MAX = 3
+
+ def __init__(self, count=0, max_attempts=MAX, info=None):
+ self.count = count
+ self.max = max_attempts
+ self.info = info or {}
+
+ @property
+ def countdown(self):
+ """Like count, but in the opposite direction"""
+ return self.max - self.count
+
+ @property
+ def number(self):
+ """1-based counter"""
+ return self.count + 1
+
+
+def inject_args(fn=None, **args):
+ """Partially apply keyword arguments in a function, but only if the function
+ define them in the first place
+ """
+ if fn is None: # Allows calling the decorator directly or with parameters
+ return partial(inject_args, **args)
+
+ spec = getspec(fn)
+ return wraps(fn)(partial(fn, **filter_dict_keys(args, spec.args)))
+
+
+def get_arg(name, fn, args, kwargs):
+ """Find the value of an argument for a function, given its argument list.
+
+ This function can be used to display more meaningful errors for debugging
+ """
+ if name in kwargs:
+ return kwargs[name]
+
+ spec = getspec(fn)
+ if name in spec.args:
+ i = spec.args.index(name)
+ return args[i] if i < len(args) else None
+
+ return None
""""
This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
The tasks are stored at database in table vim_wim_actions
+Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is something to avoid if RO
+is migrated to a non-relational database such as mongo db. Each vim_wim_actions references a different instance_Xxxxx.
+In this case the "related" column contains the same value, to indicate they refer to the same VIM element. On deletion, if
+there are related tasks using this element, it is not deleted; the vim_info needed to delete is transferred to another task
+
The task content is (M: stored at memory, D: stored at database):
MD instance_action_id: reference a global action over an instance-scenario: database instance_actions
MD task_index: index number of the task. This together with the previous forms a unique key identifier
MD datacenter_vim_id: should contain the uuid of the VIM managed by this thread
MD vim_id: id of the vm,net,etc at VIM
- MD action: CREATE, DELETE, FIND
MD item: database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
MD item_id: uuid of the referenced entry in the previous table
- MD status: SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
+ MD action: CREATE, DELETE, FIND
+    MD  status:     SCHEDULED: action needs to be done
+                    BUILD: not used
+                    DONE: done, and it must be polled at the VIM periodically to see status. ONLY for action=CREATE or FIND
+                    FAILED: it cannot be created/found/deleted
+                    FINISHED: similar to DONE, but no refresh is needed anymore. Task is maintained at database but
+                    it is never processed by any thread
+                    SUPERSEDED: similar to FINISHED, but nothing has been done to complete the task.
MD extra: text with yaml format at database, dict at memory with:
- params: list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken from other related tasks
- find: (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains the FIND params
- depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends on a net creation
+ params: list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
+ from other related tasks
+ find: (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
+ the FIND params
+ depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
+ on a net creation
can contain an int (single index on the same instance-action) or str (compete action ID)
sdn_net_id: used for net.
- tries:
interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
iface_id: uuid of intance_interfaces
sdn_port_id:
sdn_net_id:
+ vim_info
created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
created: False if the VIM element is not created by other actions, and it should not be deleted
vim_status: VIM status of the element. Stored also at database in the instance_XXX
- M depends: dict with task_index(from depends_on) to task class
- M params: same as extra[params] but with the resolved dependencies
- M vim_interfaces: similar to extra[interfaces] but with VIM information. Stored at database in the instance_XXX but not at vim_wim_actions
- M vim_info: Detailed information of a vm,net from the VIM. Stored at database in the instance_XXX but not at vim_wim_actions
+ vim_info: Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
+ vim_wim_actions
+ M depends: dict with task_index(from depends_on) to vim_id
+ M params: same as extra[params]
MD error_msg: descriptive text upon an error.Stored also at database instance_XXX
- MD created_at: task creation time
- MD modified_at: last task update time. On refresh it contains when this task need to be refreshed
+ MD created_at: task creation time. The task of creation must be the oldest
+    MD  modified_at: next time the task needs to be processed. For example, for a refresh, it contains the next time
+                    the refresh must be done
+ MD related: All the tasks over the same VIM element have same "related". Note that other VIMs can contain the
+ same value of related, but this thread only process those task of one VIM. Also related can be the
+                    same among several NS or instance-scenarios
+ MD worker: Used to lock in case of several thread workers.
"""
"vmware": vimconn_vmware,
}
+
def is_task_id(task_id):
return task_id.startswith("TASK-")
class vim_thread(threading.Thread):
- REFRESH_BUILD = 5 # 5 seconds
- REFRESH_ACTIVE = 60 # 1 minute
+ REFRESH_BUILD = 5 # 5 seconds
+ REFRESH_ACTIVE = 60 # 1 minute
+ REFRESH_ERROR = 600
+ REFRESH_DELETE = 3600 * 10
def __init__(self, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None,
db=None, db_lock=None, ovim=None):
else:
self.name = name
self.vim_persistent_info = {}
+ self.my_id = self.name[:64]
- self.logger = logging.getLogger('openmano.vim.'+self.name)
+ self.logger = logging.getLogger('openmano.vim.' + self.name)
self.db = db
self.db_lock = db_lock
self.task_lock = task_lock
self.task_queue = Queue.Queue(2000)
- self.refresh_tasks = []
- """Contains time ordered task list for refreshing the status of VIM VMs and nets"""
-
- self.pending_tasks = []
- """Contains time ordered task list for creation, deletion of VIM VMs and nets"""
-
- self.grouped_tasks = {}
- """ It contains all the creation/deletion pending tasks grouped by its concrete vm, net, etc
- <item><item_id>:
- - <task1> # e.g. CREATE task
- <task2> # e.g. DELETE task
- """
-
def get_vimconnector(self):
try:
- from_= "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
+ from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
'user', 'passwd', 'dt.config as dt_config')
where_ = {"dt.uuid": self.datacenter_tenant_id}
- with self.db_lock:
- vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
+ vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
vim = vims[0]
vim_config = {}
if vim["config"]:
vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
vim_config['datacenter_id'] = vim.get('datacenter_id')
+ # get port_mapping
+ with self.db_lock:
+ vim_config["wim_external_ports"] = self.ovim.get_of_port_mappings(
+ db_filter={"region": vim_config['datacenter_id'], "pci": None})
+
self.vim = vim_module[vim["type"]].vimconnector(
uuid=vim['datacenter_id'], name=vim['datacenter_name'],
tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
self.vim = None
self.error_status = "Error loading vimconnector: {}".format(e)
- def _reload_vim_actions(self):
+ def _get_db_task(self):
"""
Read actions from database and reload them at memory. Fill self.refresh_list, pending_list, vim_actions
:return: None
"""
+ now = time.time()
try:
- action_completed = False
- task_list = []
- old_action_key = None
-
- old_item_id = ""
- old_item = ""
- old_created_at = 0.0
- database_limit = 200
+ database_limit = 20
+ task_related = None
while True:
- # get 200 (database_limit) entries each time
- with self.db_lock:
- vim_actions = self.db.get_rows(FROM="vim_wim_actions",
- WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
- "item_id>=": old_item_id},
- ORDER_BY=("item_id", "item", "created_at",),
- LIMIT=database_limit)
+ # get 20 (database_limit) entries each time
+ vim_actions = self.db.get_rows(FROM="vim_wim_actions",
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "status": ['SCHEDULED', 'BUILD', 'DONE'],
+ "worker": [None, self.my_id], "modified_at<=": now
+ },
+ ORDER_BY=("modified_at", "created_at",),
+ LIMIT=database_limit)
+ if not vim_actions:
+ return None, None
+ # if vim_actions[0]["modified_at"] > now:
+ # return int(vim_actions[0] - now)
for task in vim_actions:
- item = task["item"]
- item_id = task["item_id"]
-
- # skip the first entries that are already processed in the previous pool of 200
- if old_item_id:
- if item_id == old_item_id and item == old_item and task["created_at"] == old_created_at:
- old_item_id = False # next one will be a new un-processed task
+ # block related task
+ if task_related == task["related"]:
+ continue # ignore if a locking has already tried for these task set
+ task_related = task["related"]
+ # lock ...
+ self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+ "worker": [None, self.my_id],
+ "related": task_related,
+ "item": task["item"],
+ })
+ # ... and read all related and check if locked
+ related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+ "related": task_related,
+ "item": task["item"],
+ },
+ ORDER_BY=("created_at",))
+ # check that all related tasks have been locked. If not release and try again. It can happen
+ # for race conditions if a new related task has been inserted by nfvo in the process
+ some_tasks_locked = False
+ some_tasks_not_locked = False
+ creation_task = None
+ for relate_task in related_tasks:
+ if relate_task["worker"] != self.my_id:
+ some_tasks_not_locked = True
+ else:
+ some_tasks_locked = True
+ if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
+ creation_task = relate_task
+ if some_tasks_not_locked:
+ if some_tasks_locked: # unlock
+ self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "worker": self.my_id,
+ "related": task_related,
+ "item": task["item"],
+ })
continue
- action_key = item + item_id
- if old_action_key != action_key:
- if not action_completed and task_list:
- # This will fill needed task parameters into memory, and insert the task if needed in
- # self.pending_tasks or self.refresh_tasks
- try:
- self._insert_pending_tasks(task_list)
- except Exception as e:
- self.logger.critical(
- "Unexpected exception at _reload_vim_actions:_insert_pending_tasks: " + str(e),
- exc_info=True)
- task_list = []
- old_action_key = action_key
- action_completed = False
- elif action_completed:
- continue
+ # task of creation must be the first in the list of related_task
+ assert(related_tasks[0]["action"] in ("CREATE", "FIND"))
- if task["status"] == "SCHEDULED" or task["action"] == "CREATE" or task["action"] == "FIND":
- task_list.append(task)
- elif task["action"] == "DELETE":
- # action completed because deleted and status is not SCHEDULED. Not needed anything
- action_completed = True
- if len(vim_actions) == database_limit:
- # update variables for get the next database iteration
- old_item_id = item_id
- old_item = item
- old_created_at = task["created_at"]
- else:
- break
- # Last actions group need to be inserted too
- if not action_completed and task_list:
- try:
- self._insert_pending_tasks(task_list)
- except Exception as e:
- self.logger.critical("Unexpected exception at _reload_vim_actions:_insert_pending_tasks: " + str(e),
- exc_info=True)
- self.logger.debug("reloaded vim actions pending:{} refresh:{}".format(
- len(self.pending_tasks), len(self.refresh_tasks)))
+ if task["extra"]:
+ extra = yaml.load(task["extra"])
+ else:
+ extra = {}
+ task["extra"] = extra
+ if extra.get("depends_on"):
+ task["depends"] = {}
+ if extra.get("params"):
+ task["params"] = deepcopy(extra["params"])
+ return task, related_tasks
except Exception as e:
- self.logger.critical("Unexpected exception at _reload_vim_actions: " + str(e), exc_info=True)
+ self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
+ return None, None
- def _refres_elements(self):
- """Call VIM to get VMs and networks status until 10 elements"""
- now = time.time()
- nb_processed = 0
- vm_to_refresh_list = []
- net_to_refresh_list = []
- vm_to_refresh_dict = {}
- net_to_refresh_dict = {}
- items_to_refresh = 0
- while self.refresh_tasks:
- task = self.refresh_tasks[0]
- with self.task_lock:
- if task['status'] == 'SUPERSEDED':
- self.refresh_tasks.pop(0)
- continue
- if task['modified_at'] > now:
+ def _delete_task(self, task):
+ """
+ Determine if this task need to be done or superseded
+ :return: None
+ """
+
+ def copy_extra_created(copy_to, copy_from):
+ copy_to["created"] = copy_from["created"]
+ if copy_from.get("sdn_net_id"):
+ copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
+ if copy_from.get("interfaces"):
+ copy_to["interfaces"] = copy_from["interfaces"]
+ if copy_from.get("created_items"):
+ if not copy_to.get("created_items"):
+ copy_to["created_items"] = {}
+ copy_to["created_items"].update(copy_from["created_items"])
+
+ task_create = None
+ dependency_task = None
+ deletion_needed = False
+ if task["status"] == "FAILED":
+ return # TODO need to be retry??
+ try:
+ # get all related tasks
+ related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+ "action": ["FIND", "CREATE"],
+ "related": task["related"],
+ },
+ ORDER_BY=("created_at",),
+ )
+ for related_task in related_tasks:
+ if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
+ task_create = related_task
+ # TASK_CREATE
+ if related_task["extra"]:
+ extra_created = yaml.load(related_task["extra"])
+ if extra_created.get("created"):
+ deletion_needed = True
+ related_task["extra"] = extra_created
+ elif not dependency_task:
+ dependency_task = related_task
+ if task_create and dependency_task:
break
- # task["status"] = "processing"
- nb_processed += 1
- self.refresh_tasks.pop(0)
- if task["item"] == 'instance_vms':
- if task["vim_id"] not in vm_to_refresh_dict:
- vm_to_refresh_dict[task["vim_id"]] = [task]
- vm_to_refresh_list.append(task["vim_id"])
- else:
- vm_to_refresh_dict[task["vim_id"]].append(task)
- elif task["item"] == 'instance_nets':
- if task["vim_id"] not in net_to_refresh_dict:
- net_to_refresh_dict[task["vim_id"]] = [task]
- net_to_refresh_list.append(task["vim_id"])
- else:
- net_to_refresh_dict[task["vim_id"]].append(task)
+
+ # mark task_create as FINISHED
+ self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "instance_action_id": task_create["instance_action_id"],
+ "task_index": task_create["task_index"]
+ })
+ if not deletion_needed:
+ return
+ elif dependency_task:
+ # move create information from task_create to relate_task
+ extra_new_created = yaml.load(dependency_task["extra"]) or {}
+ extra_new_created["created"] = extra_created["created"]
+ copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)
+
+ self.db.update_rows("vim_wim_actions",
+ UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
+ width=256),
+ "vim_id": task_create.get("vim_id")},
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "instance_action_id": dependency_task["instance_action_id"],
+ "task_index": dependency_task["task_index"]
+ })
+ return False
else:
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
- self.logger.critical("task={}: unknown task {}".format(task_id, task["item"]), exc_info=True)
- items_to_refresh += 1
- if items_to_refresh == 10:
- break
-
- if vm_to_refresh_list:
- now = time.time()
- try:
- vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
- except vimconn.vimconnException as e:
- # Mark all tasks at VIM_ERROR status
- self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
- vim_dict = {}
- for vim_id in vm_to_refresh_list:
- vim_dict[vim_id] = {"status": "VIM_ERROR", "error_msg": str(e)}
-
- for vim_id, vim_info in vim_dict.items():
-
- # look for task
- for task in vm_to_refresh_dict[vim_id]:
- task_need_update = False
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
- self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
-
- # check and update interfaces
- task_warning_msg = ""
- for interface in vim_info.get("interfaces", ()):
- vim_interface_id = interface["vim_interface_id"]
- if vim_interface_id not in task["extra"]["interfaces"]:
- self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
- task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
- continue
- task_interface = task["extra"]["interfaces"][vim_interface_id]
- task_vim_interface = task["vim_interfaces"].get(vim_interface_id)
- if task_vim_interface != interface:
- # delete old port
- if task_interface.get("sdn_port_id"):
- try:
- with self.db_lock:
- self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
- task_interface["sdn_port_id"] = None
- task_need_update = True
- except ovimException as e:
- error_text = "ovimException deleting external_port={}: {}".format(
- task_interface["sdn_port_id"], e)
- self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
- task_warning_msg += error_text
- # TODO Set error_msg at instance_nets instead of instance VMs
-
- # Create SDN port
- sdn_net_id = task_interface.get("sdn_net_id")
- if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
- sdn_port_name = sdn_net_id + "." + task["vim_id"]
- sdn_port_name = sdn_port_name[:63]
- try:
- with self.db_lock:
- sdn_port_id = self.ovim.new_external_port(
- {"compute_node": interface["compute_node"],
- "pci": interface["pci"],
- "vlan": interface.get("vlan"),
- "net_id": sdn_net_id,
- "region": self.vim["config"]["datacenter_id"],
- "name": sdn_port_name,
- "mac": interface.get("mac_address")})
- task_interface["sdn_port_id"] = sdn_port_id
- task_need_update = True
- except (ovimException, Exception) as e:
- error_text = "ovimException creating new_external_port compute_node={}"\
- " pci={} vlan={} {}".format(
- interface["compute_node"],
- interface["pci"],
- interface.get("vlan"), e)
- self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
- task_warning_msg += error_text
- # TODO Set error_msg at instance_nets instead of instance VMs
-
- with self.db_lock:
- self.db.update_rows(
- 'instance_interfaces',
+ task["vim_id"] = task_create["vim_id"]
+ copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
+ return True
+
+ except Exception as e:
+ self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
+
+ def _refres_vm(self, task):
+ """Call VIM to get VMs status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ vm_to_refresh_list = [vim_id]
+ try:
+ vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.vimconnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
+
+ # check and update interfaces
+ task_warning_msg = ""
+ for interface in vim_info.get("interfaces", ()):
+ vim_interface_id = interface["vim_interface_id"]
+ if vim_interface_id not in task["extra"]["interfaces"]:
+ self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
+ task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
+ continue
+ task_interface = task["extra"]["interfaces"][vim_interface_id]
+ task_vim_interface = task_interface.get("vim_info")
+ if task_vim_interface != interface:
+ # delete old port
+ if task_interface.get("sdn_port_id"):
+ try:
+ with self.db_lock:
+ self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
+ task_interface["sdn_port_id"] = None
+ except ovimException as e:
+ error_text = "ovimException deleting external_port={}: {}".format(
+ task_interface["sdn_port_id"], e)
+ self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+ task_warning_msg += error_text
+ # TODO Set error_msg at instance_nets instead of instance VMs
+
+ # Create SDN port
+ sdn_net_id = task_interface.get("sdn_net_id")
+ if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+ sdn_port_name = sdn_net_id + "." + task["vim_id"]
+ sdn_port_name = sdn_port_name[:63]
+ try:
+ with self.db_lock:
+ sdn_port_id = self.ovim.new_external_port(
+ {"compute_node": interface["compute_node"],
+ "pci": interface["pci"],
+ "vlan": interface.get("vlan"),
+ "net_id": sdn_net_id,
+ "region": self.vim["config"]["datacenter_id"],
+ "name": sdn_port_name,
+ "mac": interface.get("mac_address")})
+ task_interface["sdn_port_id"] = sdn_port_id
+ except (ovimException, Exception) as e:
+ error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
+ format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
+ self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+ task_warning_msg += error_text
+ # TODO Set error_msg at instance_nets instead of instance VMs
+
+ self.db.update_rows('instance_interfaces',
UPDATE={"mac_address": interface.get("mac_address"),
"ip_address": interface.get("ip_address"),
+ "vim_interface_id": interface.get("vim_interface_id"),
"vim_info": interface.get("vim_info"),
"sdn_port_id": task_interface.get("sdn_port_id"),
"compute_node": interface.get("compute_node"),
"pci": interface.get("pci"),
"vlan": interface.get("vlan")},
WHERE={'uuid': task_interface["iface_id"]})
- task["vim_interfaces"][vim_interface_id] = interface
-
- # check and update task and instance_vms database
- vim_info_error_msg = None
- if vim_info.get("error_msg"):
- vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
- elif task_warning_msg:
- vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
- task_vim_info = task.get("vim_info")
- task_error_msg = task.get("error_msg")
- task_vim_status = task["extra"].get("vim_status")
- if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
- (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
- temp_dict = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
- if vim_info.get("vim_info"):
- temp_dict["vim_info"] = vim_info["vim_info"]
- with self.db_lock:
- self.db.update_rows('instance_vms', UPDATE=temp_dict, WHERE={"uuid": task["item_id"]})
- task["extra"]["vim_status"] = vim_info["status"]
- task["error_msg"] = vim_info_error_msg
- if vim_info.get("vim_info"):
- task["vim_info"] = vim_info["vim_info"]
- task_need_update = True
-
- if task_need_update:
- with self.db_lock:
- self.db.update_rows(
- 'vim_wim_actions',
- UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
- "error_msg": task.get("error_msg"), "modified_at": now},
- WHERE={'instance_action_id': task['instance_action_id'],
- 'task_index': task['task_index']})
- if task["extra"].get("vim_status") == "BUILD":
- self._insert_refresh(task, now + self.REFRESH_BUILD)
- else:
- self._insert_refresh(task, now + self.REFRESH_ACTIVE)
+ task_interface["vim_info"] = interface
+
+ # check and update task and instance_vms database
+ vim_info_error_msg = None
+ if vim_info.get("error_msg"):
+ vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
+ elif task_warning_msg:
+ vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
+ task_vim_info = task["extra"].get("vim_info")
+ task_error_msg = task.get("error_msg")
+ task_vim_status = task["extra"].get("vim_status")
+ if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+
+ task["extra"]["vim_status"] = vim_info["status"]
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+
+ return database_update
+
+ def _refres_net(self, task):
+ """Call VIM to get network status"""
+ database_update = None
+
+ vim_id = task["vim_id"]
+ net_to_refresh_list = [vim_id]
+ try:
+ vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
+ vim_info = vim_dict[vim_id]
+ except vimconn.vimconnException as e:
+ # Mark all tasks at VIM_ERROR status
+ self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
+ vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
- if net_to_refresh_list:
- now = time.time()
- try:
- vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
- except vimconn.vimconnException as e:
- # Mark all tasks at VIM_ERROR status
- self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
- vim_dict = {}
- for vim_id in net_to_refresh_list:
- vim_dict[vim_id] = {"status": "VIM_ERROR", "error_msg": str(e)}
-
- for vim_id, vim_info in vim_dict.items():
- # look for task
- for task in net_to_refresh_dict[vim_id]:
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
- self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))
-
- task_vim_info = task.get("vim_info")
- task_vim_status = task["extra"].get("vim_status")
- task_error_msg = task.get("error_msg")
- task_sdn_net_id = task["extra"].get("sdn_net_id")
-
- vim_info_status = vim_info["status"]
- vim_info_error_msg = vim_info.get("error_msg")
- # get ovim status
- if task_sdn_net_id:
- try:
- with self.db_lock:
- sdn_net = self.ovim.show_network(task_sdn_net_id)
- except (ovimException, Exception) as e:
- text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
- self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
- sdn_net = {"status": "ERROR", "last_error": text_error}
- if sdn_net["status"] == "ERROR":
- if not vim_info_error_msg:
- vim_info_error_msg = str(sdn_net.get("last_error"))
- else:
- vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
- self._format_vim_error_msg(vim_info_error_msg, 1024//2-14),
- self._format_vim_error_msg(sdn_net["last_error"], 1024//2-14))
- vim_info_status = "ERROR"
- elif sdn_net["status"] == "BUILD":
- if vim_info_status == "ACTIVE":
- vim_info_status = "BUILD"
-
- # update database
- if vim_info_error_msg:
- vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
- if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
- (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
- task["extra"]["vim_status"] = vim_info_status
- task["error_msg"] = vim_info_error_msg
- if vim_info.get("vim_info"):
- task["vim_info"] = vim_info["vim_info"]
- temp_dict = {"status": vim_info_status, "error_msg": vim_info_error_msg}
- if vim_info.get("vim_info"):
- temp_dict["vim_info"] = vim_info["vim_info"]
- with self.db_lock:
- self.db.update_rows('instance_nets', UPDATE=temp_dict, WHERE={"uuid": task["item_id"]})
- self.db.update_rows(
- 'vim_wim_actions',
- UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
- "error_msg": task.get("error_msg"), "modified_at": now},
- WHERE={'instance_action_id': task['instance_action_id'],
- 'task_index': task['task_index']})
- if task["extra"].get("vim_status") == "BUILD":
- self._insert_refresh(task, now + self.REFRESH_BUILD)
- else:
- self._insert_refresh(task, now + self.REFRESH_ACTIVE)
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))
- return nb_processed
+ task_vim_info = task["extra"].get("vim_info")
+ task_vim_status = task["extra"].get("vim_status")
+ task_error_msg = task.get("error_msg")
+ task_sdn_net_id = task["extra"].get("sdn_net_id")
- def _insert_refresh(self, task, threshold_time=None):
- """Insert a task at list of refreshing elements. The refreshing list is ordered by threshold_time (task['modified_at']
- It is assumed that this is called inside this thread
- """
- if not self.vim:
- return
- if not threshold_time:
- threshold_time = time.time()
- task["modified_at"] = threshold_time
- task_name = task["item"][9:] + "-" + task["action"]
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
- for index in range(0, len(self.refresh_tasks)):
- if self.refresh_tasks[index]["modified_at"] > threshold_time:
- self.refresh_tasks.insert(index, task)
- break
- else:
- index = len(self.refresh_tasks)
- self.refresh_tasks.append(task)
- self.logger.debug("task={} new refresh name={}, modified_at={} index={}".format(
- task_id, task_name, task["modified_at"], index))
-
- def _remove_refresh(self, task_name, vim_id):
- """Remove a task with this name and vim_id from the list of refreshing elements.
- It is assumed that this is called inside this thread outside _refres_elements method
- Return True if self.refresh_list is modified, task is found
- Return False if not found
- """
- index_to_delete = None
- for index in range(0, len(self.refresh_tasks)):
- if self.refresh_tasks[index]["name"] == task_name and self.refresh_tasks[index]["vim_id"] == vim_id:
- index_to_delete = index
- break
- else:
- return False
- if not index_to_delete:
- del self.refresh_tasks[index_to_delete]
- return True
-
- def _proccess_pending_tasks(self):
- nb_created = 0
- nb_processed = 0
- while self.pending_tasks:
- task = self.pending_tasks.pop(0)
- nb_processed += 1
+ vim_info_status = vim_info["status"]
+ vim_info_error_msg = vim_info.get("error_msg")
+ # get ovim status
+ if task_sdn_net_id:
try:
+ with self.db_lock:
+ sdn_net = self.ovim.show_network(task_sdn_net_id)
+ except (ovimException, Exception) as e:
+ text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
+ self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
+ sdn_net = {"status": "ERROR", "last_error": text_error}
+ if sdn_net["status"] == "ERROR":
+ if not vim_info_error_msg:
+ vim_info_error_msg = str(sdn_net.get("last_error"))
+ else:
+ vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
+ self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
+ self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
+ vim_info_status = "ERROR"
+ elif sdn_net["status"] == "BUILD":
+ if vim_info_status == "ACTIVE":
+ vim_info_status = "BUILD"
+
+ # update database
+ if vim_info_error_msg:
+ vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
+ if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
+ (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+ task["extra"]["vim_status"] = vim_info_status
+ task["error_msg"] = vim_info_error_msg
+ if vim_info.get("vim_info"):
+ task["extra"]["vim_info"] = vim_info["vim_info"]
+ database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
+ if vim_info.get("vim_info"):
+ database_update["vim_info"] = vim_info["vim_info"]
+ return database_update
+
+ def _proccess_pending_tasks(self, task, related_tasks):
+ old_task_status = task["status"]
+ create_or_find = False # if as result of processing this task something is created or found
+ next_refresh = 0
+
+ try:
+ if task["status"] == "SCHEDULED":
# check if tasks that this depends on have been completed
dependency_not_completed = False
+ dependency_modified_at = 0
for task_index in task["extra"].get("depends_on", ()):
- task_dependency = task["depends"].get("TASK-" + str(task_index))
+ task_dependency = self._look_for_task(task["instance_action_id"], task_index)
if not task_dependency:
- task_dependency = self._look_for_task(task["instance_action_id"], task_index)
- if not task_dependency:
- raise VimThreadException(
- "Cannot get depending net task trying to get depending task {}.{}".format(
- task["instance_action_id"], task_index))
- # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so database must be look again
+ raise VimThreadException(
+ "Cannot get depending net task trying to get depending task {}.{}".format(
+ task["instance_action_id"], task_index))
+ # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so
+ # database must be look again
if task_dependency["status"] == "SCHEDULED":
dependency_not_completed = True
+ dependency_modified_at = task_dependency["modified_at"]
break
elif task_dependency["status"] == "FAILED":
raise VimThreadException(
task["instance_action_id"], task["task_index"],
task_dependency["instance_action_id"], task_dependency["task_index"],
task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))
+
+ task["depends"]["TASK-"+str(task_index)] = task_dependency["vim_id"]
+ task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] =\
+ task_dependency["vim_id"]
if dependency_not_completed:
- # Move this task to the end.
- task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
- if task["extra"]["tries"] <= 3:
- self.pending_tasks.append(task)
- continue
- else:
- raise VimThreadException(
- "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
- "(task {}.{})".format(task["action"], task["item"],
- task["instance_action_id"], task["task_index"],
- task_dependency["instance_action_id"], task_dependency["task_index"],
- task_dependency["action"], task_dependency["item"]))
-
- if task["status"] == "SUPERSEDED":
- # not needed to do anything but update database with the new status
- result = True
- database_update = None
- elif not self.vim:
- task["status"] = "ERROR"
- task["error_msg"] = self.error_status
- result = False
- database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
- elif task["item"] == 'instance_vms':
- if task["action"] == "CREATE":
- result, database_update = self.new_vm(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_vm(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- elif task["item"] == 'instance_nets':
- if task["action"] == "CREATE":
- result, database_update = self.new_net(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_net(task)
- elif task["action"] == "FIND":
- result, database_update = self.get_net(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- elif task["item"] == 'instance_sfis':
- if task["action"] == "CREATE":
- result, database_update = self.new_sfi(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_sfi(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- elif task["item"] == 'instance_sfs':
- if task["action"] == "CREATE":
- result, database_update = self.new_sf(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_sf(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- elif task["item"] == 'instance_classifications':
- if task["action"] == "CREATE":
- result, database_update = self.new_classification(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_classification(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- elif task["item"] == 'instance_sfps':
- if task["action"] == "CREATE":
- result, database_update = self.new_sfp(task)
- nb_created += 1
- elif task["action"] == "DELETE":
- result, database_update = self.del_sfp(task)
- else:
- raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
- else:
- raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
- # TODO
- except VimThreadException as e:
- result = False
- task["error_msg"] = str(e)
+ # Move this task to the time dependency is going to be modified plus 10 seconds.
+ self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
+ UPDATE={"worker": None},
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id, "worker": self.my_id,
+ "related": task["related"],
+ })
+ # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
+ # if task["extra"]["tries"] > 3:
+ # raise VimThreadException(
+ # "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
+ # "(task {}.{})".format(task["action"], task["item"],
+ # task["instance_action_id"], task["task_index"],
+ # task_dependency["instance_action_id"], task_dependency["task_index"]
+ # task_dependency["action"], task_dependency["item"]))
+ return
+
+ database_update = None
+ if task["action"] == "DELETE":
+ deleted_needed = self._delete_task(task)
+ if not deleted_needed:
+ task["status"] = "SUPERSEDED" # with FINISHED instead of DONE it will not be refreshing
+ task["error_msg"] = None
+
+ if task["status"] == "SUPERSEDED":
+ # not needed to do anything but update database with the new status
+ database_update = None
+ elif not self.vim:
task["status"] = "FAILED"
+ task["error_msg"] = self.error_status
database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+ elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
+ # Do nothing, just copy values from one to another and updata database
+ task["status"] = related_tasks[0]["status"]
+ task["error_msg"] = related_tasks[0]["error_msg"]
+ task["vim_id"] = related_tasks[0]["vim_id"]
+ extra = yaml.load(related_tasks[0]["extra"])
+ task["extra"]["vim_status"] = extra["vim_status"]
+ next_refresh = related_tasks[0]["modified_at"] + 0.001
+ database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
+ "error_msg": task["error_msg"]}
if task["item"] == 'instance_vms':
- database_update["vim_vm_id"] = None
+ database_update["vim_vm_id"] = task["vim_id"]
elif task["item"] == 'instance_nets':
- database_update["vim_net_id"] = None
-
- no_refresh_tasks = ['instance_sfis', 'instance_sfs',
- 'instance_classifications', 'instance_sfps']
- if task["action"] == "DELETE":
- action_key = task["item"] + task["item_id"]
- del self.grouped_tasks[action_key]
- elif task["action"] in ("CREATE", "FIND") and task["status"] in ("DONE", "BUILD"):
- if task["item"] not in no_refresh_tasks:
- self._insert_refresh(task)
-
- task_id = task["instance_action_id"] + "." + str(task["task_index"])
- self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
- task_id, task["item"], task["action"], task["status"],
- task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
- try:
- now = time.time()
- with self.db_lock:
- self.db.update_rows(
- table="vim_wim_actions",
- UPDATE={"status": task["status"], "vim_id": task.get("vim_id"), "modified_at": now,
- "error_msg": task["error_msg"],
- "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
- WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
- if result is not None:
- self.db.update_rows(
- table="instance_actions",
- UPDATE={("number_done" if result else "number_failed"): {"INCREMENT": 1},
- "modified_at": now},
- WHERE={"uuid": task["instance_action_id"]})
- if database_update:
- self.db.update_rows(table=task["item"],
- UPDATE=database_update,
- WHERE={"uuid": task["item_id"]})
- except db_base_Exception as e:
- self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
-
- if nb_created == 10:
- break
- return nb_processed
-
- def _insert_pending_tasks(self, vim_actions_list):
- for task in vim_actions_list:
- if task["datacenter_vim_id"] != self.datacenter_tenant_id:
- continue
- item = task["item"]
- item_id = task["item_id"]
- action_key = item + item_id
- if action_key not in self.grouped_tasks:
- self.grouped_tasks[action_key] = []
- task["params"] = None
- task["depends"] = {}
- if task["extra"]:
- extra = yaml.load(task["extra"])
- task["extra"] = extra
- task["params"] = extra.get("params")
- depends_on_list = extra.get("depends_on")
- if depends_on_list:
- for dependency_task in depends_on_list:
- if isinstance(dependency_task, int):
- index = dependency_task
- else:
- instance_action_id, _, task_id = dependency_task.rpartition(".")
- if instance_action_id != task["instance_action_id"]:
- continue
- index = int(task_id)
-
- if index < len(vim_actions_list) and vim_actions_list[index]["task_index"] == index and\
- vim_actions_list[index]["instance_action_id"] == task["instance_action_id"]:
- task["depends"]["TASK-" + str(index)] = vim_actions_list[index]
- task["depends"]["TASK-{}.{}".format(task["instance_action_id"], index)] = vim_actions_list[index]
- if extra.get("interfaces"):
- task["vim_interfaces"] = {}
+ database_update["vim_net_id"] = task["vim_id"]
+ elif task["item"] == 'instance_vms':
+ if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+ database_update = self._refres_vm(task)
+ create_or_find = True
+ elif task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_vm(task)
+ elif task["action"] == "DELETE":
+ self.del_vm(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_nets':
+ if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+ database_update = self._refres_net(task)
+ create_or_find = True
+ elif task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_net(task)
+ elif task["action"] == "DELETE":
+ self.del_net(task)
+ elif task["action"] == "FIND":
+ database_update = self.get_net(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfis':
+ if task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_sfi(task)
+ elif task["action"] == "DELETE":
+ self.del_sfi(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfs':
+ if task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_sf(task)
+ elif task["action"] == "DELETE":
+ self.del_sf(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_classifications':
+ if task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_classification(task)
+ elif task["action"] == "DELETE":
+ self.del_classification(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+ elif task["item"] == 'instance_sfps':
+ if task["action"] == "CREATE":
+ create_or_find = True
+ database_update = self.new_sfp(task)
+ elif task["action"] == "DELETE":
+ self.del_sfp(task)
+ else:
+ raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
else:
- task["extra"] = {}
- if "error_msg" not in task:
- task["error_msg"] = None
- if "vim_id" not in task:
- task["vim_id"] = None
+ raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
+ # TODO
+ except VimThreadException as e:
+ task["error_msg"] = str(e)
+ task["status"] = "FAILED"
+ database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+ if task["item"] == 'instance_vms':
+ database_update["vim_vm_id"] = None
+ elif task["item"] == 'instance_nets':
+ database_update["vim_net_id"] = None
- if task["action"] == "DELETE":
- need_delete_action = False
- for to_supersede in self.grouped_tasks.get(action_key, ()):
- if to_supersede["action"] == "FIND" and to_supersede.get("vim_id"):
- task["vim_id"] = to_supersede["vim_id"]
- if to_supersede["action"] == "CREATE" and to_supersede["extra"].get("created", True) and \
- (to_supersede.get("vim_id") or to_supersede["extra"].get("sdn_net_id")):
- need_delete_action = True
- task["vim_id"] = to_supersede["vim_id"]
- if to_supersede["extra"].get("sdn_net_id"):
- task["extra"]["sdn_net_id"] = to_supersede["extra"]["sdn_net_id"]
- if to_supersede["extra"].get("interfaces"):
- task["extra"]["interfaces"] = to_supersede["extra"]["interfaces"]
- if to_supersede["extra"].get("created_items"):
- if not task["extra"].get("created_items"):
- task["extra"]["created_items"] = {}
- task["extra"]["created_items"].update(to_supersede["extra"]["created_items"])
- # Mark task as SUPERSEDED.
- # If task is in self.pending_tasks, it will be removed and database will be update
- # If task is in self.refresh_tasks, it will be removed
- to_supersede["status"] = "SUPERSEDED"
- if not need_delete_action:
- task["status"] = "SUPERSEDED"
-
- self.grouped_tasks[action_key].append(task)
- self.pending_tasks.append(task)
- elif task["status"] == "SCHEDULED":
- self.grouped_tasks[action_key].append(task)
- self.pending_tasks.append(task)
- elif task["action"] in ("CREATE", "FIND"):
- self.grouped_tasks[action_key].append(task)
- if task["status"] in ("DONE", "BUILD"):
- self._insert_refresh(task)
- # TODO add VM reset, get console, etc...
- else:
- raise vimconn.vimconnException(self.name + "unknown vim_action action {}".format(task["action"]))
+ task_id = task["instance_action_id"] + "." + str(task["task_index"])
+ self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
+ task_id, task["item"], task["action"], task["status"],
+ task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
+ try:
+ if not next_refresh:
+ if task["status"] == "DONE":
+ next_refresh = time.time()
+ if task["extra"].get("vim_status") == "BUILD":
+ next_refresh += self.REFRESH_BUILD
+ elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR"):
+ next_refresh += self.REFRESH_ERROR
+ elif task["extra"].get("vim_status") == "DELETED":
+ next_refresh += self.REFRESH_DELETE
+ else:
+ next_refresh += self.REFRESH_ACTIVE
+ elif task["status"] == "FAILED":
+ next_refresh = time.time() + self.REFRESH_DELETE
+
+ if create_or_find:
+ # modify all related task with action FIND/CREATED non SCHEDULED
+ self.db.update_rows(
+ table="vim_wim_actions", modified_time=next_refresh + 0.001,
+ UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
+ "error_msg": task["error_msg"],
+ },
+
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "worker": self.my_id,
+ "action": ["FIND", "CREATE"],
+ "related": task["related"],
+ "status<>": "SCHEDULED",
+ })
+ # modify own task
+ self.db.update_rows(
+ table="vim_wim_actions", modified_time=next_refresh,
+ UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
+ "error_msg": task["error_msg"],
+ "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
+ WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
+ # Unlock tasks
+ self.db.update_rows(
+ table="vim_wim_actions", modified_time=0,
+ UPDATE={"worker": None},
+ WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+ "worker": self.my_id,
+ "related": task["related"],
+ })
+
+ # Update table instance_actions
+ if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
+ self.db.update_rows(
+ table="instance_actions",
+ UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
+ WHERE={"uuid": task["instance_action_id"]})
+ if database_update:
+ self.db.update_rows(table=task["item"],
+ UPDATE=database_update,
+ WHERE={"related": task["related"]})
+ except db_base_Exception as e:
+ self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
def insert_task(self, task):
try:
if task["status"] == "SCHEDULED":
task["status"] = "SUPERSEDED"
return True
- else: # task["status"] == "processing"
+ else: # task["status"] == "processing"
self.task_lock.release()
return False
while True:
self.get_vimconnector()
self.logger.debug("Vimconnector loaded")
- self._reload_vim_actions()
reload_thread = False
while True:
while not self.task_queue.empty():
task = self.task_queue.get()
if isinstance(task, list):
- self._insert_pending_tasks(task)
+ pass
elif isinstance(task, str):
if task == 'exit':
return 0
self.task_queue.task_done()
if reload_thread:
break
- nb_processed = self._proccess_pending_tasks()
- nb_processed += self._refres_elements()
- if not nb_processed:
- time.sleep(1)
+
+ task, related_tasks = self._get_db_task()
+ if task:
+ self._proccess_pending_tasks(task, related_tasks)
+ else:
+ time.sleep(5)
except Exception as e:
self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
if ins_action_id:
instance_action_id = ins_action_id
- with self.db_lock:
- tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
+ tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
"task_index": task_index})
if not tasks:
return None
extra = yaml.load(task["extra"])
task["extra"] = extra
task["params"] = extra.get("params")
- if extra.get("interfaces"):
- task["vim_interfaces"] = {}
else:
task["extra"] = {}
return task
@staticmethod
def _format_vim_error_msg(error_text, max_length=1024):
if error_text and len(error_text) >= max_length:
- return error_text[:max_length//2-3] + " ... " + error_text[-max_length//2+3:]
+ return error_text[:max_length // 2 - 3] + " ... " + error_text[-max_length // 2 + 3:]
return error_text
def new_vm(self, task):
net_list = params[5]
for net in net_list:
if "net_id" in net and is_task_id(net["net_id"]): # change task_id into network_id
- task_dependency = task["depends"].get(net["net_id"])
- if not task_dependency:
- task_dependency = self._look_for_task(task["instance_action_id"], net["net_id"])
- if not task_dependency:
- raise VimThreadException(
- "Cannot get depending net task trying to get depending task {}.{}".format(
- task["instance_action_id"], net["net_id"]))
- network_id = task_dependency.get("vim_id")
+ network_id = task["depends"][net["net_id"]]
if not network_id:
raise VimThreadException(
"Cannot create VM because depends on a network not created or found: " +
task_interfaces = {}
for iface in params_copy[5]:
task_interfaces[iface["vim_id"]] = {"iface_id": iface["uuid"]}
- with self.db_lock:
- result = self.db.get_rows(
- SELECT=('sdn_net_id',),
- FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
- WHERE={'ii.uuid': iface["uuid"]})
+ result = self.db.get_rows(
+ SELECT=('sdn_net_id', 'interface_id'),
+ FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
+ WHERE={'ii.uuid': iface["uuid"]})
if result:
task_interfaces[iface["vim_id"]]["sdn_net_id"] = result[0]['sdn_net_id']
+ task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
else:
self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
- iface["uuid"]), exc_info=True)
+ iface["uuid"]),
+ exc_info=True)
task["vim_info"] = {}
- task["vim_interfaces"] = {}
task["extra"]["interfaces"] = task_interfaces
task["extra"]["created"] = True
task["extra"]["created_items"] = created_items
+ task["extra"]["vim_status"] = "BUILD"
task["error_msg"] = None
task["status"] = "DONE"
task["vim_id"] = vim_vm_id
instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("task={} new-VM: {}".format(task_id, e))
task["status"] = "FAILED"
task["vim_id"] = None
instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
- return False, instance_element_update
+ return instance_element_update
def del_vm(self, task):
task_id = task["instance_action_id"] + "." + str(task["task_index"])
# TODO Set error_msg at instance_nets
self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
+ return None
def _get_net_internal(self, task, filter_param):
"""
# Discover if this network is managed by a sdn controller
sdn_net_id = None
- with self.db_lock:
- result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
- WHERE={'vim_net_id': vim_net_id,
- 'datacenter_tenant_id': self.datacenter_tenant_id},
- ORDER="instance_scenario_id")
+ result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
+ WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
+ ORDER="instance_scenario_id")
if result:
sdn_net_id = result[0]['sdn_net_id']
task["status"] = "DONE"
task["extra"]["vim_info"] = {}
task["extra"]["created"] = False
+ task["extra"]["vim_status"] = "BUILD"
task["extra"]["sdn_net_id"] = sdn_net_id
task["error_msg"] = None
task["vim_id"] = vim_net_id
params = task["params"]
filter_param = params[0]
instance_element_update = self._get_net_internal(task, filter_param)
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("task={} get-net: {}".format(task_id, e))
task["error_msg"] = self._format_vim_error_msg(str(e))
instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
"error_msg": task["error_msg"]}
- return False, instance_element_update
+ return instance_element_update
def new_net(self, task):
vim_net_id = None
filter_param = task["extra"]["find"][0]
try:
instance_element_update = self._get_net_internal(task, filter_param)
- return True, instance_element_update
+ return instance_element_update
except VimThreadExceptionNotFound:
pass
# CREATE
params = task["params"]
action_text = "creating VIM"
- vim_net_id = self.vim.new_network(*params)
+ vim_net_id, created_items = self.vim.new_network(*params[0:3])
net_name = params[0]
net_type = params[1]
+ wim_account_name = None
+ if len(params) >= 4:
+ wim_account_name = params[3]
sdn_controller = self.vim.config.get('sdn-controller')
if sdn_controller and (net_type == "data" or net_type == "ptp"):
action_text = "creating SDN"
with self.db_lock:
sdn_net_id = self.ovim.new_network(network)
+
+ if wim_account_name and self.vim.config["wim_external_ports"]:
+ # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
+ action_text = "attaching external port to ovim network"
+ sdn_port_name = sdn_net_id + "." + task["vim_id"]
+ sdn_port_name = sdn_port_name[:63]
+ sdn_port_data = {
+ "compute_node": "__WIM:" + wim_account_name[0:58],
+ "pci": None,
+ "vlan": network["vlan"],
+ "net_id": sdn_net_id,
+ "region": self.vim["config"]["datacenter_id"],
+ "name": sdn_port_name,
+ }
+ try:
+ with self.db_lock:
+ sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+ except ovimException:
+ sdn_port_data["compute_node"] = "__WIM"
+ with self.db_lock:
+ sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+ self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
+ sdn_net_id))
task["status"] = "DONE"
task["extra"]["vim_info"] = {}
task["extra"]["sdn_net_id"] = sdn_net_id
+ task["extra"]["vim_status"] = "BUILD"
task["extra"]["created"] = True
+ task["extra"]["created_items"] = created_items
task["error_msg"] = None
task["vim_id"] = vim_net_id
instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "BUILD",
"created": True, "error_msg": None}
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, ovimException) as e:
self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
task["status"] = "FAILED"
task["extra"]["sdn_net_id"] = sdn_net_id
instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "VIM_ERROR",
"error_msg": task["error_msg"]}
- return False, instance_element_update
+ return instance_element_update
def del_net(self, task):
net_vim_id = task["vim_id"]
self.ovim.delete_port(port['uuid'], idempotent=True)
self.ovim.delete_network(sdn_net_id, idempotent=True)
if net_vim_id:
- self.vim.delete_network(net_vim_id)
- task["status"] = "DONE"
+ self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except ovimException as e:
task["error_msg"] = self._format_vim_error_msg("ovimException obtaining and deleting external "
"ports for net {}: {}".format(sdn_net_id, str(e)))
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
-
- ## Service Function Instances
+ return None
+ # Service Function Instances
def new_sfi(self, task):
vim_sfi_id = None
try:
+ # Waits for interfaces to be ready (avoids failure)
+ time.sleep(1)
dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
task_id = task["instance_action_id"] + "." + str(task["task_index"])
error_text = ""
- interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
+ interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces")
+ ingress_interface_id = task.get("extra").get("params").get("ingress_interface_id")
+ egress_interface_id = task.get("extra").get("params").get("egress_interface_id")
+ ingress_vim_interface_id = None
+ egress_vim_interface_id = None
+ for vim_interface, interface_data in interfaces.iteritems():
+ if interface_data.get("interface_id") == ingress_interface_id:
+ ingress_vim_interface_id = vim_interface
+ break
+ if ingress_interface_id != egress_interface_id:
+ for vim_interface, interface_data in interfaces.iteritems():
+ if interface_data.get("interface_id") == egress_interface_id:
+ egress_vim_interface_id = vim_interface
+ break
+ else:
+ egress_vim_interface_id = ingress_vim_interface_id
+ if not ingress_vim_interface_id or not egress_vim_interface_id:
+ error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
+ ingress_vim_interface_id, egress_vim_interface_id)
+ self.logger.error(error_text)
+ task["error_msg"] = error_text
+ task["status"] = "FAILED"
+ task["vim_id"] = None
+ return None
# At the moment, every port associated with the VM will be used both as ingress and egress ports.
- # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
- # first ingress and first egress ports will be used to create the SFI (Port Pair).
- port_id_list = [interfaces[0]]
+ # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
+ # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
+ ingress_port_id_list = [ingress_vim_interface_id]
+ egress_port_id_list = [egress_vim_interface_id]
name = "sfi-%s" % task["item_id"][:8]
# By default no form of IETF SFC Encapsulation will be used
- vim_sfi_id = self.vim.new_sfi(name, port_id_list, port_id_list, sfc_encap=False)
+ vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)
task["extra"]["created"] = True
+ task["extra"]["vim_status"] = "ACTIVE"
task["error_msg"] = None
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["vim_id"] = vim_sfi_id
instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
task["status"] = "FAILED"
task["vim_id"] = None
instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
- return False, instance_element_update
+ return instance_element_update
def del_sfi(self, task):
sfi_vim_id = task["vim_id"]
try:
self.vim.delete_sfi(sfi_vim_id)
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
+ return None
def new_sf(self, task):
vim_sf_id = None
try:
task_id = task["instance_action_id"] + "." + str(task["task_index"])
error_text = ""
- depending_tasks = [ "TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
- #sfis = task.get("depends").values()[0].get("extra").get("params")[5]
+ depending_tasks = ["TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
+ # sfis = task.get("depends").values()[0].get("extra").get("params")[5]
sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
sfi_id_list = []
for sfi in sfis:
vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
task["extra"]["created"] = True
+ task["extra"]["vim_status"] = "ACTIVE"
task["error_msg"] = None
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["vim_id"] = vim_sf_id
instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
task["status"] = "FAILED"
task["vim_id"] = None
instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
- return False, instance_element_update
+ return instance_element_update
def del_sf(self, task):
sf_vim_id = task["vim_id"]
try:
self.vim.delete_sf(sf_vim_id)
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
+ return None
def new_classification(self, task):
vim_classification_id = None
try:
params = task["params"]
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depending_task = "TASK-" + str(task.get("extra").get("depends_on")[0])
+ dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
error_text = ""
- interfaces = task.get("depends").get(depending_task).get("vim_interfaces").keys()
+ interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
# Bear in mind that different VIM connectors might support Classifications differently.
# In the case of OpenStack, only the first VNF attached to the classifier will be used
# to create the Classification(s) (the "logical source port" of the "Flow Classifier").
ip_proto = int(params.get("ip_proto"))
source_ip = params.get("source_ip")
destination_ip = params.get("destination_ip")
- if ip_proto == 1:
- ip_proto = 'icmp'
- elif ip_proto == 6:
- ip_proto = 'tcp'
- elif ip_proto == 17:
- ip_proto = 'udp'
- if '/' not in source_ip:
- source_ip += '/32'
- if '/' not in destination_ip:
- destination_ip += '/32'
- definition = {
- "logical_source_port": interfaces[0],
- "protocol": ip_proto,
- "source_ip_prefix": source_ip,
- "destination_ip_prefix": destination_ip,
- "source_port_range_min": params.get("source_port"),
- "source_port_range_max": params.get("source_port"),
- "destination_port_range_min": params.get("destination_port"),
- "destination_port_range_max": params.get("destination_port"),
- }
+ source_port = params.get("source_port")
+ destination_port = params.get("destination_port")
+ definition = {"logical_source_port": interfaces[0]}
+ if ip_proto:
+ if ip_proto == 1:
+ ip_proto = 'icmp'
+ elif ip_proto == 6:
+ ip_proto = 'tcp'
+ elif ip_proto == 17:
+ ip_proto = 'udp'
+ definition["protocol"] = ip_proto
+ if source_ip:
+ if '/' not in source_ip:
+ source_ip += '/32'
+ definition["source_ip_prefix"] = source_ip
+ if source_port:
+ definition["source_port_range_min"] = source_port
+ definition["source_port_range_max"] = source_port
+ if destination_port:
+ definition["destination_port_range_min"] = destination_port
+ definition["destination_port_range_max"] = destination_port
+ if destination_ip:
+ if '/' not in destination_ip:
+ destination_ip += '/32'
+ definition["destination_ip_prefix"] = destination_ip
vim_classification_id = self.vim.new_classification(
name, 'legacy_flow_classifier', definition)
task["extra"]["created"] = True
+ task["extra"]["vim_status"] = "ACTIVE"
task["error_msg"] = None
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["vim_id"] = vim_classification_id
- instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id, "error_msg": None}
- return True, instance_element_update
+ instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
+ "error_msg": None}
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
task["status"] = "FAILED"
task["vim_id"] = None
instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
- return False, instance_element_update
+ return instance_element_update
def del_classification(self, task):
classification_vim_id = task["vim_id"]
try:
self.vim.delete_classification(classification_vim_id)
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
+ return None
def new_sfp(self, task):
vim_sfp_id = None
try:
- params = task["params"]
task_id = task["instance_action_id"] + "." + str(task["task_index"])
- depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in task.get("extra").get("depends_on")]
+ depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
+ task.get("extra").get("depends_on")]
error_text = ""
sf_id_list = []
classification_id_list = []
vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
task["extra"]["created"] = True
+ task["extra"]["vim_status"] = "ACTIVE"
task["error_msg"] = None
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["vim_id"] = vim_sfp_id
instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
- return True, instance_element_update
+ return instance_element_update
except (vimconn.vimconnException, VimThreadException) as e:
self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
task["status"] = "FAILED"
task["vim_id"] = None
instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
- return False, instance_element_update
- return
+ return instance_element_update
def del_sfp(self, task):
sfp_vim_id = task["vim_id"]
try:
self.vim.delete_sfp(sfp_vim_id)
- task["status"] = "DONE"
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
task["error_msg"] = None
- return True, None
+ return None
except vimconn.vimconnException as e:
task["error_msg"] = self._format_vim_error_msg(str(e))
if isinstance(e, vimconn.vimconnNotFoundException):
# If not found mark as Done and fill error_msg
- task["status"] = "DONE"
- return True, None
+ task["status"] = "FINISHED" # with FINISHED instead of DONE it will not be refreshing
+ return None
task["status"] = "FAILED"
- return False, None
+ return None
'dhcp_count': number of IPs to grant.
'shared': if this network can be seen/use by other tenants/organization
'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
- Returns the network identifier on success or raises and exception on failure
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
"""
raise vimconnNotImplemented( "Should have implemented this" )
"""
raise vimconnNotImplemented( "Should have implemented this" )
- def delete_network(self, net_id):
- """Deletes a tenant network from VIM
+ def delete_network(self, net_id, created_items=None):
+ """
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
Returns the network identifier or raises an exception upon error or when network is not found
"""
raise vimconnNotImplemented( "Should have implemented this" )
def delete_vminstance(self, vm_id, created_items=None):
"""
- Removes a VM instance from VIM and each associate elements
+ Removes a VM instance from VIM and its associated elements
:param vm_id: VIM identifier of the VM, provided by method new_vminstance
:param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
action_vminstance
'count': number of IPs to grant.
'shared': if this network can be seen/use by other tenants/organization
'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
- Returns the network identifier on success or raises and exception on failure
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
"""
self.logger.debug("Adding a subnet to VPC")
try:
+ created_items = {}
self._reload_connection()
subnet = None
vpc_id = self.vpc_id
subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block'])
cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0]
subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block)
- return subnet.id
+ return subnet.id, created_items
except Exception as e:
self.format_vimconn_exception(e)
except Exception as e:
self.format_vimconn_exception(e)
- def delete_network(self, net_id):
- """Deletes a tenant network from VIM
+ def delete_network(self, net_id, created_items=None):
+ """
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
Returns the network identifier or raises an exception upon error or when network is not found
"""
# os.remove("manage_bridge_OSM")
def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None): # , **vim_specific):
- """Returns the network identifier"""
+ """Adds a tenant network to VIM
+ Params:
+ 'net_name': name of the network
+ 'net_type': one of:
+ 'bridge': overlay isolated network
+ 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+ 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+ 'ip_profile': is a dict containing the IP parameters of the network
+ 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+ 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+ 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+ 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+ 'dhcp_enabled': True or False
+ 'dhcp_start_address': ip_schema, first IP to grant
+ 'dhcp_count': number of IPs to grant.
+ 'shared': if this network can be seen/use by other tenants/organization
+ 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
+ """
+
# oca library method cannot be used in this case (problem with cluster parameters)
try:
+ created_items = {}
# vlan = str(random.randint(self.config["vlan"]["start-range"], self.config["vlan"]["finish-range"]))
# self.create_bridge_host(vlan)
bridge_config = self.config["bridge_service"]
</methodCall>'.format(self.user, self.passwd, config, self.config["cluster"]["id"])
r = requests.post(self.url, params)
obj = untangle.parse(str(r.content))
- return obj.methodResponse.params.param.value.array.data.value[1].i4.cdata.encode('utf-8')
+ return obj.methodResponse.params.param.value.array.data.value[1].i4.cdata.encode('utf-8'), created_items
except Exception as e:
self.logger.error("Create new network error: " + str(e))
raise vimconn.vimconnException(e)
self.logger.error("Get network " + str(net_id) + " error): " + str(e))
raise vimconn.vimconnException(e)
- def delete_network(self, net_id):
- """Deletes a tenant network from VIM
- Returns the network identifier
+ def delete_network(self, net_id, created_items=None):
+ """
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
+ Returns the network identifier or raises an exception upon error or when network is not found
"""
try:
# self.delete_bridge_host()
template_name = flavor_data["name"][:-4]
name = 'NAME = "{}" '.format(template_name)
cpu = 'CPU = "{}" '.format(flavor_data["vcpus"])
+ vcpu = 'VCPU = "{}" '.format(flavor_data["vcpus"])
memory = 'MEMORY = "{}" '.format(flavor_data["ram"])
context = 'CONTEXT = [NETWORK = "YES",SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" ] '
graphics = 'GRAPHICS = [ LISTEN = "0.0.0.0", TYPE = "VNC" ] '
sched_requeriments = 'CLUSTER_ID={}'.format(self.config["cluster"]["id"])
- template = name + cpu + memory + context + graphics + sched_requeriments
+ template = name + cpu + vcpu + memory + context + graphics + sched_requeriments
template_id = oca.VmTemplate.allocate(client, template)
return template_id
except Exception as e:
for template in listaTemplate:
if str(template.id) == str(flavor_id):
cpu = ' CPU = "{}"'.format(template.template.cpu)
+ vcpu = ' VCPU = "{}"'.format(template.template.cpu)
memory = ' MEMORY = "{}"'.format(template.template.memory)
context = ' CONTEXT = [NETWORK = "YES",SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" ]'
graphics = ' GRAPHICS = [ LISTEN = "0.0.0.0", TYPE = "VNC" ]'
disk = ' DISK = [ IMAGE_ID = {}]'.format(image_id)
- sched_requeriments = ' SCHED_REQUIREMENTS = "CLUSTER_ID={}"'.format(self.config["cluster"]["id"])
- template_updated = cpu + memory + context + graphics + disk + sched_requeriments
+ template_updated = cpu + vcpu + memory + context + graphics + disk
networkListVim = oca.VirtualNetworkPool(client)
networkListVim.info()
network = ""
if not network_found:
raise vimconn.vimconnNotFoundException("Network {} not found".format(net["net_id"]))
template_updated += network
+ if isinstance(cloud_config, dict):
+ if cloud_config.get("user-data"):
+ if isinstance(cloud_config["user-data"], str):
+ template_updated += cloud_config["user-data"]
+ else:
+ for u in cloud_config["user-data"]:
+ template_updated += u
oca.VmTemplate.update(template, template_updated)
self.logger.info(
"Instanciating in OpenNebula a new VM name:{} id:{}".format(template.name, template.id))
if config.get('dataplane_net_vlan_range') is not None:
#validate vlan ranges provided by user
- self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'))
+ self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
+
+ if config.get('multisegment_vlan_range') is not None:
+ #validate vlan ranges provided by user
+ self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
config)
self.persistent_info = persistent_info
self.availability_zone = persistent_info.get('availability_zone', None)
self.session = persistent_info.get('session', {'reload_client': True})
+ self.my_tenant_id = self.session.get('my_tenant_id')
self.nova = self.session.get('nova')
self.neutron = self.session.get('neutron')
self.cinder = self.session.get('cinder')
self.logger = logging.getLogger('openmano.vim.openstack')
+ # allow security_groups to be a list or a single string
+ if isinstance(self.config.get('security_groups'), str):
+ self.config['security_groups'] = [self.config['security_groups']]
+ self.security_groups_id = None
+
####### VIO Specific Changes #########
if self.vim_type == "VIO":
self.logger = logging.getLogger('openmano.vim.vio')
tenant_name=self.tenant_name,
tenant_id=self.tenant_id)
sess = session.Session(auth=auth, verify=self.verify)
+ # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+ region_name = self.config.get('region_name')
if self.api_version3:
- self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type)
+ self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
else:
self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
self.session['keystone'] = self.keystone
version = self.config.get("microversion")
if not version:
version = "2.1"
- self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type)
- self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type)
- self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type)
+ # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+ self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ try:
+ self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+ except Exception as e:
+ self.logger.error("Cannot get project_id from session", exc_info=True)
if self.endpoint_type == "internalURL":
glance_service_id = self.keystone.services.list(name="glance")[0].id
glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
else:
glance_endpoint = None
self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
- #using version 1 of glance client in new_image()
+ # using version 1 of glance client in new_image()
# self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
# endpoint=glance_endpoint)
self.session['reload_client'] = False
# add availablity zone info inside self.persistent_info
self._set_availablity_zones()
self.persistent_info['availability_zone'] = self.availability_zone
+ self.security_groups_id = None # force to get again security_groups_ids next time they are needed
def __net_os2mano(self, net_list_dict):
'''Transform the net openstack format to mano format
def _format_exception(self, exception):
'''Transform a keystone, nova, neutron exception into a vimconn exception'''
- if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound, gl1Exceptions.HTTPNotFound)):
- raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
+
+ # Fixing bug 665 https://osm.etsi.org/bugzilla/show_bug.cgi?id=665
+ # There are some openstack versions that message error are unicode with non English
+ message_error = exception.message
+ if isinstance(message_error, unicode):
+ message_error = message_error.encode("utf")
+
+ if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
+ gl1Exceptions.HTTPNotFound)):
+ raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
- raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
- raise vimconn.vimconnException(type(exception).__name__ + ": " + str(exception))
+ raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
neExceptions.NeutronException)):
- raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
+ raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, nvExceptions.Conflict):
- raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
+ raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
elif isinstance(exception, vimconn.vimconnException):
raise exception
else: # ()
- self.logger.error("General Exception " + str(exception), exc_info=True)
- raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ self.logger.error("General Exception " + message_error, exc_info=True)
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+
+ def _get_ids_from_name(self):
+ """
+ Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
+ :return: None
+ """
+ # get tenant_id if only tenant_name is supplied
+ self._reload_connection()
+ if not self.my_tenant_id:
+ raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+ format(self.tenant_name, self.tenant_id))
+ if self.config.get('security_groups') and not self.security_groups_id:
+ # convert from name to id
+ neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+
+ self.security_groups_id = []
+ for sg in self.config.get('security_groups'):
+ for neutron_sg in neutron_sg_list:
+ if sg in (neutron_sg["id"], neutron_sg["name"]):
+ self.security_groups_id.append(neutron_sg["id"])
+ break
+ else:
+ self.security_groups_id = None
+ raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
def get_tenant_list(self, filter_dict={}):
'''Obtain tenants of VIM
self._format_exception(e)
def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
- '''Adds a tenant network to VIM. Returns the network identifier'''
+ """Adds a tenant network to VIM
+ Params:
+ 'net_name': name of the network
+ 'net_type': one of:
+ 'bridge': overlay isolated network
+ 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+ 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+ 'ip_profile': is a dict containing the IP parameters of the network
+ 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+ 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+ 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+ 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+ 'dhcp_enabled': True or False
+ 'dhcp_start_address': ip_schema, first IP to grant
+ 'dhcp_count': number of IPs to grant.
+ 'shared': if this network can be seen/use by other tenants/organization
+ 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
+ """
self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
- #self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
+ # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
try:
new_net = None
+ created_items = {}
self._reload_connection()
network_dict = {'name': net_name, 'admin_state_up': True}
if net_type=="data" or net_type=="ptp":
if self.config.get('dataplane_physical_net') == None:
raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
- network_dict["provider:physical_network"] = self.config['dataplane_physical_net'] #"physnet_sriov" #TODO physical
- network_dict["provider:network_type"] = "vlan"
- if vlan!=None:
- network_dict["provider:network_type"] = vlan
+ if not self.config.get('multisegment_support'):
+ network_dict["provider:physical_network"] = self.config[
+ 'dataplane_physical_net'] # "physnet_sriov" #TODO physical
+ network_dict["provider:network_type"] = "vlan"
+ if vlan!=None:
+ network_dict["provider:network_type"] = vlan
+ else:
+ ###### Multi-segment case ######
+ segment_list = []
+ segment1_dict = {}
+ segment1_dict["provider:physical_network"] = ''
+ segment1_dict["provider:network_type"] = 'vxlan'
+ segment_list.append(segment1_dict)
+ segment2_dict = {}
+ segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net']
+ segment2_dict["provider:network_type"] = "vlan"
+ if self.config.get('multisegment_vlan_range'):
+ vlanID = self._generate_multisegment_vlanID()
+ segment2_dict["provider:segmentation_id"] = vlanID
+ # else
+ # raise vimconn.vimconnConflictException(
+ # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network")
+ segment_list.append(segment2_dict)
+ network_dict["segments"] = segment_list
####### VIO Specific Changes #########
if self.vim_type == "VIO":
"'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
"at config value before creating sriov network with vlan tag")
- network_dict["provider:segmentation_id"] = self._genrate_vlanID()
+ network_dict["provider:segmentation_id"] = self._generate_vlanID()
- network_dict["shared"]=shared
- new_net=self.neutron.create_network({'network':network_dict})
- #print new_net
- #create subnetwork, even if there is no profile
+ network_dict["shared"] = shared
+ new_net = self.neutron.create_network({'network':network_dict})
+ # print new_net
+ # create subnetwork, even if there is no profile
if not ip_profile:
ip_profile = {}
if not ip_profile.get('subnet_address'):
ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
if 'ip_version' not in ip_profile:
ip_profile['ip_version'] = "IPv4"
- subnet = {"name":net_name+"-subnet",
+ subnet = {"name": net_name+"-subnet",
"network_id": new_net["network"]["id"],
"ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
"cidr": ip_profile['subnet_address']
subnet['allocation_pools'][0]['end'] = ip_str
#self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet} )
- return new_net["network"]["id"]
+
+ if net_type == "data" and self.config.get('multisegment_support'):
+ if self.config.get('l2gw_support'):
+ l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
+ for l2gw in l2gw_list:
+ l2gw_conn = {}
+ l2gw_conn["l2_gateway_id"] = l2gw["id"]
+ l2gw_conn["network_id"] = new_net["network"]["id"]
+ l2gw_conn["segmentation_id"] = str(vlanID)
+ new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
+ created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
+ return new_net["network"]["id"], created_items
except Exception as e:
+ #delete l2gw connections (if any) before deleting the network
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
+ try:
+ k_item, _, k_id = k.partition(":")
+ if k_item == "l2gwconn":
+ self.neutron.delete_l2_gateway_connection(k_id)
+ except Exception as e2:
+ self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
if new_net:
self.neutron.delete_network(new_net['network']['id'])
self._format_exception(e)
net["encapsulation_id"] = net.get('provider:segmentation_id')
return net
- def delete_network(self, net_id):
- '''Deletes a tenant network from VIM. Returns the old network identifier'''
+ def delete_network(self, net_id, created_items=None):
+ """
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
+ Returns the network identifier or raises an exception upon error or when network is not found
+ """
self.logger.debug("Deleting network '%s' from VIM", net_id)
+ if created_items == None:
+ created_items = {}
try:
self._reload_connection()
+ #delete l2gw connections (if any) before deleting the network
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
+ try:
+ k_item, _, k_id = k.partition(":")
+ if k_item == "l2gwconn":
+ self.neutron.delete_l2_gateway_connection(k_id)
+ except Exception as e:
+ self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
#delete VM ports attached to this networks before the network
ports = self.neutron.list_ports(network_id=net_id)
for p in ports['ports']:
self._reload_connection()
# metadata_vpci = {} # For a specific neutron plugin
block_device_mapping = None
+
for net in net_list:
if not net.get("net_id"): # skip non connected iface
continue
- port_dict={
+ port_dict = {
"network_id": net["net_id"],
"name": net.get("name"),
"admin_state_up": True
}
+ if self.config.get("security_groups") and net.get("port_security") is not False and \
+ not self.config.get("no_port_security_extension"):
+ if not self.security_groups_id:
+ self._get_ids_from_name()
+ port_dict["security_groups"] = self.security_groups_id
+
if net["type"]=="virtual":
pass
# if "vpci" in net:
self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
name, image_id, flavor_id, str(net_list_vim), description)
- security_groups = self.config.get('security_groups')
- if type(security_groups) is str:
- security_groups = ( security_groups, )
# cloud config
config_drive, userdata = self._create_user_data(cloud_config)
self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
"availability_zone={}, key_name={}, userdata={}, config_drive={}, "
"block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
- security_groups, vm_av_zone, self.config.get('keypair'),
- userdata, config_drive, block_device_mapping))
+ self.config.get("security_groups"), vm_av_zone,
+ self.config.get('keypair'), userdata, config_drive,
+ block_device_mapping))
server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
- security_groups=security_groups,
+ security_groups=self.config.get("security_groups"),
+ # TODO remove security_groups in future versions. Already at neutron port
availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
userdata=userdata,
#TODO insert exception vimconn.HTTP_Unauthorized
####### VIO Specific Changes #########
- def _genrate_vlanID(self):
+ def _generate_vlanID(self):
"""
Method to get unused vlanID
Args:
" All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
- def _validate_vlan_ranges(self, dataplane_net_vlan_range):
+ def _generate_multisegment_vlanID(self):
+ """
+ Method to get unused vlanID
+ Args:
+ None
+ Returns:
+ vlanID
+ """
+ #Get used VLAN IDs
+ usedVlanIDs = []
+ networks = self.get_network_list()
+ for net in networks:
+ if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
+ usedVlanIDs.append(net.get('provider:segmentation_id'))
+ elif net.get('segments'):
+ for segment in net.get('segments'):
+ if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
+ usedVlanIDs.append(segment.get('provider:segmentation_id'))
+ used_vlanIDs = set(usedVlanIDs)
+
+ #find unused VLAN ID
+ for vlanID_range in self.config.get('multisegment_vlan_range'):
+ try:
+ start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+ for vlanID in xrange(start_vlanid, end_vlanid + 1):
+ if vlanID not in used_vlanIDs:
+ return vlanID
+ except Exception as exp:
+ raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+ else:
+ raise vimconn.vimconnConflictException("Unable to create the VLAN segment."\
+ " All VLAN IDs {} are in use.".format(self.config.get('multisegment_vlan_range')))
+
+
+ def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
"""
Method to validate user given vlanID ranges
Args: None
Returns: None
"""
- for vlanID_range in dataplane_net_vlan_range:
+ for vlanID_range in input_vlan_range:
vlan_range = vlanID_range.replace(" ", "")
#validate format
vlanID_pattern = r'(\d)*-(\d)*$'
match_obj = re.match(vlanID_pattern, vlan_range)
if not match_obj:
- raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}.You must provide "\
- "'dataplane_net_vlan_range' in format [start_ID - end_ID].".format(vlanID_range))
+ raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}.You must provide "\
+ "'{}' in format [start_ID - end_ID].".format(text_vlan_range, vlanID_range, text_vlan_range))
start_vlanid , end_vlanid = map(int,vlan_range.split("-"))
if start_vlanid <= 0 :
- raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+ raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
"Start ID can not be zero. For VLAN "\
- "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+ "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
if end_vlanid > 4094 :
- raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+ raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
"End VLAN ID can not be greater than 4094. For VLAN "\
- "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+ "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
if start_vlanid > end_vlanid:
- raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
- "You must provide a 'dataplane_net_vlan_range' in format start_ID - end_ID and "\
- "start_ID < end_ID ".format(vlanID_range))
+ raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
+ "You must provide '{}' in format start_ID - end_ID and "\
+ "start_ID < end_ID ".format(text_vlan_range, vlanID_range, text_vlan_range))
#NOT USED FUNCTIONS
self._reload_connection()
# In networking-sfc the MPLS encapsulation is legacy
# should be used when no full SFC Encapsulation is intended
- sfc_encap = 'mpls'
+ correlation = 'mpls'
if sfc_encap:
correlation = 'nsh'
sfp_dict = {'name': name,
self._format_request_exception(e)
def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None): #, **vim_specific):
- '''Adds a tenant network to VIM'''
- '''Returns the network identifier'''
+ """Adds a tenant network to VIM
+ Params:
+ 'net_name': name of the network
+ 'net_type': one of:
+ 'bridge': overlay isolated network
+ 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+ 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+ 'ip_profile': is a dict containing the IP parameters of the network
+ 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+ 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+ 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+ 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+ 'dhcp_enabled': True or False
+ 'dhcp_start_address': ip_schema, first IP to grant
+ 'dhcp_count': number of IPs to grant.
+ 'shared': if this network can be seen/use by other tenants/organization
+ 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
+ """
try:
+ created_items = {}
self._get_my_tenant()
if net_type=="bridge":
net_type="bridge_data"
#if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
network_id = response['network']['id']
- return network_id
+ return network_id, created_items
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
- def delete_network(self, net_id):
- '''Deletes a tenant network from VIM'''
- '''Returns the network identifier'''
+ def delete_network(self, net_id, created_items=None):
+ """
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
+ Returns the network identifier or raises an exception upon error or when network is not found
+ """
try:
self._get_my_tenant()
url = self.url+'/networks/'+net_id
import vimconn
import os
+import shutil
+import subprocess
+import tempfile
import traceback
import itertools
import requests
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800
-API_VERSION = '5.9'
+API_VERSION = '31.0'
__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
__date__ = "$09-Mar-2018 11:09:29$"
Returns:
The return client object that latter can be used to connect to vcloud director as admin for provider vdc
"""
-
self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
try:
host = self.url
org = 'System'
client_as_admin = Client(host, verify_ssl_certs=False)
+ client_as_admin.set_highest_supported_version()
client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
except Exception as e:
raise vimconn.vimconnException(
Returns:
The return client object that latter can be used to connect to vCloud director as admin for VDC
"""
-
try:
self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
self.user,
self.org_name))
host = self.url
client = Client(host, verify_ssl_certs=False)
+ client.set_highest_supported_version()
client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
except:
raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
return vdclist
- def new_network(self, net_name, net_type, ip_profile=None, shared=False):
+ def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
"""Adds a tenant network to VIM
- net_name is the name
- net_type can be 'bridge','data'.'ptp'.
- ip_profile is a dict containing the IP parameters of the network
- shared is a boolean
- Returns the network identifier"""
+ Params:
+ 'net_name': name of the network
+ 'net_type': one of:
+ 'bridge': overlay isolated network
+ 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+ 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
+ 'ip_profile': is a dict containing the IP parameters of the network
+ 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+ 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+ 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+ 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+ 'dhcp_enabled': True or False
+ 'dhcp_start_address': ip_schema, first IP to grant
+ 'dhcp_count': number of IPs to grant.
+ 'shared': if this network can be seen/use by other tenants/organization
+ 'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+ Returns a tuple with the network identifier and created_items, or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
+ """
self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
.format(net_name, net_type, ip_profile, shared))
+ created_items = {}
isshared = 'false'
if shared:
isshared = 'true'
network_uuid = self.create_network(network_name=net_name, net_type=net_type,
ip_profile=ip_profile, isshared=isshared)
if network_uuid is not None:
- return network_uuid
+ return network_uuid, created_items
else:
raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
return filter_dict
- def delete_network(self, net_id):
+ def delete_network(self, net_id, created_items=None):
"""
- Method Deletes a tenant network from VIM, provide the network id.
-
- Returns the network identifier or raise an exception
+ Removes a tenant network from VIM and its associated elements
+ :param net_id: VIM identifier of the network, provided by method new_network
+ :param created_items: dictionary with extra items to be deleted. provided by method new_network
+ Returns the network identifier or raises an exception upon error or when network is not found
"""
# ############# Stub code for SRIOV #################
"""
for catalog in catalogs:
if catalog['name'] == catalog_name:
- return True
- return False
+ return catalog['id']
def create_vimcatalog(self, vca=None, catalog_name=None):
""" Create new catalog entry in vCloud director.
catalog_name catalog that client wish to create. Note no validation done for a name.
Client must make sure that provide valid string representation.
- Return (bool) True if catalog created.
+ Returns catalog id if catalog created else None.
"""
try:
- result = vca.create_catalog(catalog_name, catalog_name)
- if result is not None:
- return True
+ lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+ if lxml_catalog_element:
+ id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+ return id_attr_value.split(':')[-1]
catalogs = vca.list_catalogs()
- except:
- return False
+ except Exception as ex:
+ self.logger.error(
+ 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+ raise
return self.catalog_exists(catalog_name, catalogs)
# noinspection PyIncorrectDocstring
if len(catalogs) == 0:
self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
- result = self.create_vimcatalog(org, catalog_md5_name)
- if not result:
+ if self.create_vimcatalog(org, catalog_md5_name) is None:
raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
# if we didn't find existing catalog we create a new one and upload image.
self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
- result = self.create_vimcatalog(org, catalog_md5_name)
- if not result:
+ if self.create_vimcatalog(org, catalog_md5_name) is None:
raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
# cloud-init for ssh-key injection
if cloud_config:
- self.cloud_init(vapp,cloud_config)
+ # Create a catalog which will be carrying the config drive ISO
+ # This catalog is deleted during vApp deletion. The catalog name carries
+ # vApp UUID and that's how it gets identified during its deletion.
+ config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+ self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+ config_drive_catalog_name))
+ config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+ if config_drive_catalog_id is None:
+ error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+ "ISO".format(config_drive_catalog_name)
+ raise Exception(error_msg)
+
+ # Create config-drive ISO
+ _, userdata = self._create_user_data(cloud_config)
+ # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+ iso_path = self.create_config_drive_iso(userdata)
+ self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+
+ self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+ self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+ # Attach the config-drive ISO to the VM
+ self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+ # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
+ time.sleep(5)
+ self.insert_media_to_vm(vapp, config_drive_catalog_id)
+ shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
# If VM has PCI devices or SRIOV reserve memory for VM
if reserve_memory:
self.logger.error("new_vminstance(): failed to power on vApp "\
"{}".format(vmname_andid))
- except Exception as exp :
+ except Exception as exp:
+ try:
+ self.delete_vminstance(vapp_uuid)
+ except Exception as exp2:
+ self.logger.error("new_vminstance rollback fail {}".format(exp2))
# it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
.format(name, exp))
else:
raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+ def create_config_drive_iso(self, user_data):
+ tmpdir = tempfile.mkdtemp()
+ iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+ latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+ os.makedirs(latest_dir)
+ with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+ open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+ userdata_file_obj.write(user_data)
+ meta_file_obj.write(json.dumps({"availability_zone": "nova",
+ "launch_index": 0,
+ "name": "ConfigDrive",
+ "uuid": str(uuid.uuid4())}
+ )
+ )
+ genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+ iso_path=iso_path, source_dir_path=tmpdir)
+ self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+ try:
+ FNULL = open(os.devnull, 'w')
+ subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
+ except subprocess.CalledProcessError as e:
+ shutil.rmtree(tmpdir, ignore_errors=True)
+ error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+ self.logger.error(error_msg)
+ raise Exception(error_msg)
+ return iso_path
+
+ def upload_iso_to_catalog(self, catalog_id, iso_file_path):
+ if not os.path.isfile(iso_file_path):
+ error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+ self.logger.error(error_msg)
+ raise Exception(error_msg)
+ iso_file_stat = os.stat(iso_file_path)
+ xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+ <Media
+ xmlns="http://www.vmware.com/vcloud/v1.5"
+ name="{iso_name}"
+ size="{iso_size}"
+ imageType="iso">
+ <Description>ISO image for config-drive</Description>
+ </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+ catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+ response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+
+ if response.status_code != 201:
+ error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+ self.logger.error(error_msg)
+ raise Exception(error_msg)
+
+ catalogItem = XmlElementTree.fromstring(response.content)
+ entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+ entity_href = entity.get('href')
+
+ response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+ if response.status_code != 200:
+ raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+
+ match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+ if match:
+ media_upload_href = match.group(1)
+ else:
+ raise Exception('Could not parse the upload URL for the media file from the last response')
+
+ headers['Content-Type'] = 'application/octet-stream'
+ response = self.perform_request(req_type='PUT',
+ url=media_upload_href,
+ headers=headers,
+ data=open(iso_file_path, 'rb'))
+
+ if response.status_code != 200:
+ raise Exception('PUT request to "{}" failed'.format(media_upload_href))
def get_vcd_availibility_zones(self,respool_href, headers):
""" Method to find presence of av zone is VIM resource pool
self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
else:
self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
+ config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+ catalog_list = self.get_image_list()
+ try:
+ config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+ if catalog_['name'] == config_drive_catalog_name][0]
+ except IndexError:
+ pass
+ if config_drive_catalog_id:
+ self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+ 'vapp_name "{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+ self.delete_image(config_drive_catalog_id)
return vm__vim_uuid
except:
self.logger.debug(traceback.format_exc())
else:
self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
- def get_vminstance_console(self, vm_id, console_type="vnc"):
+ def get_vminstance_console(self, vm_id, console_type="novnc"):
"""
Get a console for the virtual machine
Params:
port: the http, ssh, ... port
suffix: extra text, e.g. the http path and query string
"""
- raise vimconn.vimconnNotImplemented("Should have implemented this")
+ console_dict = {}
+
+ if console_type==None or console_type=='novnc':
+
+ url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
+
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ response = self.perform_request(req_type='POST',
+ url=url_rest_call,
+ headers=headers)
+
+ if response.status_code == 403:
+ response = self.retry_rest('POST', url_rest_call)
+
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+ "VM Mks ticket details")
+ s = re.search("<Host>(.*?)</Host>",response.content)
+ console_dict['server'] = s.group(1) if s else None
+ s1 = re.search("<Port>(\d+)</Port>",response.content)
+ console_dict['port'] = s1.group(1) if s1 else None
+
+
+ url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
+
+ headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+ 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+ response = self.perform_request(req_type='POST',
+ url=url_rest_call,
+ headers=headers)
+
+ if response.status_code == 403:
+ response = self.retry_rest('POST', url_rest_call)
+
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+ "VM console details")
+ s = re.search(">.*?/(vm-\d+.*)</",response.content)
+ console_dict['suffix'] = s.group(1) if s else None
+ console_dict['protocol'] = "https"
+
+ return console_dict
# NOT USED METHODS in current version
#Creating all networks as Direct Org VDC type networks.
#Unused in case of Underlay (data/ptp) network interface.
- fence_mode="bridged"
+ fence_mode="isolated"
is_inherited='false'
dns_list = dns_address.split(";")
dns1 = dns_list[0]
</IpRanges>
</IpScope>
</IpScopes>
- <ParentNetwork href="{9:s}"/>
- <FenceMode>{10:s}</FenceMode>
+ <FenceMode>{9:s}</FenceMode>
</Configuration>
- <IsShared>{11:s}</IsShared>
+ <IsShared>{10:s}</IsShared>
</OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
subnet_address, dns1, dns2_text, dhcp_enabled,
- dhcp_start_address, dhcp_end_address, available_networks,
+ dhcp_start_address, dhcp_end_address,
fence_mode, isshared)
headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
if iso_name and media_id:
data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<ns6:MediaInsertOrEjectParams
- xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+ xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
+ xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
+ xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
+ xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+ xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
+ xmlns:ns7="http://www.vmware.com/schema/ovf"
+ xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
<ns6:Media
type="application/vnd.vmware.vcloud.media+xml"
- name="{}.iso"
+ name="{}"
id="urn:vcloud:media:{}"
href="https://{}/api/media/{}"/>
</ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
headers=headers)
if response.status_code != 202:
- self.logger.error("Failed to insert CD-ROM to vm")
- raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
- "ISO image to vm")
+ error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+ "Status code {}".format(response.text, response.status_code)
+ self.logger.error(error_msg)
+ raise vimconn.vimconnException(error_msg)
else:
task = self.get_task_from_response(response.content)
result = self.client.get_task_monitor().wait_for_success(task=task)
tenant (str): UUID of the OSM tenant
Returns:
- str: UUID of the WIM account that is able to connect all the
+ object with the WIM account that is able to connect all the
datacenters.
"""
wim_id = self.find_common_wim(datacenter_ids, tenant)
- return self.persist.get_wim_account_by(wim_id, tenant)['uuid']
+ return self.persist.get_wim_account_by(wim_id, tenant)
def derive_wan_link(self,
wim_usage,
instance_scenario_id, sce_net_id,
- networks, tenant):
+ networks, tenant, related=None):
"""Create a instance_wim_nets record for the given information"""
if sce_net_id in wim_usage:
account_id = wim_usage[sce_net_id]
'instance_scenario_id': instance_scenario_id,
'sce_net_id': sce_net_id,
'wim_id': wim_id,
- 'wim_account_id': account['uuid']
+ 'wim_account_id': account['uuid'],
+ 'related': related
}
def derive_wan_links(self, wim_usage, networks, tenant=None):
(NSR).
Arguments:
- wim_usage(dict): Mapping between sce_net_id and wim_id
+ wim_usage(dict): Mapping between sce_net_id and wim_id. If wim_id is False, WAN links are not created
networks(list): Dicts containing the information about the networks
that will be instantiated to materialize a Network Service
(scenario) instance.
list: list of WAN links to be written to the database
"""
# Group networks by key=(instance_scenario_id, sce_net_id)
+ related = None
+ if networks:
+ related = networks[0].get("related")
filtered = _filter_multi_vim(networks)
grouped_networks = _group_networks(filtered)
datacenters_per_group = _count_datacenters(grouped_networks)
wan_groups = [key
for key, counter in datacenters_per_group
if counter > 1]
-
+ # Keys are tuples(instance_scenario_id, sce_net_id)
return [
self.derive_wan_link(wim_usage,
- key[0], key[1], grouped_networks[key], tenant)
- for key in wan_groups
+ key[0], key[1], grouped_networks[key], tenant, related)
+ for key in wan_groups if wim_usage.get(key[1]) is not False
]
def create_action(self, wan_link):
(scenario) instance.
Returns:
dict: Keys are tuples (instance_scenario_id, sce_net_id) and values
- are lits of networks.
+ are lists of networks.
"""
criteria = itemgetter('instance_scenario_id', 'sce_net_id')
from itertools import groupby
from operator import itemgetter
from sys import exc_info
-from threading import Lock
from time import time
from uuid import uuid1 as generate_uuid
class WimPersistence(object):
"""High level interactions with the WIM tables in the database"""
- def __init__(self, db, logger=None, lock=None):
+ def __init__(self, db, logger=None):
self.db = db
self.logger = logger or logging.getLogger('openmano.wim.persistence')
- self.lock = lock or Lock()
def query(self,
FROM=None,
'SELECT': SELECT, 'FROM': FROM, 'WHERE': WHERE,
'LIMIT': LIMIT, 'ORDER_BY': ORDER_BY})
- with self.lock:
- records = self.db.get_rows(**query)
+ records = self.db.get_rows(**query)
table = FROM.split()[0]
if error_if_none and not records:
if "config" in wim_descriptor:
wim_descriptor["config"] = _serialize(wim_descriptor["config"])
- with self.lock:
- return self.db.new_row(
- "wims", wim_descriptor, add_uuid=True, confidential_data=True)
+ return self.db.new_row(
+ "wims", wim_descriptor, add_uuid=True, confidential_data=True)
def update_wim(self, uuid_or_name, wim_descriptor):
"""Change an existing WIM record on the database"""
wim_descriptor['config'] = (
_serialize(config_dict) if config_dict else None)
- with self.lock:
- self.db.update_rows('wims', wim_descriptor, where)
+ self.db.update_rows('wims', wim_descriptor, where)
return wim_id
# get nfvo_tenant info
wim = self.get_by_name_or_uuid('wims', wim)
- with self.lock:
- self.db.delete_row_by_id('wims', wim['uuid'])
+ self.db.delete_row_by_id('wims', wim['uuid'])
return wim['uuid'] + ' ' + wim['name']
``wim_nfvo_tenants``
"""
try:
- with self.lock:
- yield
+ yield
except DbBaseException as db_exception:
error_msg = str(db_exception)
if all([msg in error_msg
new_config = merge_dicts(old_config, properties['config'])
updates['config'] = _serialize(new_config)
- with self.lock:
- num_changes = self.db.update_rows(
- 'wim_accounts', UPDATE=updates,
- WHERE={'uuid': wim_account['uuid']})
+ num_changes = self.db.update_rows('wim_accounts', UPDATE=updates,
+ WHERE={'uuid': wim_account['uuid']})
if num_changes is None:
raise UnexpectedDatabaseError('Impossible to update wim_account '
# Since we have foreign keys configured with ON CASCADE, we can rely
# on the database engine to guarantee consistency, deleting the
# dependant records
- with self.lock:
- return self.db.delete_row_by_id('wim_accounts', uuid)
+ return self.db.delete_row_by_id('wim_accounts', uuid)
def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):
"""Retrieve datacenter information from the database together
properties['wan_service_mapping_info'] = _serialize(info)
try:
- with self.lock:
- self.db.new_row('wim_port_mappings', properties,
- add_uuid=False, confidential_data=True)
+ self.db.new_row('wim_port_mappings', properties,
+ add_uuid=False, confidential_data=True)
except DbBaseException as old_exception:
self.logger.exception(old_exception)
ex = InvalidParameters(
]
def delete_wim_port_mappings(self, wim_id):
- with self.lock:
- self.db.delete_row(FROM='wim_port_mappings',
- WHERE={"wim_id": wim_id})
+ self.db.delete_row(FROM='wim_port_mappings', WHERE={"wim_id": wim_id})
return "port mapping for wim {} deleted.".format(wim_id)
def update_wim_port_mapping(self, id, properties):
merge_dicts(original, remove_none_items(properties),
wan_service_mapping_info=mapping_info))
- with self.lock:
- num_changes = self.db.update_rows(
- 'wim_port_mappings', UPDATE=updates, WHERE={'id': id})
+ num_changes = self.db.update_rows('wim_port_mappings',
+ UPDATE=updates, WHERE={'id': id})
if num_changes is None:
raise UnexpectedDatabaseError(
)
join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
- with self.lock:
- db_results = self.db.get_rows(
- FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))
+ db_results = self.db.get_rows(
+ FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))
results = (_postprocess_action(r) for r in db_results)
criteria = itemgetter('item', 'item_id')
def update_action(self, instance_action_id, task_index, properties):
condition = {'instance_action_id': instance_action_id,
'task_index': task_index}
- action = self.query_one('vim_wim_actions', WHERE=condition)
+ try:
+ action = self.query_one('vim_wim_actions', WHERE=condition)
+ except:
+ actions = self.query('vim_wim_actions', WHERE=condition)
+ self.logger.error('More than one action found:\n%s',
+ json.dumps(actions, indent=4))
+ action = actions[0]
extra = remove_none_items(merge_dicts(
action.get('extra') or {},
updates = preprocess_record(
merge_dicts(action, properties, extra=extra))
- with self.lock:
- num_changes = self.db.update_rows('vim_wim_actions',
- UPDATE=updates, WHERE=condition)
+ num_changes = self.db.update_rows('vim_wim_actions',
+ UPDATE=updates, WHERE=condition)
if num_changes is None:
raise UnexpectedDatabaseError(
merge_dicts(wan_link, properties, wim_info=wim_info))
self.logger.debug({'UPDATE': updates})
- with self.lock:
- num_changes = self.db.update_rows(
- 'instance_wim_nets', UPDATE=updates,
- WHERE={'uuid': wan_link['uuid']})
+ num_changes = self.db.update_rows(
+ 'instance_wim_nets', UPDATE=updates,
+ WHERE={'uuid': wan_link['uuid']})
if num_changes is None:
raise UnexpectedDatabaseError(
if not changes:
return 0
- with self.lock:
- return self.db.update_rows('instance_actions',
- WHERE={'uuid': uuid}, UPDATE=changes)
+ return self.db.update_rows('instance_actions',
+ WHERE={'uuid': uuid}, UPDATE=changes)
def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
"""Return an instance VM if that is the only VM connected to an
"""Return a SQL safe string"""
return self.db.escape_string(string)
+ def reconnect(self):
+ self.db.reconnect()
+
def _generate_port_mapping_id(self, mapping_info):
"""Given a port mapping represented by a dict with a 'type' field,
generate a unique string, in a injective way.
"required": ["mapping_type"]
}
},
- "oneOf": [
+ "anyOf": [
{
"required": [
"pop_switch_dpid",
the Layer 2 service.
"""
import requests
-import json
import uuid
-import time
import logging
from .wimconn import WimConnector, WimConnectorError
"""CHeck layer where we move it"""
+
class WimconnectorIETFL2VPN(WimConnector):
- """IETF L2VPM WIM connector
- Arguments: (To be completed)
- wim (dict): WIM record, as stored in the database
- wim_account (dict): WIM account record, as stored in the database
- """
def __init__(self, wim, wim_account, config=None, logger=None):
+ """IETF L2VPM WIM connector
+
+ Arguments: (To be completed)
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ """
self.logger = logging.getLogger('openmano.wimconn.ietfl2vpn')
super(WimconnectorIETFL2VPN, self).__init__(wim, wim_account, config, logger)
- self.headers={'Content-Type': 'application/json'}
+ self.headers = {'Content-Type': 'application/json'}
self.mappings = {m['wan_service_endpoint_id']: m
for m in self.service_endpoint_mapping}
self.user = wim_account.get("user")
self.passwd = wim_account.get("passwd")
- if self.user != None and self.passwd != None:
+ if self.user is not None and self.passwd is not None:
self.auth = (self.user, self.passwd)
else:
- self.auth=None
+ self.auth = None
self.logger.info("IETFL2VPN Connector Initialized.")
def check_credentials(self):
raise WimConnectorError("Failed while authenticating", http_code=http_code)
self.logger.info("Credentials checked")
-
def get_connectivity_service_status(self, service_uuid, conn_info=None):
"""Monitor the status of the connectivity service stablished
"""
try:
self.logger.info("Sending get connectivity service stuatus")
- servicepoint="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(self.wim["wim_url"],service_uuid)
- response=requests.get(servicepoint, auth=self.auth)
- if response.status_code!= requests.codes.ok :
- raise WimConnectorError("Unable to obtain connectivity servcice status",http_code=response.status_code)
- service_status={'wim_status': 'ACTIVE'}
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid)
+ response = requests.get(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.ok:
+ raise WimConnectorError("Unable to obtain connectivity service status", http_code=response.status_code)
+ service_status = {'wim_status': 'ACTIVE'}
return service_status
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout",http_code=408)
+ raise WimConnectorError("Request Timeout", http_code=408)
-
- def search_mapp(self,connection_point):
+ def search_mapp(self, connection_point):
id = connection_point['service_endpoint_id']
if id not in self.mappings:
raise WimConnectorError("Endpoint {} not located".format(str(id)))
else:
return self.mappings[id]
-
- def create_connectivity_service(self, service_type, connection_points,
- **kwargs):
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
"""Stablish WAN connectivity between the endpoints
Arguments:
Raises:
WimConnectorException: In case of error.
"""
- if service_type=="ELINE":
- if len(connection_points)>2:
- raise WimConnectorError('Connections between more than '
- '2 endpoints are not supported')
- if len(connection_points)<2:
- raise WimConnectorError('Connections must be of at least '
- '2 endpoints')
+ if service_type == "ELINE":
+ if len(connection_points) > 2:
+ raise WimConnectorError('Connections between more than 2 endpoints are not supported')
+ if len(connection_points) < 2:
+ raise WimConnectorError('Connections must be of at least 2 endpoints')
""" First step, create the vpn service """
- uuid_l2vpn=str(uuid.uuid4())
- vpn_service={}
- vpn_service["vpn-id"]= uuid_l2vpn
- vpn_service["vpn-scv-type"]="vpws"
- vpn_service["svc-topo"]= "any-to-any"
- vpn_service["customer-name"]= "osm"
- vpn_service_list=[]
+ uuid_l2vpn = str(uuid.uuid4())
+ vpn_service = {}
+ vpn_service["vpn-id"] = uuid_l2vpn
+ vpn_service["vpn-scv-type"] = "vpws"
+ vpn_service["svc-topo"] = "any-to-any"
+ vpn_service["customer-name"] = "osm"
+ vpn_service_list = []
vpn_service_list.append(vpn_service)
- vpn_service_l={"vpn-service":vpn_service_list}
- response_service_creation=None
- conn_info=[]
+ vpn_service_l = {"vpn-service": vpn_service_list}
+ response_service_creation = None
+ conn_info = []
self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
try:
- endpoint_service_creation="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- response_service_creation=requests.post(endpoint_service_creation, headers=self.headers, json=vpn_service_l, auth=self.auth )
+ endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"])
+ response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
+ json=vpn_service_l, auth=self.auth)
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request to create service Timeout",http_code=408)
+ raise WimConnectorError("Request to create service Timeout", http_code=408)
if response_service_creation.status_code == 409:
- raise WimConnectorError("Service already exists",http_code=response_service_creation.status_code)
- elif response_service_creation.status_code != requests.codes.created :
- raise WimConnectorError("Request to create service not accepted",http_code=response_service_creation.status_code)
+ raise WimConnectorError("Service already exists", http_code=response_service_creation.status_code)
+ elif response_service_creation.status_code != requests.codes.created:
+ raise WimConnectorError("Request to create service not accepted",
+ http_code=response_service_creation.status_code)
""" Second step, create the connections and vpn attachments """
for connection_point in connection_points:
- connection_point_wan_info=self.search_mapp(connection_point)
- site_network_access={}
- connection={}
- if connection_point["service_endpoint_encapsulation_type"]!="none":
- if connection_point["service_endpoint_encapsulation_type"]=="dot1q":
+ connection_point_wan_info = self.search_mapp(connection_point)
+ site_network_access = {}
+ connection = {}
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
""" The connection is a VLAN """
- connection["encapsulation-type"]="dot1q-vlan-tagged"
- tagged={}
- tagged_interf={}
- service_endpoint_encapsulation_info=connection_point["service_endpoint_encapsulation_info"]
- if service_endpoint_encapsulation_info["vlan"]==None:
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ if service_endpoint_encapsulation_info["vlan"] is None:
raise WimConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"]= service_endpoint_encapsulation_info["vlan"]
- tagged["dot1q-vlan-tagged"]=tagged_interf
- connection["tagged-interface"]=tagged
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
else:
raise NotImplementedError("Encapsulation type not implemented")
- site_network_access["connection"]=connection
+ site_network_access["connection"] = connection
self.logger.info("Sending connection:{}".format(connection))
- vpn_attach={}
- vpn_attach["vpn-id"]=uuid_l2vpn
- vpn_attach["site-role"]=vpn_service["svc-topo"]+"-role"
- site_network_access["vpn-attachment"]=vpn_attach
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = uuid_l2vpn
+ vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ site_network_access["vpn-attachment"] = vpn_attach
self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
- uuid_sna=str(uuid.uuid4())
- site_network_access["network-access-id"]=uuid_sna
- site_network_accesses={}
- site_network_access_list=[]
+ uuid_sna = str(uuid.uuid4())
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_accesses = {}
+ site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses["site-network-access"]=site_network_access_list
- conn_info_d={}
+ site_network_accesses["site-network-access"] = site_network_access_list
+ conn_info_d = {}
conn_info_d["site"] = connection_point_wan_info["site-id"]
conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
conn_info_d["mapping"] = None
conn_info.append(conn_info_d)
try:
- endpoint_site_network_access_creation="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(self.wim["wim_url"],connection_point_wan_info["site-id"])
- response_endpoint_site_network_access_creation = requests.post(endpoint_site_network_access_creation, headers=self.headers, json=site_network_accesses, auth=self.auth )
+ endpoint_site_network_access_creation = \
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"], connection_point_wan_info["site-id"])
+ response_endpoint_site_network_access_creation = requests.post(
+ endpoint_site_network_access_creation,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth)
if response_endpoint_site_network_access_creation.status_code == 409:
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(site_network_access["network-access-id"]),http_code=response_endpoint_site_network_access_creation.status_code)
+ raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(
+ site_network_access["network-access-id"]),
+ http_code=response_endpoint_site_network_access_creation.status_code)
elif response_endpoint_site_network_access_creation.status_code == 400:
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Site {} does not exist".format(connection_point_wan_info["site-id"]),http_code=response_endpoint_site_network_access_creation.status_code)
+ raise WimConnectorError("Site {} does not exist".format(connection_point_wan_info["site-id"]),
+ http_code=response_endpoint_site_network_access_creation.status_code)
- elif response_endpoint_site_network_access_creation.status_code!= requests.codes.created and response_endpoint_site_network_access_creation.status_code!= requests.codes.no_content:
+ elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
+ response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Request no accepted",http_code=response_endpoint_site_network_access_creation.status_code)
+ raise WimConnectorError("Request no accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code)
except requests.exceptions.ConnectionError:
self.delete_connectivity_service(vpn_service["vpn-id"])
- raise WimConnectorError("Request Timeout",http_code=408)
+ raise WimConnectorError("Request Timeout", http_code=408)
return uuid_l2vpn, conn_info
else:
raise NotImplementedError
-
-
def delete_connectivity_service(self, service_uuid, conn_info=None):
"""Disconnect multi-site endpoints previously connected
"""
try:
self.logger.info("Sending delete")
- servicepoint="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(self.wim["wim_url"],service_uuid)
- response=requests.delete(servicepoint, auth=self.auth)
- if response.status_code!=requests.codes.no_content :
- raise WimConnectorError("Error in the request",http_code=response.status_code)
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid)
+ response = requests.delete(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.no_content:
+ raise WimConnectorError("Error in the request", http_code=response.status_code)
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout",http_code=408)
-
-
+ raise WimConnectorError("Request Timeout", http_code=408)
def edit_connectivity_service(self, service_uuid, conn_info=None,
connection_points=None, **kwargs):
"""Change an existing connectivity service, see
``create_connectivity_service``"""
- sites={"sites":{}}
- site_list=[]
- vpn_service={}
- vpn_service["svc-topo"]= "any-to-any"
+ # sites = {"sites": {}}
+ # site_list = []
+ vpn_service = {}
+ vpn_service["svc-topo"] = "any-to-any"
counter = 0
for connection_point in connection_points:
- site_network_access={}
- connection_point_wan_info=self.search_mapp(connection_point)
- params_site={}
- params_site["site-id"]= connection_point_wan_info["site-id"]
- params_site["site-vpn-flavor"]= "site-vpn-flavor-single"
- device_site={}
- device_site["device-id"]=connection_point_wan_info["device-id"]
- params_site["devices"]=device_site
- network_access={}
- connection={}
- if connection_point["service_endpoint_encapsulation_type"]!="none":
- if connection_point["service_endpoint_encapsulation_type"]=="dot1q":
+ site_network_access = {}
+ connection_point_wan_info = self.search_mapp(connection_point)
+ params_site = {}
+ params_site["site-id"] = connection_point_wan_info["site-id"]
+ params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+ device_site = {}
+ device_site["device-id"] = connection_point_wan_info["device-id"]
+ params_site["devices"] = device_site
+ # network_access = {}
+ connection = {}
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
""" The connection is a VLAN """
- connection["encapsulation-type"]="dot1q-vlan-tagged"
- tagged={}
- tagged_interf={}
- service_endpoint_encapsulation_info=connection_point["service_endpoint_encapsulation_info"]
- if service_endpoint_encapsulation_info["vlan"]==None:
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+ if service_endpoint_encapsulation_info["vlan"] is None:
raise WimConnectorError("VLAN must be provided")
- tagged_interf["cvlan-id"]= service_endpoint_encapsulation_info["vlan"]
- tagged["dot1q-vlan-tagged"]=tagged_interf
- connection["tagged-interface"]=tagged
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
else:
raise NotImplementedError("Encapsulation type not implemented")
- site_network_access["connection"]=connection
- vpn_attach={}
- vpn_attach["vpn-id"]=service_uuid
- vpn_attach["site-role"]=vpn_service["svc-topo"]+"-role"
- site_network_access["vpn-attachment"]=vpn_attach
- uuid_sna=conn_info[counter]["site-network-access-id"]
- site_network_access["network-access-id"]=uuid_sna
- site_network_accesses={}
- site_network_access_list=[]
+ site_network_access["connection"] = connection
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = service_uuid
+ vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+ site_network_access["vpn-attachment"] = vpn_attach
+ uuid_sna = conn_info[counter]["site-network-access-id"]
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_accesses = {}
+ site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses["site-network-access"]=site_network_access_list
+ site_network_accesses["site-network-access"] = site_network_access_list
try:
- endpoint_site_network_access_edit="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(self.wim["wim_url"],connection_point_wan_info["site-id"]) #MODIF
- response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit, headers=self.headers, json=site_network_accesses, auth=self.auth )
- if response_endpoint_site_network_access_creation.status_code==400:
- raise WimConnectorError("Service does not exist",http_code=response_endpoint_site_network_access_creation.status_code)
- elif response_endpoint_site_network_access_creation.status_code!=201 and response_endpoint_site_network_access_creation.status_code!=204:
- raise WimConnectorError("Request no accepted",http_code=response_endpoint_site_network_access_creation.status_code)
+ endpoint_site_network_access_edit = \
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"], connection_point_wan_info["site-id"]) # MODIF
+ response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth)
+ if response_endpoint_site_network_access_creation.status_code == 400:
+ raise WimConnectorError("Service does not exist",
+ http_code=response_endpoint_site_network_access_creation.status_code)
+ elif response_endpoint_site_network_access_creation.status_code != 201 and \
+ response_endpoint_site_network_access_creation.status_code != 204:
+ raise WimConnectorError("Request no accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code)
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout",http_code=408)
+ raise WimConnectorError("Request Timeout", http_code=408)
counter += 1
return None
"""Delete all WAN Links corresponding to a WIM"""
try:
self.logger.info("Sending clear all connectivity services")
- servicepoint="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- response=requests.delete(servicepoint, auth=self.auth)
- if response.status_code!=requests.codes.no_content :
- raise WimConnectorError("Unable to clear all connectivity services",http_code=response.status_code)
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ response = requests.delete(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.no_content:
+ raise WimConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout",http_code=408)
+ raise WimConnectorError("Request Timeout", http_code=408)
def get_all_active_connectivity_services(self):
"""Provide information about all active connectivity services provisioned by a WIM.
"""
try:
self.logger.info("Sending get all connectivity services")
- servicepoint="{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
- response=requests.get(servicepoint, auth=self.auth)
- if response.status_code!= requests.codes.ok :
- raise WimConnectorError("Unable to get all connectivity services",http_code=response.status_code)
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+ response = requests.get(servicepoint, auth=self.auth)
+ if response.status_code != requests.codes.ok:
+ raise WimConnectorError("Unable to get all connectivity services", http_code=response.status_code)
return response
except requests.exceptions.ConnectionError:
- raise WimConnectorError("Request Timeout",http_code=408)
+ raise WimConnectorError("Request Timeout", http_code=408)
python-keystoneclient
python-glanceclient
python-neutronclient
+networking-l2gw
python-cinderclient
pyvcloud==19.1.1
pyvmomi
progressbar
prettytable
boto
+genisoimage
untangle
oca
- member-vnf-index-ref: 2
order: 0
vnfd-id-ref: 2vdu_vnfd
- vnfd-connection-point-ref: eth0
+ vnfd-ingress-connection-point-ref: eth0
+ vnfd-egress-connection-point-ref: eth0
- member-vnf-index-ref: 3
order: 1
vnfd-id-ref: 2vdu_vnfd
- vnfd-connection-point-ref: eth0
+ vnfd-ingress-connection-point-ref: eth0
+ vnfd-egress-connection-point-ref: eth0
classifier:
- id: class1
name: class1-name
db_user=$3
db_pswd=$4
db_name=$5
+ db_version=$6 # minimum database version
- RESULT=`mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" | grep -v Wildcard | grep -o $db_name`
- if [ "$RESULT" == "$db_name" ]; then
-
- RESULT=`mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" | grep -v Wildcard | grep schema_version`
- #TODO validate version
- if [ -n "$RESULT" ]; then
+ if mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" | grep -v Wildcard | grep -q -e "$db_name" ; then
+ if echo "SELECT * FROM schema_version WHERE version='0'" |
+ mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
+ grep -q -e "init" ; then
+ echo " DB $db_name exists BUT failed in previous init"
+ return 1
+ elif echo "SELECT * FROM schema_version WHERE version='$db_version'" |
+ mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
+ grep -q -e "$db_version" ; then
echo " DB $db_name exists and inited"
return 0
else
echo "3/4 Init database"
RO_PATH=`python -c 'import osm_ro; print(osm_ro.__path__[0])'`
echo "RO_PATH: $RO_PATH"
-if ! is_db_created "$RO_DB_HOST" "$RO_DB_PORT" "$RO_DB_USER" "$RO_DB_PASSWORD" "$RO_DB_NAME"
+if ! is_db_created "$RO_DB_HOST" "$RO_DB_PORT" "$RO_DB_USER" "$RO_DB_PASSWORD" "$RO_DB_NAME" "0.27"
then
if [ -n "$RO_DB_ROOT_PASSWORD" ] ; then
mysqladmin -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" create "$RO_DB_NAME"
${RO_PATH}/database_utils/init_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
-P "${RO_DB_PORT}" -d "${RO_DB_NAME}" || exit 1
else
- echo " migrage database version"
+ echo " migrate database version"
${RO_PATH}/database_utils/migrate_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
- -P "$RO_DB_PORT" -d "$RO_DB_NAME"
+ -P "$RO_DB_PORT" -d "$RO_DB_NAME" -b /var/log/osm
fi
OVIM_PATH=`python -c 'import lib_osm_openvim; print(lib_osm_openvim.__path__[0])'`
echo "OVIM_PATH: $OVIM_PATH"
-if ! is_db_created "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" "$RO_DB_OVIM_USER" "$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME"
+if ! is_db_created "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" "$RO_DB_OVIM_USER" "$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" \
+ "0.22"
then
if [ -n "$RO_DB_OVIM_ROOT_PASSWORD" ] ; then
mysqladmin -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" create "$RO_DB_OVIM_NAME"
${OVIM_PATH}/database_utils/init_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST" \
-P "${RO_DB_OVIM_PORT}" -d "${RO_DB_OVIM_NAME}" || exit 1
else
- echo " migrage database version"
+ echo " migrate database version"
${OVIM_PATH}/database_utils/migrate_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST"\
- -P "$RO_DB_OVIM_PORT" -d "$RO_DB_OVIM_NAME"
+ -P "$RO_DB_OVIM_PORT" -d "$RO_DB_OVIM_NAME" -b /var/log/osm
fi
pip2 install progressbar || exit 1
pip2 install prettytable || exit 1
pip2 install pyvmomi || exit 1
+ [ "$_DISTRO" == "Ubuntu" ] && install_packages "genisoimage"
+ [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "genisoimage"
# required for OpenNebula connector
pip2 install untangle || exit 1
# install openstack client needed for using openstack as a VIM
[ "$_DISTRO" == "Ubuntu" ] && install_packages "python-novaclient python-keystoneclient python-glanceclient "\
- "python-neutronclient python-cinderclient python-openstackclient"
+ "python-neutronclient python-cinderclient python-openstackclient "\
+ "python-networking-l2gw"
[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-devel" && easy_install \
python-novaclient python-keystoneclient python-glanceclient python-neutronclient python-cinderclient \
- python-openstackclient #TODO revise if gcc python-pip is needed
+ python-openstackclient python-networking-l2gw #TODO revise if gcc python-pip is needed
fi # [[ -z "$NO_PACKAGES" ]]
if [[ -z $NOCLONE ]]; then
"python-glanceclient",
"python-neutronclient",
"python-cinderclient",
+ "networking-l2gw",
#"pyvcloud",
#"progressbar",
"prettytable",
Suite: xenial
XS-Python-Version: >= 2.7
Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
-Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx
+Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-networking-l2gw, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx, genisoimage