Merge branch 'feature7106' 57/7457/1
author tierno <alfonso.tiernosepulveda@telefonica.com>
Mon, 13 May 2019 10:18:28 +0000 (10:18 +0000)
committer tierno <alfonso.tiernosepulveda@telefonica.com>
Mon, 13 May 2019 10:18:42 +0000 (10:18 +0000)
Change-Id: I1670f7013c63c7a5e5c6855fe3ea168423f86834
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
1  2 
osm_ro/nfvo.py
osm_ro/vimconn_openstack.py

diff --combined osm_ro/nfvo.py
@@@ -126,12 -126,11 +126,12 @@@ def get_non_used_vim_name(datacenter_na
      if name not in vim_threads["names"]:
          vim_threads["names"].append(name)
          return name
 -    name = datacenter_name[:16] + "." + tenant_name[:16]
 -    if name not in vim_threads["names"]:
 -        vim_threads["names"].append(name)
 -        return name
 -    name = datacenter_id + "-" + tenant_id
 +    if tenant_name:
 +        name = datacenter_name[:16] + "." + tenant_name[:16]
 +        if name not in vim_threads["names"]:
 +            vim_threads["names"].append(name)
 +            return name
 +    name = datacenter_id
      vim_threads["names"].append(name)
      return name
  
@@@ -238,7 -237,7 +238,7 @@@ def start_service(mydb, persistence=Non
              except Exception as e:
                  raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
                                      httperrors.Internal_Server_Error)
 -            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'],
 +            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
                                                  vim['vim_tenant_id'])
              new_thread = vim_thread.vim_thread(task_lock, thread_name, vim['datacenter_name'],
                                                 vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
@@@ -306,9 -305,6 +306,9 @@@ def clean_db(mydb)
          nb_deleted += len(actions_to_delete)
          if len(actions_to_delete) < 100:
              break
 +    # clean locks
 +    mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
 +
      if nb_deleted:
          logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
  
@@@ -881,6 -877,21 +881,21 @@@ def _lookfor_or_create_image(db_image, 
          db_image["uuid"] = image_uuid
          return None
  
+ def get_resource_allocation_params(quota_descriptor):
+     """
+     Read the quota descriptor from the VNFD and fetch the resource allocation properties from the descriptor object.
+     :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
+     :return: quota params for limit, reserve, shares from the descriptor object
+     """
+     quota = {}
+     if quota_descriptor.get("limit"):
+         quota["limit"] = int(quota_descriptor["limit"])
+     if quota_descriptor.get("reserve"):
+         quota["reserve"] = int(quota_descriptor["reserve"])
+     if quota_descriptor.get("shares"):
+         quota["shares"] = int(quota_descriptor["shares"])
+     return quota
  def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
      """
      Parses an OSM IM vnfd_catalog and insert at DB
                                  numa["cores"] = max(db_flavor["vcpus"], 1)
                              else:
                                  numa["threads"] = max(db_flavor["vcpus"], 1)
+                             epa_vcpu_set = True
+                     if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
+                         extended["cpu-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
+                     if vdu["guest-epa"].get("mem-quota"):
+                         extended["mem-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
+                     if vdu["guest-epa"].get("disk-io-quota"):
+                         extended["disk-io-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
+                     if vdu["guest-epa"].get("vif-quota"):
+                         extended["vif-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
                  if numa:
                      extended["numas"] = [numa]
                  if extended:
@@@ -3228,16 -3248,8 +3252,16 @@@ def create_instance(mydb, tenant_id, in
              # <-- WIM
  
              descriptor_net = {}
 -            if instance_dict.get("networks") and instance_dict["networks"].get(sce_net["name"]):
 -                descriptor_net = instance_dict["networks"][sce_net["name"]]
 +            if instance_dict.get("networks"):
 +                if sce_net.get("uuid") in instance_dict["networks"]:
 +                    descriptor_net = instance_dict["networks"][sce_net["uuid"]]
 +                    descriptor_net_name = sce_net["uuid"]
 +                elif sce_net.get("osm_id") in instance_dict["networks"]:
 +                    descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
 +                    descriptor_net_name = sce_net["osm_id"]
 +                elif sce_net["name"] in instance_dict["networks"]:
 +                    descriptor_net = instance_dict["networks"][sce_net["name"]]
 +                    descriptor_net_name = sce_net["name"]
              net_name = descriptor_net.get("vim-network-name")
              # add datacenters from instantiation parameters
              if descriptor_net.get("sites"):
              sce_net2instance[sce_net_uuid] = {}
              net2task_id['scenario'][sce_net_uuid] = {}
  
 +            use_network = None
 +            related_network = None
 +            if descriptor_net.get("use-network"):
 +                target_instance_nets = mydb.get_rows(
 +                    SELECT="related",
 +                    FROM="instance_nets",
 +                    WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
 +                           "osm_id":  descriptor_net["use-network"]["osm_id"]},
 +                )
 +                if not target_instance_nets:
 +                    raise NfvoException(
 +                        "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
 +                        httperrors.Bad_Request)
 +                else:
 +                    use_network = target_instance_nets[0]["related"]
 +
              if sce_net["external"]:
                  number_mgmt_networks += 1
  
                  net_uuid = str(uuid4())
                  uuid_list.append(net_uuid)
                  sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
 +                if not related_network:   # all db_instance_nets will have same related
 +                    related_network = use_network or net_uuid
                  db_net = {
                      "uuid": net_uuid,
 +                    "osm_id": sce_net.get("osm_id") or sce_net["name"],
 +                    "related": related_network,
                      'vim_net_id': None,
                      "vim_name": net_vim_name,
                      "instance_scenario_id": instance_uuid,
                      "action": task_action,
                      "item": "instance_nets",
                      "item_id": net_uuid,
 +                    "related": related_network,
                      "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
                  }
                  net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
                          uuid_list.append(sfi_uuid)
                          db_sfi = {
                              "uuid": sfi_uuid,
 +                            "related": sfi_uuid,
                              "instance_scenario_id": instance_uuid,
                              'sce_rsp_hop_id': cp['uuid'],
                              'datacenter_id': datacenter_id,
                              "status": "SCHEDULED",
                              "item": "instance_sfis",
                              "item_id": sfi_uuid,
 +                            "related": sfi_uuid,
                              "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
                                                      default_flow_style=True, width=256)
                          }
                      uuid_list.append(sf_uuid)
                      db_sf = {
                          "uuid": sf_uuid,
 +                        "related": sf_uuid,
                          "instance_scenario_id": instance_uuid,
                          'sce_rsp_hop_id': cp['uuid'],
                          'datacenter_id': datacenter_id,
                          "status": "SCHEDULED",
                          "item": "instance_sfs",
                          "item_id": sf_uuid,
 +                        "related": sf_uuid,
                          "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
                                                  default_flow_style=True, width=256)
                      }
                          uuid_list.append(classification_uuid)
                          db_classification = {
                              "uuid": classification_uuid,
 +                            "related": classification_uuid,
                              "instance_scenario_id": instance_uuid,
                              'sce_classifier_match_id': match['uuid'],
                              'datacenter_id': datacenter_id,
                              "status": "SCHEDULED",
                              "item": "instance_classifications",
                              "item_id": classification_uuid,
 +                            "related": classification_uuid,
                              "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
                                                      default_flow_style=True, width=256)
                          }
                  uuid_list.append(sfp_uuid)
                  db_sfp = {
                      "uuid": sfp_uuid,
 +                    "related": sfp_uuid,
                      "instance_scenario_id": instance_uuid,
                      'sce_rsp_id': rsp['uuid'],
                      'datacenter_id': datacenter_id,
                      "status": "SCHEDULED",
                      "item": "instance_sfps",
                      "item_id": sfp_uuid,
 +                    "related": sfp_uuid,
                      "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
                                              default_flow_style=True, width=256)
                  }
@@@ -3698,7 -3681,6 +3722,7 @@@ def instantiate_vnf(mydb, sce_vnf, para
          vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
          db_net = {
              "uuid": net_uuid,
 +            "related": net_uuid,
              'vim_net_id': None,
              "vim_name": net_name,
              "instance_scenario_id": instance_uuid,
              "action": task_action,
              "item": "instance_nets",
              "item_id": net_uuid,
 +            "related": net_uuid,
              "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
          }
          task_index += 1
              uuid_list.append(vm_uuid)
              db_vm = {
                  "uuid": vm_uuid,
 +                "related": vm_uuid,
                  'instance_vnf_id': vnf_uuid,
                  # TODO delete "vim_vm_id": vm_id,
                  "vm_id": vm["uuid"],
                  "status": "SCHEDULED",
                  "item": "instance_vms",
                  "item_id": vm_uuid,
 +                "related": vm_uuid,
                  "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
                                          default_flow_style=True, width=256)
              }
@@@ -4087,7 -4066,6 +4111,7 @@@ def delete_instance(mydb, tenant_id, in
              "status": "SCHEDULED",
              "item": "instance_sfps",
              "item_id": sfp["uuid"],
 +            "related": sfp["related"],
              "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
          }
          task_index += 1
              "status": "SCHEDULED",
              "item": "instance_classifications",
              "item_id": classification["uuid"],
 +            "related": classification["related"],
              "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
          }
          task_index += 1
              "status": "SCHEDULED",
              "item": "instance_sfs",
              "item_id": sf["uuid"],
 +            "related": sf["related"],
              "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
          }
          task_index += 1
              "status": "SCHEDULED",
              "item": "instance_sfis",
              "item_id": sfi["uuid"],
 +            "related": sfi["related"],
              "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
          }
          task_index += 1
                  "status": "SCHEDULED",
                  "item": "instance_vms",
                  "item_id": vm["uuid"],
 +                "related": vm["related"],
                  "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
                                          default_flow_style=True, width=256)
              }
              "status": "SCHEDULED",
              "item": "instance_nets",
              "item_id": net["uuid"],
 +            "related": net["related"],
              "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
          }
          task_index += 1
@@@ -4603,7 -4576,6 +4627,7 @@@ def instance_action(mydb,nfvo_tenant,in
                          "status": "SCHEDULED",
                          "item": "instance_vms",
                          "item_id": vdu_id,
 +                        "related": vm["related"],
                          "extra": yaml.safe_dump({"params": vm_interfaces},
                                                  default_flow_style=True, width=256)
                      }
                          "status": "SCHEDULED",
                          "item": "instance_vms",
                          "item_id": vm_uuid,
 +                        "related": target_vm["related"],
                          # ALF
                          # ALF
                          # TODO examinar parametros, quitar MAC o incrementar. Incrementar IP y colocar las dependencias con ACTION-asdfasd.
@@@ -571,8 -571,6 +571,8 @@@ class vimconnector(vimconn.vimconnector
                          network_dict["provider:segmentation_id"] = self._generate_vlanID()
  
              network_dict["shared"] = shared
 +            if self.config.get("disable_network_port_security"):
 +                network_dict["port_security_enabled"] = False
              new_net = self.neutron.create_network({'network':network_dict})
              # print new_net
              # create subnetwork, even if there is no profile
              flavor_candidate_data = (10000, 10000, 10000)
              flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
              # numa=None
-             numas = flavor_dict.get("extended", {}).get("numas")
-             if numas:
+             extended = flavor_dict.get("extended", {})
+             if extended:
                  #TODO
                  raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
                  # if len(numas) > 1:
          except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
              self._format_exception(e)
  
+     def process_resource_quota(self, quota, prefix, extra_specs):
+         """
+         Update extra_specs in place with OpenStack "quota:<prefix>_*" keys built from the quota descriptor.
+         :param quota: quota descriptor; may contain "limit", "reserve" and/or "shares" keys
+         :param prefix: resource prefix used in the extra_specs keys ("cpu", "memory", "vif", "disk_io")
+         :param extra_specs: flavor extra_specs dict, updated in place
+         """
+         if 'limit' in quota:
+             extra_specs["quota:" + prefix + "_limit"] = quota['limit']
+         if 'reserve' in quota:
+             extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
+         if 'shares' in quota:
+             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
+             extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
      def new_flavor(self, flavor_data, change_name_if_used=True):
          '''Adds a tenant flavor to openstack VIM
          if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
  
                      ram = flavor_data.get('ram',64)
                      vcpus = flavor_data.get('vcpus',1)
-                     numa_properties=None
+                     extra_specs={}
  
                      extended = flavor_data.get("extended")
                      if extended:
                              numa_nodes = len(numas)
                              if numa_nodes > 1:
                                  return -1, "Can not add flavor with more than one numa"
-                             numa_properties = {"hw:numa_nodes":str(numa_nodes)}
-                             numa_properties["hw:mem_page_size"] = "large"
-                             numa_properties["hw:cpu_policy"] = "dedicated"
-                             numa_properties["hw:numa_mempolicy"] = "strict"
+                             extra_specs["hw:numa_nodes"] = str(numa_nodes)
+                             extra_specs["hw:mem_page_size"] = "large"
+                             extra_specs["hw:cpu_policy"] = "dedicated"
+                             extra_specs["hw:numa_mempolicy"] = "strict"
                              if self.vim_type == "VIO":
-                                 numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-                                 numa_properties["vmware:latency_sensitivity_level"] = "high"
+                                 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
                              for numa in numas:
                                  #overwrite ram and vcpus
                                  #check if key 'memory' is present in numa else use ram value at flavor
                                  if 'paired-threads' in numa:
                                      vcpus = numa['paired-threads']*2
                                      #cpu_thread_policy "require" implies that the compute node must have an STM architecture
-                                     numa_properties["hw:cpu_thread_policy"] = "require"
-                                     numa_properties["hw:cpu_policy"] = "dedicated"
+                                     extra_specs["hw:cpu_thread_policy"] = "require"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
                                  elif 'cores' in numa:
                                      vcpus = numa['cores']
                                      # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
-                                     numa_properties["hw:cpu_thread_policy"] = "isolate"
-                                     numa_properties["hw:cpu_policy"] = "dedicated"
+                                     extra_specs["hw:cpu_thread_policy"] = "isolate"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
                                  elif 'threads' in numa:
                                      vcpus = numa['threads']
                                      # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
-                                     numa_properties["hw:cpu_thread_policy"] = "prefer"
-                                     numa_properties["hw:cpu_policy"] = "dedicated"
+                                     extra_specs["hw:cpu_thread_policy"] = "prefer"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
                                  # for interface in numa.get("interfaces",() ):
                                  #     if interface["dedicated"]=="yes":
                                  #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                  #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+                         elif extended.get("cpu-quota"):
+                             self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
+                         if extended.get("mem-quota"):
+                             self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
+                         if extended.get("vif-quota"):
+                             self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
+                         if extended.get("disk-io-quota"):
+                             self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
                      #create flavor
                      new_flavor=self.nova.flavors.create(name,
                                      ram,
                                      is_public=flavor_data.get('is_public', True)
                                  )
                      #add metadata
-                     if numa_properties:
-                         new_flavor.set_keys(numa_properties)
+                     if extra_specs:
+                         new_flavor.set_keys(extra_specs)
                      return new_flavor.id
                  except nvExceptions.Conflict as e:
                      if change_name_if_used and retry < max_retries: