+#!/usr/bin/env bash
#
# Copyright 2016 RIFT.IO Inc
#
#
# This is a top-level build script for RIFT.io
#
-# args none
+# Arguments and options: use -h or --help
#
# dependencies -- requires sudo rights
+###############################################################################
+# Options and arguments
+
+params="$(getopt -o suhb: -l install-so,install-ui,build-ui:,help --name "$0" -- "$@")"
+if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
+
+eval set -- "$params"
+
+installSO=false
+installUI=false
+UIPathToBuild=
+
+while true; do
+ case "$1" in
+ -s|--install-so) installSO=true; shift;;
+ -u|--install-ui) installUI=true; shift;;
+ -b|--build-ui) shift; UIPathToBuild=$1; shift;;
+ -h|--help)
+ echo
+ echo "NAME:"
+ echo " $0"
+ echo
+ echo "SYNOPSIS:"
+ echo " $0 -h|--help"
+            echo "    $0 [-s] [-u|-b PATH-TO-UI-REPO] [PLATFORM_REPOSITORY] [PLATFORM_VERSION]"
+ echo
+ echo "DESCRIPTION:"
+ echo " Prepare current system to run SO and UI. By default, the system"
+ echo " is set up to support building SO and UI; optionally, either or"
+ echo " both SO and UI can be installed from a Debian package repository."
+ echo
+            echo "    -s|--install-so: install SO from package"
+ echo " -u|--install-ui: install UI from package"
+ echo " -b|--build-ui PATH-TO-UI-REPO: build the UI in the specified repo"
+ echo " PLATFORM_REPOSITORY (optional): name of the RIFT.ware repository."
+ echo " PLATFORM_VERSION (optional): version of the platform packages to be installed."
+ echo
+ exit 0;;
+ --) shift; break;;
+ *) echo "Not implemented: $1" >&2; exit 1;;
+ esac
+done
+
+if $installUI && [[ $UIPathToBuild ]]; then
+ echo "Cannot both install and build the UI!"
+ exit 1
+fi
-# ARGS
+if [[ $UIPathToBuild && ! -d $UIPathToBuild ]]; then
+ echo "Not a directory: $UIPathToBuild"
+ exit 1
+fi
+
PLATFORM_REPOSITORY=${1:-OSM}
-PLATFORM_VERSION=${2:-4.3.1.0.49164}
+PLATFORM_VERSION=${2:-4.3.1.0.49556}
+
+###############################################################################
+# Main block
+
+# Turn these on after handling options, so the output doesn't get cluttered.
+set -o errexit # Exit on any error
+set -x # Print commands before executing them
# must be run from the top of a workspace
cd $(dirname $0)
# install the RIFT platform code:
-temp=$(mktemp -d /tmp/rw.XXX)
-pushd $temp
-
-apt-get download rw.toolchain-rwbase=${PLATFORM_VERSION} \
+sudo apt-get install -y rw.toolchain-rwbase=${PLATFORM_VERSION} \
rw.toolchain-rwtoolchain=${PLATFORM_VERSION} \
rw.core.mgmt-mgmt=${PLATFORM_VERSION} \
rw.core.util-util=${PLATFORM_VERSION} \
rw.automation.core-RWAUTO=${PLATFORM_VERSION} \
rw.core.rwvx-rwha-1.0=${PLATFORM_VERSION}
-sudo dpkg -i --force-overwrite *deb
-
-
-popd
-rm -rf $temp
-
-# this file gets in the way of the one generated by the build
-sudo rm -f /usr/rift/usr/lib/libmano_yang_gen.so
-
-
sudo chmod 777 /usr/rift /usr/rift/usr/share
+if $installSO; then
+ sudo apt-get install -y \
+ rw.core.mc-\*=${PLATFORM_VERSION}
+fi
+
+if $installUI; then
+ sudo apt-get install -y \
+ rw.ui-about=${PLATFORM_VERSION} \
+ rw.ui-logging=${PLATFORM_VERSION} \
+ rw.ui-skyquake=${PLATFORM_VERSION} \
+ rw.ui-accounts=${PLATFORM_VERSION} \
+ rw.ui-composer=${PLATFORM_VERSION} \
+ rw.ui-launchpad=${PLATFORM_VERSION} \
+ rw.ui-debug=${PLATFORM_VERSION} \
+ rw.ui-config=${PLATFORM_VERSION} \
+ rw.ui-dummy_component=${PLATFORM_VERSION}
+fi
+
# install some base files used to create VNFs
test -d /usr/rift/images || mkdir /usr/rift/images
for file in Fedora-x86_64-20-20131211.1-sda-ping.qcow2 Fedora-x86_64-20-20131211.1-sda-pong.qcow2 Fedora-x86_64-20-20131211.1-sda.qcow2; do
test -f /usr/rift/images/$file || curl -o /usr/rift/images/$file http://repo.riftio.com/releases/open.riftio.com/4.3.1/$file
done
-####### If you are re-building, you just need to run
+# If you are re-building SO, you just need to run
# these two steps
-make -j16
-sudo make install
-
-# you can now clone and build the UI using just make && sudo make install
-# or you can run without the UI, e.g.
-# note to start the RIFT.io UI please run
+if ! $installSO; then
+ make -j16
+ sudo make install
+fi
+
+if [[ $UIPathToBuild ]]; then
+ make -C $UIPathToBuild -j16
+ sudo make -C $UIPathToBuild install
+fi
+
+echo "To run SO with UI please run:"
+echo 'sudo -H /usr/rift/rift-shell -r -i /usr/rift -a /usr/rift/.artifacts -- ./demos/launchpad.py --use-xml-mode'
+echo
+echo "To run SO without UI please run:"
echo 'sudo -H /usr/rift/rift-shell -r -i /usr/rift -a /usr/rift/.artifacts -- ./demos/launchpad.py --use-xml-mode --no-ui'
for primitive in vnfr.primitive:
if primitive.execution_status == "failure":
errs += '<error>'
- errs += primitive.execution_error_details
+ if primitive.execution_error_details:
+ errs += primitive.execution_error_details
+ else:
+ errs += '{}: Unknown error'.format(primitive.name)
errs += "</error>"
return errs
job_status = []
for primitive in vnfr.primitive:
+ if primitive.execution_status != 'pending':
+ continue
+
if primitive.execution_id == "":
- # TODO: For some config data, the id will be empty, check if
- # mapping is needed.
+ # Actions which failed to queue can have empty id
job_status.append(primitive.execution_status)
continue
- task = self.loop.create_task(self.get_primitive_status(primitive))
+ elif primitive.execution_id == "config":
+ # Config job. Check if service is active
+ task = self.loop.create_task(self.get_service_status(vnfr.id, primitive))
+
+ else:
+ task = self.loop.create_task(self.get_primitive_status(primitive))
+
tasks.append(task)
if tasks:
vnfr.vnf_job_status = "success"
return "success"
+ @asyncio.coroutine
+ def get_service_status(self, vnfr_id, primitive):
+ try:
+ status = yield from self.loop.run_in_executor(
+ self.executor,
+ self.config_plugin.get_service_status,
+ vnfr_id
+ )
+
+ self.log.debug("Service status: {}".format(status))
+ if status in ['error', 'blocked']:
+ self.log.warning("Execution of config {} failed: {}".
+ format(primitive.execution_id, status))
+ primitive.execution_error_details = 'Config failed'
+ status = 'failure'
+ elif status in ['active']:
+ status = 'success'
+ elif status is None:
+ status = 'failure'
+ else:
+ status = 'pending'
+
+ except Exception as e:
+ self.log.exception(e)
+            status = "failure"
+
+ primitive.execution_status = status
+ return primitive.execution_status
+
@asyncio.coroutine
def get_primitive_status(self, primitive):
"""
self.log.debug("Creating a job monitor for Job id: {}".format(
rpc_output.job_id))
+ # If the tasks are none, assume juju actions
+ # TBD: This logic need to be revisited
+ ca = self.nsm.config_agent_plugins[0]
+ if tasks is None:
+ for agent in self.nsm.config_agent_plugins:
+ if agent.agent_type == 'juju':
+ ca = agent
+ break
+
# For every Job we will schedule a new monitoring process.
job_monitor = ConfigAgentJobMonitor(
self.dts,
self.jobs[nsr_id],
self.executor,
self.loop,
- self.nsm.config_agent_plugins[0] # Hack
+ ca
)
task = self.loop.create_task(job_monitor.publish_action_status())
except Exception as e:
msg = "{}: Resolve on unit {}: {}". \
format(self, unit, e)
- self.log.error(msg)
- self.log.exception(e)
- raise JujuResolveError(msg)
+ self.log.warn(msg)
@asyncio.coroutine
def resolve_error(self, service=None, status=None, env=None):
type string;
}
- leaf-list dns-server {
- description "List of DNS Servers associated with IP Profile";
- type inet:ip-address;
+ list dns-server {
+ key "address";
+ leaf address {
+ description "List of DNS Servers associated with IP Profile";
+ type inet:ip-address;
+ }
}
container dhcp-params {
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
except Exception as e:
self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
raise
-
+
kwargs = {'network_id' : network_id,
'dhcp_params': {'enable_dhcp': True},
'gateway_ip' : None,}
-
+
if link_params.ip_profile_params.has_field('ip_version'):
kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
else:
link_params.ip_profile_params.subnet_prefix_pool,
link_params.name)
raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
-
+
kwargs['subnetpool_id'] = subnet_pool['id']
elif link_params.has_field('subnet'):
kwargs['cidr'] = link_params.subnet
kwargs['dhcp_params']['start_address'] = link_params.ip_profile_params.dhcp_params.start_address
if link_params.ip_profile_params.dhcp_params.has_field('count'):
kwargs['dhcp_params']['count'] = link_params.ip_profile_params.dhcp_params.count
-
+
if link_params.ip_profile_params.has_field('dns_server'):
kwargs['dns_server'] = []
for server in link_params.ip_profile_params.dns_server:
- kwargs['dns_server'].append(server)
+ kwargs['dns_server'].append(server.address)
if link_params.ip_profile_params.has_field('gateway_address'):
kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
-
+
drv.neutron_subnet_create(**kwargs)
-
+
return network_id
elif available.has_field('pcie_device'):
self.log.debug("Rejecting available flavor because pcie_device not required but available")
return False
-
-
+
+
if required.has_field('mempage_size'):
self.log.debug("Matching mempage_size")
if available.has_field('mempage_size') == False:
elif available.has_field('mempage_size'):
self.log.debug("Rejecting available flavor because mempage_size not required but available")
return False
-
+
if required.has_field('cpu_pinning_policy'):
self.log.debug("Matching cpu_pinning_policy")
if required.cpu_pinning_policy != 'ANY':
elif available.has_field('cpu_pinning_policy'):
self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
return False
-
+
if required.has_field('cpu_thread_pinning_policy'):
self.log.debug("Matching cpu_thread_pinning_policy")
if available.has_field('cpu_thread_pinning_policy') == False:
elif available.has_field('trusted_execution'):
self.log.debug("Rejecting available flavor because trusted_execution not required but available")
return False
-
+
if required.has_field('numa_node_policy'):
self.log.debug("Matching numa_node_policy")
if available.has_field('numa_node_policy') == False:
elif available.numa_node_policy.has_field('node_cnt'):
self.log.debug("Rejecting available flavor because numa node count not required but available")
return False
-
+
if required.numa_node_policy.has_field('mem_policy'):
self.log.debug("Matching numa_node_policy mem_policy")
if available.numa_node_policy.has_field('mem_policy') == False:
elif available.has_field('cpu_model'):
self.log.debug("Rejecting available flavor because cpu_model not required but available")
return False
-
+
if required.has_field('cpu_arch'):
self.log.debug("Matching CPU architecture")
if available.has_field('cpu_arch') == False:
elif available.has_field('cpu_arch'):
self.log.debug("Rejecting available flavor because cpu_arch not required but available")
return False
-
+
if required.has_field('cpu_vendor'):
self.log.debug("Matching CPU vendor")
if available.has_field('cpu_vendor') == False:
elif available.has_field('cpu_socket_count'):
self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
return False
-
+
if required.has_field('cpu_core_count'):
self.log.debug("Matching CPU core count")
if available.has_field('cpu_core_count') == False:
elif available.has_field('cpu_core_count'):
self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
return False
-
+
if required.has_field('cpu_core_thread_count'):
self.log.debug("Matching CPU core thread count")
if available.has_field('cpu_core_thread_count') == False:
elif available.has_field('cpu_core_thread_count'):
self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
return False
-
+
if required.has_field('cpu_feature'):
self.log.debug("Matching CPU feature list")
if available.has_field('cpu_feature') == False:
elif available.has_field('cpu_feature'):
self.log.debug("Rejecting available flavor because cpu_feature not required but available")
return False
- self.log.info("Successful match for Host EPA attributes")
+ self.log.info("Successful match for Host EPA attributes")
return True
def _match_placement_group_inputs(self, required, available):
self.log.info("Matching Host aggregate attributes")
-
+
if not required and not available:
# Host aggregate not required and not available => success
self.log.info("Successful match for Host Aggregate attributes")
# - Host aggregate not required but available
self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
return False
-
+
def match_epa_params(self, resource_info, request_params):
result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
getattr(resource_info, 'vm_flavor'))
if result == False:
self.log.debug("Host Aggregate mismatched")
return False
-
+
return True
def _select_resource_flavor(self, account, vdu_init):
- """
+ """
Select a existing flavor if it matches the request or create new flavor
"""
flavor = RwcalYang.FlavorInfoItem()
epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
flavor.from_dict(epa_dict)
-
+
rc, response = self.do_get_flavor_list(account)
if rc != RwTypes.RwStatus.SUCCESS:
self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
for vnfr_id in agent_nsr.vnfr_ids:
vnfr = agent_vnfrs[vnfr_id]
- self._log.debug("CA_RPC: VNFR metadata: {}".format(vnfr))
+ self._log.debug("CA-RPC: VNFR metadata: {}".format(vnfr))
# index->vnfr ref
vnfr_index_map[vnfr.member_vnf_index] = vnfr_id
for primitive in vnfr.vnf_configuration['initial_config_primitive']:
if 'parameter' in primitive:
for parameter in primitive['parameter']:
- value = xlate(parameter['value'], vnfr.tags)
- param_data[parameter.name] = value
+ try:
+ value = xlate(parameter['value'], vnfr.tags)
+ param_data[parameter['name']] = value
+ except KeyError as e:
+                        self._log.warn("Unable to parse the parameter {}: {}".
+                                       format(parameter, e))
initial_params[vnfr_id] = param_data
return config_plugin.agent_data
return ret
- unit_names, init_data, vnfr_index_map, vnf_data_map = get_meta(agent_nsr, agent_vnfrs)
+ unit_names, init_data, vnfr_index_map, vnfr_data_map = get_meta(agent_nsr, agent_vnfrs)
# The data consists of 4 sections
# 1. Account data
tmp_file.write(yaml.dump(data, default_flow_style=True)
.encode("UTF-8"))
- self._log.debug("CA_RPC: Creating a temp file {} with input data: {}".
+ self._log.debug("CA-RPC: Creating a temp file {} with input data: {}".
format(tmp_file.name, data))
# Get the full path to the script
else:
script = os.path.join(self._rift_artif_dir, 'launchpad/libs', agent_nsr.id, 'scripts',
rpc_ip.user_defined_script)
- self.log.debug("CA_RPC: Checking for script in %s", script)
+ self._log.debug("CA-RPC: Checking for script in %s", script)
if not os.path.exists(script):
script = os.path.join(self._rift_install_dir, 'usr/bin', rpc_ip.user_defined_script)
cmd = "{} {}".format(rpc_ip.user_defined_script, tmp_file.name)
- self._log.debug("CA_RPC: Running the CMD: {}".format(cmd))
+ self._log.debug("CA-RPC: Running the CMD: {}".format(cmd))
coro = asyncio.create_subprocess_shell(cmd, loop=self._loop,
stderr=asyncio.subprocess.PIPE)
#
import asyncio
+import os
import re
import tempfile
+import time
import yaml
-import os
import rift.mano.utils.juju_api as juju
from . import riftcm_config_plugin
def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
self._log.debug("jujuCA: VNF config primititve {} for nsr {}, vnfr_id {}".
format(primitive, nsr_id, vnfr_id))
- output.execution_status = "failed"
- output.execution_id = ''
- output.execution_error_details = ''
-
try:
vnfr = self._juju_vnfs[vnfr_id].vnfr
except KeyError:
vnfr_id)
return
+ output.execution_status = "failed"
+ output.execution_id = ''
+ output.execution_error_details = ''
+
try:
service = vnfr['vnf_juju_name']
vnf_config = vnfr['config']
params.update({parameter.name: val})
if config.name == 'config':
+ output.execution_id = 'config'
if len(params):
self._log.debug("jujuCA: applying config with params {} for service {}".
format(params, service))
- rc = yield from self.api.apply_config(params, service=service)
+ rc = yield from self.api.apply_config(params, service=service, wait=False)
if rc:
- output.execution_status = "completed"
+ # Mark as pending and check later for the status
+ output.execution_status = "pending"
self._log.debug("jujuCA: applied config {} on {}".
format(params, service))
else:
self._log.error("jujuCA: Error applying config {} on service {}".
format(params, service))
else:
- self._log.warn("jujuCA: Did not find valid paramaters for config : {}".
+ self._log.warn("jujuCA: Did not find valid parameters for config : {}".
format(primitive.parameter))
+ output.execution_status = "completed"
else:
self._log.debug("jujuCA: Execute action {} on service {} with params {}".
format(config.name, service, params))
resp = yield from self.api.execute_action(action, params,
service=service)
if 'error' in resp:
- self._log.error("Applying initial config failed for {} with {}: {}".
- format(action, params, resp))
+ self._log.error("Applying initial config on {} failed for {} with {}: {}".
+ format(vnfr['vnf_juju_name'], action, params, resp))
return False
action_ids.append(resp['action']['tag'])
execution_id)
self._log.exception(e)
raise e
+
+ def get_service_status(self, vnfr_id):
+ '''Get the service status, used by job status handle
+ Make sure this is NOT a coroutine
+ '''
+ service = self.get_service_name(vnfr_id)
+ if service is None:
+ self._log.error("jujuCA: VNFR {} not managed by this Juju agent".
+ format(vnfr_id))
+ return None
+
+ # Delay for 3 seconds before checking as config apply takes a
+ # few seconds to transfer to the service
+ time.sleep(3)
+ return self.api._get_service_status(service=service)
""" Add VNR to be managed by this config agent """
pass
+ def get_service_status(self, vnfr_id):
+ """Get the status of the service"""
+ return None
+
@asyncio.coroutine
def invoke(self, method, *args):
try:
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
-
+
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
xpath=self.vnfr_vdu_console_xpath,
msg=vdur_console)
#raise VnfRecordError("Not supported operation %s" % action)
self._log.error("Not supported operation %s" % action)
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
- return
+ return
self._log.debug("Registering for VNFR VDU using xpath: %s",
self.vnfr_vdu_console_xpath)
self._log.debug("Key pair NSD is %s",authorized_key)
key_pairs.append(authorized_key.key)
- if key_pairs:
- cloud_config["key-pairs"] = key_pairs
-
+ if key_pairs:
+ cloud_config["key-pairs"] = key_pairs
+
users = list()
for user_entry in self._nsr_config_msg.user:
self._log.debug("User present is %s",user_entry)
user = {}
- user["name"] = user_entry.name
+ user["name"] = user_entry.name
user["key-pairs"] = list()
for ssh_key in user_entry.key_pair:
user["key-pairs"].append(ssh_key.key)
for user_entry in self._nsd_msg.user:
self._log.debug("User present in NSD is %s",user_entry)
user = {}
- user["name"] = user_entry.name
+ user["name"] = user_entry.name
user["key-pairs"] = list()
for ssh_key in user_entry.key_pair:
user["key-pairs"].append(ssh_key.key)
self._log.debug("Cloud config formed is %s",cloud_config)
return cloud_config
-
+
@property
def openmano_instance_create_yaml(self):
for vnfr in self._vnfrs:
if "om_datacenter" in vnfr.vnfr.vnfr_msg:
vnfr_name = vnfr.vnfr.vnfd.name + "__" + str(vnfr.vnfr.vnfr_msg.member_vnf_index_ref)
- openmano_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.om_datacenter}
+ openmano_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.om_datacenter}
openmano_instance_create["networks"] = {}
for vld_msg in self._nsd_msg.vld:
openmano_instance_create["networks"][vld_msg.name] = {}
if ip_profile_params.has_field('gateway_address'):
ip_profile['gateway-address'] = ip_profile_params.gateway_address
if ip_profile_params.has_field('dns_server') and len(ip_profile_params.dns_server) > 0:
- ip_profile['dns-address'] = ip_profile_params.dns_server[0]
+ ip_profile['dns-address'] = ip_profile_params.dns_server[0].address
if ip_profile_params.has_field('dhcp_params'):
ip_profile['dhcp'] = {}
ip_profile['dhcp']['enabled'] = ip_profile_params.dhcp_params.enabled
elif "datacenter" in openmano_instance_create:
network["datacenter"] = openmano_instance_create["datacenter"]
if network:
- openmano_instance_create["networks"][vld_msg.name]["sites"].append(network)
+ openmano_instance_create["networks"][vld_msg.name]["sites"].append(network)
if ip_profile:
- openmano_instance_create["networks"][vld_msg.name]['ip-profile'] = ip_profile
-
-
+ openmano_instance_create["networks"][vld_msg.name]['ip-profile'] = ip_profile
+
+
return yaml.safe_dump(openmano_instance_create, default_flow_style=False)
@asyncio.coroutine
for vm in vnf_status["vms"]:
if vm["uuid"] not in self._vdur_console_handler:
- vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._dts, self._log, self._loop,
+ vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._dts, self._log, self._loop,
self, vnfr_msg.id,vm["uuid"],vm["name"])
yield from vdur_console_handler.register()
self._vdur_console_handler[vm["uuid"]] = vdur_console_handler
-
+
vdur_msg = vnfr_msg.vdur.add()
vdur_msg.vim_id = vm["vim_vm_id"]
vdur_msg.id = vm["uuid"]
for _,handler in self._vdur_console_handler.items():
handler._regh.deregister()
-
+
if self._nsr_uuid is None:
self._log.warning("Cannot terminate an un-instantiated nsr")
return