    def stop_metric(self, vnf_name, vnf_interface=None, metric=None):
        """
        Stop exporting a monitored network metric to Prometheus.

        :param vnf_name: name of the monitored vnf
        :param vnf_interface: interface name; if None while a metric is
            given, the first interface found in the network graph is used
        :param metric: metric key to stop; if BOTH vnf_interface and metric
            are None, every metric of the vnf is removed
        :return: status string, or an error string if nothing matched
        """
        # check if port is specified (vnf:port)
        if vnf_interface is None and metric is not None:
            # take first interface by default
            # NOTE(review): indexing neighbors() assumes it returns a list
            # (networkx 1.x / Python 2 era API) — confirm against the
            # networkx version in use
            connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
            vnf_interface = link_dict[0]['src_port_id']

        # iterate over a deepcopy so entries can be removed from
        # self.network_metrics while looping
        for metric_dict in deepcopy(self.network_metrics):
            if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \
                    and metric_dict['metric_key'] == metric:

                # lock out the monitoring thread while mutating the list
                self.monitor_lock.acquire()

                self.network_metrics.remove(metric_dict)

                # set values to NaN, prometheus api currently does not support removal of metrics
                # self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
                self.prom_metrics[metric_dict['metric_key']]. \
                    labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \
                    set(float('nan'))

                # this removes the complete metric, all labels...
                # 1 single monitor job for all metrics of the SDN controller
                # we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway
                # we can not specify labels from the metrics to be removed
                # if we need to remove the metrics seperatelty, we need to give
                # them a separate grouping key, and probably a diffferent
                # registry also
                delete_from_gateway(
                    self.pushgateway, job='sonemu-SDNcontroller')

                self.monitor_lock.release()

                logging.info('Stopped monitoring: {2} on {0}:{1}'.format(
                    vnf_name, vnf_interface, metric))
                return 'Stopped monitoring: {2} on {0}:{1}'.format(
                    vnf_name, vnf_interface, metric)

            # delete everything from this vnf
            elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:
                self.monitor_lock.acquire()
                self.network_metrics.remove(metric_dict)
                logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(
                    metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))

                delete_from_gateway(
                    self.pushgateway, job='sonemu-SDNcontroller')
                self.monitor_lock.release()
                # keep scanning: the vnf may have more monitored metrics
                continue

        if vnf_interface is None and metric is None:
            logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))
            return 'Stopped monitoring: {0}'.format(vnf_name)
        else:
            # a specific metric was requested but no matching entry was found
            return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(
                metric, vnf_name, vnf_interface)
+\r
    def get_flow_metrics(self):
        """
        Get all metrics defined in the list and export it to Prometheus.

        Runs as the flow-monitoring thread body: loops once per second
        until self.start_monitoring is set to False.
        """
        while self.start_monitoring:

            self.monitor_flow_lock.acquire()

            for flow_dict in self.flow_metrics:
                data = {}

                # select the monitored flow entries by their cookie
                data['cookie'] = flow_dict['cookie']
                data['cookie_mask'] = COOKIE_MASK

                # tx metrics match on the flow's ingress port,
                # rx metrics on its output port
                if 'tx' in flow_dict['metric_key']:
                    data['match'] = {'in_port': flow_dict['mon_port']}
                elif 'rx' in flow_dict['metric_key']:
                    data['out_port'] = flow_dict['mon_port']

                # query Ryu
                ret = self.net.ryu_REST(
                    'stats/flow', dpid=flow_dict['switch_dpid'], data=data)
                # the REST helper may return a parsed dict or a raw string
                # reply (basestring: this module targets Python 2)
                if isinstance(ret, dict):
                    flow_stat_dict = ret
                elif isinstance(ret, basestring):
                    flow_stat_dict = ast.literal_eval(ret.rstrip())
                else:
                    flow_stat_dict = None

                logging.debug('received flow stat:{0} '.format(flow_stat_dict))

                self.set_flow_metric(flow_dict, flow_stat_dict)

            try:
                # push all collected flow metrics as one job to the gateway
                if len(self.flow_metrics) > 0:
                    pushadd_to_gateway(
                        self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
            except Exception as e:
                # best effort: keep monitoring even if the gateway is down
                logging.warning(
                    "Pushgateway not reachable: {0} {1}".format(Exception, e))

            self.monitor_flow_lock.release()
            time.sleep(1)
+\r
    def get_network_metrics(self):
        """
        Query port statistics for all monitored network metrics and export
        them to Prometheus.

        Runs as the port-monitoring thread body: loops once per second
        until self.start_monitoring is set to False.
        """
        while self.start_monitoring:

            self.monitor_lock.acquire()

            # group metrics by dpid to optimize the rest api calls
            dpid_list = [metric_dict['switch_dpid']
                         for metric_dict in self.network_metrics]
            dpid_set = set(dpid_list)

            for dpid in dpid_set:

                # query Ryu
                ret = self.net.ryu_REST('stats/port', dpid=dpid)
                # the REST helper may return a parsed dict or a raw string
                # reply (basestring: this module targets Python 2)
                if isinstance(ret, dict):
                    port_stat_dict = ret
                elif isinstance(ret, basestring):
                    port_stat_dict = ast.literal_eval(ret.rstrip())
                else:
                    port_stat_dict = None

                # all metric entries that live on this switch
                metric_list = [metric_dict for metric_dict in self.network_metrics
                               if int(metric_dict['switch_dpid']) == int(dpid)]

                for metric_dict in metric_list:
                    self.set_network_metric(metric_dict, port_stat_dict)

            try:
                # push all collected port metrics as one job to the gateway
                if len(self.network_metrics) > 0:
                    pushadd_to_gateway(
                        self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
            except Exception as e:
                # best effort: keep monitoring even if the gateway is down
                logging.warning(
                    "Pushgateway not reachable: {0} {1}".format(Exception, e))

            self.monitor_lock.release()
            time.sleep(1)
+\r
+ # add metric to the list to export to Prometheus, parse the Ryu port-stats\r
+ # reply\r
+ def set_network_metric(self, metric_dict, port_stat_dict):\r
+ # vnf tx is the datacenter switch rx and vice-versa\r
+ metric_key = self.switch_tx_rx(metric_dict['metric_key'])\r
+ switch_dpid = metric_dict['switch_dpid']\r
+ vnf_name = metric_dict['vnf_name']\r
+ vnf_interface = metric_dict['vnf_interface']\r
+ previous_monitor_time = metric_dict['previous_monitor_time']\r
+ mon_port = metric_dict['mon_port']\r
+ for port_stat in port_stat_dict[str(switch_dpid)]:\r
+ # ovs output also gives back 'LOCAL' port\r
+ if port_stat['port_no'] == 'LOCAL':\r
+ continue\r
+ if int(port_stat['port_no']) == int(mon_port):\r
+ port_uptime = port_stat['duration_sec'] + \\r
+ port_stat['duration_nsec'] * 10 ** (-9)\r
+ this_measurement = int(port_stat[metric_key])\r
+\r
+ # set prometheus metric\r
+ self.prom_metrics[metric_dict['metric_key']].\\r
+ labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None).\\r
+ set(this_measurement)\r
+\r
+ # also the rate is calculated here, but not used for now\r
+ # (rate can be easily queried from prometheus also)\r
+ if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:\r
+ metric_dict['previous_measurement'] = int(\r
+ port_stat[metric_key])\r
+ metric_dict['previous_monitor_time'] = port_uptime\r
+ # do first measurement\r
+ # time.sleep(1)\r
+ # self.monitor_lock.release()\r
+ # rate cannot be calculated yet (need a first measurement)\r
+ metric_dict['previous_measurement'] = this_measurement\r
+ metric_dict['previous_monitor_time'] = port_uptime\r
+ return\r
+\r
+ logging.exception('metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface))\r
+ logging.exception(\r
+ 'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+ logging.exception(\r
+ 'monitored network_metrics:{0}'.format(self.network_metrics))\r
+ logging.exception('port dict:{0}'.format(port_stat_dict))\r
+ return 'metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface)\r
+\r
+ def set_flow_metric(self, metric_dict, flow_stat_dict):\r
+ # vnf tx is the datacenter switch rx and vice-versa\r
+ metric_key = metric_dict['metric_key']\r
+ switch_dpid = metric_dict['switch_dpid']\r
+ vnf_name = metric_dict['vnf_name']\r
+ vnf_interface = metric_dict['vnf_interface']\r
+ cookie = metric_dict['cookie']\r
+\r
+ counter = 0\r
+ for flow_stat in flow_stat_dict[str(switch_dpid)]:\r
+ if 'bytes' in metric_key:\r
+ counter += flow_stat['byte_count']\r
+ elif 'packet' in metric_key:\r
+ counter += flow_stat['packet_count']\r
+\r
+ # flow_uptime disabled for now (can give error)\r
+ # flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
+ # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
+\r
+ self.prom_metrics[metric_dict['metric_key']]. \\r
+ labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
+ set(counter)\r
+\r
+ def start_Prometheus(self, port=9090):\r
+ # prometheus.yml configuration file is located in the same directory as\r
+ # this file\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "-p", "{0}:9090".format(port),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "--name", "prometheus",\r
+ "prom/prometheus"\r
+ ]\r
+ logging.info('Start Prometheus container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_PushGateway(self, port=PUSHGATEWAY_PORT):\r
+ cmd = ["docker",\r
+ "run",\r
+ "-d",\r
+ "-p", "{0}:9091".format(port),\r
+ "--name", "pushgateway",\r
+ "--label", 'com.containernet=""',\r
+ "prom/pushgateway"\r
+ ]\r
+\r
+ logging.info('Start Prometheus Push Gateway container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_cAdvisor(self, port=CADVISOR_PORT):\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "--volume=/:/rootfs:ro",\r
+ "--volume=/var/run:/var/run:rw",\r
+ "--volume=/sys:/sys:ro",\r
+ "--volume=/var/lib/docker/:/var/lib/docker:ro",\r
+ "--publish={0}:8080".format(port),\r
+ "--name=cadvisor",\r
+ "--label", 'com.containernet=""',\r
+ "--detach=true",\r
+ "google/cadvisor:latest",\r
+ # "--storage_duration=1m0s",\r
+ # "--allow_dynamic_housekeeping=true",\r
+ # "--housekeeping_interval=1s",\r
+ ]\r
+ logging.info('Start cAdvisor container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def stop(self):\r
+ # stop the monitoring thread\r
+ self.start_monitoring = False\r
+ self.monitor_thread.join()\r
+ self.monitor_flow_thread.join()\r
+\r
+ # these containers are used for monitoring but are started now outside\r
+ # of son-emu\r
+\r
+ if self.pushgateway_process is not None:\r
+ logging.info('stopping pushgateway container')\r
+ self._stop_container('pushgateway')\r
+\r
+ if self.cadvisor_process is not None:\r
+ logging.info('stopping cadvisor container')\r
+ self._stop_container('cadvisor')\r
+\r
+ def switch_tx_rx(self, metric=''):\r
+ # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
+ # so we need to change the metric name to be consistent with the vnf rx\r
+ # or tx\r
+ if 'tx' in metric:\r
+ metric = metric.replace('tx', 'rx')\r
+ elif 'rx' in metric:\r
+ metric = metric.replace('rx', 'tx')\r
+\r
+ return metric\r
+\r
+ def _stop_container(self, name):\r
+\r
+ # container = self.dockercli.containers.get(name)\r
+ # container.stop()\r
+ # container.remove(force=True)\r
+\r
+ # the only robust way to stop these containers is via Popen, it seems\r
+ time.sleep(1)\r
+ cmd = ['docker', 'rm', '-f', name]\r
+ Popen(cmd)\r
+\r
    def update_skewmon(self, vnf_name, resource_name, action):
        """
        Add or remove a vnf resource from the skewness monitor and
        start/stop the 'skewmon' docker container accordingly.

        The monitored set is persisted as JSON in /tmp/skewmon.cfg, which
        the skewmon container reads via a read-only bind mount.

        :param vnf_name: name of the vnf (docker container 'mn.<vnf_name>')
        :param resource_name: resource/metric to monitor
        :param action: 'start' to add the vnf, 'stop' to remove it
        :return: status string describing what was done
        """
        ret = ''

        config_file_path = '/tmp/skewmon.cfg'
        # 'a+' creates the file if missing; NOTE(review): the read position
        # after opening with 'a+' may be at EOF, in which case json.load
        # falls into the except branch — confirm intended
        configfile = open(config_file_path, 'a+')
        try:
            config = json.load(configfile)
        except BaseException:
            # not a valid json file or empty
            config = {}

        # initialize config file
        if len(self.skewmon_metrics) == 0:
            config = {}
        json.dump(config, configfile)
        configfile.close()

        docker_name = 'mn.' + vnf_name
        vnf_container = self.dockercli.containers.get(docker_name)
        key = resource_name + '_' + vnf_container.short_id
        vnf_id = vnf_container.id

        if action == 'start':
            # add a new vnf to monitor
            config[key] = dict(VNF_NAME=vnf_name,
                               VNF_ID=vnf_id,
                               VNF_METRIC=resource_name)
            ret = 'adding to skewness monitor: {0} {1} '.format(
                vnf_name, resource_name)
            logging.info(ret)
        elif action == 'stop':
            # remove vnf to monitor
            config.pop(key)
            ret = 'removing from skewness monitor: {0} {1} '.format(
                vnf_name, resource_name)
            logging.info(ret)

        # persist the updated monitored set (rewrite the whole file)
        self.skewmon_metrics = config
        configfile = open(config_file_path, 'w')
        json.dump(config, configfile)
        configfile.close()

        try:
            skewmon_container = self.dockercli.containers.get('skewmon')

            # remove container if config is empty
            if len(config) == 0:
                ret += 'stopping skewness monitor'
                logging.info('stopping skewness monitor')
                skewmon_container.remove(force=True)

        except docker.errors.NotFound:
            # start container if not running
            ret += 'starting skewness monitor'
            logging.info('starting skewness monitor')
            volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},
                       '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}
            self.dockercli.containers.run('skewmon',
                                          detach=True,
                                          volumes=volumes,
                                          labels=['com.containernet'],
                                          name='skewmon'
                                          )
            # Wait a while for containers to be completely started
            # NOTE(review): the filter checks the 'prometheus' container,
            # not 'skewmon' — confirm this is intentional
            started = False
            wait_time = 0
            while not started:
                list1 = self.dockercli.containers.list(
                    filters={'status': 'running', 'name': 'prometheus'})
                if len(list1) >= 1:
                    time.sleep(1)
                    started = True
                if wait_time > 5:
                    return 'skewmon not started'
                time.sleep(1)
                wait_time += 1
        return ret
+\r
+ def term(self, vnf_list=[]):\r
+ """\r
+ Start a terminal window for the specified VNFs\r
+ (start a terminal for all VNFs if vnf_list is empty)\r
+ :param vnf_list:\r
+ :return:\r
+ """\r
+\r
+ if vnf_list is None:\r
+ vnf_list = []\r
+ if not isinstance(vnf_list, list):\r
+ vnf_list = str(vnf_list).split(',')\r
+ vnf_list = map(str.strip, vnf_list)\r
+ logging.info('vnf_list: {}'.format(vnf_list))\r
+\r
+ return self.start_xterm(vnf_list)\r
+\r
+ # start an xterm for the specfified vnfs\r
+\r
+ def start_xterm(self, vnf_names):\r
+ # start xterm for all vnfs\r
+ for vnf_name in vnf_names:\r
+ terminal_cmd = "docker exec -it mn.{0} /bin/bash".format(vnf_name)\r