- return byte_rate\r
-\r
- logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))\r
- return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)\r
-\r
-\r
    # call this function repeatedly for streaming measurements
    def get_rate(self, vnf_name, vnf_interface=None, direction='tx', metric='packets'):
        """Sample the monitored switch port and return the measured rate.

        Polls the controller's port-stats REST endpoint, compares the counter
        against the previous sample stored on ``self`` and returns the rate in
        <metric>/second. Must be called repeatedly: the first call (or a call
        after a counter reset) sleeps one second and recurses to take a
        baseline measurement.

        :param vnf_name: VNF name (only forwarded into the recursive call;
            the monitored port/metric come from instance state)
        :param vnf_interface: VNF interface (forwarded only, see above)
        :param direction: 'tx' or 'rx' (forwarded only)
        :param metric: 'packets' or 'bytes'; selects which prometheus gauge
            is updated with the absolute counter value
        :return: measured rate as float, or the raw REST response string if
            the monitored port was not found in the reply
        """

        # NOTE(review): the counter field to read comes from self.metric_key,
        # not from the 'metric'/'direction' parameters — confirm this is
        # intentional.
        key = self.metric_key

        # raw port statistics for the monitored switch; the controller reply
        # is a python-literal dict string, hence literal_eval below
        ret = self.REST_cmd('stats/port', self.switch_dpid)
        port_stat_dict = ast.literal_eval(ret)
        for port_stat in port_stat_dict[str(self.switch_dpid)]:
            if port_stat['port_no'] == self.mon_port:
                # uptime in seconds with nanosecond fraction
                port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
                this_measurement = int(port_stat[key])
                #logging.info('packets:{0}'.format(this_measurement))

                # set prometheus metrics
                if metric == 'packets':
                    self.prom_tx_packet_count.labels(self.vnf_name, self.vnf_interface).set(this_measurement)
                elif metric == 'bytes':
                    self.prom_tx_byte_count.labels(self.vnf_name, self.vnf_interface).set(this_measurement)

                # no previous sample yet, or the port uptime went backwards
                # (switch/port restarted): take a fresh baseline
                if self.previous_monitor_time <= 0 or self.previous_monitor_time >= port_uptime:
                    self.previous_measurement = int(port_stat[key])
                    self.previous_monitor_time = port_uptime
                    # do first measurement
                    time.sleep(1)
                    byte_rate = self.get_rate(vnf_name, vnf_interface, direction, metric)
                    return byte_rate
                else:
                    # rate = counter delta / uptime delta since last sample
                    time_delta = (port_uptime - self.previous_monitor_time)
                    byte_rate = (this_measurement - self.previous_measurement) / float(time_delta)
                    #logging.info('uptime:{2} delta:{0} rate:{1}'.format(time_delta,byte_rate,port_uptime))

                    self.previous_measurement = this_measurement
                    self.previous_monitor_time = port_uptime
                    return byte_rate

        # NOTE(review): falls through when mon_port was not in the reply and
        # returns the raw REST string instead of a number — callers must cope
        # with both return types; confirm this is intended.
        return ret
-\r
- def REST_cmd(self, prefix, dpid):\r
- url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)\r
- req = urllib2.Request(url)\r
- ret = urllib2.urlopen(req).read()\r
- return ret
\ No newline at end of file
+ return\r
+\r
+ logging.exception('metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface))\r
+ logging.exception(\r
+ 'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+ logging.exception(\r
+ 'monitored network_metrics:{0}'.format(self.network_metrics))\r
+ logging.exception('port dict:{0}'.format(port_stat_dict))\r
+ return 'metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface)\r
+\r
+ def set_flow_metric(self, metric_dict, flow_stat_dict):\r
+ # vnf tx is the datacenter switch rx and vice-versa\r
+ metric_key = metric_dict['metric_key']\r
+ switch_dpid = metric_dict['switch_dpid']\r
+ vnf_name = metric_dict['vnf_name']\r
+ vnf_interface = metric_dict['vnf_interface']\r
+ cookie = metric_dict['cookie']\r
+\r
+ counter = 0\r
+ for flow_stat in flow_stat_dict[str(switch_dpid)]:\r
+ if 'bytes' in metric_key:\r
+ counter += flow_stat['byte_count']\r
+ elif 'packet' in metric_key:\r
+ counter += flow_stat['packet_count']\r
+\r
+ # flow_uptime disabled for now (can give error)\r
+ # flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
+ # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
+\r
+ self.prom_metrics[metric_dict['metric_key']]. \\r
+ labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
+ set(counter)\r
+\r
+ def start_Prometheus(self, port=9090):\r
+ # prometheus.yml configuration file is located in the same directory as\r
+ # this file\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "-p", "{0}:9090".format(port),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "--name", "prometheus",\r
+ "prom/prometheus"\r
+ ]\r
+ logging.info('Start Prometheus container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_PushGateway(self, port=PUSHGATEWAY_PORT):\r
+ cmd = ["docker",\r
+ "run",\r
+ "-d",\r
+ "-p", "{0}:9091".format(port),\r
+ "--name", "pushgateway",\r
+ "--label", 'com.containernet=""',\r
+ "prom/pushgateway"\r
+ ]\r
+\r
+ logging.info('Start Prometheus Push Gateway container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_cAdvisor(self, port=CADVISOR_PORT):\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "--volume=/:/rootfs:ro",\r
+ "--volume=/var/run:/var/run:rw",\r
+ "--volume=/sys:/sys:ro",\r
+ "--volume=/var/lib/docker/:/var/lib/docker:ro",\r
+ "--publish={0}:8080".format(port),\r
+ "--name=cadvisor",\r
+ "--label", 'com.containernet=""',\r
+ "--detach=true",\r
+ "google/cadvisor:latest",\r
+ # "--storage_duration=1m0s",\r
+ # "--allow_dynamic_housekeeping=true",\r
+ # "--housekeeping_interval=1s",\r
+ ]\r
+ logging.info('Start cAdvisor container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def stop(self):\r
+ # stop the monitoring thread\r
+ self.start_monitoring = False\r
+ self.monitor_thread.join()\r
+ self.monitor_flow_thread.join()\r
+\r
+ # these containers are used for monitoring but are started now outside\r
+ # of son-emu\r
+\r
+ if self.pushgateway_process is not None:\r
+ logging.info('stopping pushgateway container')\r
+ self._stop_container('pushgateway')\r
+\r
+ if self.cadvisor_process is not None:\r
+ logging.info('stopping cadvisor container')\r
+ self._stop_container('cadvisor')\r
+\r
+ def switch_tx_rx(self, metric=''):\r
+ # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
+ # so we need to change the metric name to be consistent with the vnf rx\r
+ # or tx\r
+ if 'tx' in metric:\r
+ metric = metric.replace('tx', 'rx')\r
+ elif 'rx' in metric:\r
+ metric = metric.replace('rx', 'tx')\r
+\r
+ return metric\r
+\r
+ def _stop_container(self, name):\r
+\r
+ # container = self.dockercli.containers.get(name)\r
+ # container.stop()\r
+ # container.remove(force=True)\r
+\r
+ # the only robust way to stop these containers is via Popen, it seems\r
+ time.sleep(1)\r
+ cmd = ['docker', 'rm', '-f', name]\r
+ Popen(cmd)\r
+\r
    def update_skewmon(self, vnf_name, resource_name, action):
        """Add or remove a VNF resource from the skewness monitor.

        Maintains /tmp/skewmon.cfg (a JSON dict keyed by
        '<resource>_<container short id>') and starts/removes the 'skewmon'
        docker container depending on whether any entries remain.

        :param vnf_name: VNF name (container 'mn.<vnf_name>' must exist)
        :param resource_name: resource metric to monitor (e.g. 'cpu')
        :param action: 'start' to add the entry, 'stop' to remove it
        :return: human-readable status string
        """

        ret = ''

        config_file_path = '/tmp/skewmon.cfg'
        # NOTE(review): 'a+' creates the file if missing, but the read
        # position may be at EOF, so json.load below can fail even on a valid
        # file and fall back to {} — confirm this is the intended behavior.
        configfile = open(config_file_path, 'a+')
        try:
            config = json.load(configfile)
        except BaseException:
            # not a valid json file or empty
            config = {}

        # initialize config file
        # NOTE(review): when skewmon_metrics is non-empty this branch is
        # skipped and 'configfile' is never closed before being reopened
        # below — possible file-handle leak; verify.
        if len(self.skewmon_metrics) == 0:
            config = {}
            json.dump(config, configfile)
            configfile.close()

        docker_name = 'mn.' + vnf_name
        vnf_container = self.dockercli.containers.get(docker_name)
        key = resource_name + '_' + vnf_container.short_id
        vnf_id = vnf_container.id

        if action == 'start':
            # add a new vnf to monitor
            config[key] = dict(VNF_NAME=vnf_name,
                               VNF_ID=vnf_id,
                               VNF_METRIC=resource_name)
            ret = 'adding to skewness monitor: {0} {1} '.format(
                vnf_name, resource_name)
            logging.info(ret)
        elif action == 'stop':
            # remove vnf to monitor
            # NOTE(review): raises KeyError if this metric was never added —
            # confirm callers guarantee a prior 'start'.
            config.pop(key)
            ret = 'removing from skewness monitor: {0} {1} '.format(
                vnf_name, resource_name)
            logging.info(ret)

        # persist the updated config and mirror it in instance state
        self.skewmon_metrics = config
        configfile = open(config_file_path, 'w')
        json.dump(config, configfile)
        configfile.close()

        try:
            skewmon_container = self.dockercli.containers.get('skewmon')

            # remove container if config is empty
            if len(config) == 0:
                ret += 'stopping skewness monitor'
                logging.info('stopping skewness monitor')
                skewmon_container.remove(force=True)

        except docker.errors.NotFound:
            # start container if not running
            ret += 'starting skewness monitor'
            logging.info('starting skewness monitor')
            volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},
                       '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}
            self.dockercli.containers.run('skewmon',
                                          detach=True,
                                          volumes=volumes,
                                          labels=['com.containernet'],
                                          name='skewmon'
                                          )
        # Wait a while for containers to be completely started
        # NOTE(review): the filter matches containers named 'prometheus',
        # although the timeout message refers to skewmon — looks like a
        # copy-paste slip; confirm which container should be awaited here.
        started = False
        wait_time = 0
        while not started:
            list1 = self.dockercli.containers.list(
                filters={'status': 'running', 'name': 'prometheus'})
            if len(list1) >= 1:
                time.sleep(1)
                started = True
            if wait_time > 5:
                return 'skewmon not started'
            time.sleep(1)
            wait_time += 1
        return ret
+\r
+ def term(self, vnf_list=[]):\r
+ """\r
+ Start a terminal window for the specified VNFs\r
+ (start a terminal for all VNFs if vnf_list is empty)\r
+ :param vnf_list:\r
+ :return:\r
+ """\r
+\r
+ if vnf_list is None:\r
+ vnf_list = []\r
+ if not isinstance(vnf_list, list):\r
+ vnf_list = str(vnf_list).split(',')\r
+ vnf_list = map(str.strip, vnf_list)\r
+ logging.info('vnf_list: {}'.format(vnf_list))\r
+\r
+ return self.start_xterm(vnf_list)\r
+\r
    # start an xterm for the specified vnfs
+\r
+ def start_xterm(self, vnf_names):\r
+ # start xterm for all vnfs\r
+ for vnf_name in vnf_names:\r
+ terminal_cmd = "docker exec -it mn.{0} /bin/bash".format(vnf_name)\r
+\r
+ cmd = ['xterm', '-xrm', 'XTerm*selectToClipboard: true', '-xrm', 'XTerm.vt100.allowTitleOps: false',\r
+ '-T', vnf_name,\r
+ '-e', terminal_cmd]\r
+ Popen(cmd)\r
+\r
+ ret = 'xterms started for {0}'.format(vnf_names)\r
+ if len(vnf_names) == 0:\r
+ ret = 'vnf list is empty, no xterms started'\r
+ return ret\r