4 # Copyright 2016 RIFT.IO Inc
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
"""
@author Paul Laidler (Paul.Laidler@riftio.com)
@brief Pingpong scaling system test
"""
import os
import re
import subprocess
import time
import uuid

import pytest

import rift.auto.session
import rift.auto.descriptor

import gi
from gi.repository import (
    RwProjectNsdYang,
    RwNsrYang,
    RwVnfrYang,
)

gi.require_version('RwKeyspec', '1.0')
from gi.repository.RwKeyspec import quoted_key
@pytest.mark.setup('pingpong_nsd')
@pytest.mark.depends('launchpad')
class TestSetupPingpongNsd(object):
    def test_onboard(self, mgmt_session, descriptors):
        '''Onboard each ping/pong descriptor onto the launchpad.'''
        for descriptor in descriptors:
            rift.auto.descriptor.onboard(mgmt_session, descriptor)

    def test_install_sar(self, mgmt_session):
        '''Install sysstat (sar) on the launchpad host for performance sampling.

        Detects the host distribution by running ``python3 -mplatform`` over
        ssh, then installs sysstat with yum (fedora) or apt-get (Ubuntu).
        '''
        get_platform_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- python3 -mplatform'
        platform_result = subprocess.check_output(
            get_platform_cmd.format(host=mgmt_session.host),
            shell=True)
        # platform string contains e.g. "Ubuntu-16.04" or "fedora-20"
        platform_match = re.search(r'(Ubuntu|fedora)-(\d+)', platform_result.decode('ascii'))
        assert platform_match is not None
        (dist, ver) = platform_match.groups()
        if dist == 'fedora':
            install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
                host=mgmt_session.host,
            )
        elif dist == 'Ubuntu':
            install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo apt-get -q -y install sysstat'.format(
                host=mgmt_session.host,
            )
        subprocess.check_call(install_cmd, shell=True)
@pytest.fixture(scope='function', params=[5,10,15,20,25])
def service_count(request):
    '''Fixture representing the number of services to test'''
    # Without this return the fixture would yield None to every test.
    return request.param
@pytest.mark.depends('pingpong_nsd')
class TestScaling(object):
    @pytest.mark.preserve_fixture_order
    def test_scaling(self, mgmt_session, cloud_account_name, service_count):
        '''Scale up to ``service_count`` network services, then sample launchpad load.

        Arguments:
            mgmt_session       - session to the launchpad management interface
            cloud_account_name - cloud account to instantiate services on
            service_count      - desired number of network services (parametrized fixture)
        '''

        def start_services(mgmt_session, desired_service_count, max_attempts=3):
            # Instantiate network services until the running count reaches
            # desired_service_count, allowing up to max_attempts consecutive
            # passes without progress before giving up.
            catalog = mgmt_session.proxy(RwProjectNsdYang).get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
            nsd = catalog.nsd[0]

            nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
            nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
            service_count = len(nsr.nsr)

            attempts = 0
            while attempts < max_attempts and service_count < desired_service_count:
                attempts += 1
                old_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
                for count in range(service_count, desired_service_count):
                    nsr = rift.auto.descriptor.create_nsr(
                        cloud_account_name,
                        "pingpong_%s" % str(uuid.uuid4().hex[:10]),
                        nsd)
                    mgmt_session.proxy(RwNsrYang).create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)

                new_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
                # Only the services created during this pass need to be verified.
                new_ns_instance_config_refs = {nsr.ns_instance_config_ref for nsr in new_opdata.nsr} - {nsr.ns_instance_config_ref for nsr in old_opdata.nsr}
                for ns_instance_config_ref in new_ns_instance_config_refs:
                    try:
                        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(ns_instance_config_ref))
                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=400)
                        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(ns_instance_config_ref))
                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
                        service_count += 1
                        attempts = 0 # Made some progress so reset the number of attempts remaining
                    except rift.auto.session.ProxyWaitForError:
                        # Service failed to come up; remove it so a retry pass can recreate it.
                        mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ns_instance_config_ref)))

        def monitor_launchpad_performance(service_count, interval=30, samples=1):
            # Sample system activity on the launchpad host with sar and append
            # the results to the scaling log for later plotting.
            sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
                mgmt_ip=mgmt_session.host,
                interval=interval,
                samples=samples,
            )
            output = subprocess.check_output(sar_cmd, shell=True, stderr=subprocess.STDOUT)
            outfile = '{rift_artifacts}/scaling_{task_id}.log'.format(
                rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
                task_id=os.environ.get('AUTO_TASK_ID'),
            )
            with open(outfile, 'a') as fh:
                fh.write('''
== SCALING RESULTS : {service_count} Network Services ==
{output}
'''.format(service_count=service_count, output=output.decode()))

        start_services(mgmt_session, service_count)
        monitor_launchpad_performance(service_count, interval=30, samples=1)
@pytest.mark.depends('pingpong_nsd')
@pytest.mark.teardown('pingpong_nsd')
class TestTeardownPingpongNsr(object):
    def test_teardown_nsr(self, mgmt_session):
        '''Delete all network service records and verify VNFRs are cleaned up.'''
        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/rw-project:project[rw-project:name="default"]/ns-instance-config')
        for nsr in ns_instance_config.nsr:
            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
        # Give the system time to finish tearing down VNFRs before verifying.
        time.sleep(60)
        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/rw-project:project[rw-project:name="default"]/vnfr-catalog')
        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0

    def test_generate_plots(self):
        '''Run sarplot over the scaling log to produce cpu/mem/iface graphs.'''
        plot_commands = [
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
             '--plot "{rift_artifacts}/scaling_cpu_{task_id}.png" '
             '--title "CPU Utilization by network service count" '
             '--keys CPU '
             '--fields %usr,%idle,%sys '
             '--key-filter CPU:all '
             '--ylabel "CPU Utilization %" '
             '--xlabel "Network Service Count" '
             '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
             '--plot "{rift_artifacts}/scaling_mem_{task_id}.png" '
             '--title "Memory Utilization by network service count" '
             '--fields kbmemfree,kbmemused,kbbuffers,kbcached,kbcommit,kbactive,kbinact,kbdirty '
             '--ylabel "Memory Utilization" '
             '--xlabel "Network Service Count" '
             '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
             '--plot "{rift_artifacts}/scaling_mempct_{task_id}.png" '
             '--title "Memory Utilization by network service count" '
             '--fields %memused,%commit '
             '--ylabel "Memory Utilization %" '
             '--xlabel "Network Service Count" '
             '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
             '--plot "{rift_artifacts}/scaling_iface_{task_id}.png" '
             '--title "Interface Utilization by network service count" '
             '--keys IFACE '
             '--fields rxpck/s,txpck/s,rxkB/s,txkB/s,rxcmp/s,txcmp/s,rxmcst/s '
             '--key-filter IFACE:eth0 '
             '--ylabel "Interface Utilization" '
             '--xlabel "Network Service Count" '
             '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
             '--plot "{rift_artifacts}/scaling_iface_err_{task_id}.png" '
             '--title "Interface Errors by network service count" '
             '--keys IFACE '
             '--fields rxerr/s,txerr/s,coll/s,rxdrop/s,txdrop/s,txcarr/s,rxfram/s,rxfifo/s,txfifo/s '
             '--key-filter IFACE:eth0 '
             '--ylabel "Interface Errors" '
             '--xlabel "Network Service Count" '
             '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
        ]

        for cmd in plot_commands:
            subprocess.check_call(
                cmd.format(
                    rift_install=os.environ.get('RIFT_INSTALL'),
                    rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
                    task_id=os.environ.get('AUTO_TASK_ID')
                ),
                shell=True
            )