# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import sys

import requests
import yaml

from time import sleep
from random import randint
from sys import stderr
from uuid import uuid4

__author__ = "Alfonso Tierno, alfonso.tiernosepulveda@telefonica.com"
__date__ = "$2018-03-01$"
version_date = "Oct 2018"

logger = logging.getLogger("osm_nbi.test")  # logger used below; the exact name is assumed, original definition missing
40 print("Usage: ", sys
.argv
[0], "[options]")
42 " Performs system tests over running NBI. It can be used for real OSM test using option '--test-osm'"
45 " If this is the case env variables 'OSMNBITEST_VIM_NAME' must be supplied to create a VIM if not exist "
46 "where deployment is done"
49 print(" -h|--help: shows this help")
50 print(" --insecure: Allows non trusted https NBI server")
51 print(" --list: list available tests")
53 " --manual-check: Deployment tests stop after deployed to allow manual inspection. Only make sense with "
56 print(" -p|--password PASSWORD: NBI access password. 'admin' by default")
57 print(" ---project PROJECT: NBI access project. 'admin' by default")
59 " --test TEST[,...]: Execute only a test or a comma separated list of tests"
62 " --params key=val: params to the previous test. key can be vnfd-files, nsd-file, ns-name, ns-config"
65 " --test-osm: If missing this test is intended for NBI only, no other OSM components are expected. Use "
66 "this flag to test the system. LCM and RO components are expected to be up and running"
69 " --timeout TIMEOUT: General NBI timeout, by default {}s".format(timeout
)
72 " --timeout-deploy TIMEOUT: Timeout used for getting NS deployed, by default {}s".format(
77 " --timeout-configure TIMEOUT: Timeout used for getting NS deployed and configured,"
78 " by default {}s".format(timeout_configure
)
80 print(" -u|--user USERNAME: NBI access username. 'admin' by default")
82 " --url URL: complete NBI server URL. 'https//localhost:9999/osm' by default"
84 print(" -v|--verbose print debug information, can be used several times")
85 print(" --no-verbose remove verbosity")
86 print(" --version: prints current version")
87 print("ENV variables used for real deployment tests with option osm-test.")
88 print(" export OSMNBITEST_VIM_NAME=vim-name")
89 print(" export OSMNBITEST_VIM_URL=vim-url")
90 print(" export OSMNBITEST_VIM_TYPE=vim-type")
91 print(" export OSMNBITEST_VIM_TENANT=vim-tenant")
92 print(" export OSMNBITEST_VIM_USER=vim-user")
93 print(" export OSMNBITEST_VIM_PASSWORD=vim-password")
94 print(' export OSMNBITEST_VIM_CONFIG="vim-config"')
95 print(' export OSMNBITEST_NS_NAME="vim-config"')
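    # Illustrative invocations (values are placeholders, not taken from the original file):
    #   python3 <this-script>.py --url https://localhost:9999/osm -u admin -p admin --list
    #   python3 <this-script>.py --test-osm --test TestDeployHackfestCirros --params ns-name=test-ns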
r_header_json = {"Content-type": "application/json"}
headers_json = {"Content-type": "application/json", "Accept": "application/json"}
r_header_yaml = {"Content-type": "application/yaml"}
headers_yaml = {"Content-type": "application/yaml", "Accept": "application/yaml"}
r_header_text = {"Content-type": "text/plain"}
r_header_octect = {"Content-type": "application/octet-stream"}
headers_text = {"Accept": "text/plain,application/yaml"}
r_header_zip = {"Content-type": "application/zip"}
headers_zip = {"Accept": "application/zip,application/yaml"}
headers_zip_yaml = {"Accept": "application/yaml", "Content-type": "application/zip"}
headers_zip_json = {"Accept": "application/json", "Content-type": "application/zip"}
headers_txt_json = {"Accept": "application/json", "Content-type": "text/plain"}
r_headers_yaml_location_vnfd = {
    "Location": "/vnfpkgm/v1/vnf_packages_content/",
    "Content-Type": "application/yaml",
}
r_headers_yaml_location_nsd = {
    "Location": "/nsd/v1/ns_descriptors_content/",
    "Content-Type": "application/yaml",
}
r_headers_yaml_location_nst = {
    "Location": "/nst/v1/netslice_templates_content",
    "Content-Type": "application/yaml",
}
r_headers_yaml_location_nslcmop = {
    "Location": "nslcm/v1/ns_lcm_op_occs/",
    "Content-Type": "application/yaml",
}
r_headers_yaml_location_nsilcmop = {
    "Location": "/osm/nsilcm/v1/nsi_lcm_op_occs/",
    "Content-Type": "application/yaml",
}
# test ones authorized
test_authorized_list = (
    "/vnfpkgm/v1/vnf_packages/non-existing-id",
    "/nsd/v1/ns_descriptors/non-existing-id",
    "/nsd/v1/ns_descriptors_content/non-existing-id",
)

timeout = 120  # general timeout
timeout_deploy = 60 * 10  # timeout for NS deploying without charms
timeout_configure = 60 * 20  # timeout for NS deploying and configuring
class TestException(Exception):
    pass

        self.url_base = url_base
        if header_base is None:
            self.header_base = {}
        else:
            self.header_base = header_base.copy()
        self.s = requests.session()
        self.s.headers = self.header_base
        self.password = password
        self.project = project
        # contains ID of tests obtained from Location response header. "" key contains last obtained id
        self.test_name = None
        self.step = 0  # number of subtest under test
        self.passed_tests = 0
        self.failed_tests = 0
    def set_test_name(self, test_name):
        self.test_name = test_name

    def set_header(self, header):
        self.s.headers.update(header)

    def set_tet_name(self, test_name):
        self.test_name = test_name

    def unset_header(self, key):
        if key in self.s.headers:
            del self.s.headers[key]
        """
        Performs an HTTP request and checks the HTTP response code. Exits if it differs from the allowed codes.
        It gets the returned id, which can be used by a following test in the URL with {name}, where name is the
        name of the test.
        :param description: description of the test
        :param method: HTTP method: GET, PUT, POST, DELETE, ...
        :param url: complete URL or relative URL
        :param headers: request headers to add to the base headers
        :param payload: can be a dict (transformed to json), a text, or a file if it starts with '@'
        :param expected_codes: expected response codes; can be an int, an int tuple or an int range
        :param expected_headers: expected response headers, dict with key values
        :param expected_payload: expected payload, 0 if empty, 'yaml', 'json', 'text', 'zip', 'octet-stream'
        :param store_file: filename to store content
        :param pooling: if True, do not count nor log this test, because polling is done with many identical requests
        :return: requests response
        """
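        # Illustrative sketch of a typical call (an assumed example following the argument order used
        # elsewhere in this file, not part of the test flow below):
        #   engine.test("Get VIM list", "GET", "/admin/v1/vim_accounts", headers_json,
        #               None, 200, {"Content-Type": "application/json"}, "json")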
            self.s = requests.session()
        elif not url.startswith("http"):
            url = self.url_base + url

        # replace url <> with the last ID
        url = url.replace("<>", self.last_id)

        if isinstance(payload, str):
            if payload.startswith("@"):
                mode = "r"
                file_name = payload[1:]
                if payload.startswith("@b"):
                    mode = "rb"
                    file_name = payload[2:]
                with open(file_name, mode) as f:
                    payload = f.read()
        elif isinstance(payload, dict):
            payload = json.dumps(payload)
        test_description = "Test {}{} {} {} {}".format(
            self.test_name, self.step, description, method, url
        )
        logger.warning(test_description)

        if expected_payload in ("zip", "octet-string") or store_file:

        r = getattr(self.s, method.lower())(

        except requests.exceptions.ConnectionError as e:
            logger.error("Exception {}. Retrying".format(e))

        if expected_payload in ("zip", "octet-string") or store_file:
            logger.debug("RX {}".format(r.status_code))
        else:
            logger.debug("RX {}: {}".format(r.status_code, r.text))
        if isinstance(expected_codes, int):
            expected_codes = (expected_codes,)
        if r.status_code not in expected_codes:
            raise TestException(
                "Got status {}. Expected {}. {}".format(
                    r.status_code, expected_codes, r.text
                )
            )
        for header_key, header_val in expected_headers.items():
            if header_key.lower() not in r.headers:
                raise TestException("Header {} not present".format(header_key))
            if header_val and header_val.lower() not in r.headers[header_key]:
                raise TestException(
                    "Header {} does not contain {} but {}".format(
                        header_key, header_val, r.headers[header_key]
                    )
                )
        if expected_payload is not None:
            if expected_payload == 0 and len(r.content) > 0:
                raise TestException("Expected empty payload")
            elif expected_payload == "json":
                try:
                    r.json()
                except Exception as e:
                    raise TestException(
                        "Expected json response payload, but got Exception {}".format(e)
                    )
            elif expected_payload == "yaml":
                try:
                    yaml.safe_load(r.text)
                except Exception as e:
                    raise TestException(
                        "Expected yaml response payload, but got Exception {}".format(e)
                    )
            elif expected_payload in ("zip", "octet-string"):
                if len(r.content) == 0:
                    raise TestException(
                        "Expected some response payload, but got empty"
                    )
                # tar = tarfile.open(None, 'r:gz', fileobj=r.raw)
                # for tarinfo in tar:
                #     tarname = tarinfo.name
                # except Exception as e:
                #     raise TestException("Expected zip response payload, but got Exception {}".format(e))
            elif expected_payload == "text":
                if len(r.content) == 0:
                    raise TestException(
                        "Expected some response payload, but got empty"
                    )
        if store_file:
            with open(store_file, "wb") as fd:
                for chunk in r.iter_content(chunk_size=128):
                    fd.write(chunk)

        location = r.headers.get("Location")
        if location:
            _id = location[location.rfind("/") + 1 :]
            self.last_id = str(_id)
        self.passed_tests += 1
        except TestException as e:
            self.failed_tests += 1
            r_status_code = r.status_code
            logger.error("{} \nRX code{}: {}".format(e, r_status_code, r_text))

            logger.error("Cannot open file {}: {}".format(store_file, e))

            logger.error("Exception: {}".format(e), exc_info=True)
            self.failed_tests += 1
        except requests.exceptions.RequestException as e:
            logger.error("Exception: {}".format(e))
    def get_autorization(self):  # user=None, password=None, project=None):
        ):  # and self.user == user and self.password == password and self.project == project:
            # self.password = password
            # self.project = project

                "username": self.user,
                "password": self.password,
                "project_id": self.project,

        self.token = response["id"]
        self.set_header({"Authorization": "Bearer {}".format(self.token)})

    def remove_authorization(self):

            "/admin/v1/tokens/{}".format(self.token),

        self.unset_header("Authorization")
    def get_create_vim(self, test_osm):
        self.get_autorization()
        vim_name = os.environ.get("OSMNBITEST_VIM_NAME")

                "Needed to define OSMNBITEST_VIM_XXX variables to create a real VIM for deployment"

            "/admin/v1/vim_accounts?name={}".format(vim_name),

            return vims[0]["_id"]

        # check needed environ parameters:
        if not os.environ.get("OSMNBITEST_VIM_URL") or not os.environ.get(
            "OSMNBITEST_VIM_TENANT"
        ):
                "Env OSMNBITEST_VIM_URL and OSMNBITEST_VIM_TENANT are needed to create a real VIM"
                " to deploy on with the --test-osm option"

        vim_data = (
            "{{schema_version: '1.0', name: '{}', vim_type: {}, vim_url: '{}', vim_tenant_name: '{}', "
            "vim_user: {}, vim_password: {}".format(
                vim_name,
                os.environ.get("OSMNBITEST_VIM_TYPE", "openstack"),
                os.environ.get("OSMNBITEST_VIM_URL"),
                os.environ.get("OSMNBITEST_VIM_TENANT"),
                os.environ.get("OSMNBITEST_VIM_USER"),
                os.environ.get("OSMNBITEST_VIM_PASSWORD"),
            )
        )
        if os.environ.get("OSMNBITEST_VIM_CONFIG"):
            vim_data += " ,config: {}".format(
                os.environ.get("OSMNBITEST_VIM_CONFIG")
            )

            "{schema_version: '1.0', name: fakeVim, vim_type: openstack, vim_url: 'http://10.11.12.13/fake'"
            ", vim_tenant_name: 'vimtenant', vim_user: vimuser, vim_password: vimpassword}"

            "/admin/v1/vim_accounts",
            {"Location": "/admin/v1/vim_accounts/", "Content-Type": "application/yaml"},
    def print_results(self):
        print("\n\n\n--------------------------------------------")
        print(
            "TEST RESULTS: Total: {}, Passed: {}, Failed: {}".format(
                self.passed_tests + self.failed_tests,
                self.passed_tests,
                self.failed_tests,
            )
        )
        print("--------------------------------------------")
    def wait_until_delete(self, url_op, timeout_delete):
        """
        Polls the given topic until it is no longer present, i.e. until it has been deleted.
        :param url_op: URL of the topic to poll
        :param timeout_delete: maximum time to wait for the deletion, in seconds
        """
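        # Example of use (as done in the VIM tests below):
        #   engine.wait_until_delete("/admin/v1/vim_accounts/{}".format(vim_id), timeout)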
        description = "Wait to topic being deleted"
        test_description = "Test {}{} {} {} {}".format(
            self.test_name, self.step, description, "GET", url_op
        )
        logger.warning(test_description)

        wait = timeout_delete

            if r.status_code == 404:
                self.passed_tests += 1

            elif r.status_code == 200:

                    "Topic is not deleted after {} seconds".format(timeout_delete)

                self.failed_tests += 1
    def wait_operation_ready(self, ns_nsi, opp_id, timeout, expected_fail=False):
        """
        Wait until the nslcmop or nsilcmop has finished.
        :param ns_nsi: "ns" or "nsi"
        :param opp_id: Id of the operation
        :param timeout: maximum time to wait, in seconds
        :param expected_fail: set to True when the operation is expected to fail
        :return: None. Updates passed/failed_tests
        """
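        # Examples of use (as in the deploy tests below):
        #   engine.wait_operation_ready("ns", nslcmop_id, timeout_deploy)
        # and, for an operation expected to fail (e.g. scaling beyond its limit):
        #   engine.wait_operation_ready("ns", nslcmop2_scale_in, timeout_deploy, expected_fail=True)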
        if ns_nsi == "ns":
            url_op = "/nslcm/v1/ns_lcm_op_occs/{}".format(opp_id)
        else:
            url_op = "/nsilcm/v1/nsi_lcm_op_occs/{}".format(opp_id)
        description = "Wait to {} lcm operation complete".format(ns_nsi)
        test_description = "Test {}{} {} {} {}".format(
            self.test_name, self.step, description, "GET", url_op
        )
        logger.warning(test_description)

            if "COMPLETED" in nslcmop["operationState"]:
                if expected_fail:
                        "NS operation succeeded, but a failure was expected: {}".format(
                            nslcmop["detailed-status"]
                        )
                    self.failed_tests += 1
                else:
                    self.passed_tests += 1
            elif "FAILED" in nslcmop["operationState"]:
                if not expected_fail:
                    "NS operation has failed: {}".format(nslcmop["detailed-status"])
                    self.failed_tests += 1
                else:
                    self.passed_tests += 1

            print(".", end="", file=stderr)

        self.failed_tests += 1
            "NS operation did not finish after {} seconds".format(timeout)

        print("", file=stderr)
class TestNonAuthorized:
    description = "Test invalid URLs, methods and no authorization"

    @staticmethod
    def run(engine, test_osm, manual_check, test_params=None):
        engine.set_test_name("NonAuth")
        engine.remove_authorization()
        test_not_authorized_list = (
            "/admin/v1/nonexist",
        )
        for t in test_not_authorized_list:
class TestUsersProjects:
    description = "Test project and user creation"

    @staticmethod
    def run(engine, test_osm, manual_check, test_params=None):
        engine.set_test_name("UserProject")
        # backend = test_params.get("backend") if test_params else None  # UNUSED

        u1 = u2 = u3 = u4 = None
        engine.get_autorization()
692 "Create project non admin 1",
694 "/admin/v1/projects",
698 {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
701 p1
= engine
.last_id
if res
else None
704 "Create project admin",
706 "/admin/v1/projects",
708 {"name": "Padmin", "admin": True},
710 {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
713 padmin
= engine
.last_id
if res
else None
716 "Create project bad format",
718 "/admin/v1/projects",
725 pbad
= engine
.last_id
if res
else None
728 "Get project admin role",
730 "/admin/v1/roles?name=project_admin",
734 {"Content-Type": "application/json"},
737 rpa
= res
.json()[0]["_id"] if res
else None
739 "Get project user role",
741 "/admin/v1/roles?name=project_user",
745 {"Content-Type": "application/json"},
748 rpu
= res
.json()[0]["_id"] if res
else None
750 "Get system admin role",
752 "/admin/v1/roles?name=system_admin",
756 {"Content-Type": "application/json"},
759 rsa
= res
.json()[0]["_id"] if res
else None
761 data
= {"username": "U1", "password": "pw1"}
763 data
["project_role_mappings"] = [
764 {"project": p1
, "role": rpa
},
765 {"project": p2
, "role": rpa
},
766 {"project": padmin
, "role": rpu
},
769 xhd
= {"Location": "/admin/v1/users/", "Content-Type": "application/json"}
771 "Create user with bad project and force",
773 "/admin/v1/users?FORCE=True",
783 # User is created sometimes even though an exception is raised
787 "/admin/v1/users?username=U1",
791 {"Content-Type": "application/json"},
794 u1
= res
.json()[0]["_id"] if res
else None
796 data
= {"username": "U2", "password": "pw2"}
797 data
["project_role_mappings"] = [
798 {"project": p1
, "role": rpa
},
799 {"project": padmin
, "role": rsa
},
808 {"Location": "/admin/v1/users/", "Content-Type": "application/json"},
811 u2
= engine
.last_id
if res
else None
814 ftt
= "project_role_mappings"
815 xpr
= [{"project": p1
, "role": rpa
}, {"project": padmin
, "role": rpu
}]
818 "Edit user U1, delete P2 project",
820 "/admin/v1/users/" + u1
,
828 "Check user U1, contains the right projects",
830 "/admin/v1/users/" + u1
,
839 xpr
[0]["project_name"] = "P1"
840 xpr
[0]["role_name"] = "project_admin"
841 xpr
[1]["project_name"] = "Padmin"
842 xpr
[1]["role_name"] = "project_user"
848 if pr
not in rj
[ftt
]:
852 "User {} '{}' are different than expected '{}'. Edition was not done properly".format(
856 engine
.failed_tests
+= 1
858 p2
= None # To prevent deletion attempts
        # Add a test of 'default project' for Keystone?

            "Edit user U2, change password",
            "/admin/v1/users/" + u2,
            {"password": "pw2_new"},

            "Change to project P1 non existing",

            "Change to user U2 project P1",
            {"username": "U2", "password": "pw2_new", "project_id": "P1"},
        engine.set_header({"Authorization": "Bearer {}".format(rj["id"])})

            "Edit user projects non admin",
            "/admin/v1/users/U1",
            {"remove_project_role_mappings": [{"project": "P1", "role": None}]},

            "Add new project non admin",
            "/admin/v1/projects",
        if res is None or res.status_code == 201:
            # The project has been created even though it shouldn't
                "/admin/v1/projects/P2",
        p2 = res.json()["_id"] if res else None

        data = {"username": "U3", "password": "pw3"}
        data["project_role_mappings"] = [{"project": p1, "role": rpu}]

            "Add new user non admin",
        if res is None or res.status_code == 201:
            # The user has been created even though it shouldn't
                "/admin/v1/users/U3",
        u3 = res.json()["_id"] if res else None
967 "Change to user U2 project Padmin",
972 "project_id": "Padmin"
973 }, # Caused a Keystone authentication error
974 # {"username": "U2", "password": "pw2_new", "project_id": "Padmin"},
982 {"Authorization": "Bearer {}".format(rj
["id"])}
986 "Add new project admin",
988 "/admin/v1/projects",
993 "Location": "/admin/v1/projects/",
994 "Content-Type": "application/json",
998 p3
= engine
.last_id
if res
else None
1001 data
= {"username": "U4", "password": "pw4"}
1002 data
["project_role_mappings"] = [
1003 {"project": p1
, "role": rpa
}
1006 "Add new user admin",
1013 "Location": "/admin/v1/users/",
1014 "Content-Type": "application/json",
1018 u4
= engine
.last_id
if res
else None
1024 "project_role_mappings": [{"project": p3
, "role": rpa
}]
1027 "Edit user projects admin",
1029 "/admin/v1/users/U4",
1036 # Project is deleted even though it shouldn't - PROVISIONAL?
1038 "Delete project P3 conflict",
1040 "/admin/v1/projects/" + p3
,
1047 if res
and res
.status_code
in (200, 204):
1051 "Delete project P3 forcing",
1053 "/admin/v1/projects/" + p3
+ "?FORCE=True",
1060 if res
and res
.status_code
in (200, 204):
1065 "Delete user U2. Conflict deleting own user",
1067 "/admin/v1/users/" + u2
,
1074 if res
is None or res
.status_code
in (200, 204):
1080 "/admin/v1/users/" + u4
,
1087 if res
and res
.status_code
in (200, 204):
1091 "Delete project P3",
1093 "/admin/v1/projects/" + p3
,
1100 if res
and res
.status_code
in (200, 204):
1107 "/admin/v1/users/" + u3
,
        engine.remove_authorization()  # To force get authorization
        engine.get_autorization()

            "/admin/v1/users/" + u1,
            "/admin/v1/users/" + u2,
            "/admin/v1/users/" + u3,
            "/admin/v1/users/" + u4,

            "Delete project P1",
            "/admin/v1/projects/" + p1,
            "Delete project P2",
            "/admin/v1/projects/" + p2,
            "Delete project P3",
            "/admin/v1/projects/" + p3,
            "Delete project Padmin",
            "/admin/v1/projects/" + padmin,
            "Delete bad project",
            "/admin/v1/projects/" + pbad,
        # BEGIN New Tests - Addressing Projects/Users by Name/ID
            "Create new project P1",
            "/admin/v1/projects",
            {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
        pid1 = res.json()["id"]
        # print("# pid =", pid1)

            "Create new project P2",
            "/admin/v1/projects",
            {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
        pid2 = res.json()["id"]
        # print("# pid =", pid2)

        data = {"username": "U1", "password": "pw1"}
        data["project_role_mappings"] = [{"project": pid1, "role": rpu}]
            "Create new user U1",
            {"Location": "/admin/v1/users/", "Content-Type": "application/json"},
        uid1 = res.json()["id"]
        # print("# uid =", uid1)

        data = {"username": "U2", "password": "pw2"}
        data["project_role_mappings"] = [{"project": pid2, "role": rpu}]
            "Create new user U2",
            {"Location": "/admin/v1/users/", "Content-Type": "application/json"},
        uid2 = res.json()["id"]
        # print("# uid =", uid2)

            "Get Project P1 by Name",
            "/admin/v1/projects/P1",
            "Get Project P1 by ID",
            "/admin/v1/projects/" + pid1,
            "Get User U1 by Name",
            "/admin/v1/users/U1",
            "Get User U1 by ID",
            "/admin/v1/users/" + uid1,
            "Rename Project P1 by Name",
            "/admin/v1/projects/P1",
            "Get Project P1 by new Name",
            "/admin/v1/projects/P3",
            "Rename Project P2 by ID",
            "/admin/v1/projects/" + pid2,
            "Get Project P2 by new Name",
            "/admin/v1/projects/P4",
            "Rename User U1 by Name",
            "/admin/v1/users/U1",
            "Get User U1 by new Name",
            "/admin/v1/users/U3",
            "Rename User U2 by ID",
            "/admin/v1/users/" + uid2,
            "Get User U2 by new Name",
            "/admin/v1/users/U4",
            "Delete User U1 by Name",
            "/admin/v1/users/U3",
            "Delete User U2 by ID",
            "/admin/v1/users/" + uid2,
            "Delete Project P1 by Name",
            "/admin/v1/projects/P3",
            "Delete Project P2 by ID",
            "/admin/v1/projects/" + pid2,
        # END New Tests - Addressing Projects/Users by Name

            "Delete Project P1",
            "/admin/v1/projects/" + pid1,
            "Delete Project P2",
            "/admin/v1/projects/" + pid2,
            "/admin/v1/users/" + uid1,
            "/admin/v1/users/" + uid2,

        engine.remove_authorization()  # To finish
class TestProjectsDescriptors:
    description = "Test descriptors visibility among projects"

    @staticmethod
    def run(engine, test_osm, manual_check, test_params=None):
        engine.set_test_name("ProjectDescriptors")
        engine.get_autorization()

        project_admin_id = None
            "Get my project Padmin",
            "/admin/v1/projects/{}".format(engine.project),
        response = res.json()
        project_admin_id = response["_id"]

            "Create project Padmin",
            "/admin/v1/projects",
            {"name": "Padmin", "admin": True},
            {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
            "Create project P2",
            "/admin/v1/projects",
            {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},
            "Create project P3",
            "/admin/v1/projects",
            {"Location": "/admin/v1/projects/", "Content-Type": "application/json"},

                "project_role_mappings": [
                    {"project": "Padmin", "role": "system_admin"},
                    {"project": "P2", "role": "project_admin"},
                    {"project": "P3", "role": "project_admin"},
                ],
            {"Location": "/admin/v1/users/", "Content-Type": "application/json"},
1594 "/vnfpkgm/v1/vnf_packages_content?id=id1",
1596 TestDescriptors
.vnfd_empty
,
1598 r_headers_yaml_location_vnfd
,
1601 vnfd_ids
.append(engine
.last_id
)
1603 "Onboard VNFD id2 PUBLIC",
1605 "/vnfpkgm/v1/vnf_packages_content?id=id2&PUBLIC=TRUE",
1607 TestDescriptors
.vnfd_empty
,
1609 r_headers_yaml_location_vnfd
,
1612 vnfd_ids
.append(engine
.last_id
)
1616 "/vnfpkgm/v1/vnf_packages_content?id=id3&PUBLIC=FALSE",
1618 TestDescriptors
.vnfd_empty
,
1620 r_headers_yaml_location_vnfd
,
1623 vnfd_ids
.append(engine
.last_id
)
1626 "Get VNFD descriptors",
1628 "/vnfpkgm/v1/vnf_packages?id=id1,id2,id3",
1635 response
= res
.json()
1636 if len(response
) != 3:
1638 "Only 3 vnfds should be present for project admin. {} listed".format(
1642 engine
.failed_tests
+= 1
1644 # Change to other project Padmin
1646 "Change to user U1 project Padmin",
1650 {"username": "U1", "password": "pw1", "project_id": "Padmin"},
1656 response
= res
.json()
1657 engine
.set_header({"Authorization": "Bearer {}".format(response
["id"])})
1661 "List VNFD descriptors for Padmin",
1663 "/vnfpkgm/v1/vnf_packages",
1670 response
= res
.json()
1671 if len(response
) != 0:
1673 "Only 0 vnfds should be present for project Padmin. {} listed".format(
1677 engine
.failed_tests
+= 1
1681 "List VNFD public descriptors",
1683 "/vnfpkgm/v1/vnf_packages?PUBLIC=True",
1690 response
= res
.json()
1691 if len(response
) != 1:
1693 "Only 1 vnfds should be present for project Padmin. {} listed".format(
1697 engine
.failed_tests
+= 1
1699 # list vnfds belonging to project "admin"
1701 "List VNFD of admin project",
1703 "/vnfpkgm/v1/vnf_packages?ADMIN={}".format(project_admin_id
),
1711 response
= res
.json()
1712 if len(response
) != 3:
1714 "Only 3 vnfds should be present for project Padmin. {} listed".format(
1718 engine
.failed_tests
+= 1
1722 "Get VNFD public descriptors",
1724 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[1]),
1731 # Edit not owned vnfd
1735 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[0]),
1745 "Add VNFD id2 to my catalog",
1747 "/vnfpkgm/v1/vnf_packages/{}?SET_PROJECT".format(vnfd_ids
[1]),
1759 "/vnfpkgm/v1/vnf_packages_content?id=id4",
1761 TestDescriptors
.vnfd_empty
,
1763 r_headers_yaml_location_vnfd
,
1766 vnfd_ids
.append(engine
.last_id
)
1770 "List VNFD public descriptors",
1772 "/vnfpkgm/v1/vnf_packages",
1779 response
= res
.json()
1780 if len(response
) != 2:
1782 "Only 2 vnfds should be present for project Padmin. {} listed".format(
1786 engine
.failed_tests
+= 1
1790 "VNFDs have been omboarded. Perform manual check and press enter to resume"
1796 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[1]),
1804 # change to admin project
1805 engine
.remove_authorization() # To force get authorization
1806 engine
.get_autorization()
1810 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[0]),
1820 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[1]),
1830 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[2]),
1840 "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_ids
[3]),
1850 "/vnfpkgm/v1/vnf_packages/{}?ADMIN".format(vnfd_ids
[3]),
1859 "Get VNFD deleted id1",
1861 "/vnfpkgm/v1/vnf_packages/{}?ADMIN".format(vnfd_ids
[0]),
1869 "Get VNFD deleted id2",
1871 "/vnfpkgm/v1/vnf_packages/{}?ADMIN".format(vnfd_ids
[1]),
1879 "Get VNFD deleted id3",
1881 "/vnfpkgm/v1/vnf_packages/{}?ADMIN".format(vnfd_ids
[2]),
1889 "Get VNFD deleted id4",
1891 "/vnfpkgm/v1/vnf_packages/{}?ADMIN".format(vnfd_ids
[3]),
1902 "/admin/v1/users/U1",
1910 "Delete project Padmin",
1912 "/admin/v1/projects/Padmin",
1920 "Delete project P2",
1922 "/admin/v1/projects/P2",
1930 "Delete project P3",
1932 "/admin/v1/projects/P3",
class TestFakeVim:
    description = "Creates/edit/delete fake VIMs and SDN controllers"

            "schema_version": "1.0",
            "schema_type": "No idea",
            "description": "Descriptor name",
            "vim_type": "openstack",
            "vim_url": "http://localhost:/vim",
            "vim_tenant_name": "vimTenant",
            "vim_password": "password",
            "config": {"config_param": 1},

            "description": "sdn-description",
            "dpid": "50:50:52:54:00:94:21:21",
            "ip": "192.168.15.17",
            "type": "opendaylight",
            "password": "passwd",

        self.port_mapping = [
                "compute_node": "compute node 1",
                "pci": "0000:81:00.0",
                "switch_port": "port-2/1",
                "switch_mac": "52:54:00:94:21:21",
                "pci": "0000:81:00.1",
                "switch_port": "port-2/2",
                "switch_mac": "52:54:00:94:21:22",
                "compute_node": "compute node 2",
                "pci": "0000:81:00.0",
                "switch_port": "port-2/3",
                "switch_mac": "52:54:00:94:21:23",
                "pci": "0000:81:00.1",
                "switch_port": "port-2/4",
                "switch_mac": "52:54:00:94:21:24",
        ]
    def run(self, engine, test_osm, manual_check, test_params=None):
        vim_bad = self.vim.copy()

        engine.set_test_name("FakeVim")
        engine.get_autorization()

            "/admin/v1/vim_accounts",
            {"Location": "/admin/v1/vim_accounts/", "Content-Type": "application/json"},
        vim_id = engine.last_id

            "Create VIM without name, bad schema",
            "/admin/v1/vim_accounts",
            "Create VIM name repeated",
            "/admin/v1/vim_accounts",
            "/admin/v1/vim_accounts",
            "/admin/v1/vim_accounts/{}".format(vim_id),
            "/admin/v1/vim_accounts/{}?FORCE=True".format(vim_id),

            "Check VIM is deleted",
            "/admin/v1/vim_accounts/{}".format(vim_id),

        # delete and wait until it is really deleted
            "/admin/v1/vim_accounts/{}".format(vim_id),
        engine.wait_until_delete(
            "/admin/v1/vim_accounts/{}".format(vim_id), timeout
        )
class TestVIMSDN(TestFakeVim):
    description = "Creates VIM with SDN editing SDN controllers and port_mapping"

    def __init__(self):
        TestFakeVim.__init__(self)
            "schema_version": "1.0",
            "schema_type": "No idea",
            "description": "Descriptor name",
            "wim_url": "http://localhost:/wim",
            "password": "password",
            "config": {"config_param": 1},

    def run(self, engine, test_osm, manual_check, test_params=None):
        engine.set_test_name("VimSdn")
        engine.get_autorization()

            {"Location": "/admin/v1/sdns/", "Content-Type": "application/json"},
        sdnc_id = engine.last_id

            "/admin/v1/sdns/{}".format(sdnc_id),
            {"name": "new_sdn_name"},

        self.vim["config"]["sdn-controller"] = sdnc_id
        self.vim["config"]["sdn-port-mapping"] = self.port_mapping
            "/admin/v1/vim_accounts",
            {"Location": "/admin/v1/vim_accounts/", "Content-Type": "application/json"},
        vim_id = engine.last_id

        self.port_mapping[0]["compute_node"] = "compute node XX"
            "Edit VIM change port-mapping",
            "/admin/v1/vim_accounts/{}".format(vim_id),
            {"config": {"sdn-port-mapping": self.port_mapping}},
            "Edit VIM remove port-mapping",
            "/admin/v1/vim_accounts/{}".format(vim_id),
            {"config": {"sdn-port-mapping": None}},

            "/admin/v1/wim_accounts",
            {"Location": "/admin/v1/wim_accounts/", "Content-Type": "application/json"},
        wim_id = engine.last_id

            "Delete VIM remove port-mapping",
            "/admin/v1/vim_accounts/{}?FORCE=True".format(vim_id),
            "/admin/v1/sdns/{}?FORCE=True".format(sdnc_id),
            "/admin/v1/wim_accounts/{}?FORCE=True".format(wim_id),

            "Check VIM is deleted",
            "/admin/v1/vim_accounts/{}".format(vim_id),
            "Check SDN is deleted",
            "/admin/v1/sdns/{}".format(sdnc_id),
            "Check WIM is deleted",
            "/admin/v1/wim_accounts/{}".format(wim_id),

                "VIM, SDN, WIM have been deployed. Perform manual check and press enter to resume"

        # delete and wait until it is really deleted
            "Delete VIM remove port-mapping",
            "/admin/v1/vim_accounts/{}".format(vim_id),
            "/admin/v1/sdns/{}".format(sdnc_id),
            "/admin/v1/wim_accounts/{}".format(wim_id),

        engine.wait_until_delete(
            "/admin/v1/vim_accounts/{}".format(vim_id), timeout
        )
        engine.wait_until_delete("/admin/v1/sdns/{}".format(sdnc_id), timeout)
        engine.wait_until_delete(
            "/admin/v1/wim_accounts/{}".format(wim_id), timeout
        )
class TestDeploy:
    description = "Base class for downloading descriptors from ETSI, onboard and deploy in real VIM"

    def __init__(self):
        self.test_name = "DEPLOY"
        self.descriptor_url = (
            "https://osm-download.etsi.org/ftp/osm-3.0-three/2nd-hackfest/packages/"
        )
        self.vnfd_filenames = ("cirros_vnf.tar.gz",)
        self.nsd_filename = "cirros_2vnf_ns.tar.gz"
        self.descriptor_edit = None
        self.uses_configuration = False
        self.ns_params = None
        self.vnfr_ip_list = {}
    def create_descriptors(self, engine):
        temp_dir = os.path.dirname(os.path.abspath(__file__)) + "/temp/"
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        for vnfd_index, vnfd_filename in enumerate(self.vnfd_filenames):
            if "/" in vnfd_filename:
                vnfd_filename_path = vnfd_filename
                if not os.path.exists(vnfd_filename_path):
                    raise TestException(
                        "File '{}' does not exist".format(vnfd_filename_path)
                    )
            else:
                vnfd_filename_path = temp_dir + vnfd_filename
                if not os.path.exists(vnfd_filename_path):
                    with open(vnfd_filename_path, "wb") as file:
                        response = requests.get(self.descriptor_url + vnfd_filename)
                        if response.status_code >= 300:
                            raise TestException(
                                "Error downloading descriptor from '{}': {}".format(
                                    self.descriptor_url + vnfd_filename,
                                    response.status_code,
                                )
                            )
                        file.write(response.content)
            if vnfd_filename_path.endswith(".yaml"):
                headers = headers_yaml
            else:
                headers = headers_zip_yaml
            if randint(0, 1) == 0:
                # vnfd CREATE AND UPLOAD in one step:
                    "Onboard VNFD in one step",
                    "/vnfpkgm/v1/vnf_packages_content" + self.qforce,
                    "@b" + vnfd_filename_path,
                    r_headers_yaml_location_vnfd,
                self.vnfds_id.append(engine.last_id)
            else:
                # vnfd CREATE AND UPLOAD ZIP
                    "Onboard VNFD step 1",
                    "/vnfpkgm/v1/vnf_packages",
                        "Location": "/vnfpkgm/v1/vnf_packages/",
                        "Content-Type": "application/json",
                self.vnfds_id.append(engine.last_id)
                    "Onboard VNFD step 2 as ZIP",
                    "/vnfpkgm/v1/vnf_packages/<>/package_content" + self.qforce,
                    "@b" + vnfd_filename_path,

            if self.descriptor_edit:
                if "vnfd{}".format(vnfd_index) in self.descriptor_edit:
                        "/vnfpkgm/v1/vnf_packages/{}".format(self.vnfds_id[-1]),
                        self.descriptor_edit["vnfd{}".format(vnfd_index)],
2407 if "/" in self
.nsd_filename
:
2408 nsd_filename_path
= self
.nsd_filename
2409 if not os
.path
.exists(nsd_filename_path
):
2410 raise TestException(
2411 "File '{}' does not exist".format(nsd_filename_path
)
2414 nsd_filename_path
= temp_dir
+ self
.nsd_filename
2415 if not os
.path
.exists(nsd_filename_path
):
2416 with
open(nsd_filename_path
, "wb") as file:
2417 response
= requests
.get(self
.descriptor_url
+ self
.nsd_filename
)
2418 if response
.status_code
>= 300:
2419 raise TestException(
2420 "Error downloading descriptor from '{}': {}".format(
2421 self
.descriptor_url
+ self
.nsd_filename
,
2422 response
.status_code
,
2425 file.write(response
.content
)
2426 if nsd_filename_path
.endswith(".yaml"):
2427 headers
= headers_yaml
2429 headers
= headers_zip_yaml
2431 if randint(0, 1) == 0:
2432 # nsd CREATE AND UPLOAD in one step:
2434 "Onboard NSD in one step",
2436 "/nsd/v1/ns_descriptors_content" + self
.qforce
,
2438 "@b" + nsd_filename_path
,
2440 r_headers_yaml_location_nsd
,
2443 self
.nsd_id
= engine
.last_id
2445 # nsd CREATE AND UPLOAD ZIP
2447 "Onboard NSD step 1",
2449 "/nsd/v1/ns_descriptors",
2454 "Location": "/nsd/v1/ns_descriptors/",
2455 "Content-Type": "application/json",
2459 self
.nsd_id
= engine
.last_id
2461 "Onboard NSD step 2 as ZIP",
2463 "/nsd/v1/ns_descriptors/<>/nsd_content" + self
.qforce
,
2465 "@b" + nsd_filename_path
,
2471 if self
.descriptor_edit
and "nsd" in self
.descriptor_edit
:
2476 "/nsd/v1/ns_descriptors/{}".format(self
.nsd_id
),
2478 self
.descriptor_edit
["nsd"],
    def delete_descriptors(self, engine):
        # delete descriptors
            "Delete NSD SOL005",
            "/nsd/v1/ns_descriptors/{}".format(self.nsd_id),
        for vnfd_id in self.vnfds_id:
                "Delete VNFD SOL005",
                "/vnfpkgm/v1/vnf_packages/{}".format(vnfd_id),
    def instantiate(self, engine, ns_data):
        ns_data_text = yaml.safe_dump(ns_data, default_flow_style=True, width=256)
        # create NS in two steps
            "/nslcm/v1/ns_instances",
            {"Location": "nslcm/v1/ns_instances/", "Content-Type": "application/yaml"},
        self.ns_id = engine.last_id

            "Instantiate NS step 2",
            "/nslcm/v1/ns_instances/{}/instantiate".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop_id = engine.last_id

        # Wait until status is Ok
        timeout = timeout_configure if self.uses_configuration else timeout_deploy
        engine.wait_operation_ready("ns", nslcmop_id, timeout)
    def terminate(self, engine):
            "/nslcm/v1/ns_instances/{}/terminate".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop2_id = engine.last_id
        # Wait until status is Ok
        engine.wait_operation_ready("ns", nslcmop2_id, timeout_deploy)

            "/nslcm/v1/ns_instances/{}".format(self.ns_id),

            "Delete NS with FORCE",
            "/nslcm/v1/ns_instances/{}?FORCE=True".format(self.ns_id),

        # check that everything is deleted
            "Check NS is deleted",
            "/nslcm/v1/ns_instances/{}".format(self.ns_id),

            "Check NSLCMOPs are deleted",
            "/nslcm/v1/ns_lcm_op_occs?nsInstanceId={}".format(self.ns_id),
        if not isinstance(nslcmops, list) or nslcmops:
            raise TestException(
                "NS {} deleted but with ns_lcm_op_occ active: {}".format(
                    self.ns_id, nslcmops
                )
            )
2625 "/nslcm/v1/ns_instances/{}".format(self
.ns_id
),
2636 vnfr_list
= ns_data
["constituent-vnfr-ref"]
2638 _commands
= commands
if commands
is not None else self
.commands
2639 _users
= users
if users
is not None else self
.users
2640 _passwds
= passwds
if passwds
is not None else self
.passwords
2641 _keys
= keys
if keys
is not None else self
.keys
2642 _timeout
= timeout
if timeout
!= 0 else self
.timeout
2644 # vnfr_list=[d8272263-6bd3-4680-84ca-6a4be23b3f2d, 88b22e2f-994a-4b61-94fd-4a3c90de3dc4]
2645 for vnfr_id
in vnfr_list
:
2647 "Get VNFR to get IP_ADDRESS",
2649 "/nslcm/v1/vnfrs/{}".format(vnfr_id
),
2658 vnfr_data
= r
.json()
2660 vnf_index
= str(vnfr_data
["member-vnf-index-ref"])
2662 ip_address
= self
.get_vnfr_ip(engine
, vnf_index
)
2663 description
= "Exec command='{}' at VNFR={} IP={}".format(
2664 _commands
.get(vnf_index
)[0], vnf_index
, ip_address
2667 test_description
= "{}{} {}".format(
2668 engine
.test_name
, engine
.step
, description
2670 logger
.warning(test_description
)
2671 while _timeout
>= time
:
2672 result
, message
= self
.do_checks(
2674 vnf_index
=vnfr_data
["member-vnf-index-ref"],
2675 commands
=_commands
.get(vnf_index
),
2676 user
=_users
.get(vnf_index
),
2677 passwd
=_passwds
.get(vnf_index
),
2678 key
=_keys
.get(vnf_index
),
2681 engine
.passed_tests
+= 1
2682 logger
.debug(message
)
2688 engine
.failed_tests
+= 1
2689 logger
.error(message
)
2693 engine
.failed_tests
+= 1
2694 logger
.error(message
)
2696 engine
.failed_tests
+= 1
2698 "VNFR {} has not mgmt address. Check failed".format(vnf_index
)
    def do_checks(self, ip, vnf_index, commands=[], user=None, passwd=None, key=None):
        try:
            from pssh.clients import ParallelSSHClient
            from pssh.utils import load_private_key
            from ssh2 import exceptions as ssh2Exception
        except ImportError as e:
                "Package <pssh> or/and <urllib3> is not installed. Please add them with 'pip3 install "
                "parallel-ssh urllib3': {}".format(e)
            return -1, "install needed packages 'pip3 install parallel-ssh urllib3'"
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        try:
            p_host = os.environ.get("PROXY_HOST")
            p_user = os.environ.get("PROXY_USER")
            p_password = os.environ.get("PROXY_PASSWD")

                pkey = load_private_key(key)

            client = ParallelSSHClient(
                proxy_password=p_password,
            )
            for cmd in commands:
                output = client.run_command(cmd)
                if output[ip[0]].exit_code:
                    return -1, "VNFR {} command '{}' returns error: '{}'".format(
                        ip[0], cmd, "\n".join(output[ip[0]].stderr)
                    )
                return 1, "VNFR {} command '{}' successful".format(ip[0], cmd)
        except (
            ssh2Exception.ChannelFailure,
            ssh2Exception.SocketDisconnectError,
            ssh2Exception.SocketTimeout,
            ssh2Exception.SocketRecvError,
        ) as e:
            return 0, "Timeout accessing the VNFR {}: {}".format(ip[0], str(e))
        except Exception as e:
            return -1, "ERROR checking the VNFR {}: {}".format(ip[0], str(e))
    def additional_operations(self, engine, test_osm, manual_check):
        pass
    def run(self, engine, test_osm, manual_check, test_params=None):
        engine.set_test_name(self.test_name)
        engine.get_autorization()
        nsname = os.environ.get("OSMNBITEST_NS_NAME", "OSMNBITEST")
        if test_params:
            if "vnfd-files" in test_params:
                self.vnfd_filenames = test_params["vnfd-files"].split(",")
            if "nsd-file" in test_params:
                self.nsd_filename = test_params["nsd-file"]
            if test_params.get("ns-name"):
                nsname = test_params["ns-name"]
        self.create_descriptors(engine)

        # create real VIM if it does not exist
        self.vim_id = engine.get_create_vim(test_osm)
            "nsDescription": "default description",
            "nsdId": self.nsd_id,
            "vimAccountId": self.vim_id,
        ns_data.update(self.ns_params)
        if test_params and test_params.get("ns-config"):
            if isinstance(test_params["ns-config"], str):
                ns_data.update(yaml.load(test_params["ns-config"], Loader=yaml.Loader))
            else:
                ns_data.update(test_params["ns-config"])
        self.instantiate(engine, ns_data)

                "NS has been deployed. Perform manual check and press enter to resume"
        if test_osm and self.commands:
            self.test_ns(engine, test_osm)
        self.additional_operations(engine, test_osm, manual_check)
        self.terminate(engine)
        self.delete_descriptors(engine)
    def get_first_ip(self, ip_string):
        # When using a floating IP, vnfr_data['ip-address'] contains a semicolon-separated list of IPs.
        first_ip = ip_string.split(";")[0] if ip_string else ""
        return first_ip
    def get_vnfr_ip(self, engine, vnfr_index_wanted):
        # If the IP address list has been obtained before, it is stored in 'vnfr_ip_list'
        ip = self.vnfr_ip_list.get(vnfr_index_wanted, "")
        if ip:
            return self.get_first_ip(ip)

            "Get VNFR to get IP_ADDRESS",
            "/nslcm/v1/vnfrs?member-vnf-index-ref={}&nsr-id-ref={}".format(
                vnfr_index_wanted, self.ns_id
            ),
        vnfr_data = r.json()
        if not (vnfr_data and vnfr_data[0]):
            return ""
        # Store the IP (or list of IPs) in 'vnfr_ip_list'
        ip_list = vnfr_data[0].get("ip-address", "")
        self.vnfr_ip_list[vnfr_index_wanted] = ip_list
        ip = self.get_first_ip(ip_list)
        return ip
class TestDeployHackfestCirros(TestDeploy):
    description = "Load and deploy Hackfest cirros_2vnf_ns example"

    def __init__(self):
        super().__init__()
        self.test_name = "CIRROS"
        self.vnfd_filenames = ("cirros_vnf.tar.gz",)
        self.nsd_filename = "cirros_2vnf_ns.tar.gz"
        self.users = {"1": "cirros", "2": "cirros"}
        self.passwords = {"1": "cubswin:)", "2": "cubswin:)"}
    def terminate(self, engine):
        # Make a delete in one step, overriding the normal two-step of TestDeploy that launches terminate and then delete
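        # Illustrative note: the parent TestDeploy.terminate first POSTs
        # /nslcm/v1/ns_instances/{id}/terminate and later deletes /nslcm/v1/ns_instances/{id};
        # here a single DELETE on /nslcm/v1/ns_instances_content/{id} terminates and deletes the NS.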
2855 "Terminate and delete NS in one step",
2857 "/nslcm/v1/ns_instances_content/{}".format(self
.ns_id
),
2865 engine
.wait_until_delete(
2866 "/nslcm/v1/ns_instances/{}".format(self
.ns_id
), timeout_deploy
2870 "Delete NS with FORCE",
2872 "/nslcm/v1/ns_instances/{}?FORCE=True".format(self
.ns_id
),
2880 # check all it is deleted
2882 "Check NS is deleted",
2884 "/nslcm/v1/ns_instances/{}".format(self
.ns_id
),
2892 "Check NSLCMOPs are deleted",
2894 "/nslcm/v1/ns_lcm_op_occs?nsInstanceId={}".format(self
.ns_id
),
2904 if not isinstance(nslcmops
, list) or nslcmops
:
2905 raise TestException(
2906 "NS {} deleted but with ns_lcm_op_occ active: {}".format(
2907 self
.ns_id
, nslcmops
class TestDeployHackfest1(TestDeploy):
    description = "Load and deploy Hackfest_1_vnfd example"

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST1-"
        self.vnfd_filenames = ("hackfest_1_vnfd.tar.gz",)
        self.nsd_filename = "hackfest_1_nsd.tar.gz"
        # self.commands = {'1': ['ls -lrt', ], '2': ['ls -lrt', ]}
        # self.users = {'1': "cirros", '2': "cirros"}
        # self.passwords = {'1': "cubswin:)", '2': "cubswin:)"}
class TestDeployHackfestCirrosScaling(TestDeploy):
    description = (
        "Load and deploy Hackfest cirros_2vnf_ns example with scaling modifications"
    )

    def __init__(self):
        super().__init__()
        self.test_name = "CIRROS-SCALE"
        self.vnfd_filenames = ("cirros_vnf.tar.gz",)
        self.nsd_filename = "cirros_2vnf_ns.tar.gz"
        # Modify VNFD to add scaling and count=2
        self.descriptor_edit = {
                "vdu": {"$id: 'cirros_vnfd-VM'": {"count": 2}},
                "scaling-group-descriptor": [
                        "name": "scale_cirros",
                        "max-instance-count": 2,
                        "vdu": [{"vdu-id-ref": "cirros_vnfd-VM", "count": 2}],
                ],
        }
    def additional_operations(self, engine, test_osm, manual_check):
        # 2 perform scale out twice
        payload = (
            "{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_OUT, scaleByStepData: "
            '{scaling-group-descriptor: scale_cirros, member-vnf-index: "1"}}}'
        )
        for i in range(0, 2):
                "Execute scale action over NS",
                "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id),
                r_headers_yaml_location_nslcmop,
            nslcmop2_scale_out = engine.last_id
            engine.wait_operation_ready("ns", nslcmop2_scale_out, timeout_deploy)
            input("NS scale out done. Check that two more vdus are there")
            # TODO check automatic

        # 2 perform scale in
        payload = (
            "{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_IN, scaleByStepData: "
            '{scaling-group-descriptor: scale_cirros, member-vnf-index: "1"}}}'
        )
        for i in range(0, 2):
                "Execute scale IN action over NS",
                "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id),
                r_headers_yaml_location_nslcmop,
            nslcmop2_scale_in = engine.last_id
            engine.wait_operation_ready("ns", nslcmop2_scale_in, timeout_deploy)
            input("NS scale in done. Check that two less vdus are there")
            # TODO check automatic

        # perform scale in that must fail as the limit has been reached
            "Execute scale IN out of limit action over NS",
            "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop2_scale_in = engine.last_id
        engine.wait_operation_ready(
            "ns", nslcmop2_scale_in, timeout_deploy, expected_fail=True
        )
class TestDeployIpMac(TestDeploy):
    description = "Load and deploy descriptor examples setting mac, ip address at descriptor and instantiate params"

    def __init__(self):
        super().__init__()
        self.test_name = "SetIpMac"
        self.vnfd_filenames = (
            "vnfd_2vdu_set_ip_mac2.yaml",
            "vnfd_2vdu_set_ip_mac.yaml",
        )
        self.nsd_filename = "scenario_2vdu_set_ip_mac.yaml"
        self.descriptor_url = "https://osm.etsi.org/gitweb/?p=osm/RO.git;a=blob_plain;f=test/RO_tests/v3_2vdu_set_ip_mac/"
        self.users = {"1": "osm", "2": "osm"}
        self.passwords = {"1": "osm4u", "2": "osm4u"}

    def run(self, engine, test_osm, manual_check, test_params=None):
        # super().run(engine, test_osm, manual_check, test_params)
        # run again setting IPs with instantiate parameters
        instantiation_params = {
                    "member-vnf-index": "1",
                            "name": "internal_vld1",  # net_internal
                            "ip-version": "ipv4",
                            "subnet-address": "10.9.8.0/24",
                            "start-address": "10.9.8.100",
                            "internal-connection-point": [
                                    "ip-address": "10.9.8.2",
                                    "ip-address": "10.9.8.3",
                            # "name": "iface11",
                            # "floating-ip-required": True,
                            {"name": "iface13", "mac-address": "52:33:44:55:66:13"},
                                "ip-address": "10.31.31.22",
                                "mac-address": "52:33:44:55:66:21",
        }

        super().run(
            engine,
            test_osm,
            manual_check,
            test_params={"ns-config": instantiation_params},
        )
class TestDeployHackfest4(TestDeploy):
    description = "Load and deploy Hackfest 4 example."

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST4-"
        self.vnfd_filenames = ("hackfest_4_vnfd.tar.gz",)
        self.nsd_filename = "hackfest_4_nsd.tar.gz"
        self.uses_configuration = True
        self.users = {"1": "ubuntu", "2": "ubuntu"}
        self.passwords = {"1": "osm4u", "2": "osm4u"}
        # Modify VNFD to add scaling
        # self.descriptor_edit = {
        #     'vnf-configuration': {
        #         'config-primitive': [{
        #             'name': 'filename',
        #             'data-type': 'STRING',
        #             'default-value': '/home/ubuntu/touched'
        #     'scaling-group-descriptor': [{
        #         'name': 'scale_dataVM',
        #         'scaling-policy': [{
        #             'threshold-time': 0,
        #             'name': 'auto_cpu_util_above_threshold',
        #             'scaling-type': 'automatic',
        #             'scaling-criteria': [{
        #                 'name': 'cpu_util_above_threshold',
        #                 'vnf-monitoring-param-ref': 'all_aaa_cpu_util',
        #                 'scale-out-relational-operation': 'GE',
        #                 'scale-in-threshold': 15,
        #                 'scale-out-threshold': 60,
        #                 'scale-in-relational-operation': 'LE'
        #             'cooldown-time': 60
        #         'max-instance-count': 10,
        #         'scaling-config-action': [
        #             {'vnf-config-primitive-name-ref': 'touch',
        #              'trigger': 'post-scale-out'},
        #             {'vnf-config-primitive-name-ref': 'touch',
        #              'trigger': 'pre-scale-in'}
        #         'vdu-id-ref': 'dataVM',
class TestDeployHackfest3Charmed(TestDeploy):
    description = "Load and deploy Hackfest 3charmed_ns example"

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST3-"
        self.vnfd_filenames = ("hackfest_3charmed_vnfd.tar.gz",)
        self.nsd_filename = "hackfest_3charmed_nsd.tar.gz"
        self.uses_configuration = True
        self.commands = {
            "1": ["ls -lrt /home/ubuntu/first-touch"],
            "2": ["ls -lrt /home/ubuntu/first-touch"],
        }
        self.users = {"1": "ubuntu", "2": "ubuntu"}
        self.passwords = {"1": "osm4u", "2": "osm4u"}
        self.descriptor_edit = {
            "vnfd0": yaml.safe_load(
                """
                terminate-config-primitive:
                            value: '/home/ubuntu/last-touch1'
                            value: '/home/ubuntu/last-touch3'
                            value: '/home/ubuntu/last-touch2'
                """
            ),
        }
    def additional_operations(self, engine, test_osm, manual_check):
        vnfr_index_selected = "2"
        payload = '{member_vnf_index: "2", primitive: touch, primitive_params: { filename: /home/ubuntu/OSMTESTNBI }}'
            "Exec service primitive over NS",
            "/nslcm/v1/ns_instances/{}/action".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop2_action = engine.last_id
        # Wait until status is Ok
        engine.wait_operation_ready("ns", nslcmop2_action, timeout_deploy)
        vnfr_ip = self.get_vnfr_ip(engine, vnfr_index_selected)

                "NS service primitive has been executed. "
                "Check that file /home/ubuntu/OSMTESTNBI is present at {}".format(

                "ls -lrt /home/ubuntu/OSMTESTNBI",
        self.test_ns(engine, test_osm, commands=commands)

        # # 2 perform scale out
        # payload = '{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_OUT, scaleByStepData: ' \
        #           '{scaling-group-descriptor: scale_dataVM, member-vnf-index: "1"}}}'
        # engine.test("Execute scale action over NS", "POST",
        #             "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id), headers_yaml, payload,
        #             (201, 202), r_headers_yaml_location_nslcmop, "yaml")
        # nslcmop2_scale_out = engine.last_id
        # engine.wait_operation_ready("ns", nslcmop2_scale_out, timeout_deploy)
        # input('NS scale out done. Check that file /home/ubuntu/touched is present and new VM is created')
        # # TODO check automatic

        # # 2 perform scale in
        # payload = '{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_IN, scaleByStepData: ' \
        #           '{scaling-group-descriptor: scale_dataVM, member-vnf-index: "1"}}}'
        # engine.test("Execute scale action over NS", "POST",
        #             "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id), headers_yaml, payload,
        #             (201, 202), r_headers_yaml_location_nslcmop, "yaml")
        # nslcmop2_scale_in = engine.last_id
        # engine.wait_operation_ready("ns", nslcmop2_scale_in, timeout_deploy)
        # input('NS scale in done. Check that file /home/ubuntu/touched is updated and new VM is deleted')
        # # TODO check automatic
class TestDeployHackfest3Charmed2(TestDeployHackfest3Charmed):
    description = (
        "Load and deploy Hackfest 3charmed_ns example modified version of descriptors to have dots in "
        "ids and member-vnf-index."
    )

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST3v2-"
        self.qforce = "?FORCE=True"
        self.descriptor_edit = {
                        "$[0]": {"external-connection-point-ref": "pdu-mgmt"}
                "vnf-configuration": None,
                "connection-point": {
                        "short-name": "pdu-mgmt",
                "mgmt-interface": {"cp": "pdu-mgmt"},
                "description": "A vnf single vdu to be used as PDU",
                    "id": "pdu_internal",
                    "name": "pdu_internal",
                    "internal-connection-point": {"$[1]": None},
                    "short-name": "pdu_internal",

            # Modify NSD accordingly
                "constituent-vnfd": {
                    "$[0]": {"vnfd-id-ref": "vdu-as-pdu"},
                "description": "A nsd to deploy the vnf to act as a PDU",
                "name": "nsd-as-pdu",
                "short-name": "nsd-as-pdu",
                        "short-name": "mgmt_pdu",
                        "vnfd-connection-point-ref": {
                            "vnfd-connection-point-ref": "pdu-mgmt",
                            "vnfd-id-ref": "vdu-as-pdu",
        }
class TestDeployHackfest3Charmed3(TestDeployHackfest3Charmed):
    description = "Load and deploy Hackfest 3charmed_ns example modified version to test scaling and NS parameters"

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST3v3-"
            "1": ["ls -lrt /home/ubuntu/first-touch-1"],
            "2": ["ls -lrt /home/ubuntu/first-touch-2"],
        self.descriptor_edit = {
                scaling-group-descriptor:
                -   name: "scale_dataVM"
                    max-instance-count: 10
                    -   name: "auto_cpu_util_above_threshold"
                        scaling-type: "automatic"
                        -   name: "cpu_util_above_threshold"
                            scale-in-threshold: 15
                            scale-in-relational-operation: "LE"
                            scale-out-threshold: 60
                            scale-out-relational-operation: "GE"
                            vnf-monitoring-param-ref: "monitor1"
                    -   vdu-id-ref: dataVM
                    scaling-config-action:
                    -   trigger: post-scale-out
                        vnf-config-primitive-name-ref: touch
                    -   trigger: pre-scale-in
                        vnf-config-primitive-name-ref: touch
                -   id: "dataVM_cpu_util"
                    nfvi-metric: "cpu_utilization"
                    aggregation-type: AVERAGE
                    vdu-monitoring-param:
                        vdu-monitoring-param-ref: "dataVM_cpu_util"
                initial-config-primitive:
                        value: "<touch_filename>"  # default-value: /home/ubuntu/first-touch
                        default-value: "<touch_filename2>"
        }
        self.ns_params = {
            "additionalParamsForVnf": [
                {
                    "member-vnf-index": "1",
                    "additionalParams": {
                        "touch_filename": "/home/ubuntu/first-touch-1",
                        "touch_filename2": "/home/ubuntu/second-touch-1",
                    },
                },
                {
                    "member-vnf-index": "2",
                    "additionalParams": {
                        "touch_filename": "/home/ubuntu/first-touch-2",
                        "touch_filename2": "/home/ubuntu/second-touch-2",
                    },
                },
            ]
        }
    def additional_operations(self, engine, test_osm, manual_check):
        super().additional_operations(engine, test_osm, manual_check)

        # 2 perform scale out
        payload = (
            "{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_OUT, scaleByStepData: "
            '{scaling-group-descriptor: scale_dataVM, member-vnf-index: "1"}}}'
        )
            "Execute scale action over NS",
            "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop2_scale_out = engine.last_id
        engine.wait_operation_ready("ns", nslcmop2_scale_out, timeout_deploy)

                "NS scale out done. Check that file /home/ubuntu/second-touch-1 is present and new VM is created"
                "ls -lrt /home/ubuntu/second-touch-1",
            self.test_ns(engine, test_osm, commands=commands)
        # TODO check automatic connection to scaled VM

        # 2 perform scale in
        payload = (
            "{scaleType: SCALE_VNF, scaleVnfData: {scaleVnfType: SCALE_IN, scaleByStepData: "
            '{scaling-group-descriptor: scale_dataVM, member-vnf-index: "1"}}}'
        )
            "Execute scale action over NS",
            "/nslcm/v1/ns_instances/{}/scale".format(self.ns_id),
            r_headers_yaml_location_nslcmop,
        nslcmop2_scale_in = engine.last_id
        engine.wait_operation_ready("ns", nslcmop2_scale_in, timeout_deploy)

                "NS scale in done. Check that file /home/ubuntu/second-touch-1 is updated and new VM is deleted"
        # TODO check automatic
class TestDeploySimpleCharm(TestDeploy):
    description = "Deploy hackfest-4 hackfest_simplecharm example"

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST-SIMPLE"
        self.descriptor_url = (
            "https://osm-download.etsi.org/ftp/osm-4.0-four/4th-hackfest/packages/"
        )
        self.vnfd_filenames = ("hackfest_simplecharm_vnf.tar.gz",)
        self.nsd_filename = "hackfest_simplecharm_ns.tar.gz"
        self.uses_configuration = True
            "ls -lrt /home/ubuntu/first-touch",
        self.users = {"1": "ubuntu", "2": "ubuntu"}
        self.passwords = {"1": "osm4u", "2": "osm4u"}
class TestDeploySimpleCharm2(TestDeploySimpleCharm):
    description = (
        "Deploy hackfest-4 hackfest_simplecharm example changing naming to contain dots on ids and "
        "member-vnf-index"
    )

    def __init__(self):
        super().__init__()
        self.test_name = "HACKFEST-SIMPLE2-"
        self.qforce = "?FORCE=True"
        self.descriptor_edit = {
            "vnfd0": {"id": "hackfest.simplecharm.vnf"},
            "nsd": {
                "id": "hackfest.simplecharm.ns",
                "constituent-vnfd": {
                        "vnfd-id-ref": "hackfest.simplecharm.vnf",
                        "member-vnf-index": "$1",
                        "vnfd-id-ref": "hackfest.simplecharm.vnf",
                        "member-vnf-index": "$2",
                    "vnfd-connection-point-ref": {
                            "member-vnf-index-ref": "$1",
                            "vnfd-id-ref": "hackfest.simplecharm.vnf",
                            "member-vnf-index-ref": "$2",
                            "vnfd-id-ref": "hackfest.simplecharm.vnf",
                    "vnfd-connection-point-ref": {
                            "member-vnf-index-ref": "$1",
                            "vnfd-id-ref": "hackfest.simplecharm.vnf",
                            "member-vnf-index-ref": "$2",
                            "vnfd-id-ref": "hackfest.simplecharm.vnf",
        }
class TestDeploySingleVdu(TestDeployHackfest3Charmed):
    description = (
        "Generate a single VDU based on editing Hackfest3Charmed descriptors and deploy"
    )

    def __init__(self):
        super().__init__()
        self.test_name = "SingleVDU"
        self.qforce = "?FORCE=True"
        self.descriptor_edit = {
            # Modify VNFD to remove one VDU
                        "$[0]": {"external-connection-point-ref": "pdu-mgmt"}
                "vnf-configuration": None,
                "connection-point": {
                        "short-name": "pdu-mgmt",
                "mgmt-interface": {"cp": "pdu-mgmt"},
                "description": "A vnf single vdu to be used as PDU",
                    "id": "pdu_internal",
                    "name": "pdu_internal",
                    "internal-connection-point": {"$[1]": None},
                    "short-name": "pdu_internal",

            # Modify NSD accordingly
                "constituent-vnfd": {
                    "$[0]": {"vnfd-id-ref": "vdu-as-pdu"},
                "description": "A nsd to deploy the vnf to act as a PDU",
                "name": "nsd-as-pdu",
                "short-name": "nsd-as-pdu",
                        "short-name": "mgmt_pdu",
                        "vnfd-connection-point-ref": {
                            "vnfd-connection-point-ref": "pdu-mgmt",
                            "vnfd-id-ref": "vdu-as-pdu",
        }
class TestDeployHnfd(TestDeployHackfest3Charmed):
    description = (
        "Generate an HNFD based on editing Hackfest3Charmed descriptors and deploy"
    )

    def __init__(self):
        super().__init__()
        self.test_name = "HNFD"
        self.pduDeploy = TestDeploySingleVdu()
        self.pdu_interface_0 = {}
        self.pdu_interface_1 = {}

        # self.vnf_to_pdu = """
        #     pdu-type: PDU-TYPE-1
        #     name: pdu-iface-internal
        # description: HNFD, one PDU + one VDU

        self.pdu_descriptor = {
            "type": "PDU-TYPE-1",
            "vim_accounts": "to-override",
                    "name": "mgmt-iface",
                    "ip-address": "to override",
                    "mac-address": "mac_address",
                    "vim-network-name": "mgmt",
                    "name": "pdu-iface-internal",
                    "ip-address": "to override",
                    "mac-address": "mac_address",
                    "vim-network-name": "pdu_internal",  # OSMNBITEST-PDU-pdu_internal
        }
        self.vnfd_filenames = (
            "hackfest_3charmed_vnfd.tar.gz",
            "hackfest_3charmed_vnfd.tar.gz",
        )
        self.descriptor_edit = {
                "short-name": "hfn1",
                "pdu-type": "PDU-TYPE-1",
                    "$[0]": {"name": "mgmt-iface"},
                    "$[1]": {"name": "pdu-iface-internal"},
            "constituent-vnfd": {"$[1]": {"vnfd-id-ref": "hfnd1"}},