3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
# Print the installer's command-line help to stdout.
# Globals: DEBUG_INSTALL (read). Arguments: none. Returns: 0.
# NOTE(review): the "function usage(){" header and closing "}" were dropped in the
# mangled source and have been restored — verify against the upstream devops repo.
function usage(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
85 # takes a juju/accounts.yaml file and returns the password specific
86 # for a controller. I wrote this using only bash tools to minimize
87 # additions of other packages
# Extract the password for a given juju controller from the local juju
# accounts.yaml, using only sed/awk (no yq/python dependency).
# Globals:   HOME (read), DEBUG_INSTALL (read)
# Arguments: $1 - controller name to look up
# Outputs:   writes the password to stdout (no trailing newline)
# NOTE(review): the awk body (vname assignment, printf, closing braces) was
# dropped in the mangled source and restored from the known upstream version.
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: any run of whitespace; w: a YAML key; fs: an unlikely field separator (FS char 0x1c)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent<FS>key<FS>value" records, then walk the
    # indentation to rebuild the key path and print the matching password.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$password_file" |
    awk -F"$fs" -v controller="$controller_name" '{
        indent = length($1)/2;
        vname[indent] = $2;
        # forget deeper keys once we de-indent
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Derive the VCA (juju controller) connection variables — host, secret,
# public key and CA cert — unless they were already provided by the caller.
# Globals:   OSM_VCA_HOST/SECRET/PUBKEY/CACERT, OSM_VCA_CLOUDNAME (written),
#            CONTROLLER_NAME, OSM_STACK_NAME (read)
# Requires:  juju, jq, sg (lxd group), parse_juju_password, FATAL
# NOTE(review): the "fi" closers and the function's closing brace were dropped
# in the mangled source and have been restored — verify against upstream.
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # api-endpoints looks like: api-endpoints: ['10.0.0.1:17070'] — strip quotes and port
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # base64-encode the CA cert so it can travel through env files safely
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
# Emit a random 32-character alphanumeric secret to stdout.
# Globals: DEBUG_INSTALL (read). Arguments: none.
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # /dev/urandom filtered to [A-Za-z0-9], truncated to 32 chars (no newline)
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Ensure the listed apt packages are installed, installing any that are missing.
# Globals:   DEBUG_INSTALL (read)
# Arguments: $1 - space-separated list of package names
# Requires:  dpkg, sudo apt-get, FATAL
# NOTE(review): the "$1" capture, the dpkg presence check, "apt-get update" and
# the fi/done closers were dropped in the mangled source and restored — verify.
function check_packages() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NEEDED_PACKAGES="$1"
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        dpkg -l ${PACKAGE} &>/dev/null
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update -y
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the function header and the retry while-loop were dropped in the
# mangled source (the second "read" only makes sense inside a loop) — restored.
function ask_user(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # empty answer falls back to the default action in $2
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install the OSM client (python3-osm-im, python3-osmclient) from the OSM apt
# repository, plus its python requirements, and print connection hints.
# Globals:   RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT (read),
#            OSM_HOSTNAME, OSM_RO_HOSTNAME (exported when not lightweight)
# Requires:  curl, sudo, apt-key, add-apt-repository, apt-get, pip, FATAL
# NOTE(review): the fi/else closers and "apt-get update" were dropped in the
# mangled source and restored — verify against the upstream devops repo.
function install_osmclient(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # strip the option prefixes the CLI parser left on these values
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev libmagic1
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # legacy (non-lightweight) installs discover the SO/RO container IPs via lxc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "  export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "  export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "  export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Log in to the docker registry using the configured credentials.
# Globals:  DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD, DEBUG_INSTALL (read)
# Requires: sg (docker group), docker
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    # Fix: the original combined -p with --password-stdin, which docker rejects
    # ("--password and --password-stdin are mutually exclusive"). Pipe the
    # secret on stdin instead — this also keeps it out of argv (visible in ps).
    echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker \
        -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Pull third-party docker images and either pull OSM module images from the
# registry (PULL_IMAGES set) or build them from source at COMMIT_ID.
# Globals:   TO_REBUILD, PULL_IMAGES, INSTALL_PLA, COMMIT_ID, OSM_DOCKER_TAG,
#            MODULE_DOCKER_TAG, DOCKER_REGISTRY_URL, DOCKER_USER, *_TAG vars,
#            BUILD_ARGS, OSM_DEVOPS, REPOSITORY* (read)
# Requires:  sg (docker group), docker, git, mktemp, FATAL, docker_login
# NOTE(review): the fi/done/continue/else closers were dropped in the mangled
# source and restored from the known upstream structure — verify.
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            # PLA is optional; skip it unless explicitly requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"
        LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
        trap 'rm -rf "${LWTEMPDIR}"' EXIT
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi
    echo "Finished pulling and generating docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy $1 over $2 when they differ; ask the user first if $2 already exists.
# Arguments: $1 - source file; $2 - destination file
# Requires:  ask_user (only when the destination exists)
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local file1="$1"
    local file2="$2"
    # Fix: the original wrapped cmp in a command substitution (`if ! $(cmp …)`),
    # which only works by accident in bash; call cmp directly with -s (silent).
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy the OSM Kubernetes pod manifests into the docker work directory.
# Globals:  OSM_DEVOPS, OSM_DOCKER_WORK_DIR, DEBUG_INSTALL (read)
function generate_k8s_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    #Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Stage the Prometheus, Grafana and exporter config files into the docker
# work directory.
# Globals:  OSM_DEVOPS, OSM_DOCKER_WORK_DIR, DEBUG_INSTALL (read)
function generate_prometheus_grafana_files() {
    #this only works with docker swarm
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Prometheus files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
    # Grafana files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    sudo cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
    # Prometheus Exporters files
    sudo mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    sudo cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Generate (or update in place) the per-module docker env files under
# $OSM_DOCKER_WORK_DIR, creating fresh secrets for the databases and keystone.
# Globals:   OSM_DOCKER_WORK_DIR, OSM_DATABASE_COMMONKEY, OSM_VCA_* vars,
#            DEFAULT_IP (read); MYSQL_ROOT_PASSWORD, KEYSTONE_DB_PASSWORD,
#            SERVICE_PASSWORD (written)
# Requires:  sudo, tee, sed, grep, generate_secret
# NOTE(review): the else/fi closers for each grep-or-append block were dropped
# in the mangled source; the grep/tee/sed pairs visible in the source imply the
# if/else structure restored below — verify against upstream.
function generate_docker_env_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#creates secrets from env files which will be used by containers
# Globals:  OSM_STACK_NAME (namespace), OSM_DOCKER_WORK_DIR (read)
# Requires: kubectl
function kube_secrets(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploys osm pods and services
# Globals:  OSM_STACK_NAME (namespace), OSM_K8S_WORK_DIR (manifest dir) (read)
# Requires: kubectl
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploy charmed services
# Globals:  OSM_STACK_NAME, OSM_VCA_K8S_CLOUDNAME (read)
# Requires: juju
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploy the optional PLA (placement) module into the OSM namespace.
# Globals:  OSM_NAMESPACE_VOL, OSM_DOCKER_WORK_DIR, OSM_STACK_NAME (read)
# Requires: sudo sed, kubectl
function deploy_osm_pla_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # corresponding to namespace_vol
    sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Rewrite the image references in the K8s manifests so each listed service
# points at ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:<tag>.
# Globals:   INSTALL_PLA, DOCKER_REGISTRY_URL, DOCKER_USER,
#            OSM_DOCKER_WORK_DIR, OSM_K8S_WORK_DIR (read)
# Arguments: $1 - docker tag; $2.. - service names (space-separated)
# NOTE(review): the TAG/shift/services argument capture and the else/done
# closers were dropped in the mangled source and restored — verify.
function parse_yaml() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA lives in its own manifest dir and is only patched when enabled
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            sudo sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function update_manifest_files() {
    # Update every OSM service manifest that is NOT selected for rebuild
    # (via -m / $TO_REBUILD) to the default tag, then update the rebuilt
    # modules to $MODULE_DOCKER_TAG when one was given.
    osm_services="nbi lcm ro pol mon ng-ui keystone pla prometheus"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            # module not selected for rebuild: gets the default tag
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "12" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        # NOTE(review): $list_of_services_to_rebuild is never assigned in the
        # visible code — presumably built elsewhere from $TO_REBUILD; verify.
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function namespace_vol() {
    # Rewrite the hostPath volumes in each per-service manifest from the
    # default /var/lib/osm to this stack's own volume dir ($OSM_NAMESPACE_VOL).
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "${OSM_K8S_WORK_DIR}/${osm}.yaml"
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function add_local_k8scluster() {
    # Register the local kubeadm cluster in OSM: create a dummy VIM account,
    # then attach the cluster (credentials from ~/.kube/config) to it.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
    # NOTE(review): the --version argument and the trailing cluster name were
    # missing from the mangled source and were reconstructed — confirm.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function configure_apt_proxy() {
    # Configure a system-wide apt proxy. $1 = proxy URL. Creates the apt
    # config file on first run; updates the Proxy line on later runs.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        # BUGFIX: heredoc was redirected to ${OSM_APT_PROXY} (the proxy URL
        # itself) instead of the config file it is meant to create.
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_osm() {
    # Main community-installer entry point: checks preconditions, installs
    # LXD/Docker/Kubernetes/Juju prerequisites, then generates and deploys
    # the OSM services into the $OSM_STACK_NAME namespace.
    # NOTE(review): several dropped lines (fi's and the step calls between
    # matching 'track' checkpoints) were reconstructed — confirm upstream.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
    track checks checkingroot_ok
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track checks noroot_ok
    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    track checks proceed_ok

    echo "Installing OSM"

    echo "Determining IP address of the interface with the default route"
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"

    # configure apt proxy
    [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${DEFAULT_IP} ${DEBUG_INSTALL}"
        $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
    fi
    track prereq prereqok_ok

    if [ ! -n "$INSTALL_NODOCKER" ]; then
        DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
        [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
        $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
    fi
    track docker_ce docker_ce_ok

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
    FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
    track k8scluster k8scluster_ok

    JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
    [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
    [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
    [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
    [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
    $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
    track juju juju_ok

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: the check was [ -z "OSM_DATABASE_COMMONKEY" ] (missing $),
        # so a failed generate_secret was never detected.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_images docker_images_ok

    generate_k8s_manifest_files
    track osm_files manifest_files_ok
    generate_docker_env_files
    track osm_files env_files_ok

    deploy_charmed_services
    track deploy_osm deploy_charmed_services_ok
    kube_secrets
    track deploy_osm kube_secrets_ok
    update_manifest_files
    track deploy_osm update_manifest_files_ok
    namespace_vol
    track deploy_osm namespace_vol_ok
    deploy_osm_services
    track deploy_osm deploy_osm_services_k8s_ok
    # BUGFIX: was [ -n "$INSTALL_PLA"] — missing space before ']' made the
    # test a syntax error at runtime.
    if [ -n "$INSTALL_PLA" ]; then
        # optional PLA install
        deploy_osm_pla_service
        track deploy_osm deploy_osm_pla_ok
    fi

    if [ -n "$INSTALL_K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track deploy_osm install_k8s_monitoring_ok
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient osmclient_ok

    echo -e "Checking OSM health state..."
    $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
    (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
    echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
    track healthchecks osm_unhealthy didnotconverge)
    track healthchecks after_healthcheck_ok

    add_local_k8scluster
    track final_ops add_local_k8scluster_ok

    # Fire-and-forget download used only as an installation-complete beacon
    wget -q -O- https://osm-download.etsi.org/ftp/osm-12.0-twelve/README2.txt &> /dev/null
    track end
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_to_openstack() {
    # Install OSM into an OpenStack VM via Ansible.
    # Arguments: $1 = openrc file or clouds.yaml cloud name,
    #            $2 = external network name, $3 = attach volume (true/false).
    # NOTE(review): the argument check, the openrc-vs-cloud branch and the
    # venv deactivate were reconstructed from the mangled source — confirm.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    sudo apt install -y python3-pip python3-venv
    sudo -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack via the helper scripts shipped under
    # $OSM_DEVOPS/installers/k8s.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    local monitoring_dir="$OSM_DEVOPS/installers/k8s"
    sudo chmod +x "$monitoring_dir"/*.sh
    sudo "$monitoring_dir"/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function dump_vars(){
    # Print every installer setting as NAME=value, one per line (used by
    # --showopts). Output-identical fix: the three OPENSTACK_* lines used an
    # inconsistent "NAME"="$VAR" quoting style; normalized to "NAME=$VAR".
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "APT_PROXY_URL=$APT_PROXY_URL"
    echo "DEVELOP=$DEVELOP"
    echo "DEBUG_INSTALL=$DEBUG_INSTALL"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "OSM_DOCKER_WORK_DIR=$OSM_DOCKER_WORK_DIR"
    echo "OSM_K8S_WORK_DIR=$OSM_K8S_WORK_DIR"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_docker_registry_url() {
    # Split a registry spec of the form "user:password@host/path" held in
    # DOCKER_REGISTRY_URL into DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD
    # and the bare registry URL (DOCKER_REGISTRY_URL is overwritten last, so
    # the input is captured once up front).
    local registry_spec="$DOCKER_REGISTRY_URL"
    DOCKER_REGISTRY_USER=$(echo "$registry_spec" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$registry_spec" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$registry_spec" | awk '{split($1,a,"@"); print a[2]}')
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): the function header was missing from the mangled source; the
# body matches the SIGINT handler installed with 'trap ctrl_c INT' — confirm.
function ctrl_c() {
    # Abort the installation cleanly when the user presses CTRL-C.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "** Trapped CTRL-C"
    FATAL "User stopped the installation"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ---- Installer defaults (overridable via command-line options below) ----

# Source / repository selection
INSTALL_FROM_SOURCE=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"

# OpenStack-target installation (-O / related flags)
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"

# Optional components
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""

# VCA (juju) clouds
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"

# Package repository and working directories
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
# NOTE(review): OSM_STACK_NAME's default is set in a part of the file not
# shown here; this expansion relies on it.
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"

# Docker image defaults
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest

# Misc defaults
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid K8s namespace / stack name pattern (RFC 1123 label)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
OSM_INSTALLATION_TYPE="Default"
# Parse command-line options. NOTE(review): every case label, ';;' terminator
# and the esac/done were missing from the mangled source; the structure below
# was reconstructed from the getopts optstring plus the visible handler
# fragments — confirm against upstream before merging.
while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        a)
            APT_PROXY_URL=${OPTARG}
            ;;
        b)
            # install from source at the given branch/tag
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            # long options (--help, --source, --pla, ...)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "small-profile" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && continue
            [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
    esac
done
# Top-level dispatch after option parsing: load helpers, then run exactly one
# of uninstall / charmed install / OpenStack install / community install.
# NOTE(review): the else/fi/exit lines dropped by the mangled source were
# reconstructed — confirm the branch boundaries against upstream.
source $OSM_DEVOPS/common/all_funcs

[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
[ -n "$SHOWOPTS" ] && dump_vars && exit 0

# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
    if [ -n "$CHARMED" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
        FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
    else
        ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
        FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
    fi
    echo -e "\nDONE"
    exit 0
fi

# Charmed installation
if [ -n "$CHARMED" ]; then
    export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
    ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
    FATAL_TRACK charmed_install "charmed_install.sh failed"
    # fire-and-forget beacon download; output discarded
    wget -q -O- https://osm-download.etsi.org/ftp/osm-12.0-twelve/README2.txt &> /dev/null
    track end installation_type $OSM_INSTALLATION_TYPE
    echo -e "\nDONE"
    exit 0
fi

# Installation to Openstack
if [ -n "$INSTALL_TO_OPENSTACK" ]; then
    install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
    echo -e "\nDONE"
    exit 0
fi

# Community_installer
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

check_packages "git wget curl tar snapd"

sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Non-default stack names get their own work dir and volume dir
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-12.0-twelve/README.txt &> /dev/null
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"