From 95b6127567c91e34311ea98b351ce05fb82a807e Mon Sep 17 00:00:00 2001
From: garciadeblas
Date: Thu, 9 Jan 2025 16:04:45 +0100
Subject: [PATCH 1/4] Features 10923-10926: commands and tutorial for new
 declarative operations for ACM

Signed-off-by: garciadeblas
---
 advanced-cluster-management.md | 156 +++++++++++++++++++++++++++++++--
 1 file changed, 151 insertions(+), 5 deletions(-)

diff --git a/advanced-cluster-management.md b/advanced-cluster-management.md
index 706f694..2457d06 100644
--- a/advanced-cluster-management.md
+++ b/advanced-cluster-management.md
@@ -83,6 +83,7 @@ osm cluster-create --node-count ${CLUSTER_NODES} --node-size ${CLUSTER_VM_SIZE}

```bash
osm cluster-list
+osm cluster-show cluster1
```

When the cluster is created, the field `resourceState` should be `READY`.
@@ -96,16 +97,28 @@
osm cluster-show cluster1 -o jsonpath='{.credentials}' | yq -P
# Save them in a file
osm cluster-show cluster1 -o jsonpath='{.credentials}' | yq -P > ~/kubeconfig-cluster1.yaml
# Test it
-export KUBECONFIG=~/kubeconfig-mydemo.yaml
+export KUBECONFIG=~/kubeconfig-cluster1.yaml
kubectl get nodes
```

-In case credentials are renewed by the cloud policy, credentials can be obtained using any of the
+In case credentials are renewed by the cloud policy, credentials can be obtained using this command:

```bash
osm cluster-get-credentials cluster1
```

+#### Cluster scale
+
+```bash
+osm cluster-scale cluster1 --node-count 2
+```
+
+#### Cluster deletion
+
+```bash
+osm cluster-delete cluster1
+```
+
#### Cluster registration

This should be run over a cluster that was not created by OSM:

@@ -116,11 +129,17 @@ VIM_ACCOUNT=azure-site
osm cluster-register --creds ~/kubeconfig-${CLUSTER_NAME}.yaml --vim ${VIM_ACCOUNT} --description "My existing K8s cluster" ${CLUSTER_NAME}
```

-#### Cluster scale
+```bash
+osm cluster-list
+osm cluster-show cluster2
+```
+
+When the cluster is registered, the field `resourceState` should be `READY`.
+
+#### Cluster deregistration

```bash
-CLUSTER_NAME=cluster1
-osm cluster-scale ${CLUSTER_NAME} --node-count 2
+osm cluster-deregister cluster2
```

### OKA operations

@@ -150,6 +169,15 @@ osm oka-delete testacme
osm oka-delete jenkins
```

+#### OKA generation for Helm charts
+
+```bash
+osm oka-generate jenkins --base-directory okas --profile-type app-profile --helm-repo-name bitnamicharts --helm-repo-url oci://registry-1.docker.io/bitnamicharts --helm-chart jenkins --version 13.4.20 --namespace jenkins
+tree okas/jenkins
+# Once generated, you can add it with:
+osm oka-add jenkins okas/jenkins --description jenkins --profile-type app-profile
+```
+
### Profile operations

#### Listing profiles

@@ -185,3 +213,121 @@ osm ksu-delete testapp
osm ksu-delete testacme
osm ksu-delete jenkins
```
+
+## Tutorial: how to operate infra and apps with OSM declarative framework
+
+The tutorial assumes that you have added a VIM/Cloud account to OSM.
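You can check this precondition with the OSM client before starting (the `azure-site` account name used throughout the examples is just an illustration; use whatever name you registered):

```bash
# Sanity check, assuming the osm client is already configured: the VIM/Cloud
# account that the tutorial references through VIM_ACCOUNT should appear here.
osm vim-list
```

First, point the OSM client to the NBI endpoint of your OSM installation: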
```bash
export OSM_HOSTNAME=$(kubectl get -n osm -o jsonpath="{.spec.rules[0].host}" ingress nbi-ingress)
```

Create a cluster:

```bash
CLUSTER_NAME=mydemo
CLUSTER_VM_SIZE=Standard_D2_v2
CLUSTER_NODES=2
REGION_NAME=northeurope
VIM_ACCOUNT=azure-site
RESOURCE_GROUP=
KUBERNETES_VERSION="1.30"
osm cluster-create --node-count ${CLUSTER_NODES} --node-size ${CLUSTER_VM_SIZE} --version ${KUBERNETES_VERSION} --vim-account ${VIM_ACCOUNT} --description "Mydemo cluster" ${CLUSTER_NAME} --region-name ${REGION_NAME} --resource-group ${RESOURCE_GROUP}
```

Check the progress:

```bash
osm cluster-list
```

When the cluster is created, the field `resourceState` should be `READY`.

Get credentials:

```bash
osm cluster-show mydemo -o jsonpath='{.credentials}' | yq -P > ~/kubeconfig-mydemo.yaml
export KUBECONFIG=~/kubeconfig-mydemo.yaml
# Check that the credentials work
kubectl get nodes
```

Refresh the credentials in case they are renewed by the cloud policy:

```bash
osm cluster-get-credentials mydemo > ~/kubeconfig-mydemo.yaml
export KUBECONFIG=~/kubeconfig-mydemo.yaml
```

Add the OKAs:

```bash
export OSM_PACKAGES_FOLDER="${HOME}/osm-packages"
export OKA_FOLDER="${OSM_PACKAGES_FOLDER}/oka/apps"
osm oka-add jenkins ${OKA_FOLDER}/jenkins --description jenkins --profile-type app-profile
osm oka-add testapp ${OKA_FOLDER}/testapp --description testapp --profile-type app-profile
osm oka-add testacme ${OKA_FOLDER}/testacme --description testacme --profile-type app-profile
```

Check the progress:

```bash
osm oka-list
```

When the OKAs are created, the field `resourceState` should be `READY`.

Create the KSUs:

```bash
osm ksu-create --ksu testapp --profile mydemo --profile-type app-profile --oka testapp --params ${OKA_FOLDER}/testapp-params.yaml
osm ksu-create --ksu testacme --profile mydemo --profile-type app-profile --oka testacme --params ${OKA_FOLDER}/testacme-params.yaml
osm ksu-create --ksu jenkins --description "Jenkins" --profile mydemo --profile-type app-profile --oka jenkins --params ${OKA_FOLDER}/jenkins-params.yaml
```

Check the progress:

```bash
osm ksu-list
```

When the KSUs are created, the field `resourceState` should be `READY`.

Check the results in the destination cluster:

```bash
export KUBECONFIG=~/kubeconfig-mydemo.yaml
watch "kubectl get ns; echo; kubectl get ks -A; echo; kubectl get hr -A"
watch "kubectl get all -n testapp"
watch "kubectl get all -n testacme"
watch "kubectl get all -n jenkins"
```

Delete the KSUs:

```bash
osm ksu-delete testapp
osm ksu-delete testacme
osm ksu-delete jenkins
```

Scale the cluster:

```bash
CLUSTER_NAME=mydemo
osm cluster-scale ${CLUSTER_NAME} --node-count 3
```

Check the progress:

```bash
osm cluster-list
```

When the cluster is scaled, the field `resourceState` should return to `READY`.

Delete the cluster:

```bash
osm cluster-delete ${CLUSTER_NAME}
```
-- 
GitLab


From 2c234babdf655145f0c1ada57b3337a58792ac97 Mon Sep 17 00:00:00 2001
From: garciadeblas
Date: Thu, 13 Feb 2025 17:54:39 +0100
Subject: [PATCH 2/4] Update troubleshooting section

This change includes removing old help related to Juju. It also adds
documentation about how to check workflows and the progress of operations in
the new OSM declarative framework. Finally, it includes instructions on how to
change the log level of components.
Signed-off-by: garciadeblas --- 09-troubleshooting.md | 571 +++++++++++------------------------------- 1 file changed, 143 insertions(+), 428 deletions(-) diff --git a/09-troubleshooting.md b/09-troubleshooting.md index 925e7c8..2f08dc7 100644 --- a/09-troubleshooting.md +++ b/09-troubleshooting.md @@ -6,6 +6,8 @@ Run the following command to know the version of OSM client and OSM NBI: ```bash osm version +Server version: 17.0.0.post12+g194ced9 2020-04-17 +Client version: 17.0.0+geffca72 ``` In some circumstances, it could be useful to search the `osm-devops` package installed in your system, since `osm-devops` is the package used to drive installations: @@ -15,7 +17,7 @@ dpkg -l osm-devops ||/ Name Version Architecture Description +++-======================-=================-=====================-===================================== -ii osm-devops 8.0.0-1 all +ii osm-devops 17.0.0-1 all ``` To know the current verion of the OSM client, you can also search the `python3-osmclient` package as a way to know your current version of OSM: @@ -24,224 +26,185 @@ To know the current verion of the OSM client, you can also search the `python3-o dpkg -l python3-osmclient ||/ Name Version Architecture Description +++-======================-=================-=====================-===================================== -ii python3-osmclient 8.0.0-1 all +ii python3-osmclient 17.0.0-1 all ``` -## Troubleshooting installation +## Logs -### Recommended installation to facilitate troubleshooting +### Checking the logs of OSM in Kubernetes -It is highly recommended saving a log of your installation: +You can check the logs of any container with the following commands: ```bash -./install_osm.sh 2>&1 | tee osm_install_log.txt +kubectl -n osm logs deployment/nbi --all-containers=true +kubectl -n osm logs deployment/lcm --all-containers=true +kubectl -n osm logs deployment/ro --all-containers=true +kubectl -n osm logs deployment/ngui --all-containers=true +kubectl -n osm logs deployment/mon --all-containers=true +kubectl -n osm logs deployment/grafana --all-containers=true +kubectl -n osm logs statefulset/mongodb-k8s --all-containers=true +kubectl -n osm logs statefulset/kafka-controller --all-containers=true +kubectl -n osm logs statefulset/prometheus --all-containers=true ``` -### Recommended checks after installation - -#### Checking whether all processes/services are running in K8s +For live debugging, the following commands can be useful to save the log output to a file and show it in the screen: ```bash -kubectl -n osm get all +kubectl -n osm logs -f deployment/nbi --all-containers=true 2>&1 | tee nbi-log.txt +kubectl -n osm logs -f deployment/lcm --all-containers=true 2>&1 | tee lcm-log.txt +kubectl -n osm logs -f deployment/ro --all-containers=true 2>&1 | tee ro-log.txt +kubectl -n osm logs -f deployment/ngui --all-containers=true 2>&1 | tee ngui-log.txt +kubectl -n osm logs -f deployment/mon --all-containers=true 2>&1 | tee mon-log.txt +kubectl -n osm logs -f deployment/grafana --all-containers=true 2>&1 | tee grafana-log.txt +kubectl -n osm logs -f statefulset/mongodb-k8s --all-containers=true 2>&1 | tee mongo-log.txt +kubectl -n osm logs -f statefulset/kafka-controller --all-containers=true 2>&1 | tee kafka-log.txt +kubectl -n osm logs -f statefulset/prometheus --all-containers=true 2>&1 | tee prometheus-log.txt ``` -All the deployments and statefulsets should have 1 replica: 1/1 - -### Issues on standard installation - -#### Juju - -##### Juju bootstrap hangs +### Changing the log level -If the Juju 
bootstrap takes a long time, stuck at this status... +You can change the log level of any container, by updating the container with the right `LOG_LEVEL` env var. -```text -Installing Juju agent on bootstrap instance -Fetching Juju GUI 2.14.0 -Waiting for address -Attempting to connect to 10.71.22.78:22 -Connected to 10.71.22.78 -Running machine configuration script... -``` +Log levels are: -...it usually indicates that the LXD container with the Juju controller is having trouble connecting to the internet. +- ERROR +- WARNING +- INFO +- DEBUG -Get the name of the LXD container. It will begin with '`juju-`' and end with '`-0`'. +For instance, to set the log level to INFO for the LCM in a deployment of OSM over K8s: ```bash -lxc list -+-----------------+---------+---------------------+------+------------+-----------+ -| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS | -+-----------------+---------+---------------------+------+------------+-----------+ -| juju-0383f2-0 | RUNNING | 10.195.8.57 (eth0) | | PERSISTENT | | -+-----------------+---------+---------------------+------+------------+-----------+ +LOGLEVEL="INFO" +kubectl patch configmap osm-lcm-configmap -n osm --type='merge' -p '{"data":{"OSMLCM_GLOBAL_LOGLEVEL":"'${LOGLEVEL}'"}}' +kubectl get configmap osm-lcm-configmap -n osm -o yaml +kubectl -n osm rollout restart deployment lcm ``` -Next, tail the output of cloud-init to see where the bootstrap is stuck. +### Debugging Kafka + +To connect to Kafka bus and print the received messages: ```bash -lxc exec juju-0383f2-0 -- tail -f /var/log/cloud-init-output.log +kubectl -n osm exec -it kafka-controller-0 -- kafka-console-consumer.sh --bootstrap-server localhost:9092 --whitelist '.*' --formatter kafka.tools.DefaultMessageFormatter --property print.timestamp=true --property print.key=true --property print.value=true ``` -##### Is Juju running? +### Debugging MongoDB -If running, you should see something like this: +To connect to MongoDB and run commands: ```bash -$ juju status - -Model Controller Cloud/Region Version SLA -default osm localhost/localhost 2.3.7 unsupported +kubectl -n osm exec -it pod/mongodb-k8s-0 -- mongosh ``` -##### ERROR controller osm already exists +```mql +use osm; +db.getCollectionNames() +db.k8sclusters.find().pretty() +db.k8sclusters.deleteOne({"_id":"21323ef6-23ec-4f33-8171-dcc863aa9832"}) +``` -Did OSM installation fail during juju installation with an error like "ERROR controller osm already exists" ? +## Troubleshooting installation -```bash -$ ./install_osm.sh -... 
-ERROR controller "osm" already exists -ERROR try was stopped - -### Jum Agu 24 15:19:33 WIB 2018 install_juju: FATAL error: Juju installation failed -BACKTRACE: -### FATAL /usr/share/osm-devops/jenkins/common/logging 39 -### install_juju /usr/share/osm-devops/installers/full_install_osm.sh 564 -### install_lightweight /usr/share/osm-devops/installers/full_install_osm.sh 741 -### main /usr/share/osm-devops/installers/full_install_osm.sh 1033 -``` +### Recommended installation to facilitate troubleshooting -Try to destroy the Juju controller and run the installation again: +It is highly recommended saving a log of your installation: ```bash -$ juju destroy-controller osm --destroy-all-models -y -$ ./install_osm.sh +./install_osm.sh 2>&1 | tee osm_install_log.txt ``` -If it does not work, you can destroy Juju container and run the installation again +### Recommended checks after installation + +#### Checking whether all processes/services are running in K8s ```bash -#Destroy the Juju container -lxc stop juju-* -lxc delete juju-* -#Unregister the controller since we’ve manually freed the resources associated with it -juju unregister -y osm -#Verify that there are no controllers -juju list-controllers -#Run the installation again -./install_osm.sh +kubectl -n osm get all ``` -##### No controllers registered - -The following error appears when the user used for installation does not belong to some groups: - -_Finished installation of juju_ Password: **sg: failed to crypt password with previous salt: Invalid argument** ERROR No controllers registered. +All the deployments and statefulsets should have 1 replica: 1/1 -To fix it, just add the non-root user used for installation in *sudo , lxd, docker* groups +## How to troubleshoot issues in the new Service Assurance architecture -#### LXD +Since OSM Release FOURTEEN, the Service Assurance architecture is based on Apache Airflow and Prometheus. The Airflow DAGs, in addition to periodically collecting metrics from VIMs and storing them into Prometheus, implement auto-scaling and auto-healing closed-loop operations which are triggered by Prometheus alerts. These alerts are managed by AlertManager and forwarded to Webhook Translator, which re-formats them to adapt to Airflow expected webhook endpoints. So the alert workflow is this: `DAGs collect metrics => Prometheus => AlertManager => Webhook Translator => Alarm driven DAG` -##### ERROR profile default: `/etc/default/lxd-bridge` has IPv6 enabled +In case of any kind of error related to monitoring, the first thing to check should be the metrics stored in Prometheus. Its graphical interface can be visited at the URL . Some useful metrics to review are the following: -Make sure that you follow the instructions in the [Quickstart](01-quickstart.md). +- `ns_topology`: metric generated by a DAG with the current topology (VNFs and NSs) of instantiated VDUs in OSM. +- `vm_status`: status (1: ok, 0: error) of the VMs in the VIMs registered in OSM. +- `vm_status_extended`: metric enriched from the two previous ones, so it includes data about VNF and NS the VM belongs to as part of the metric labels. +- `osm_*`: resource consumption metrics. Only intantiated VNFs that include monitoring parameters have these kind of metrics in Prometheus. -When asked if you want to proceed with the installation and configuration of LXD, juju, docker CE and the initialization of a local docker swarm, as pre-requirements, Please answer "y". 
+In case you need to debug closed-loop operations you will also need to check the Prometheus alerts here . On this page you can see the alerting rules and their status: inactive, pending or active. When a alert is fired (its status changes from pending to active) or is marked as resolved (from active to inactive), the appropriate DAG is run on Airflow. There are three types of alerting rules: -When dialog messages related to LXD configuration are shown, please answer in the following way: +- `vdu_down`: this alert is fired when a VDU remains in a not OK state for several minutes and triggers `alert_vdu` DAG. Its labels include information about NS, VNF, VIM, etc. +- `scalein_*`: these rules manage scale-in operations based on the resource consumption metrics and the number of VDU instances. They trigger `scalein_vdu` DAG. +- `scaleout_*`: these rules manage scale-out operations based on the resource consumption metrics and the number of VDU instances. They trigger `scaleout_vdu` DAG. -- Do you want to configure the LXD bridge? Yes -- Do you want to setup an IPv4 subnet? Yes -- << Default values apply for next questions >> -- **Do you want to setup an IPv6 subnet? No** +Finally, it is also interesting for debugging to be able to view the logs of the execution of the DAGs. To do this, you must visit the Airflow website, which will be accessible on the port pointed by the `airflow-webserver` service in OSM's cluster (not a fixed port): -#### Docker Swarm +```bash +kubectl -n osm get svc airflow-webserver +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +airflow-webserver NodePort 10.100.57.168 8080:19371/TCP 12d +``` -##### `network netosm could not be found` +When you open the URL (`19371` in the example above) in a browser, you will be prompted for the user and password (`admin`/`admin` by default). After that you will see the dashboard with the list of DAGs: -The error is `network "netosm" is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed` +- `alert_vdu`: it is executed when a VDU down alarm is fired or resolved. +- `scalein_vdu`, `scaleout_vdu`: executed when auto-scaling conditions in a VNF are met. +- `ns_topology`: this DAG is executed periodically for updating the topology metric in Prometheus of the instantiated NS. +- `vim_status_*`: there is one such DAG for each VIM in OSM. It checks VIM's reachability every few minutes. +- `vm_status_vim_*`: these DAGs (one per VIM) get VM status from VIM and store them in Prometheus. +- `vm_metrics_vim_*`: these DAGs (one per VIM) store in Prometheus resource consumption metrics from VIM. -It usually happens when a `docker system prune` is done with the stack stopped. The following script will create it: +The logs of the executions can be accessed by clicking on the corresponding DAG in dashboard and then selecting the required date and time in the grid. Each DAG has a set of tasks, and each task has its own logs. -```bash - #!/bin/bash - # Create OSM Docker Network ... 
- [ -z "$OSM_STACK_NAME" ] && OSM_STACK_NAME=osm - OSM_NETWORK_NAME=net${OSM_STACK_NAME} - echo Creating OSM Docker Network - DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}') - DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') - echo \# OSM_STACK_NAME = $OSM_STACK_NAME - echo \# OSM_NETWORK_NAME = $OSM_NETWORK_NAME - echo \# DEFAULT_INTERFACE = $DEFAULT_INTERFACE - echo \# DEFAULT_MTU = $DEFAULT_MTU - sg docker -c "docker network create --driver=overlay --attachable \ - --opt com.docker.network.driver.mtu=${DEFAULT_MTU} \ - ${OSM_NETWORK_NAME}" -``` +## Checking workflows in new OSM declarative framework -### Issues on advanced installation (manual build of docker images) +Since Release SIXTEEN, operations involve launching an ArgoWorkflows workflow, which will end up with a commit being created in a Git repo. -#### Manual build of images. Were all docker images successfully built? +Be aware that workflows are automatically cleaned up after some time, so the check of the workflows is recommended to be done while the operation is running or a few seconds later. -Although controlled by the installer, you can check that the following images exist: +### How to expose ArgoWorkflows UI ```bash -$ docker image ls - -REPOSITORY TAG IMAGE ID CREATED SIZE -osm/ng-ui latest 1988aa262a97 18 hours ago 710MB -osm/lcm latest c9ad59bf96aa 46 hours ago 667MB -osm/ro latest 812c987fcb16 46 hours ago 791MB -osm/nbi latest 584b4e0084a7 46 hours ago 497MB -osm/pm latest 1ad1e4099f52 46 hours ago 462MB -osm/mon latest b17efa3412e3 46 hours ago 725MB -wurstmeister/kafka latest 7cfc4e57966c 10 days ago 293MB -mysql 5 0d16d0a97dd1 2 weeks ago 372MB -mongo latest 14c497d5c758 3 weeks ago 366MB -wurstmeister/zookeeper latest 351aa00d2fe9 18 months ago 478MB +# Get the kubeconfig and copy to your local machine +# Then, from your local machine +export KUBECONFIG=~/kubeconfig-osm.yaml +kubectl -n argo port-forward deployment/argo-server 2746:2746 ``` -#### Docker image failed to build - -##### Err:1 `http://archive.ubuntu.com/ubuntu xenial InRelease` +Access Argo UI from web browser: . Then click on the workflow, then on the step, then on "Logs". -In some cases, DNS resolution works on the host but fails when building the Docker container. This is caused when Docker doesn't automatically determine the DNS server to use. - -Check if the following works: +### How to check a workflow with kubectl ```bash -docker run busybox nslookup archive.ubuntu.com +export KUBECONFIG=~/kubeconfig-osm.yaml +kubectl -n osm-workflows get workflows +kubectl -n osm-workflows get workflows/${WORKFLOW_NAME} +kubectl -n osm-workflows get workflows/${WORKFLOW_NAME} -o json +kubectl -n osm-workflows get workflows/${WORKFLOW_NAME} -o jsonpath='{.status.conditions}' | jq -r '.[] | select(.type=="Completed").status' +watch kubectl -n osm-workflows get workflows ``` -If it does not work, you have to configure Docker to use the available DNS. +### How to check a workflow with argo CLI ```bash -# Get the IP address you’re using for DNS: -nmcli dev show | grep 'IP4.DNS' -# Create a new file, /etc/docker/daemon.json, that contains the following (but replace the DNS IP address with the output from the previous step: -{ - "dns": ["192.168.24.10"] -} -# Restart docker -sudo service docker restart -# Re-run -docker run busybox nslookup archive.ubuntu.com -# Now you should be able to re-run the installer and move past the DNS issue. 
+export KUBECONFIG=~/kubeconfig-osm.yaml +argo list -n osm-workflows +argo get -n osm-workflows @latest +argo watch -n osm-workflows @latest +argo logs -n osm-workflows @latest ``` -##### TypeError: `unsupported operand type(s) for -=: 'Retry' and 'int'` +## Checking progress of operations in new OSM declarative framework -In some cases, a MTU mismatch between the host and docker interfaces will cause this error while running pip. You can check this by running `ifconfig` and comparing the MTU of your host interface and the `docker_gwbridge` interface. +### How to check progres of resources in Flux ```bash -# Create a new file, /etc/docker/daemon.json, that contains the following (but replace the MTU value with that of your host interface from the previous step: -{ - "mtu": 1458 -} -# Restart docker -sudo service docker restart +export KUBECONFIG=~/kubeconfig-osm.yaml +watch 'echo; kubectl get managed; echo; kubectl get kustomizations -A; echo; kubectl get helmreleases -A' ``` ## Common issues with VIMs @@ -277,101 +240,63 @@ curl _In some cases, the errors come from the fact that the VIM was added to OSM using names in the URL that are not Fully Qualified Domain Names (FQDN)._ -When adding a VIM to OSM, you must use always FQDN or the IP addresses. It must be noted that “controller” or similar names are not proper FQDN (the suffix should be added). Non-FQDN names might be understood by docker’s dnsmasq as a docker container name to be resolved, which is not the case. In addition, all the VIM endpoints should also be FQDN or IP addresses, thus guaranteeing that all subsequent API calls can reach the appropriate endpoint. - -Think of an NFV infrastructure with tens of VIMs, first you will have to use different names for each controller (controller1, controller2, etc.), then you will have to add to every machine trying to interact with the different VIMs, not only OSM, all those entries in the /etc/hosts file. This is bad practice. +When adding a VIM to OSM, you must use always FQDN or the IP addresses. Non-FQDN names might be understood by Kubernetes as a container name to be resolved, which is not the case. In addition, all the VIM endpoints should also be FQDN or IP addresses, thus guaranteeing that all subsequent API calls can reach the appropriate endpoint. -However, it is useful to have a mean to work with lab environments using non-FQDN names. Three options here. Probably you are looking for the third one, but we recommend the first one: - -- Option 1. Change the admin URL and/or public URL of the endpoints to use an IP address or an FQDN. You might find this interesting if you want to bring your Openstack setup to production. -- Option 2. Modify `/etc/hosts` in the docker RO container. This is not persistent after reboots or restarts. -- Option 3a (for docker swarm). Modify `/etc/osm/docker/docker-compose.yaml` in the host, adding extra_hosts in the ro section with the entries that you want to add to `/etc/hosts` in the RO docker: -- Option 3b (for kubernetes). Modify `/etc/osm/docker/osm_pods/ro.yaml` in the host, adding extra_hosts in the ro section with the entries that you want to add to `/etc/hosts` in the RO docker: +### Issues when trying to access VM from OSM -With docker swarm, the modification of `/etc/osm/docker/docker-compose.yaml` would be: +**Is the VIM management network reachable from OSM (e.g. 
via ssh, port 22)?** -```yaml -ro: - extra_hosts: - controller: 1.2.3.4 -``` +The simplest check would consist on deploying a VM attached to the management network and trying to access it via e.g. ssh from the OSM host. -Then: +For instance, in the case of an OpenStack VIM you could try something like this: ```bash -docker stack rm osm -docker stack deploy -c /etc/osm/docker/docker-compose.yaml osm -``` - -With kubernetes, the procedure is very similar. The modification of `/etc/osm/docker/osm_pods/ro.yaml` would be: - -```yaml -... -spec: - ... - hostAliases: - - ip: "1.2.3.4" - hostnames: - - "controller" - ... +$ openstack server create --image ubuntu --flavor m1.small --nic mgmtnet test ``` -Then: - -```bash -kubectl -n osm apply -f /etc/osm/docker/osm_pods/ro.yaml -``` +If this does not work, typically it is due to one of these issues: -This is persistent after reboots and restarts. +- Security group policy in your VIM is blocking your traffic (contact your admin to fix it) +- IP address space in the management network is not routable from outside (or in the reverse direction, for the ACKs). -### VIM authentication +## How to report an issue -**What should I check if the VIM authentication is failing?** +**If you have bugs or issues to be reported, please use [Bugzilla](https://osm.etsi.org/bugzilla)** -Typically, you will get the following error message: +**If you have questions or feedback, feel free to contact us through:** -Error: `"VIM Exception vimconnUnexpectedResponse Unauthorized: The request you have made requieres authentication. (HTTP 401)"` +- **the mailing list [OSM_TECH@list.etsi.org](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=OSM_TECH@list.etsi.org)** +- **the [Slack work space](https://join.slack.com/t/opensourcemano/shared_invite/enQtMzQ3MzYzNTQ0NDIyLWVkNTE4ZjZjNWI0ZTQyN2VhOTI1MjViMzU1NWYwMWM3ODI4NTQyY2VlODA2ZjczMWIyYTFkZWNiZmFkM2M2ZDk)** -If your OpenStack URL is based on HTTPS, OSM will check by default the authenticity of your VIM using the appropriate public certificate. The recommended way to solve this is by modifying `/etc/osm/docker/docker-compose.yaml` in the host, sharing the host file (e.g. `/home/ubuntu/cafile.crt`) by adding a volume to the `ro` section as follows: +**Please be patient. Answers may take a few days.** -```yaml - ro: - ... - volumes: - - /home/ubuntu/cafile.crt:/etc/osm/cafile.crt -``` +------ -Then, when creating the VIM, you should use the config option `ca_cert` as follows: +Please provide some context to your questions. As an example, find below some guidelines: -```bash -$ # Create the VIM with all the usual options, and add the config option to specify the certificate -$ osm vim-create VIM-NAME ... --config '{ca_cert: /etc/osm/cafile.crt}' -``` +- In case of an installation issue: -For casual testing, when adding the VIM account to OSM, you can use `'insecure: True'` (without quotes) as part of the VIM config parameters: + - The full command used to run the installer and the full output of the installer (or at least enough context) might help on finding the solution. +- It is highly recommended to run the installer command capturing standard output and standard error, so that you can send them for analysis if needed. E.g.: ```bash -$ osm vim-create VIM-NAME ... --config '{insecure: True}' +./install_osm.sh 2>&1 | tee osm_install.log ``` -### Issues when trying to access VM from OSM - -**Is the VIM management network reachable from OSM (e.g. 
via ssh, port 22)?** - -The simplest check would consist on deploying a VM attached to the management network and trying to access it via e.g. ssh from the OSM host. - -For instance, in the case of an OpenStack VIM you could try something like this: - -```bash -$ openstack server create --image ubuntu --flavor m1.small --nic mgmtnet test -``` +- In case of operational issues, the following information might help: -If this does not work, typically it is due to one of these issues: + - Version of OSM that you are using +- Logs of the system. Check to know how to get them. + - Details on the actions you made to get that error so that we could reproduce it. + - IP network details in order to help troubleshooting potential network issues. For instance: + - Client IP address (browser, command line client, etc.) from where you are trying to access OSM + - IP address of the machine where OSM is running + - IP addresses of the containers + - NAT rules in the machine where OSM is running -- Security group policy in your VIM is blocking your traffic (contact your admin to fix it) -- IP address space in the management network is not routable from outside (or in the reverse direction, for the ACKs). +Common sense applies here, so you don't need to send everything, but just enough information to diagnose the issue and find a proper solution. -## Common issues with VCA/Juju +## (OLD) Common issues with VCA/Juju ### Juju status shows pending objects after deleting a NS @@ -456,64 +381,6 @@ You can make deployment of charms quicker by: - Preventing Juju from running `apt-get update && apt-get upgrade` when starting a machine: [Disable OS upgrades in charms](14-advanced-charm-development.md#disable-os-upgrades) - Building periodically a custom image that will be used as base image for all the charms: [Custom base image for charms](14-advanced-charm-development.md#build-a-custom-cloud-image) -## Common instantiation errors - -### File juju_id_rsa.pub not found - -- **ERROR**: `ERROR creating VCA model name 'xxxx': Traceback (most recent call last): File "/usr/lib/python3/dist-packages/osm_lcm/ns.py", line 822, in instantiate await ... [Errno 2] No such file or directory: '/root/.local/share/juju/ssh/juju_id_rsa.pub'` -- **CAUSE**: Normally a migration from release FIVE do not set properly the env for LCM -- **SOLUTION**: Ensure variable **OSMLCM_VCA_PUBKEY** is properly set at file `/etc/osm/docker/lcm.env`. The value must match with the output of this command `cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub`. If not, add or change it. Restart OSM, or just LCM service with `docker service update osm_lcm --force --env-add OSMLCM_VCA_PUBKEY=""` - -## Common issues when interacting with NBI - -### SSL certificate problem - -By default, OSM installer uses a self-signed certificate for HTTPS. That might lead to the error '_SSL certificate problem: self signed certificate_' on the client side. For testing environments, you might want to ignore this error just by using the appropriate options to skip certificate validation (e.g. `--insecure` for curl, `--no-check-certificate` for wget, etc.). However, for more stable setups you might prefer to address this issue by installing the appropriate certificate in your client system. - -These are the steps to install NBI certificate on the client side (tested for Ubuntu): - -1. Get the certificate file `cert.pem` by any of these means: - - From running docker container: - ```bash - docker ps | grep nbi - docker cp :/app/NBI/osm_nbi/http/cert.pem . 
- ``` - - From source code: NBI-folder/osm_nbi/http/cert.pem - - From ETSI's git: - ```bash - wget -O cert.pem "https://osm.etsi.org/gitweb/?p=osm/NBI.git;a=blob_plain;f=osm_nbi/http/cert.pem;hb=refs/heads/v8.0" - ``` -2. Then, you should install this certificate: - ```bash - sudo cp cert.pem /usr/local/share/ca-certificates/osm_nbi_cert.pem.crt - sudo update-ca-certificates - # 1 added, 0 removed; done - ``` -3. Add to the list of `/etc/hosts` a host called "nbi" with the IP address where OSM is running. - - It can be `localhost` if client and server are the same machine. - - For localhost, you would need to add (or edit) these lines: - ```text - 127.0.0.1 localhost nbi - OSM-ip nbi - ``` -4. Finally, for the URL, use the `nbi` as host name (i.e. ). - - Do not use neither `localhost` nor 127.0.0.1. - - You can run a quick test with `curl` by: - ```bash - curl https://nbi:9999/osm/version - ``` - -### Cannot login after migration to 6.0.2 - -- **ERROR**: NBI always return "UNAUTHORIZED". Cannot login neither with UI nor with CLI. CLI shows error "`can't find a default project for this user`" or "`project admin not allowed for this user`". -- **CAUSE**: Normally after a migration to release 6.0.2 There is a slight incompatibility with users created from older versions. -- **SOLUTION**: Delete user admin and reboot NBI so that a new compatible user is created by running these commands: - -```bash -curl --insecure https://localhost:9999/osm/test/db-clear/users -docker service update osm_nbi --force -``` - ## Other operational issues ### Running out of disk space @@ -524,155 +391,3 @@ If you are upgrading frequently your OSM installation, you might face that your docker system prune docker image prune ``` - -If you are still experiencing issues with disk space, logs in one of the dockers could be the cause of your issue. 
Check the containers that are consuming more space (typically kafka-exporter) - -```bash -du -sk /var/lib/docker/containers/* |sort -n -docker ps |grep -``` - -Then, remove the stack and redeploy it again after doing a prune: - -```bash -docker stack rm osm_metrics -docker system prune -docker image prune -docker stack deploy -c /etc/osm/docker/osm_metrics/docker-compose.yml osm_metrics -``` - -## Logs - -### Checking the logs of OSM in Kubernetes - -You can check the logs of any container with the following commands: - -```bash -kubectl -n osm logs deployment/mon --all-containers=true -kubectl -n osm logs deployment/pol --all-containers=true -kubectl -n osm logs deployment/lcm --all-containers=true -kubectl -n osm logs deployment/nbi --all-containers=true -kubectl -n osm logs deployment/ng-ui --all-containers=true -kubectl -n osm logs deployment/ro --all-containers=true -kubectl -n osm logs deployment/grafana --all-containers=true -kubectl -n osm logs deployment/keystone --all-containers=true -kubectl -n osm logs statefulset/mysql --all-containers=true -kubectl -n osm logs statefulset/mongo --all-containers=true -kubectl -n osm logs statefulset/kafka --all-containers=true -kubectl -n osm logs statefulset/zookeeper --all-containers=true -kubectl -n osm logs statefulset/prometheus --all-containers=true -``` - -For live debugging, the following commands can be useful to save the log output to a file and show it in the screen: - -```bash -kubectl -n osm logs -f deployment/mon --all-containers=true 2>&1 | tee mon-log.txt -kubectl -n osm logs -f deployment/pol --all-containers=true 2>&1 | tee pol-log.txt -kubectl -n osm logs -f deployment/lcm --all-containers=true 2>&1 | tee lcm-log.txt -kubectl -n osm logs -f deployment/nbi --all-containers=true 2>&1 | tee nbi-log.txt -kubectl -n osm logs -f deployment/ng-ui --all-containers=true 2>&1 | tee ng-log.txt -kubectl -n osm logs -f deployment/ro --all-containers=true 2>&1 | tee ro-log.txt -kubectl -n osm logs -f deployment/grafana --all-containers=true 2>&1 | tee grafana-log.txt -kubectl -n osm logs -f deployment/keystone --all-containers=true 2>&1 | tee keystone-log.txt -kubectl -n osm logs -f statefulset/mysql --all-containers=true 2>&1 | tee mysql-log.txt -kubectl -n osm logs -f statefulset/mongo --all-containers=true 2>&1 | tee mongo-log.txt -kubectl -n osm logs -f statefulset/kafka --all-containers=true 2>&1 | tee kafka-log.txt -kubectl -n osm logs -f statefulset/zookeeper --all-containers=true 2>&1 | tee zookeeper-log.txt -kubectl -n osm logs -f statefulset/prometheus --all-containers=true 2>&1 | tee prometheus-log.txt -``` - -### Changing the log level - -You can change the log level of any container, by updating the container with the right `LOG_LEVEL` env var. 
- -Log levels are: - -- ERROR -- WARNING -- INFO -- DEBUG - -For instance, to set the log level to INFO for the MON in a deployment of OSM over K8s: - -```bash -kubectl -n osm set env deployment mon OSMMON_GLOBAL_LOGLEVEL=INFO -``` - -For instance, to increase the log level to DEBUG for the NBI in a deployment of OSM over docker swarm: - -```bash -docker service update --env-add OSMNBI_LOG_LEVEL=DEBUG osm_nbi -``` - -## How to report an issue - -**If you have bugs or issues to be reported, please use [Bugzilla](https://osm.etsi.org/bugzilla)** - -**If you have questions or feedback, feel free to contact us through:** - -- **the mailing list [OSM_TECH@list.etsi.org](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=OSM_TECH@list.etsi.org)** -- **the [Slack work space](https://join.slack.com/t/opensourcemano/shared_invite/enQtMzQ3MzYzNTQ0NDIyLWVkNTE4ZjZjNWI0ZTQyN2VhOTI1MjViMzU1NWYwMWM3ODI4NTQyY2VlODA2ZjczMWIyYTFkZWNiZmFkM2M2ZDk)** - -**Please be patient. Answers may take a few days.** - ------- - -Please provide some context to your questions. As an example, find below some guidelines: - -- In case of an installation issue: - - - The full command used to run the installer and the full output of the installer (or at least enough context) might help on finding the solution. -- It is highly recommended to run the installer command capturing standard output and standard error, so that you can send them for analysis if needed. E.g.: - -```bash -./install_osm.sh 2>&1 | tee osm_install.log -``` - -- In case of operational issues, the following information might help: - - - Version of OSM that you are using -- Logs of the system. Check to know how to get them. - - Details on the actions you made to get that error so that we could reproduce it. - - IP network details in order to help troubleshooting potential network issues. For instance: - - Client IP address (browser, command line client, etc.) from where you are trying to access OSM - - IP address of the machine where OSM is running - - IP addresses of the containers - - NAT rules in the machine where OSM is running - -Common sense applies here, so you don't need to send everything, but just enough information to diagnose the issue and find a proper solution. - -## How to troubleshoot issues in the new Service Assurance architecture - -Since OSM Release FOURTEEN, the Service Assurance architecture is based on Apache Airflow and Prometheus. The Airflow DAGs, in addition to periodically collecting metrics from VIMs and storing them into Prometheus, implement auto-scaling and auto-healing closed-loop operations which are triggered by Prometheus alerts. These alerts are managed by AlertManager and forwarded to Webhook Translator, which re-formats them to adapt to Airflow expected webhook endpoints. So the alert workflow is this: `DAGs collect metrics => Prometheus => AlertManager => Webhook Translator => Alarm driven DAG` - -In case of any kind of error related to monitoring, the first thing to check should be the metrics stored in Prometheus. Its graphical interface can be visited at the URL . Some useful metrics to review are the following: - -- `ns_topology`: metric generated by a DAG with the current topology (VNFs and NSs) of instantiated VDUs in OSM. -- `vm_status`: status (1: ok, 0: error) of the VMs in the VIMs registered in OSM. -- `vm_status_extended`: metric enriched from the two previous ones, so it includes data about VNF and NS the VM belongs to as part of the metric labels. -- `osm_*`: resource consumption metrics. 
Only intantiated VNFs that include monitoring parameters have these kind of metrics in Prometheus. - -In case you need to debug closed-loop operations you will also need to check the Prometheus alerts here . On this page you can see the alerting rules and their status: inactive, pending or active. When a alert is fired (its status changes from pending to active) or is marked as resolved (from active to inactive), the appropriate DAG is run on Airflow. There are three types of alerting rules: - -- `vdu_down`: this alert is fired when a VDU remains in a not OK state for several minutes and triggers `alert_vdu` DAG. Its labels include information about NS, VNF, VIM, etc. -- `scalein_*`: these rules manage scale-in operations based on the resource consumption metrics and the number of VDU instances. They trigger `scalein_vdu` DAG. -- `scaleout_*`: these rules manage scale-out operations based on the resource consumption metrics and the number of VDU instances. They trigger `scaleout_vdu` DAG. - -Finally, it is also interesting for debugging to be able to view the logs of the execution of the DAGs. To do this, you must visit the Airflow website, which will be accessible on the port pointed by the `airflow-webserver` service in OSM's cluster (not a fixed port): - -```bash -kubectl -n osm get svc airflow-webserver -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -airflow-webserver NodePort 10.100.57.168 8080:19371/TCP 12d -``` - -When you open the URL (`19371` in the example above) in a browser, you will be prompted for the user and password (`admin`/`admin` by default). After that you will see the dashboard with the list of DAGs: - -- `alert_vdu`: it is executed when a VDU down alarm is fired or resolved. -- `scalein_vdu`, `scaleout_vdu`: executed when auto-scaling conditions in a VNF are met. -- `ns_topology`: this DAG is executed periodically for updating the topology metric in Prometheus of the instantiated NS. -- `vim_status_*`: there is one such DAG for each VIM in OSM. It checks VIM's reachability every few minutes. -- `vm_status_vim_*`: these DAGs (one per VIM) get VM status from VIM and store them in Prometheus. -- `vm_metrics_vim_*`: these DAGs (one per VIM) store in Prometheus resource consumption metrics from VIM. - -The logs of the executions can be accessed by clicking on the corresponding DAG in dashboard and then selecting the required date and time in the grid. Each DAG has a set of tasks, and each task has its own logs. 
-- GitLab From f91f917ace29793187cb158b0012832f02de31fd Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Thu, 13 Feb 2025 23:35:37 +0100 Subject: [PATCH 3/4] Rename files to update numeration Signed-off-by: garciadeblas --- ...nt.md => 06-advanced-cluster-management.md | 0 ...ion.md => 07-osm-platform-configuration.md | 0 ...to-read-next.md => 08-what-to-read-next.md | 0 ...docs.md => 09-how-to-contribute-to-docs.md | 0 ...roubleshooting.md => 10-troubleshooting.md | 0 ....md => 11-osm-client-commands-reference.md | 0 11-osm-im.md => 12-osm-im.md | 0 12-osm-nbi.md => 13-osm-nbi.md | 0 ...tallation.md => 14-openvim-installation.md | 0 ...on.md => 17-tacacs-based-authentication.md | 0 19-lts-upgrade.md => 18-lts-upgrade.md | 0 20-tutorial.md => 19-tutorial.md | 0 21-reference.md => 20-reference.md | 0 index.md | 26 +++++++++--------- index.rst | 26 +++++++++--------- navigation.md | 27 +++++++++---------- 16 files changed, 39 insertions(+), 40 deletions(-) rename advanced-cluster-management.md => 06-advanced-cluster-management.md (100%) rename 06-osm-platform-configuration.md => 07-osm-platform-configuration.md (100%) rename 07-what-to-read-next.md => 08-what-to-read-next.md (100%) rename 08-how-to-contribute-to-docs.md => 09-how-to-contribute-to-docs.md (100%) rename 09-troubleshooting.md => 10-troubleshooting.md (100%) rename 10-osm-client-commands-reference.md => 11-osm-client-commands-reference.md (100%) rename 11-osm-im.md => 12-osm-im.md (100%) rename 12-osm-nbi.md => 13-osm-nbi.md (100%) rename 13-openvim-installation.md => 14-openvim-installation.md (100%) rename 18-tacacs-based-authentication.md => 17-tacacs-based-authentication.md (100%) rename 19-lts-upgrade.md => 18-lts-upgrade.md (100%) rename 20-tutorial.md => 19-tutorial.md (100%) rename 21-reference.md => 20-reference.md (100%) diff --git a/advanced-cluster-management.md b/06-advanced-cluster-management.md similarity index 100% rename from advanced-cluster-management.md rename to 06-advanced-cluster-management.md diff --git a/06-osm-platform-configuration.md b/07-osm-platform-configuration.md similarity index 100% rename from 06-osm-platform-configuration.md rename to 07-osm-platform-configuration.md diff --git a/07-what-to-read-next.md b/08-what-to-read-next.md similarity index 100% rename from 07-what-to-read-next.md rename to 08-what-to-read-next.md diff --git a/08-how-to-contribute-to-docs.md b/09-how-to-contribute-to-docs.md similarity index 100% rename from 08-how-to-contribute-to-docs.md rename to 09-how-to-contribute-to-docs.md diff --git a/09-troubleshooting.md b/10-troubleshooting.md similarity index 100% rename from 09-troubleshooting.md rename to 10-troubleshooting.md diff --git a/10-osm-client-commands-reference.md b/11-osm-client-commands-reference.md similarity index 100% rename from 10-osm-client-commands-reference.md rename to 11-osm-client-commands-reference.md diff --git a/11-osm-im.md b/12-osm-im.md similarity index 100% rename from 11-osm-im.md rename to 12-osm-im.md diff --git a/12-osm-nbi.md b/13-osm-nbi.md similarity index 100% rename from 12-osm-nbi.md rename to 13-osm-nbi.md diff --git a/13-openvim-installation.md b/14-openvim-installation.md similarity index 100% rename from 13-openvim-installation.md rename to 14-openvim-installation.md diff --git a/18-tacacs-based-authentication.md b/17-tacacs-based-authentication.md similarity index 100% rename from 18-tacacs-based-authentication.md rename to 17-tacacs-based-authentication.md diff --git a/19-lts-upgrade.md b/18-lts-upgrade.md similarity index 
100% rename from 19-lts-upgrade.md rename to 18-lts-upgrade.md diff --git a/20-tutorial.md b/19-tutorial.md similarity index 100% rename from 20-tutorial.md rename to 19-tutorial.md diff --git a/21-reference.md b/20-reference.md similarity index 100% rename from 21-reference.md rename to 20-reference.md diff --git a/index.md b/index.md index 0268f87..b3941e2 100644 --- a/index.md +++ b/index.md @@ -10,23 +10,23 @@ - How to setup a VIM to use it from OSM 5. [OSM Usage](05-osm-usage.md) - Learn how to run common OSM operations -5. [Advanced cluster management](advanced-cluster-management.md) +6. [Advanced cluster management](06-advanced-cluster-management.md) - Learn how to run new OSM operations for advanced cluster management -6. [OSM platform configuration](06-osm-platform-configuration.md) +7. [OSM platform configuration](07-osm-platform-configuration.md) - Setting up your OSM -7. [What to read next](07-what-to-read-next.md) +8. [What to read next](08-what-to-read-next.md) - Where can I learn more? Some useful links -8. [How to contribute to documentation](08-how-to-contribute-to-docs.md) +9. [How to contribute to documentation](09-how-to-contribute-to-docs.md) - How can I help to improve this documentation? -9. [ANNEX 1: Troubleshooting](09-troubleshooting.md) -10. [ANNEX 2: Reference of OSM Client commands and library](10-osm-client-commands-reference.md) -11. [ANNEX 3: Reference of OSM's Information Model](11-osm-im.md) -12. [ANNEX 4: Reference of OSM's Northbound Interface](12-osm-nbi.md) -13. [ANNEX 5: OpenVIM installation](13-openvim-installation.md) +10. [ANNEX 1: Troubleshooting](10-troubleshooting.md) +11. [ANNEX 2: Reference of OSM Client commands and library](11-osm-client-commands-reference.md) +12. [ANNEX 3: Reference of OSM's Information Model](12-osm-im.md) +13. [ANNEX 4: Reference of OSM's Northbound Interface](13-osm-nbi.md) +14. [ANNEX 5: OpenVIM installation](14-openvim-installation.md) 15. [ANNEX 6: Kubernetes installation and requirements](15-k8s-installation.md) 16. [ANNEX 7: Setting up an LXD Cluster](16-lxd-cluster.md) -17. [ANNEX 8: TACACS Based Authentication Support In OSM](18-tacacs-based-authentication.md) -18. [ANNEX 9: LTS Upgrade](19-lts-upgrade.md) -19. [ANNEX 10: Tutorial - Charmed OSM installation and deployment of a CNF](20-tutorial.md) -20. [OSM reference](21-reference.md) +17. [ANNEX 8: TACACS Based Authentication Support In OSM](17-tacacs-based-authentication.md) +18. [ANNEX 9: LTS Upgrade](18-lts-upgrade.md) +19. [ANNEX 10: Tutorial - Charmed OSM installation and deployment of a CNF](19-tutorial.md) +20. [OSM reference](20-reference.md) diff --git a/index.rst b/index.rst index 167a893..e33013d 100644 --- a/index.rst +++ b/index.rst @@ -17,19 +17,19 @@ Welcome to Open Source MANO's documentation! 
03-installing-osm 04-vim-setup 05-osm-usage - advanced-cluster-management - 06-osm-platform-configuration - 07-what-to-read-next - 08-how-to-contribute-to-docs - 09-troubleshooting - 10-osm-client-commands-reference - 11-osm-im - 12-osm-nbi - 13-openvim-installation + 06-advanced-cluster-management + 07-osm-platform-configuration + 08-what-to-read-next + 09-how-to-contribute-to-docs + 10-troubleshooting + 11-osm-client-commands-reference + 12-osm-im + 13-osm-nbi + 14-openvim-installation 15-k8s-installation.md 16-lxd-cluster.md - 18-tacacs-based-authentication.md - 19-lts-upgrade.md - 20-tutorial.md - 21-reference.md + 17-tacacs-based-authentication.md + 18-lts-upgrade.md + 19-tutorial.md + 20-reference.md diff --git a/navigation.md b/navigation.md index 9f39d29..62a54b3 100644 --- a/navigation.md +++ b/navigation.md @@ -7,19 +7,18 @@ [Installing OSM](03-installing-osm.md) [VIM(s) setup](04-vim-setup.md) [OSM Usage](05-osm-usage.md) -[Advanced cluster management](advanced-cluster-management.md) -[OSM platform configuration](06-osm-platform-configuration.md) -[What to read next](07-what-to-read-next.md) -[How to contribute to documentation](08-how-to-contribute-to-docs.md) -[ANNEX 1: Troubleshooting](09-troubleshooting.md) -[ANNEX 2: Reference of OSM Client commands and library](10-osm-client-commands-reference.md) -[ANNEX 3: Reference of OSM's Information Model](11-osm-im.md) -[ANNEX 4: Reference of OSM's Northbound Interface](12-osm-nbi.md) -[ANNEX 5: OpenVIM installation](13-openvim-installation.md) -[ANNEX 6: Tests to validate VIM capabilities from OSM](14-tests-for-vim-validation.md) +[Advanced cluster management](06-advanced-cluster-management.md) +[OSM platform configuration](07-osm-platform-configuration.md) +[What to read next](08-what-to-read-next.md) +[How to contribute to documentation](09-how-to-contribute-to-docs.md) +[ANNEX 1: Troubleshooting](10-troubleshooting.md) +[ANNEX 2: Reference of OSM Client commands and library](11-osm-client-commands-reference.md) +[ANNEX 3: Reference of OSM's Information Model](12-osm-im.md) +[ANNEX 4: Reference of OSM's Northbound Interface](13-osm-nbi.md) +[ANNEX 5: OpenVIM installation](14-openvim-installation.md) [ANNEX 6: Kubernetes installation and requirements](15-k8s-installation.md) [ANNEX 7: Setting up an LXD Cluster](16-lxd-cluster.md) -[ANNEX 8: TACACS Based Authentication Support In OSM](18-tacacs-based-authentication.md) -[ANNEX 9: LTS Upgrade](19-lts-upgrade.md) -[ANNEX 10: Tutorial - Charmed OSM installation and deployment of a CNF](20-tutorial.md) -[OSM reference](21-reference.md) +[ANNEX 8: TACACS Based Authentication Support In OSM](17-tacacs-based-authentication.md) +[ANNEX 9: LTS Upgrade](18-lts-upgrade.md) +[ANNEX 10: Tutorial - Charmed OSM installation and deployment of a CNF](19-tutorial.md) +[OSM reference](20-reference.md) -- GitLab From 9a768e105b65c7ff72b18b97774192363384ddea Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Thu, 13 Feb 2025 23:41:27 +0100 Subject: [PATCH 4/4] Update references after renaming files to update numeration Signed-off-by: garciadeblas --- 01-quickstart.md | 2 +- 02-osm-architecture-and-functions.md | 4 ++-- 03-installing-osm.md | 2 +- 04-vim-setup.md | 6 +++--- 05-osm-usage.md | 4 ++-- 07-osm-platform-configuration.md | 2 +- 20-reference.md | 8 ++++---- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/01-quickstart.md b/01-quickstart.md index ac8b411..8427c08 100644 --- a/01-quickstart.md +++ b/01-quickstart.md @@ -135,7 +135,7 @@ Before proceeding, make sure that you have a 
site with a VIM configured to run w
- **Google Cloud Platform (GCP)** Check the following link to learn how to configure Google Cloud Platform to be used by OSM: [Configuring Google Cloud Platform for OSM](04-vim-setup.md#google-cloud-platform)
- **Amazon Web Services (AWS).** Check the following link to learn how to configure AWS (EC2 and Virtual Private Cloud) to be used by OSM: [Configuring AWS for OSM](04-vim-setup.md#amazon-web-services-aws)
- **VMware vCloud Director.** Check the following link to learn how to configure VMware VCD to be used by OSM: [Configuring VMware vCloud Director](04-vim-setup.md#vmwares-vcloud-director)
-- **OpenVIM.** Check the following link to know how to install and use openvim for OSM: [OpenVIM installation](13-openvim-installation.md). OpenVIM must run in 'normal' mode (not test or fake) to have real virtual machines reachable from OSM.
+- **OpenVIM.** Check the following link to know how to install and use openvim for OSM: [OpenVIM installation](14-openvim-installation.md). OpenVIM must run in 'normal' mode (not test or fake) to have real virtual machines reachable from OSM.

OSM can manage external SDN controllers to perform the dataplane underlay network connectivity on behalf of the VIM. See [EPA and SDN assist](04-vim-setup.md#advanced-setups-for-high-io-performance-epa-and-sdn-assist)

diff --git a/02-osm-architecture-and-functions.md b/02-osm-architecture-and-functions.md
index 7ca5c83..b747846 100644
--- a/02-osm-architecture-and-functions.md
+++ b/02-osm-architecture-and-functions.md
@@ -1,7 +1,7 @@
# OSM Architecture and Functions

-[OSM-IM-PAGE]: 11-osm-im.md
-[OSM-NBI-PAGE]: 12-osm-nbi.md
+[OSM-IM-PAGE]: 12-osm-im.md
+[OSM-NBI-PAGE]: 13-osm-nbi.md

The goal of ETSI OSM (Open Source MANO) is the development of a community-driven production-quality **E2E Network Service Orchestrator (E2E NSO)** for telco services, capable of modelling and automating real telco-grade services, with all the intrinsic complexity of production environments. OSM provides a way to accelerate maturation of NFV technologies and standards, enable a broad ecosystem of VNF vendors, and test and validate the joint interaction of the orchestrator with the other components it has to interact with: commercial NFV infrastructures (NFVI+VIM) and Network Functions (either VNFs, PNFs or Hybrid ones).

diff --git a/03-installing-osm.md b/03-installing-osm.md
index 4787e1b..5524fb6 100644
--- a/03-installing-osm.md
+++ b/03-installing-osm.md
@@ -286,7 +286,7 @@ export OSM_HOSTNAME=$(kubectl get -n osm -o jsonpath="{.spec.rules[0].host}" ing
echo "OSM_HOSTNAME (for osm client): $OSM_HOSTNAME"
```

-For additional options, see `osm --help` for more info, and check our OSM client reference guide [here](10-osm-client-commands-reference.md)
+For additional options, see `osm --help` for more info, and check our OSM client reference guide [here](11-osm-client-commands-reference.md)

## Reference. Helm-based OSM installation

diff --git a/04-vim-setup.md b/04-vim-setup.md
index 33d30dd..6d717f2 100644
--- a/04-vim-setup.md
+++ b/04-vim-setup.md
@@ -117,7 +117,7 @@ For common options, you may refer to the general OpenStack Setup Guide.

### How to set up an OpenVIM environment

-A full step-by step guide for installing an OpenVIM environment from scratch can be found in [a specific chapter](13-openvim-installation.md).
+A full step-by-step guide for installing an OpenVIM environment from scratch can be found in [a specific chapter](14-openvim-installation.md).
### How to add OpenVIM as VIM target in OSM @@ -464,13 +464,13 @@ Detailed documentation is available at and `: Currently it can be any string. - ``: It is the NS ID got after instantiation of network service. -Please note that a token should be obtained first in order to query a metric. More information on this can be found in the [OSM NBI Documentation](12-osm-nbi.md) +Please note that a token should be obtained first in order to query a metric. More information on this can be found in the [OSM NBI Documentation](13-osm-nbi.md) In response, you would get a list of the available VNF metrics, for example: diff --git a/07-osm-platform-configuration.md b/07-osm-platform-configuration.md index a87cf48..2f2e87d 100644 --- a/07-osm-platform-configuration.md +++ b/07-osm-platform-configuration.md @@ -1078,4 +1078,4 @@ Audit logs include the following event key names, ### Additional Notes -All the audit log events are captured as part of the NBI logs. For more information about how to check NBI logs, you can refer to [ANNEX 1: Troubleshooting](09-troubleshooting.md) +All the audit log events are captured as part of the NBI logs. For more information about how to check NBI logs, you can refer to [ANNEX 1: Troubleshooting](10-troubleshooting.md) diff --git a/20-reference.md b/20-reference.md index fb700cc..b92ecba 100644 --- a/20-reference.md +++ b/20-reference.md @@ -180,13 +180,13 @@ CPU thread pinning policy describes how to place the guest CPUs when the host su - EPA CPU Quota -CPU quota describes the CPU resource allocation policy. Limit and Reserve values are defined in MHz. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/21-reference.html#Quota-Parameters) section for quota details. +CPU quota describes the CPU resource allocation policy. Limit and Reserve values are defined in MHz. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/20-reference.html#Quota-Parameters) section for quota details. ### Virtual Interface EPA VIF Quota -Virtual interfaces quota describes the virtual interface bandwidth resource allocation policy. Limit and Reserve values are defined in Mbps. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/21-reference.html#Quota-Parameters) section for quota details. +Virtual interfaces quota describes the virtual interface bandwidth resource allocation policy. Limit and Reserve values are defined in Mbps. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/20-reference.html#Quota-Parameters) section for quota details. ### Virtual Memory @@ -237,13 +237,13 @@ Memory page allocation size. If a VM requires hugepages, it should choose LARGE Memory Quota -Memory quota describes the memory resource allocation policy. Limit and Reserve values are defined in MB. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/21-reference.html#Quota-Parameters) for quota details. +Memory quota describes the memory resource allocation policy. Limit and Reserve values are defined in MB. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/20-reference.html#Quota-Parameters) for quota details. ### Virtual Storage Disk IO Quota -Disk IO quota describes the disk IO operations resource allocation policy. Limit and Reserve values are defined in IOPS. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/21-reference.html#Quota-Parameters) section for quota details. 
+Disk IO quota describes the disk IO operations resource allocation policy. Limit and Reserve values are defined in IOPS. Please see the [Quota Parameters](https://osm.etsi.org/docs/user-guide/latest/20-reference.html#Quota-Parameters) section for quota details. ### Quota Parameters -- GitLab