Console Output (build FAILED)

[Skipping 126 KB of earlier log output]
[dryrun] Attached patch:
	{"metadata":{"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/containerd/containerd.sock"}}}
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node osmtest202401211214 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node osmtest202401211214 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[dryrun] Would perform action GET on resource "nodes" in API group "core/v1"
[dryrun] Resource name: "osmtest202401211214"
[dryrun] Would perform action PATCH on resource "nodes" in API group "core/v1"
[dryrun] Resource name: "osmtest202401211214"
[dryrun] Attached patch:
	{"metadata":{"labels":{"node-role.kubernetes.io/control-plane":"","node.kubernetes.io/exclude-from-external-load-balancers":""}},"spec":{"taints":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"}]}}
[bootstrap-token] Using token: 7lrtzk.dn1o3i6p30f7rbbd
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[dryrun] Would perform action GET on resource "secrets" in API group "core/v1"
[dryrun] Resource name: "bootstrap-token-7lrtzk"
[dryrun] Would perform action CREATE on resource "secrets" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4=
	  expiration: MjAyNC0wMS0yMlQxMjoxODozOVo=
	  token-id: N2xydHpr
	  token-secret: ZG4xbzNpNnAzMGY3cmJiZA==
	  usage-bootstrap-authentication: dHJ1ZQ==
	  usage-bootstrap-signing: dHJ1ZQ==
	kind: Secret
	metadata:
	  creationTimestamp: null
	  name: bootstrap-token-7lrtzk
	  namespace: kube-system
	type: bootstrap.kubernetes.io/token
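
Note: the data fields of a bootstrap-token Secret are base64-encoded; they can be decoded to confirm what kubeadm stored (a sketch, runnable anywhere base64 is available):

  # token-id decodes to the first half of the join token
  echo N2xydHpr | base64 -d                       # -> 7lrtzk
  # expiration decodes to an RFC 3339 timestamp (bootstrap tokens default to a 24h TTL)
  echo MjAyNC0wMS0yMlQxMjoxODozOVo= | base64 -d   # -> 2024-01-22T12:18:39Z
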
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[dryrun] Would perform action CREATE on resource "clusterroles" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRole
	metadata:
	  creationTimestamp: null
	  name: kubeadm:get-nodes
	  namespace: kube-system
	rules:
	- apiGroups:
	  - ""
	  resources:
	  - nodes
	  verbs:
	  - get
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:get-nodes
	  namespace: kube-system
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: kubeadm:get-nodes
	subjects:
	- kind: Group
	  name: system:bootstrappers:kubeadm:default-node-token
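
Note: ClusterRole and ClusterRoleBinding are cluster-scoped, so the namespace: kube-system field in the metadata above is ignored by the API server. A quick way to confirm after a real run (sketch):

  # cluster-scoped objects carry no namespace once created; this prints an empty string
  kubectl get clusterrole kubeadm:get-nodes -o jsonpath='{.metadata.namespace}'
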
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:kubelet-bootstrap
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:node-bootstrapper
	subjects:
	- kind: Group
	  name: system:bootstrappers:kubeadm:default-node-token
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller to automatically approve CSRs from a Node Bootstrap Token
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:node-autoapprove-bootstrap
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
	subjects:
	- kind: Group
	  name: system:bootstrappers:kubeadm:default-node-token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:node-autoapprove-certificate-rotation
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
	subjects:
	- kind: Group
	  name: system:nodes
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  kubeconfig: |
	    apiVersion: v1
	    clusters:
	    - cluster:
	        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJME1ERXlNVEV5TVRnek5sb1hEVE0wTURFeE9ERXlNVGd6Tmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTldrClhmdGN2TllzSk9BclRwazNYZ1VBSkh2S1NwL3kwd1VSSUEwdGVsb3B0MjJrSjIzanRYSmNBSVAvbXBRODRWOGIKT1lZN01GM3d1WHRpZUlWeTl3bThLem9MSHZOeVVYZnhkT0RKRVRSUFlYb2NPd2Jud2xYYzJBVnFLajdDQTFPZQpVb2Z1clc5UzRXUW1BaGFhMHJMUWNIVjNKMXM0TWowRzNscDRWeEQ5QVNJZENSSlVzSVEycUlLUW1RMXd3RlFSCjR4T0J2U09Qb3dKSXFLREtZMGRDamlKKzlxTStFVTBqdDZnc21TWUNyWThrSzVEbUtPWldBcCtYQ2pveG11TG8KWUJWTGgwWEQ5NlBZWVdETzd3SHpqVEpVRUIrK3lYRmY3Qm1zVW1YU1dFT1BUbkNYK0VYTWNQMStZNjg1azJrMwpBTUJXNVpocTBtSThGQlNUTXpVQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJemMzQytTL2g0ckVvVzNoY0RibThNWDdUcXFNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSCtJWUpKQzNGenFjRzJCZmg0QwprU0ZRa0k3OUFqQUhDaktZcDR0V2hQTzdpTTBKUkYzNFFYejJ3UFhsNE1MRVRoS05jQXBFWGJXMmJxTUxCMVBuCkRmY3RNNVk1elZFNVUyS2Y3OUp0RUFkayt2OHZSMHkrVzQxQjFMWjNUWDdqUWFBWXUzVVB0cUNzbVJja21xZDgKaHBCZGRTZGhXYkQ1aTJkV25PYUFDd3Jhd0ZPVHVFL2J4SS9FQ0dkTnRYZ1Q2TkNrdGZUYXdEdEV1S1BhSGJmdQo3N0xiOXVhUGlvTDNZN1lHNTAyby83S0E2Ujc2NVh3TkpFZ3hhK0NTc2dOVXljcTNiZGFCWlZTRHM2RGFuRGhaCmx1ZHlxbHF1U1UxbmtWV21yNk1sV3Jpc3JyaTBVbllnTHNvbXRRS1JSclN3cWRac1BBRTNoWU00d0FwNkNVM3kKOFI0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
	        server: https://172.21.23.11:6443
	      name: ""
	    contexts: null
	    current-context: ""
	    kind: Config
	    preferences: {}
	    users: null
	kind: ConfigMap
	metadata:
	  creationTimestamp: null
	  name: cluster-info
	  namespace: kube-public
[dryrun] Would perform action CREATE on resource "roles" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: Role
	metadata:
	  creationTimestamp: null
	  name: kubeadm:bootstrap-signer-clusterinfo
	  namespace: kube-public
	rules:
	- apiGroups:
	  - ""
	  resourceNames:
	  - cluster-info
	  resources:
	  - configmaps
	  verbs:
	  - get
[dryrun] Would perform action CREATE on resource "rolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: RoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:bootstrap-signer-clusterinfo
	  namespace: kube-public
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: Role
	  name: kubeadm:bootstrap-signer-clusterinfo
	subjects:
	- kind: User
	  name: system:anonymous
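
Note: this Role/RoleBinding pair is what makes the cluster-info ConfigMap readable without credentials, which token-based node discovery relies on. A sketch of an anonymous fetch (from any host that can reach the API server, assuming anonymous auth is enabled, which is the default):

  curl -k https://172.21.23.11:6443/api/v1/namespaces/kube-public/configmaps/cluster-info
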
[dryrun] Would perform action LIST on resource "deployments" in API group "apps/v1"
[dryrun] Would perform action GET on resource "configmaps" in API group "core/v1"
[dryrun] Resource name: "coredns"
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  Corefile: |
	    .:53 {
	        errors
	        health {
	           lameduck 5s
	        }
	        ready
	        kubernetes cluster.local in-addr.arpa ip6.arpa {
	           pods insecure
	           fallthrough in-addr.arpa ip6.arpa
	           ttl 30
	        }
	        prometheus :9153
	        forward . /etc/resolv.conf {
	           max_concurrent 1000
	        }
	        cache 30
	        loop
	        reload
	        loadbalance
	    }
	kind: ConfigMap
	metadata:
	  creationTimestamp: null
	  name: coredns
	  namespace: kube-system
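
Note: in this Corefile, the kubernetes plugin answers queries for cluster.local and the reverse zones, and everything else is forwarded to the node's /etc/resolv.conf. A minimal in-cluster check once CoreDNS is running (sketch; the dnsutils image is only an example):

  kubectl run dnsutils --rm -it --restart=Never \
    --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 \
    -- nslookup kubernetes.default.svc.cluster.local
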
[dryrun] Would perform action CREATE on resource "clusterroles" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRole
	metadata:
	  creationTimestamp: null
	  name: system:coredns
	rules:
	- apiGroups:
	  - ""
	  resources:
	  - endpoints
	  - services
	  - pods
	  - namespaces
	  verbs:
	  - list
	  - watch
	- apiGroups:
	  - ""
	  resources:
	  - nodes
	  verbs:
	  - get
	- apiGroups:
	  - discovery.k8s.io
	  resources:
	  - endpointslices
	  verbs:
	  - list
	  - watch
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: system:coredns
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:coredns
	subjects:
	- kind: ServiceAccount
	  name: coredns
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "serviceaccounts" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  creationTimestamp: null
	  name: coredns
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "deployments" in API group "apps/v1"
[dryrun] Attached object:
	apiVersion: apps/v1
	kind: Deployment
	metadata:
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-dns
	  name: coredns
	  namespace: kube-system
	spec:
	  replicas: 2
	  selector:
	    matchLabels:
	      k8s-app: kube-dns
	  strategy:
	    rollingUpdate:
	      maxUnavailable: 1
	    type: RollingUpdate
	  template:
	    metadata:
	      creationTimestamp: null
	      labels:
	        k8s-app: kube-dns
	    spec:
	      affinity:
	        podAntiAffinity:
	          preferredDuringSchedulingIgnoredDuringExecution:
	          - podAffinityTerm:
	              labelSelector:
	                matchExpressions:
	                - key: k8s-app
	                  operator: In
	                  values:
	                  - kube-dns
	              topologyKey: kubernetes.io/hostname
	            weight: 100
	      containers:
	      - args:
	        - -conf
	        - /etc/coredns/Corefile
	        image: registry.k8s.io/coredns/coredns:v1.9.3
	        imagePullPolicy: IfNotPresent
	        livenessProbe:
	          failureThreshold: 5
	          httpGet:
	            path: /health
	            port: 8080
	            scheme: HTTP
	          initialDelaySeconds: 60
	          successThreshold: 1
	          timeoutSeconds: 5
	        name: coredns
	        ports:
	        - containerPort: 53
	          name: dns
	          protocol: UDP
	        - containerPort: 53
	          name: dns-tcp
	          protocol: TCP
	        - containerPort: 9153
	          name: metrics
	          protocol: TCP
	        readinessProbe:
	          httpGet:
	            path: /ready
	            port: 8181
	            scheme: HTTP
	        resources:
	          limits:
	            memory: 170Mi
	          requests:
	            cpu: 100m
	            memory: 70Mi
	        securityContext:
	          allowPrivilegeEscalation: false
	          capabilities:
	            add:
	            - NET_BIND_SERVICE
	            drop:
	            - all
	          readOnlyRootFilesystem: true
	        volumeMounts:
	        - mountPath: /etc/coredns
	          name: config-volume
	          readOnly: true
	      dnsPolicy: Default
	      nodeSelector:
	        kubernetes.io/os: linux
	      priorityClassName: system-cluster-critical
	      serviceAccountName: coredns
	      tolerations:
	      - key: CriticalAddonsOnly
	        operator: Exists
	      - effect: NoSchedule
	        key: node-role.kubernetes.io/control-plane
	      volumes:
	      - configMap:
	          items:
	          - key: Corefile
	            path: Corefile
	          name: coredns
	        name: config-volume
	status: {}
[dryrun] Would perform action CREATE on resource "services" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: Service
	metadata:
	  annotations:
	    prometheus.io/port: "9153"
	    prometheus.io/scrape: "true"
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-dns
	    kubernetes.io/cluster-service: "true"
	    kubernetes.io/name: CoreDNS
	  name: kube-dns
	  namespace: kube-system
	  resourceVersion: "0"
	spec:
	  clusterIP: 10.96.0.10
	  ports:
	  - name: dns
	    port: 53
	    protocol: UDP
	    targetPort: 53
	  - name: dns-tcp
	    port: 53
	    protocol: TCP
	    targetPort: 53
	  - name: metrics
	    port: 9153
	    protocol: TCP
	    targetPort: 9153
	  selector:
	    k8s-app: kube-dns
	status:
	  loadBalancer: {}
[addons] Applied essential addon: CoreDNS
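
Note: the Service keeps the legacy kube-dns name and the fixed clusterIP 10.96.0.10, the address kubelets hand to pods as their nameserver (the tenth host address of the default 10.96.0.0/12 service CIDR). To confirm after a real run (sketch):

  kubectl -n kube-system get svc kube-dns   # CLUSTER-IP should be 10.96.0.10
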
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  config.conf: |-
	    apiVersion: kubeproxy.config.k8s.io/v1alpha1
	    bindAddress: 0.0.0.0
	    bindAddressHardFail: false
	    clientConnection:
	      acceptContentTypes: ""
	      burst: 0
	      contentType: ""
	      kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
	      qps: 0
	    clusterCIDR: 10.244.0.0/16
	    configSyncPeriod: 0s
	    conntrack:
	      maxPerCore: null
	      min: null
	      tcpCloseWaitTimeout: null
	      tcpEstablishedTimeout: null
	    detectLocal:
	      bridgeInterface: ""
	      interfaceNamePrefix: ""
	    detectLocalMode: ""
	    enableProfiling: false
	    healthzBindAddress: ""
	    hostnameOverride: ""
	    iptables:
	      localhostNodePorts: null
	      masqueradeAll: false
	      masqueradeBit: null
	      minSyncPeriod: 0s
	      syncPeriod: 0s
	    ipvs:
	      excludeCIDRs: null
	      minSyncPeriod: 0s
	      scheduler: ""
	      strictARP: false
	      syncPeriod: 0s
	      tcpFinTimeout: 0s
	      tcpTimeout: 0s
	      udpTimeout: 0s
	    kind: KubeProxyConfiguration
	    metricsBindAddress: ""
	    mode: ""
	    nodePortAddresses: null
	    oomScoreAdj: null
	    portRange: ""
	    showHiddenMetricsForVersion: ""
	    winkernel:
	      enableDSR: false
	      forwardHealthCheckVip: false
	      networkName: ""
	      rootHnsEndpointName: ""
	      sourceVip: ""
	  kubeconfig.conf: |-
	    apiVersion: v1
	    kind: Config
	    clusters:
	    - cluster:
	        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
	        server: https://172.21.23.11:6443
	      name: default
	    contexts:
	    - context:
	        cluster: default
	        namespace: default
	        user: default
	      name: default
	    current-context: default
	    users:
	    - name: default
	      user:
	        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
	kind: ConfigMap
	metadata:
	  annotations:
	    kubeadm.kubernetes.io/component-config.hash: sha256:49070dca166611c9a463535f86effe210177eb9fe4c00c1943429079a0be10c0
	  creationTimestamp: null
	  labels:
	    app: kube-proxy
	  name: kube-proxy
	  namespace: kube-system
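
Note: in this kube-proxy configuration, mode: "" selects the default backend (iptables on Linux) and the empty metricsBindAddress defaults to 127.0.0.1:10249. The mode actually in use can be queried on the node itself (sketch):

  curl -s http://127.0.0.1:10249/proxyMode   # expected output: iptables
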
[dryrun] Would perform action CREATE on resource "daemonsets" in API group "apps/v1"
[dryrun] Attached object:
	apiVersion: apps/v1
	kind: DaemonSet
	metadata:
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-proxy
	  name: kube-proxy
	  namespace: kube-system
	spec:
	  selector:
	    matchLabels:
	      k8s-app: kube-proxy
	  template:
	    metadata:
	      creationTimestamp: null
	      labels:
	        k8s-app: kube-proxy
	    spec:
	      containers:
	      - command:
	        - /usr/local/bin/kube-proxy
	        - --config=/var/lib/kube-proxy/config.conf
	        - --hostname-override=$(NODE_NAME)
	        env:
	        - name: NODE_NAME
	          valueFrom:
	            fieldRef:
	              fieldPath: spec.nodeName
	        image: registry.k8s.io/kube-proxy:v1.26.13
	        imagePullPolicy: IfNotPresent
	        name: kube-proxy
	        resources: {}
	        securityContext:
	          privileged: true
	        volumeMounts:
	        - mountPath: /var/lib/kube-proxy
	          name: kube-proxy
	        - mountPath: /run/xtables.lock
	          name: xtables-lock
	        - mountPath: /lib/modules
	          name: lib-modules
	          readOnly: true
	      hostNetwork: true
	      nodeSelector:
	        kubernetes.io/os: linux
	      priorityClassName: system-node-critical
	      serviceAccountName: kube-proxy
	      tolerations:
	      - operator: Exists
	      volumes:
	      - configMap:
	          name: kube-proxy
	        name: kube-proxy
	      - hostPath:
	          path: /run/xtables.lock
	          type: FileOrCreate
	        name: xtables-lock
	      - hostPath:
	          path: /lib/modules
	        name: lib-modules
	  updateStrategy:
	    type: RollingUpdate
	status:
	  currentNumberScheduled: 0
	  desiredNumberScheduled: 0
	  numberMisscheduled: 0
	  numberReady: 0
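
Note: the zeroed status stanza is an artifact of the dry run; after a real apply, the DaemonSet can be watched until a kube-proxy pod is ready on every Linux node (sketch):

  kubectl -n kube-system rollout status daemonset/kube-proxy
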
[dryrun] Would perform action CREATE on resource "serviceaccounts" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  creationTimestamp: null
	  name: kube-proxy
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:node-proxier
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:node-proxier
	subjects:
	- kind: ServiceAccount
	  name: kube-proxy
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "roles" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: Role
	metadata:
	  creationTimestamp: null
	  name: kube-proxy
	  namespace: kube-system
	rules:
	- apiGroups:
	  - ""
	  resourceNames:
	  - kube-proxy
	  resources:
	  - configmaps
	  verbs:
	  - get
[dryrun] Would perform action CREATE on resource "rolebindings" in API group "rbac.authorization.k8s.io/v1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1
	kind: RoleBinding
	metadata:
	  creationTimestamp: null
	  name: kube-proxy
	  namespace: kube-system
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: Role
	  name: kube-proxy
	subjects:
	- kind: Group
	  name: system:bootstrappers:kubeadm:default-node-token
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/tmp/kubeadm-init-dryrun473168221/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.21.23.11:6443 --token 7lrtzk.dn1o3i6p30f7rbbd \
	--discovery-token-ca-cert-hash sha256:886c71f62ad4f1241e0272195bc1af50e7329b8072043fb71e70c861d1af3e91 
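
Note: the --discovery-token-ca-cert-hash pins the cluster CA so a joining node cannot be redirected to a rogue API server. The hash can be recomputed on the control-plane node with the command from the kubeadm documentation:

  openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
    | openssl rsa -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | sed 's/^.* //'
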
I0121 12:18:39.781229    5701 version.go:256] remote version is much newer: v1.29.1; falling back to: stable-1.26
[init] Using Kubernetes version: v1.26.13
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local osmtest202401211214] and IPs [10.96.0.1 172.21.23.11]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost osmtest202401211214] and IPs [172.21.23.11 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost osmtest202401211214] and IPs [172.21.23.11 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 5.501851 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node osmtest202401211214 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node osmtest202401211214 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: lo11sh.ehf6ikarknkrl5sz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller to automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.21.23.11:6443 --token lo11sh.ehf6ikarknkrl5sz \
	--discovery-token-ca-cert-hash sha256:cd04ff1d9b7f243dc736fec55d4e0f56a4bc27ae94220176aa028834fe4cb43d 
Reading existing namespaces
NAME              STATUS   AGE
default           Active   7s
kube-node-lease   Active   9s
kube-public       Active   9s
kube-system       Active   9s
Track k8scluster init_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839548&event=k8scluster&operation=init_k8s_ok&value=&comment=&tags=
--2024-01-21 12:19:08--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.110.133, 185.199.108.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4407 (4.3K) [text/plain]
Saving to: ‘/tmp/flannel.nWiHSD/kube-flannel.yml’


2024-01-21 12:19:08 (36.9 MB/s) - ‘/tmp/flannel.nWiHSD/kube-flannel.yml’ saved [4407/4407]

namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
node/osmtest202401211214 untainted
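
Note: untainting lets this single node schedule ordinary workloads despite being a control-plane node; the trailing '-' in the equivalent command removes the taint (sketch):

  kubectl taint nodes osmtest202401211214 node-role.kubernetes.io/control-plane:NoSchedule-
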
LAST SEEN   TYPE      REASON                    OBJECT                     MESSAGE
12s         Normal    Starting                  node/osmtest202401211214   Starting kubelet.
12s         Warning   InvalidDiskCapacity       node/osmtest202401211214   invalid capacity 0 on image filesystem
12s         Normal    NodeAllocatableEnforced   node/osmtest202401211214   Updated Node Allocatable limit across pods
12s         Normal    NodeHasSufficientMemory   node/osmtest202401211214   Node osmtest202401211214 status is now: NodeHasSufficientMemory
12s         Normal    NodeHasNoDiskPressure     node/osmtest202401211214   Node osmtest202401211214 status is now: NodeHasNoDiskPressure
12s         Normal    NodeHasSufficientPID      node/osmtest202401211214   Node osmtest202401211214 status is now: NodeHasSufficientPID
Track k8scluster k8s_ready_before_helm: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839555&event=k8scluster&operation=k8s_ready_before_helm&value=&comment=&tags=
Deleting existing namespace osm: kubectl delete ns osm
Helm3 is not installed, installing ...
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 14.7M  100 14.7M    0     0  18.7M      0 --:--:-- --:--:-- --:--:-- 18.7M
linux-amd64/
linux-amd64/LICENSE
linux-amd64/README.md
linux-amd64/helm
version.BuildInfo{Version:"v3.11.3", GitCommit:"323249351482b3bbfc9f5004f65d400aa70f9ae7", GitTreeState:"clean", GoVersion:"go1.20.3"}
"stable" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈Happy Helming!⎈
Track k8scluster install_helm_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839558&event=k8scluster&operation=install_helm_ok&value=&comment=&tags=
Installing OpenEBS
"openebs" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈Happy Helming!⎈
NAME: openebs
LAST DEPLOYED: Sun Jan 21 12:19:20 2024
NAMESPACE: openebs
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Successfully installed OpenEBS.

Check the status by running: kubectl get pods -n openebs

The default values will install NDM and enable OpenEBS hostpath and device
storage engines along with their default StorageClasses. Use `kubectl get sc`
to see the list of installed OpenEBS StorageClasses.

**Note**: If you are upgrading from the older helm chart that was using cStor
and Jiva (non-csi) volumes, you will have to run the following command to include
the older provisioners:

helm upgrade openebs openebs/openebs \
	--namespace openebs \
	--set legacy.enabled=true \
	--reuse-values

For other engines, you will need to perform a few more additional steps to
enable the engine, configure the engines (e.g. creating pools) and create 
StorageClasses. 

For example, cStor can be enabled using commands like:

helm upgrade openebs openebs/openebs \
	--namespace openebs \
	--set cstor.enabled=true \
	--reuse-values

For more information, 
- view the online documentation at https://openebs.io/docs or
- connect with an active community on Kubernetes slack #openebs channel.
NAME   	NAMESPACE	REVISION	UPDATED                                	STATUS  	CHART        	APP VERSION
openebs	openebs  	1       	2024-01-21 12:19:20.843583399 +0000 UTC	deployed	openebs-3.7.0	3.7.0      
Waiting for storageclass
Storageclass available
storageclass.storage.k8s.io/openebs-hostpath patched
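
Note: the patch most likely marks openebs-hostpath as the cluster's default StorageClass; the exact patch body is not shown in this log, but the standard form is (sketch):

  kubectl patch storageclass openebs-hostpath -p \
    '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
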
Track k8scluster k8s_storageclass_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839561&event=k8scluster&operation=k8s_storageclass_ok&value=&comment=&tags=
Installing MetalLB
"metallb" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈Happy Helming!⎈
NAME: metallb
LAST DEPLOYED: Sun Jan 21 12:19:23 2024
NAMESPACE: metallb-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MetalLB is now running in the cluster.

Now you can configure it via its CRs. Please refer to the metallb official docs
on how to use the CRs.
Track k8scluster k8s_metallb_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839563&event=k8scluster&operation=k8s_metallb_ok&value=&comment=&tags=
Installing cert-manager
"jetstack" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "jetstack" chart repository
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈Happy Helming!⎈
NAME: cert-manager
LAST DEPLOYED: Sun Jan 21 12:19:26 2024
NAMESPACE: cert-manager
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
cert-manager v1.9.1 has been deployed successfully!

In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).

More information on the different types of issuers and how to configure them
can be found in our documentation:

https://cert-manager.io/docs/configuration/

For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the `ingress-shim`
documentation:

https://cert-manager.io/docs/usage/ingress/
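
Note: a minimal way to start issuing certificates, per the chart notes above, is a self-signed ClusterIssuer; a sketch (the resource name is illustrative):

  apiVersion: cert-manager.io/v1
  kind: ClusterIssuer
  metadata:
    name: selfsigned-issuer
  spec:
    selfSigned: {}
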
Track k8scluster k8s_certmanager_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839593&event=k8scluster&operation=k8s_certmanager_ok&value=&comment=&tags=

Bootstrapping... check 1 of 100
OpenEBS: Waiting for 1 of 3 pods to be ready:
openebs-ndm-operator-55f7cfb488-bchzq	0/1	

MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 2 of 100
OpenEBS: Waiting for 1 of 3 pods to be ready:
openebs-ndm-operator-55f7cfb488-bchzq	0/1	

MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 3 of 100
MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 4 of 100
MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 5 of 100
MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 6 of 100
MetalLB: Waiting for 2 of 2 pods to be ready:
metallb-controller-58c5997556-qxcgm	0/1	
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 7 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 8 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	0/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 9 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	2/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 10 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 11 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 12 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 13 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 14 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 15 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found


Bootstrapping... check 16 of 100
MetalLB: Waiting for 1 of 2 pods to be ready:
metallb-speaker-rhxsd	3/4	

CertManager: Waiting for 1 of 1 pods to be ready:
No resources found

===> Successful checks: 1/10
===> Successful checks: 2/10
===> Successful checks: 3/10
===> Successful checks: 4/10
===> Successful checks: 5/10
===> Successful checks: 6/10
===> Successful checks: 7/10
===> Successful checks: 8/10
===> Successful checks: 9/10
===> Successful checks: 10/10
K8S CLUSTER IS READY
Track k8scluster k8s_ready_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839652&event=k8scluster&operation=k8s_ready_ok&value=&comment=&tags=
Creating IP address pool manifest: /etc/osm/metallb-ipaddrpool.yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 172.21.23.11/32
Applying IP address pool manifest: kubectl apply -f /etc/osm/metallb-ipaddrpool.yaml
ipaddresspool.metallb.io/first-pool created
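
Note: with MetalLB v0.13+ CRs, an IPAddressPool alone assigns addresses but does not announce them; in layer-2 mode an L2Advertisement is also needed (not shown in this log excerpt). A sketch, with an illustrative resource name:

  apiVersion: metallb.io/v1beta1
  kind: L2Advertisement
  metadata:
    name: first-pool-l2
    namespace: metallb-system
  spec:
    ipAddressPools:
    - first-pool
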
Track k8scluster k8scluster_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839652&event=k8scluster&operation=k8scluster_ok&value=&comment=&tags=
Track juju juju_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839653&event=juju&operation=juju_ok&value=&comment=&tags=
Track docker_images docker_images_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839653&event=docker_images&operation=docker_images_ok&value=&comment=&tags=
DEBUG_INSTALL=
OSM_DEVOPS=/usr/share/osm-devops
OSM_DOCKER_TAG=testing-daily
OSM_HELM_WORK_DIR=/etc/osm/helm
"bitnami" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "jetstack" chart repository
...Successfully got an update from the "stable" chart repository
...Successfully got an update from the "bitnami" chart repository
Update Complete. ⎈Happy Helming!⎈
Release "mongodb-k8s" does not exist. Installing it now.
NAME: mongodb-k8s
LAST DEPLOYED: Sun Jan 21 12:20:57 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: mongodb
CHART VERSION: 13.9.4
APP VERSION: 6.0.5

** Please be patient while the chart is being deployed **

MongoDB® can be accessed on the following DNS name(s) and ports from within your cluster:

    mongodb-k8s-0.mongodb-k8s-headless.osm.svc.cluster.local:27017
    mongodb-k8s-1.mongodb-k8s-headless.osm.svc.cluster.local:27017

To connect to your database, create a MongoDB® client container:

    kubectl run --namespace osm mongodb-k8s-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image docker.io/bitnami/mongodb:6.0.5-debian-11-r4 --command -- bash

Then, run the following command:
    mongosh admin --host "mongodb-k8s-0.mongodb-k8s-headless.osm.svc.cluster.local:27017,mongodb-k8s-1.mongodb-k8s-headless.osm.svc.cluster.local:27017"
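
Note: the client-container command above references $MONGODB_ROOT_PASSWORD without defining it; with the Bitnami chart it is normally read from the release Secret (the secret and key names below are the chart defaults and may differ):

  export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace osm mongodb-k8s \
    -o jsonpath="{.data.mongodb-root-password}" | base64 -d)
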
Track deploy_osm deploy_mongodb_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839657&event=deploy_osm&operation=deploy_mongodb_ok&value=&comment=&tags=
helm install -n osm --create-namespace osm /usr/share/osm-devops/installers/helm/osm  --set global.image.repositoryBase=opensourcemano --set mysql.dbHostPath=/var/lib/osm/osm
NAME: osm
LAST DEPLOYED: Sun Jan 21 12:20:58 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace osm -o jsonpath="{.spec.ports[0].nodePort}" services nbi)
  export NODE_IP=$(kubectl get nodes --namespace osm -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
USER-SUPPLIED VALUES:
global:
  image:
    repositoryBase: opensourcemano
mysql:
  dbHostPath: /var/lib/osm/osm
Track deploy_osm deploy_osm_services_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839659&event=deploy_osm&operation=deploy_osm_services_k8s_ok&value=&comment=&tags=
DEBUG_INSTALL=
OSM_DEVOPS=/usr/share/osm-devops
OSM_DOCKER_TAG=testing-daily
OSM_HELM_WORK_DIR=/etc/osm/helm
Updating Helm values file helm/values/airflow-values.yaml to use defaultAirflowTag: testing-daily
Updating Helm values file helm/values/airflow-values.yaml to use defaultAirflowRepository: opensourcemano/airflow
"apache-airflow" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "apache-airflow" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "jetstack" chart repository
...Successfully got an update from the "stable" chart repository
...Successfully got an update from the "bitnami" chart repository
Update Complete. ⎈Happy Helming!⎈
Release "airflow" does not exist. Installing it now.
NAME: airflow
LAST DEPLOYED: Sun Jan 21 12:21:01 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Thank you for installing Apache Airflow 2.5.3!

Your release is named airflow.
You can now access your dashboard(s) by executing the following command(s) and visiting the corresponding port at localhost in your browser:

Airflow Webserver:     kubectl port-forward svc/airflow-webserver 8080:8080 --namespace osm
Default Webserver (Airflow UI) Login credentials:
    username: admin
    password: admin
Default Postgres connection credentials:
    username: postgres
    password: postgres
    port: 5432

You can get the Fernet Key value by running the following:

    echo Fernet Key: $(kubectl get secret --namespace osm airflow-fernet-key -o jsonpath="{.data.fernet-key}" | base64 --decode)
Track deploy_osm airflow_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839887&event=deploy_osm&operation=airflow_ok&value=&comment=&tags=
"prometheus-community" has been added to your repositories
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "apache-airflow" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "jetstack" chart repository
...Successfully got an update from the "prometheus-community" chart repository
...Successfully got an update from the "stable" chart repository
...Successfully got an update from the "bitnami" chart repository
Update Complete. ⎈Happy Helming!⎈
Release "pushgateway" does not exist. Installing it now.
NAME: pushgateway
LAST DEPLOYED: Sun Jan 21 12:24:50 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace osm -l "app=prometheus-pushgateway,release=pushgateway" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
Track deploy_osm pushgateway_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839890&event=deploy_osm&operation=pushgateway_ok&value=&comment=&tags=
"prometheus-community" already exists with the same configuration, skipping
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "apache-airflow" chart repository
...Successfully got an update from the "metallb" chart repository
...Successfully got an update from the "openebs" chart repository
...Successfully got an update from the "jetstack" chart repository
...Successfully got an update from the "prometheus-community" chart repository
...Successfully got an update from the "stable" chart repository
...Successfully got an update from the "bitnami" chart repository
Update Complete. ⎈Happy Helming!⎈
Release "alertmanager" does not exist. Installing it now.
NAME: alertmanager
LAST DEPLOYED: Sun Jan 21 12:24:54 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace osm -o jsonpath="{.spec.ports[0].nodePort}" services alertmanager)
  export NODE_IP=$(kubectl get nodes --namespace osm -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
Track deploy_osm alertmanager_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839894&event=deploy_osm&operation=alertmanager_ok&value=&comment=&tags=
Track deploy_osm install_osm_ngsa_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839894&event=deploy_osm&operation=install_osm_ngsa_ok&value=&comment=&tags=
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  3102  100  3102    0     0  22101      0 --:--:-- --:--:-- --:--:-- 22157
OK
Hit:1 http://azure.archive.ubuntu.com/ubuntu jammy InRelease
Hit:2 http://azure.archive.ubuntu.com/ubuntu jammy-updates InRelease
Hit:3 http://azure.archive.ubuntu.com/ubuntu jammy-backports InRelease
Hit:4 http://azure.archive.ubuntu.com/ubuntu jammy-security InRelease
Hit:5 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing InRelease
Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [8993 B]
Hit:7 https://download.docker.com/linux/ubuntu jammy InRelease
Get:8 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing/IM amd64 Packages [855 B]
Get:9 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing/osmclient amd64 Packages [480 B]
Fetched 10.3 kB in 1s (15.8 kB/s)
Reading package lists...
W: https://osm-download.etsi.org/repository/osm/debian/testing-daily/dists/testing/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/testing-daily testing InRelease (expected testing but got )
Repository: 'deb [arch=amd64] https://osm-download.etsi.org/repository/osm/debian/testing-daily testing IM osmclient'
Description:
Archive for codename: testing components: IM,osmclient
More info: https://osm-download.etsi.org/repository/osm/debian/testing-daily
Adding repository.
Adding deb entry to /etc/apt/sources.list.d/archive_uri-https_osm-download_etsi_org_repository_osm_debian_testing-daily-jammy.list
Adding disabled deb-src entry to /etc/apt/sources.list.d/archive_uri-https_osm-download_etsi_org_repository_osm_debian_testing-daily-jammy.list
Hit:1 http://azure.archive.ubuntu.com/ubuntu jammy InRelease
Hit:2 http://azure.archive.ubuntu.com/ubuntu jammy-updates InRelease
Hit:3 http://azure.archive.ubuntu.com/ubuntu jammy-backports InRelease
Hit:4 http://azure.archive.ubuntu.com/ubuntu jammy-security InRelease
Hit:5 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing InRelease
Hit:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease
Hit:7 https://download.docker.com/linux/ubuntu jammy InRelease
Reading package lists...
W: https://osm-download.etsi.org/repository/osm/debian/testing-daily/dists/testing/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/testing-daily testing InRelease (expected testing but got )
Reading package lists...
Building dependency tree...
Reading state information...
The following additional packages will be installed:
  build-essential bzip2 cpp cpp-11 dpkg-dev fakeroot fontconfig-config
  fonts-dejavu-core g++ g++-11 gcc gcc-11 gcc-11-base javascript-common
  libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl
  libasan6 libatomic1 libc-dev-bin libc-devtools libc6-dev libcc1-0
  libcrypt-dev libdeflate0 libdpkg-perl libexpat1-dev libfakeroot
  libfile-fcntllock-perl libfontconfig1 libgcc-11-dev libgd3 libgomp1 libisl23
  libitm1 libjbig0 libjpeg-turbo8 libjpeg8 libjs-jquery libjs-sphinxdoc
  libjs-underscore liblsan0 libmpc3 libnsl-dev libpython3-dev
  libpython3.10-dev libquadmath0 libstdc++-11-dev libtiff5 libtirpc-dev
  libtsan0 libubsan1 libwebp7 libxpm4 linux-libc-dev lto-disabled-list make
  manpages-dev python3-dev python3-wheel python3.10-dev rpcsvc-proto
  zlib1g-dev
Suggested packages:
  bzip2-doc cpp-doc gcc-11-locales debian-keyring g++-multilib g++-11-multilib
  gcc-11-doc gcc-multilib autoconf automake libtool flex bison gdb gcc-doc
  gcc-11-multilib apache2 | lighttpd | httpd glibc-doc bzr libgd-tools
  libstdc++-11-doc make-doc
The following NEW packages will be installed:
  build-essential bzip2 cpp cpp-11 dpkg-dev fakeroot fontconfig-config
  fonts-dejavu-core g++ g++-11 gcc gcc-11 gcc-11-base javascript-common
  libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl
  libasan6 libatomic1 libc-dev-bin libc-devtools libc6-dev libcc1-0
  libcrypt-dev libdeflate0 libdpkg-perl libexpat1-dev libfakeroot
  libfile-fcntllock-perl libfontconfig1 libgcc-11-dev libgd3 libgomp1 libisl23
  libitm1 libjbig0 libjpeg-turbo8 libjpeg8 libjs-jquery libjs-sphinxdoc
  libjs-underscore liblsan0 libmpc3 libnsl-dev libpython3-dev
  libpython3.10-dev libquadmath0 libstdc++-11-dev libtiff5 libtirpc-dev
  libtsan0 libubsan1 libwebp7 libxpm4 linux-libc-dev lto-disabled-list make
  manpages-dev python3-dev python3-pip python3-wheel python3.10-dev
  rpcsvc-proto zlib1g-dev
0 upgraded, 64 newly installed, 0 to remove and 3 not upgraded.
Need to get 71.3 MB of archives.
After this operation, 239 MB of additional disk space will be used.
Get:1 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libc-dev-bin amd64 2.35-0ubuntu3.6 [20.3 kB]
Get:2 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 linux-libc-dev amd64 5.15.0-91.101 [1332 kB]
Get:3 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libcrypt-dev amd64 1:4.4.27-1 [112 kB]
Get:4 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 rpcsvc-proto amd64 1.4.2-0ubuntu6 [68.5 kB]
Get:5 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libtirpc-dev amd64 1.3.2-2ubuntu0.1 [192 kB]
Get:6 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libnsl-dev amd64 1.3.0-2build2 [71.3 kB]
Get:7 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libc6-dev amd64 2.35-0ubuntu3.6 [2100 kB]
Get:8 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 gcc-11-base amd64 11.4.0-1ubuntu1~22.04 [20.2 kB]
Get:9 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libisl23 amd64 0.24-2build1 [727 kB]
Get:10 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libmpc3 amd64 1.2.1-2build1 [46.9 kB]
Get:11 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 cpp-11 amd64 11.4.0-1ubuntu1~22.04 [10.0 MB]
Get:12 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 cpp amd64 4:11.2.0-1ubuntu1 [27.7 kB]
Get:13 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libcc1-0 amd64 12.3.0-1ubuntu1~22.04 [48.3 kB]
Get:14 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libgomp1 amd64 12.3.0-1ubuntu1~22.04 [126 kB]
Get:15 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libitm1 amd64 12.3.0-1ubuntu1~22.04 [30.2 kB]
Get:16 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libatomic1 amd64 12.3.0-1ubuntu1~22.04 [10.4 kB]
Get:17 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libasan6 amd64 11.4.0-1ubuntu1~22.04 [2282 kB]
Get:18 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 liblsan0 amd64 12.3.0-1ubuntu1~22.04 [1069 kB]
Get:19 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libtsan0 amd64 11.4.0-1ubuntu1~22.04 [2260 kB]
Get:20 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libubsan1 amd64 12.3.0-1ubuntu1~22.04 [976 kB]
Get:21 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libquadmath0 amd64 12.3.0-1ubuntu1~22.04 [154 kB]
Get:22 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libgcc-11-dev amd64 11.4.0-1ubuntu1~22.04 [2517 kB]
Get:23 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 gcc-11 amd64 11.4.0-1ubuntu1~22.04 [20.1 MB]
Get:24 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 gcc amd64 4:11.2.0-1ubuntu1 [5112 B]
Get:25 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libstdc++-11-dev amd64 11.4.0-1ubuntu1~22.04 [2101 kB]
Get:26 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 g++-11 amd64 11.4.0-1ubuntu1~22.04 [11.4 MB]
Get:27 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 g++ amd64 4:11.2.0-1ubuntu1 [1412 B]
Get:28 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 make amd64 4.3-4.1build1 [180 kB]
Get:29 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libdpkg-perl all 1.21.1ubuntu2.2 [237 kB]
Get:30 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 bzip2 amd64 1.0.8-5build1 [34.8 kB]
Get:31 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 lto-disabled-list all 24 [12.5 kB]
Get:32 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 dpkg-dev all 1.21.1ubuntu2.2 [922 kB]
Get:33 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 build-essential amd64 12.9ubuntu3 [4744 B]
Get:34 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libfakeroot amd64 1.28-1ubuntu1 [31.5 kB]
Get:35 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 fakeroot amd64 1.28-1ubuntu1 [60.4 kB]
Get:36 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 fonts-dejavu-core all 2.37-2build1 [1041 kB]
Get:37 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 fontconfig-config all 2.13.1-4.2ubuntu5 [29.1 kB]
Get:38 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 javascript-common all 11+nmu1 [5936 B]
Get:39 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libalgorithm-diff-perl all 1.201-1 [41.8 kB]
Get:40 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libalgorithm-diff-xs-perl amd64 0.04-6build3 [11.9 kB]
Get:41 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB]
Get:42 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libfontconfig1 amd64 2.13.1-4.2ubuntu5 [131 kB]
Get:43 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libjpeg-turbo8 amd64 2.1.2-0ubuntu1 [134 kB]
Get:44 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libjpeg8 amd64 8c-2ubuntu10 [2264 B]
Get:45 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libdeflate0 amd64 1.10-2 [70.9 kB]
Get:46 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libjbig0 amd64 2.1-3.1ubuntu0.22.04.1 [29.2 kB]
Get:47 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libwebp7 amd64 1.2.2-2ubuntu0.22.04.2 [206 kB]
Get:48 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libtiff5 amd64 4.3.0-6ubuntu0.7 [185 kB]
Get:49 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libxpm4 amd64 1:3.5.12-1ubuntu0.22.04.2 [36.7 kB]
Get:50 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libgd3 amd64 2.3.0-2ubuntu2 [129 kB]
Get:51 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libc-devtools amd64 2.35-0ubuntu3.6 [29.0 kB]
Get:52 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libexpat1-dev amd64 2.4.7-1ubuntu0.2 [147 kB]
Get:53 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libfile-fcntllock-perl amd64 0.22-3build7 [33.9 kB]
Get:54 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libjs-jquery all 3.6.0+dfsg+~3.5.13-1 [321 kB]
Get:55 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libjs-underscore all 1.13.2~dfsg-2 [118 kB]
Get:56 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 libjs-sphinxdoc all 4.3.2-1 [139 kB]
Get:57 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 zlib1g-dev amd64 1:1.2.11.dfsg-2ubuntu9.2 [164 kB]
Get:58 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libpython3.10-dev amd64 3.10.12-1~22.04.3 [4762 kB]
Get:59 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 libpython3-dev amd64 3.10.6-1~22.04 [7166 B]
Get:60 http://azure.archive.ubuntu.com/ubuntu jammy/main amd64 manpages-dev all 5.10-1ubuntu1 [2309 kB]
Get:61 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 python3.10-dev amd64 3.10.12-1~22.04.3 [507 kB]
Get:62 http://azure.archive.ubuntu.com/ubuntu jammy-updates/main amd64 python3-dev amd64 3.10.6-1~22.04 [26.0 kB]
Get:63 http://azure.archive.ubuntu.com/ubuntu jammy-updates/universe amd64 python3-wheel all 0.37.1-2ubuntu0.22.04.1 [32.0 kB]
Get:64 http://azure.archive.ubuntu.com/ubuntu jammy-updates/universe amd64 python3-pip all 22.0.2+dfsg-1ubuntu0.4 [1305 kB]
Fetched 71.3 MB in 1s (54.2 MB/s)
Selecting previously unselected package libc-dev-bin.
(Reading database ... 62694 files and directories currently installed.)
Preparing to unpack .../00-libc-dev-bin_2.35-0ubuntu3.6_amd64.deb ...
Unpacking libc-dev-bin (2.35-0ubuntu3.6) ...
Selecting previously unselected package linux-libc-dev:amd64.
Preparing to unpack .../01-linux-libc-dev_5.15.0-91.101_amd64.deb ...
Unpacking linux-libc-dev:amd64 (5.15.0-91.101) ...
Selecting previously unselected package libcrypt-dev:amd64.
Preparing to unpack .../02-libcrypt-dev_1%3a4.4.27-1_amd64.deb ...
Unpacking libcrypt-dev:amd64 (1:4.4.27-1) ...
Selecting previously unselected package rpcsvc-proto.
Preparing to unpack .../03-rpcsvc-proto_1.4.2-0ubuntu6_amd64.deb ...
Unpacking rpcsvc-proto (1.4.2-0ubuntu6) ...
Selecting previously unselected package libtirpc-dev:amd64.
Preparing to unpack .../04-libtirpc-dev_1.3.2-2ubuntu0.1_amd64.deb ...
Unpacking libtirpc-dev:amd64 (1.3.2-2ubuntu0.1) ...
Selecting previously unselected package libnsl-dev:amd64.
Preparing to unpack .../05-libnsl-dev_1.3.0-2build2_amd64.deb ...
Unpacking libnsl-dev:amd64 (1.3.0-2build2) ...
Selecting previously unselected package libc6-dev:amd64.
Preparing to unpack .../06-libc6-dev_2.35-0ubuntu3.6_amd64.deb ...
Unpacking libc6-dev:amd64 (2.35-0ubuntu3.6) ...
Selecting previously unselected package gcc-11-base:amd64.
Preparing to unpack .../07-gcc-11-base_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking gcc-11-base:amd64 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package libisl23:amd64.
Preparing to unpack .../08-libisl23_0.24-2build1_amd64.deb ...
Unpacking libisl23:amd64 (0.24-2build1) ...
Selecting previously unselected package libmpc3:amd64.
Preparing to unpack .../09-libmpc3_1.2.1-2build1_amd64.deb ...
Unpacking libmpc3:amd64 (1.2.1-2build1) ...
Selecting previously unselected package cpp-11.
Preparing to unpack .../10-cpp-11_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking cpp-11 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package cpp.
Preparing to unpack .../11-cpp_4%3a11.2.0-1ubuntu1_amd64.deb ...
Unpacking cpp (4:11.2.0-1ubuntu1) ...
Selecting previously unselected package libcc1-0:amd64.
Preparing to unpack .../12-libcc1-0_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libcc1-0:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libgomp1:amd64.
Preparing to unpack .../13-libgomp1_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libgomp1:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libitm1:amd64.
Preparing to unpack .../14-libitm1_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libitm1:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libatomic1:amd64.
Preparing to unpack .../15-libatomic1_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libatomic1:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libasan6:amd64.
Preparing to unpack .../16-libasan6_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libasan6:amd64 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package liblsan0:amd64.
Preparing to unpack .../17-liblsan0_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking liblsan0:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libtsan0:amd64.
Preparing to unpack .../18-libtsan0_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libtsan0:amd64 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package libubsan1:amd64.
Preparing to unpack .../19-libubsan1_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libubsan1:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libquadmath0:amd64.
Preparing to unpack .../20-libquadmath0_12.3.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libquadmath0:amd64 (12.3.0-1ubuntu1~22.04) ...
Selecting previously unselected package libgcc-11-dev:amd64.
Preparing to unpack .../21-libgcc-11-dev_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libgcc-11-dev:amd64 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package gcc-11.
Preparing to unpack .../22-gcc-11_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking gcc-11 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package gcc.
Preparing to unpack .../23-gcc_4%3a11.2.0-1ubuntu1_amd64.deb ...
Unpacking gcc (4:11.2.0-1ubuntu1) ...
Selecting previously unselected package libstdc++-11-dev:amd64.
Preparing to unpack .../24-libstdc++-11-dev_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking libstdc++-11-dev:amd64 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package g++-11.
Preparing to unpack .../25-g++-11_11.4.0-1ubuntu1~22.04_amd64.deb ...
Unpacking g++-11 (11.4.0-1ubuntu1~22.04) ...
Selecting previously unselected package g++.
Preparing to unpack .../26-g++_4%3a11.2.0-1ubuntu1_amd64.deb ...
Unpacking g++ (4:11.2.0-1ubuntu1) ...
Selecting previously unselected package make.
Preparing to unpack .../27-make_4.3-4.1build1_amd64.deb ...
Unpacking make (4.3-4.1build1) ...
Selecting previously unselected package libdpkg-perl.
Preparing to unpack .../28-libdpkg-perl_1.21.1ubuntu2.2_all.deb ...
Unpacking libdpkg-perl (1.21.1ubuntu2.2) ...
Selecting previously unselected package bzip2.
Preparing to unpack .../29-bzip2_1.0.8-5build1_amd64.deb ...
Unpacking bzip2 (1.0.8-5build1) ...
Selecting previously unselected package lto-disabled-list.
Preparing to unpack .../30-lto-disabled-list_24_all.deb ...
Unpacking lto-disabled-list (24) ...
Selecting previously unselected package dpkg-dev.
Preparing to unpack .../31-dpkg-dev_1.21.1ubuntu2.2_all.deb ...
Unpacking dpkg-dev (1.21.1ubuntu2.2) ...
Selecting previously unselected package build-essential.
Preparing to unpack .../32-build-essential_12.9ubuntu3_amd64.deb ...
Unpacking build-essential (12.9ubuntu3) ...
Selecting previously unselected package libfakeroot:amd64.
Preparing to unpack .../33-libfakeroot_1.28-1ubuntu1_amd64.deb ...
Unpacking libfakeroot:amd64 (1.28-1ubuntu1) ...
Selecting previously unselected package fakeroot.
Preparing to unpack .../34-fakeroot_1.28-1ubuntu1_amd64.deb ...
Unpacking fakeroot (1.28-1ubuntu1) ...
Selecting previously unselected package fonts-dejavu-core.
Preparing to unpack .../35-fonts-dejavu-core_2.37-2build1_all.deb ...
Unpacking fonts-dejavu-core (2.37-2build1) ...
Selecting previously unselected package fontconfig-config.
Preparing to unpack .../36-fontconfig-config_2.13.1-4.2ubuntu5_all.deb ...
Unpacking fontconfig-config (2.13.1-4.2ubuntu5) ...
Selecting previously unselected package javascript-common.
Preparing to unpack .../37-javascript-common_11+nmu1_all.deb ...
Unpacking javascript-common (11+nmu1) ...
Selecting previously unselected package libalgorithm-diff-perl.
Preparing to unpack .../38-libalgorithm-diff-perl_1.201-1_all.deb ...
Unpacking libalgorithm-diff-perl (1.201-1) ...
Selecting previously unselected package libalgorithm-diff-xs-perl.
Preparing to unpack .../39-libalgorithm-diff-xs-perl_0.04-6build3_amd64.deb ...
Unpacking libalgorithm-diff-xs-perl (0.04-6build3) ...
Selecting previously unselected package libalgorithm-merge-perl.
Preparing to unpack .../40-libalgorithm-merge-perl_0.08-3_all.deb ...
Unpacking libalgorithm-merge-perl (0.08-3) ...
Selecting previously unselected package libfontconfig1:amd64.
Preparing to unpack .../41-libfontconfig1_2.13.1-4.2ubuntu5_amd64.deb ...
Unpacking libfontconfig1:amd64 (2.13.1-4.2ubuntu5) ...
Selecting previously unselected package libjpeg-turbo8:amd64.
Preparing to unpack .../42-libjpeg-turbo8_2.1.2-0ubuntu1_amd64.deb ...
Unpacking libjpeg-turbo8:amd64 (2.1.2-0ubuntu1) ...
Selecting previously unselected package libjpeg8:amd64.
Preparing to unpack .../43-libjpeg8_8c-2ubuntu10_amd64.deb ...
Unpacking libjpeg8:amd64 (8c-2ubuntu10) ...
Selecting previously unselected package libdeflate0:amd64.
Preparing to unpack .../44-libdeflate0_1.10-2_amd64.deb ...
Unpacking libdeflate0:amd64 (1.10-2) ...
Selecting previously unselected package libjbig0:amd64.
Preparing to unpack .../45-libjbig0_2.1-3.1ubuntu0.22.04.1_amd64.deb ...
Unpacking libjbig0:amd64 (2.1-3.1ubuntu0.22.04.1) ...
Selecting previously unselected package libwebp7:amd64.
Preparing to unpack .../46-libwebp7_1.2.2-2ubuntu0.22.04.2_amd64.deb ...
Unpacking libwebp7:amd64 (1.2.2-2ubuntu0.22.04.2) ...
Selecting previously unselected package libtiff5:amd64.
Preparing to unpack .../47-libtiff5_4.3.0-6ubuntu0.7_amd64.deb ...
Unpacking libtiff5:amd64 (4.3.0-6ubuntu0.7) ...
Selecting previously unselected package libxpm4:amd64.
Preparing to unpack .../48-libxpm4_1%3a3.5.12-1ubuntu0.22.04.2_amd64.deb ...
Unpacking libxpm4:amd64 (1:3.5.12-1ubuntu0.22.04.2) ...
Selecting previously unselected package libgd3:amd64.
Preparing to unpack .../49-libgd3_2.3.0-2ubuntu2_amd64.deb ...
Unpacking libgd3:amd64 (2.3.0-2ubuntu2) ...
Selecting previously unselected package libc-devtools.
Preparing to unpack .../50-libc-devtools_2.35-0ubuntu3.6_amd64.deb ...
Unpacking libc-devtools (2.35-0ubuntu3.6) ...
Selecting previously unselected package libexpat1-dev:amd64.
Preparing to unpack .../51-libexpat1-dev_2.4.7-1ubuntu0.2_amd64.deb ...
Unpacking libexpat1-dev:amd64 (2.4.7-1ubuntu0.2) ...
Selecting previously unselected package libfile-fcntllock-perl.
Preparing to unpack .../52-libfile-fcntllock-perl_0.22-3build7_amd64.deb ...
Unpacking libfile-fcntllock-perl (0.22-3build7) ...
Selecting previously unselected package libjs-jquery.
Preparing to unpack .../53-libjs-jquery_3.6.0+dfsg+~3.5.13-1_all.deb ...
Unpacking libjs-jquery (3.6.0+dfsg+~3.5.13-1) ...
Selecting previously unselected package libjs-underscore.
Preparing to unpack .../54-libjs-underscore_1.13.2~dfsg-2_all.deb ...
Unpacking libjs-underscore (1.13.2~dfsg-2) ...
Selecting previously unselected package libjs-sphinxdoc.
Preparing to unpack .../55-libjs-sphinxdoc_4.3.2-1_all.deb ...
Unpacking libjs-sphinxdoc (4.3.2-1) ...
Selecting previously unselected package zlib1g-dev:amd64.
Preparing to unpack .../56-zlib1g-dev_1%3a1.2.11.dfsg-2ubuntu9.2_amd64.deb ...
Unpacking zlib1g-dev:amd64 (1:1.2.11.dfsg-2ubuntu9.2) ...
Selecting previously unselected package libpython3.10-dev:amd64.
Preparing to unpack .../57-libpython3.10-dev_3.10.12-1~22.04.3_amd64.deb ...
Unpacking libpython3.10-dev:amd64 (3.10.12-1~22.04.3) ...
Selecting previously unselected package libpython3-dev:amd64.
Preparing to unpack .../58-libpython3-dev_3.10.6-1~22.04_amd64.deb ...
Unpacking libpython3-dev:amd64 (3.10.6-1~22.04) ...
Selecting previously unselected package manpages-dev.
Preparing to unpack .../59-manpages-dev_5.10-1ubuntu1_all.deb ...
Unpacking manpages-dev (5.10-1ubuntu1) ...
Selecting previously unselected package python3.10-dev.
Preparing to unpack .../60-python3.10-dev_3.10.12-1~22.04.3_amd64.deb ...
Unpacking python3.10-dev (3.10.12-1~22.04.3) ...
Selecting previously unselected package python3-dev.
Preparing to unpack .../61-python3-dev_3.10.6-1~22.04_amd64.deb ...
Unpacking python3-dev (3.10.6-1~22.04) ...
Selecting previously unselected package python3-wheel.
Preparing to unpack .../62-python3-wheel_0.37.1-2ubuntu0.22.04.1_all.deb ...
Unpacking python3-wheel (0.37.1-2ubuntu0.22.04.1) ...
Selecting previously unselected package python3-pip.
Preparing to unpack .../63-python3-pip_22.0.2+dfsg-1ubuntu0.4_all.deb ...
Unpacking python3-pip (22.0.2+dfsg-1ubuntu0.4) ...
Setting up javascript-common (11+nmu1) ...
Setting up gcc-11-base:amd64 (11.4.0-1ubuntu1~22.04) ...
Setting up manpages-dev (5.10-1ubuntu1) ...
Setting up lto-disabled-list (24) ...
Setting up libxpm4:amd64 (1:3.5.12-1ubuntu0.22.04.2) ...
Setting up libfile-fcntllock-perl (0.22-3build7) ...
Setting up libalgorithm-diff-perl (1.201-1) ...
Setting up libdeflate0:amd64 (1.10-2) ...
Setting up linux-libc-dev:amd64 (5.15.0-91.101) ...
Setting up libgomp1:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up bzip2 (1.0.8-5build1) ...
Setting up python3-wheel (0.37.1-2ubuntu0.22.04.1) ...
Setting up libjbig0:amd64 (2.1-3.1ubuntu0.22.04.1) ...
Setting up libfakeroot:amd64 (1.28-1ubuntu1) ...
Setting up libasan6:amd64 (11.4.0-1ubuntu1~22.04) ...
Setting up fakeroot (1.28-1ubuntu1) ...
update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode
Setting up libtirpc-dev:amd64 (1.3.2-2ubuntu0.1) ...
Setting up rpcsvc-proto (1.4.2-0ubuntu6) ...
Setting up make (4.3-4.1build1) ...
Setting up libquadmath0:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up libmpc3:amd64 (1.2.1-2build1) ...
Setting up libatomic1:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up fonts-dejavu-core (2.37-2build1) ...
Setting up python3-pip (22.0.2+dfsg-1ubuntu0.4) ...
Setting up libjpeg-turbo8:amd64 (2.1.2-0ubuntu1) ...
Setting up libdpkg-perl (1.21.1ubuntu2.2) ...
Setting up libwebp7:amd64 (1.2.2-2ubuntu0.22.04.2) ...
Setting up libubsan1:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up libnsl-dev:amd64 (1.3.0-2build2) ...
Setting up libcrypt-dev:amd64 (1:4.4.27-1) ...
Setting up libjs-jquery (3.6.0+dfsg+~3.5.13-1) ...
Setting up libisl23:amd64 (0.24-2build1) ...
Setting up libc-dev-bin (2.35-0ubuntu3.6) ...
Setting up libalgorithm-diff-xs-perl (0.04-6build3) ...
Setting up libcc1-0:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up liblsan0:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up libitm1:amd64 (12.3.0-1ubuntu1~22.04) ...
Setting up libjs-underscore (1.13.2~dfsg-2) ...
Setting up libalgorithm-merge-perl (0.08-3) ...
Setting up libtsan0:amd64 (11.4.0-1ubuntu1~22.04) ...
Setting up libjpeg8:amd64 (8c-2ubuntu10) ...
Setting up cpp-11 (11.4.0-1ubuntu1~22.04) ...
Setting up fontconfig-config (2.13.1-4.2ubuntu5) ...
Setting up dpkg-dev (1.21.1ubuntu2.2) ...
Setting up libjs-sphinxdoc (4.3.2-1) ...
Setting up libgcc-11-dev:amd64 (11.4.0-1ubuntu1~22.04) ...
Setting up gcc-11 (11.4.0-1ubuntu1~22.04) ...
Setting up cpp (4:11.2.0-1ubuntu1) ...
Setting up libc6-dev:amd64 (2.35-0ubuntu3.6) ...
Setting up libtiff5:amd64 (4.3.0-6ubuntu0.7) ...
Setting up libfontconfig1:amd64 (2.13.1-4.2ubuntu5) ...
Setting up gcc (4:11.2.0-1ubuntu1) ...
Setting up libexpat1-dev:amd64 (2.4.7-1ubuntu0.2) ...
Setting up libgd3:amd64 (2.3.0-2ubuntu2) ...
Setting up libstdc++-11-dev:amd64 (11.4.0-1ubuntu1~22.04) ...
Setting up zlib1g-dev:amd64 (1:1.2.11.dfsg-2ubuntu9.2) ...
Setting up libc-devtools (2.35-0ubuntu3.6) ...
Setting up g++-11 (11.4.0-1ubuntu1~22.04) ...
Setting up libpython3.10-dev:amd64 (3.10.12-1~22.04.3) ...
Setting up python3.10-dev (3.10.12-1~22.04.3) ...
Setting up g++ (4:11.2.0-1ubuntu1) ...
update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode
Setting up build-essential (12.9ubuntu3) ...
Setting up libpython3-dev:amd64 (3.10.6-1~22.04) ...
Setting up python3-dev (3.10.6-1~22.04) ...
Processing triggers for man-db (2.10.2-1) ...
Processing triggers for libc-bin (2.35-0ubuntu3.6) ...
NEEDRESTART-VER: 3.5
NEEDRESTART-KCUR: 6.2.0-1018-azure
NEEDRESTART-KEXP: 6.2.0-1018-azure
NEEDRESTART-KSTA: 1
Requirement already satisfied: pip in /usr/lib/python3/dist-packages (22.0.2)
Collecting pip
  Downloading pip-23.3.2-py3-none-any.whl (2.1 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.1/2.1 MB 31.9 MB/s eta 0:00:00
Installing collected packages: pip
  Attempting uninstall: pip
    Found existing installation: pip 22.0.2
    Not uninstalling pip at /usr/lib/python3/dist-packages, outside environment /usr
    Can't uninstall 'pip'. No files were found to uninstall.
Successfully installed pip-23.3.2
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
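Following pip's own recommendation above, the same upgrade could be done inside a virtual environment instead (a minimal sketch; the ~/osm-venv path is only an illustrative choice, not part of this installation):
     python3 -m venv ~/osm-venv               # create an isolated environment
     . ~/osm-venv/bin/activate                # activate it for the current shell
     python3 -m pip install --upgrade pip     # upgrade pip without touching system packages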
Reading package lists...
Building dependency tree...
Reading state information...
The following NEW packages will be installed:
  python3-osm-im python3-osmclient
0 upgraded, 2 newly installed, 0 to remove and 3 not upgraded.
Need to get 245 kB of archives.
After this operation, 8441 kB of additional disk space will be used.
Get:1 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing/IM amd64 python3-osm-im all 15.0.0+g09d7979-1 [176 kB]
Get:2 https://osm-download.etsi.org/repository/osm/debian/testing-daily testing/osmclient amd64 python3-osmclient all 11.0.0rc1.post59+gb46c7c6-1 [68.6 kB]
Fetched 245 kB in 0s (1141 kB/s)
Selecting previously unselected package python3-osm-im.
(Reading database ... 69481 files and directories currently installed.)
Preparing to unpack .../python3-osm-im_15.0.0+g09d7979-1_all.deb ...
Unpacking python3-osm-im (15.0.0+g09d7979-1) ...
Selecting previously unselected package python3-osmclient.
Preparing to unpack .../python3-osmclient_11.0.0rc1.post59+gb46c7c6-1_all.deb ...
Unpacking python3-osmclient (11.0.0rc1.post59+gb46c7c6-1) ...
Setting up python3-osmclient (11.0.0rc1.post59+gb46c7c6-1) ...
Setting up python3-osm-im (15.0.0+g09d7979-1) ...
NEEDRESTART-VER: 3.5
NEEDRESTART-KCUR: 6.2.0-1018-azure
NEEDRESTART-KEXP: 6.2.0-1018-azure
NEEDRESTART-KSTA: 1
Defaulting to user installation because normal site-packages is not writeable
Collecting enum34==1.1.10 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 17))
  Downloading enum34-1.1.10-py3-none-any.whl (11 kB)
Collecting lxml==4.9.3 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 19))
  Downloading lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl.metadata (3.8 kB)
Collecting pyang==2.5.3 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 23))
  Downloading pyang-2.5.3-py2.py3-none-any.whl (592 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 592.9/592.9 kB 14.3 MB/s eta 0:00:00
Collecting pyangbind==0.8.3.post1 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 27))
  Downloading pyangbind-0.8.3.post1-py3-none-any.whl.metadata (4.2 kB)
Collecting pyyaml==6.0.1 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 29))
  Downloading PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (2.1 kB)
Collecting regex==2023.8.8 (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 31))
  Downloading regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (40 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40.9/40.9 kB 3.6 MB/s eta 0:00:00
Requirement already satisfied: six==1.16.0 in /usr/lib/python3/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 33)) (1.16.0)
Downloading lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl (7.9 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.9/7.9 MB 79.5 MB/s eta 0:00:00
Downloading pyangbind-0.8.3.post1-py3-none-any.whl (51 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 51.8/51.8 kB 2.9 MB/s eta 0:00:00
Downloading PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (705 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 705.5/705.5 kB 31.5 MB/s eta 0:00:00
Downloading regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (771 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 771.9/771.9 kB 26.3 MB/s eta 0:00:00
Installing collected packages: enum34, regex, pyyaml, lxml, pyang, pyangbind
Successfully installed enum34-1.1.10 lxml-4.9.3 pyang-2.5.3 pyangbind-0.8.3.post1 pyyaml-6.0.1 regex-2023.8.8
Reading package lists...
Building dependency tree...
Reading state information...
libmagic1 is already the newest version (1:5.41-3ubuntu0.1).
libmagic1 set to manually installed.
0 upgraded, 0 newly installed, 0 to remove and 3 not upgraded.
Defaulting to user installation because normal site-packages is not writeable
Collecting certifi==2023.7.22 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 17))
  Downloading certifi-2023.7.22-py3-none-any.whl.metadata (2.2 kB)
Collecting charset-normalizer==3.2.0 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 19))
  Downloading charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (31 kB)
Collecting click==8.1.7 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 21))
  Downloading click-8.1.7-py3-none-any.whl.metadata (3.0 kB)
Collecting idna==3.4 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 23))
  Downloading idna-3.4-py3-none-any.whl (61 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 61.5/61.5 kB 3.1 MB/s eta 0:00:00
Collecting jinja2==3.1.2 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 25))
  Downloading Jinja2-3.1.2-py3-none-any.whl (133 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 133.1/133.1 kB 7.8 MB/s eta 0:00:00
Collecting jsonpath-ng==1.6.0 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 27))
  Downloading jsonpath_ng-1.6.0-py3-none-any.whl.metadata (17 kB)
Collecting markupsafe==2.1.3 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 29))
  Downloading MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.0 kB)
Collecting packaging==23.1 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 31))
  Downloading packaging-23.1-py3-none-any.whl (48 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 48.9/48.9 kB 1.7 MB/s eta 0:00:00
Collecting ply==3.11 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 33))
  Downloading ply-3.11-py2.py3-none-any.whl (49 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 49.6/49.6 kB 5.3 MB/s eta 0:00:00
Collecting prettytable==3.9.0 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 35))
  Downloading prettytable-3.9.0-py3-none-any.whl.metadata (26 kB)
Collecting python-magic==0.4.27 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 37))
  Downloading python_magic-0.4.27-py2.py3-none-any.whl (13 kB)
Requirement already satisfied: pyyaml==6.0.1 in ./.local/lib/python3.10/site-packages (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 39)) (6.0.1)
Collecting requests==2.31.0 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 41))
  Downloading requests-2.31.0-py3-none-any.whl.metadata (4.6 kB)
Collecting urllib3==2.0.5 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 43))
  Downloading urllib3-2.0.5-py3-none-any.whl.metadata (6.6 kB)
Collecting verboselogs==1.7 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 45))
  Downloading verboselogs-1.7-py2.py3-none-any.whl (11 kB)
Collecting wcwidth==0.2.6 (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 47))
  Downloading wcwidth-0.2.6-py2.py3-none-any.whl (29 kB)
Downloading certifi-2023.7.22-py3-none-any.whl (158 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 158.3/158.3 kB 14.8 MB/s eta 0:00:00
Downloading charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (201 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 201.8/201.8 kB 17.1 MB/s eta 0:00:00
Downloading click-8.1.7-py3-none-any.whl (97 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 97.9/97.9 kB 6.6 MB/s eta 0:00:00
Downloading jsonpath_ng-1.6.0-py3-none-any.whl (29 kB)
Downloading MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (25 kB)
Downloading prettytable-3.9.0-py3-none-any.whl (27 kB)
Downloading requests-2.31.0-py3-none-any.whl (62 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 62.6/62.6 kB 3.5 MB/s eta 0:00:00
Downloading urllib3-2.0.5-py3-none-any.whl (123 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 123.8/123.8 kB 8.6 MB/s eta 0:00:00
Installing collected packages: wcwidth, verboselogs, ply, urllib3, python-magic, prettytable, packaging, markupsafe, jsonpath-ng, idna, click, charset-normalizer, certifi, requests, jinja2
  WARNING: The script jsonpath_ng is installed in '/home/ubuntu/.local/bin' which is not on PATH.
  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
  WARNING: The script normalizer is installed in '/home/ubuntu/.local/bin' which is not on PATH.
  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
Successfully installed certifi-2023.7.22 charset-normalizer-3.2.0 click-8.1.7 idna-3.4 jinja2-3.1.2 jsonpath-ng-1.6.0 markupsafe-2.1.3 packaging-23.1 ply-3.11 prettytable-3.9.0 python-magic-0.4.27 requests-2.31.0 urllib3-2.0.5 verboselogs-1.7 wcwidth-0.2.6

OSM client installed
The OSM client assumes that the OSM host is running on localhost (127.0.0.1).
To interact with a different OSM host, configure this environment variable in your .bashrc file:
     export OSM_HOSTNAME=<OSM_host>
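For example, assuming the OSM host were reachable at 192.0.2.10 (a placeholder address, not taken from this installation), the variable could be persisted and exercised like this:
     echo 'export OSM_HOSTNAME=192.0.2.10' >> ~/.bashrc   # hypothetical OSM host address
     source ~/.bashrc
     osm version    # the client should now target 192.0.2.10 instead of 127.0.0.1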
Track osmclient osmclient_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705839928&event=osmclient&operation=osmclient_ok&value=&comment=&tags=
Checking OSM health state...
helm -n osm list
NAME        	NAMESPACE	REVISION	UPDATED                                	STATUS  	CHART                        	APP VERSION
airflow     	osm      	1       	2024-01-21 12:21:01.44243989 +0000 UTC 	deployed	airflow-1.9.0                	2.5.3      
alertmanager	osm      	1       	2024-01-21 12:24:54.135412028 +0000 UTC	deployed	alertmanager-0.22.0          	v0.24.0    
mongodb-k8s 	osm      	1       	2024-01-21 12:20:57.426808711 +0000 UTC	deployed	mongodb-13.9.4               	6.0.5      
osm         	osm      	1       	2024-01-21 12:20:58.011718098 +0000 UTC	deployed	osm-0.0.1                    	15         
pushgateway 	osm      	1       	2024-01-21 12:24:50.711637645 +0000 UTC	deployed	prometheus-pushgateway-1.18.2	1.4.2      
helm -n osm status osm
NAME: osm
LAST DEPLOYED: Sun Jan 21 12:20:58 2024
NAMESPACE: osm
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace osm -o jsonpath="{.spec.ports[0].nodePort}" services nbi)
  export NODE_IP=$(kubectl get nodes --namespace osm -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT

Bootstrapping... 1 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 8 attempts of 84
5 of 13 deployments starting:
            keystone	0/1	0
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 11 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 24 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
 mongodb-k8s-arbiter	0/1	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 30 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 31 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
          prometheus	0/1	
           zookeeper	0/1	


Bootstrapping... 33 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 34 attempts of 84
5 of 13 deployments starting:
            keystone	0/1	0
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 39 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 54 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
          prometheus	0/1	
           zookeeper	0/1	


Bootstrapping... 60 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 62 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
 mongodb-k8s-arbiter	0/1	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 63 attempts of 84
5 of 13 deployments starting:
            keystone	0/1	0
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
 mongodb-k8s-arbiter	0/1	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 69 attempts of 84
5 of 13 deployments starting:
            keystone	0/1	0
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 73 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

4 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
           zookeeper	0/1	


Bootstrapping... 84 attempts of 84
4 of 13 deployments starting:
                 lcm	0/1	0
                 mon	0/1	0
                 nbi	0/1	0
                  ro	0/1	0

5 of 10 statefulsets starting:
    kafka-controller	2/3	
         mongodb-k8s	0/2	
               mysql	0/1	
          prometheus	0/1	
           zookeeper	0/1	


SYSTEM IS BROKEN
OSM is not healthy, but will probably converge to a healthy state soon.
Check OSM status with: kubectl -n osm get all
Track healthchecks osm_unhealthy: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705840361&event=healthchecks&operation=osm_unhealthy&value=didnotconverge&comment=&tags=
Track healthchecks after_healthcheck_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705840361&event=healthchecks&operation=after_healthcheck_ok&value=&comment=&tags=
HTTPSConnectionPool(host='127.0.0.1', port=9999): Max retries exceeded with url: /osm/admin/v1/tokens (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7ff06b670250>: Failed to establish a new connection: [Errno 111] Connection refused'))
Maybe "--hostname" option or OSM_HOSTNAME environment variable needs to be specified
HTTPSConnectionPool(host='127.0.0.1', port=9999): Max retries exceeded with url: /osm/admin/v1/tokens (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f7a3fa71360>: Failed to establish a new connection: [Errno 111] Connection refused'))
Maybe "--hostname" option or OSM_HOSTNAME environment variable needs to be specified
Track final_ops add_local_k8scluster_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705840362&event=final_ops&operation=add_local_k8scluster_ok&value=&comment=&tags=
Fixing firewall so docker and LXD can share the same host without affecting each other.
tee: /etc/iptables/rules.v4: No such file or directory
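The tee error above most likely means that /etc/iptables does not exist yet; on Ubuntu that directory is normally created by the iptables-persistent package. A sketch of persisting the saved rules by hand (assuming root privileges):
     sudo mkdir -p /etc/iptables
     sudo iptables-save | sudo tee /etc/iptables/rules.v4 >/dev/null
     sudo apt-get install -y iptables-persistent   # optional: restores rules.v4 at boot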
# Generated by iptables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:KUBE-IPTABLES-HINT - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-PROXY-CANARY - [0:0]
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
# Generated by iptables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*security
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [356360:151272095]
-A OUTPUT -d 168.63.129.16/32 -p tcp -m tcp --dport 53 -j ACCEPT
-A OUTPUT -d 168.63.129.16/32 -p tcp -m owner --uid-owner 0 -j ACCEPT
-A OUTPUT -d 168.63.129.16/32 -p tcp -m conntrack --ctstate INVALID,NEW -j DROP
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
# Generated by iptables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*filter
:INPUT ACCEPT [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:FLANNEL-FWD - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-PROXY-CANARY - [0:0]
:KUBE-PROXY-FIREWALL - [0:0]
:KUBE-SERVICES - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -j KUBE-FIREWALL
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -m comment --comment "flanneld forward" -j FLANNEL-FWD
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j ACCEPT
-A DOCKER-USER -j RETURN
-A FLANNEL-FWD -s 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT
-A FLANNEL-FWD -d 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT
-A KUBE-EXTERNAL-SERVICES -p tcp -m comment --comment "osm/nbi has no endpoints" -m addrtype --dst-type LOCAL -m tcp --dport 9999 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-EXTERNAL-SERVICES -p tcp -m comment --comment "osm/prometheus has no endpoints" -m addrtype --dst-type LOCAL -m tcp --dport 9091 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-SERVICES -d 10.107.37.248/32 -p tcp -m comment --comment "osm/zookeeper:tcp-client has no endpoints" -m tcp --dport 2181 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-SERVICES -d 10.96.42.255/32 -p tcp -m comment --comment "osm/mysql:mysql has no endpoints" -m tcp --dport 3306 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-SERVICES -d 10.98.129.144/32 -p tcp -m comment --comment "osm/nbi has no endpoints" -m tcp --dport 9999 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-SERVICES -d 10.104.32.113/32 -p tcp -m comment --comment "osm/prometheus has no endpoints" -m tcp --dport 9090 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-SERVICES -d 10.107.37.248/32 -p tcp -m comment --comment "osm/zookeeper:tcp-follower has no endpoints" -m tcp --dport 2888 -j REJECT --reject-with icmp-port-unreachable
-A KUBE-SERVICES -d 10.107.37.248/32 -p tcp -m comment --comment "osm/zookeeper:tcp-election has no endpoints" -m tcp --dport 3888 -j REJECT --reject-with icmp-port-unreachable
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
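Editor's note: the REJECT rules tagged "has no endpoints" in the filter table above are installed by kube-proxy for Services whose selectors match no ready pods; they line up exactly with the components that never became ready (nbi, prometheus, mysql, zookeeper). A quick cross-check, assuming the same namespace:

    kubectl -n osm get endpoints nbi prometheus mysql zookeeper
    # an empty ENDPOINTS column confirms why kube-proxy rejects traffic to those ports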
# Generated by iptables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:FLANNEL-POSTRTG - [0:0]
:KUBE-EXT-BF2LB5WJRBPLA42J - [0:0]
:KUBE-EXT-GZN4S7ND4PF6YXD6 - [0:0]
:KUBE-EXT-PQIZCPF63EFIBBJY - [0:0]
:KUBE-EXT-XUD4OEZNIHB47KQL - [0:0]
:KUBE-EXT-YA74QX5VY2UAABIX - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-PROXY-CANARY - [0:0]
:KUBE-SEP-3QXYW336TC6ED5Q7 - [0:0]
:KUBE-SEP-5D2WEBEDBSLEFQZO - [0:0]
:KUBE-SEP-6E7XQMQ4RAYOWTTM - [0:0]
:KUBE-SEP-7HXLIXUMXGELTMFD - [0:0]
:KUBE-SEP-BGWLLMSLJMMH67BG - [0:0]
:KUBE-SEP-C523AIPH4Y2GJ7FW - [0:0]
:KUBE-SEP-E6QZKDLTQFPYJ7WY - [0:0]
:KUBE-SEP-IASF5YUGAFRIWCER - [0:0]
:KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0]
:KUBE-SEP-JIWKU7LWBAE46CYF - [0:0]
:KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0]
:KUBE-SEP-O5ZLXNTEPTDGZTUZ - [0:0]
:KUBE-SEP-QIH2WLTFEDC7N73V - [0:0]
:KUBE-SEP-UO726RQ525XORZLT - [0:0]
:KUBE-SEP-W6JILCJ5ZCZONVYH - [0:0]
:KUBE-SEP-XNQ7MNHBFHAQLS4W - [0:0]
:KUBE-SEP-YHH7I2J6SYEPVH7R - [0:0]
:KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0]
:KUBE-SEP-YRMTI7Q4WZP6Y2KS - [0:0]
:KUBE-SEP-ZP3FB6NMPNCO4VBJ - [0:0]
:KUBE-SEP-ZXMNUKOKXUTL2MK2 - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-5QOWUZVRO3UICSLI - [0:0]
:KUBE-SVC-BF2LB5WJRBPLA42J - [0:0]
:KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0]
:KUBE-SVC-GZ25SP4UFGF7SAVL - [0:0]
:KUBE-SVC-GZN4S7ND4PF6YXD6 - [0:0]
:KUBE-SVC-JD5MR3NA4I4DYORP - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:KUBE-SVC-O36IMWM6WEZJKHBK - [0:0]
:KUBE-SVC-PQIZCPF63EFIBBJY - [0:0]
:KUBE-SVC-QE77U7R3P7AE7O5U - [0:0]
:KUBE-SVC-QSOISDZI64RJ2IKG - [0:0]
:KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0]
:KUBE-SVC-TTTQGL2HNUNQKPOG - [0:0]
:KUBE-SVC-USIDOZAE2VTXK5OJ - [0:0]
:KUBE-SVC-XUD4OEZNIHB47KQL - [0:0]
:KUBE-SVC-YA74QX5VY2UAABIX - [0:0]
:KUBE-SVC-ZUD4L6KQKCHD52W4 - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "flanneld masq" -j FLANNEL-POSTRTG
-A DOCKER -i docker0 -j RETURN
-A FLANNEL-POSTRTG -m comment --comment "flanneld masq" -j RETURN
-A FLANNEL-POSTRTG -s 10.244.0.0/24 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j RETURN
-A FLANNEL-POSTRTG -s 10.244.0.0/16 -d 10.244.0.0/24 -m comment --comment "flanneld masq" -j RETURN
-A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/24 -m comment --comment "flanneld masq" -j RETURN
-A FLANNEL-POSTRTG -s 10.244.0.0/16 ! -d 224.0.0.0/4 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully
-A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully
-A KUBE-EXT-BF2LB5WJRBPLA42J -m comment --comment "masquerade traffic for osm/airflow-webserver:airflow-ui external destinations" -j KUBE-MARK-MASQ
-A KUBE-EXT-BF2LB5WJRBPLA42J -j KUBE-SVC-BF2LB5WJRBPLA42J
-A KUBE-EXT-GZN4S7ND4PF6YXD6 -m comment --comment "masquerade traffic for osm/alertmanager:http external destinations" -j KUBE-MARK-MASQ
-A KUBE-EXT-GZN4S7ND4PF6YXD6 -j KUBE-SVC-GZN4S7ND4PF6YXD6
-A KUBE-EXT-PQIZCPF63EFIBBJY -m comment --comment "masquerade traffic for osm/grafana:service external destinations" -j KUBE-MARK-MASQ
-A KUBE-EXT-PQIZCPF63EFIBBJY -j KUBE-SVC-PQIZCPF63EFIBBJY
-A KUBE-EXT-XUD4OEZNIHB47KQL -m comment --comment "masquerade traffic for osm/webhook-translator external destinations" -j KUBE-MARK-MASQ
-A KUBE-EXT-XUD4OEZNIHB47KQL -j KUBE-SVC-XUD4OEZNIHB47KQL
-A KUBE-EXT-YA74QX5VY2UAABIX -m comment --comment "masquerade traffic for osm/ng-ui external destinations" -j KUBE-MARK-MASQ
-A KUBE-EXT-YA74QX5VY2UAABIX -j KUBE-SVC-YA74QX5VY2UAABIX
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
-A KUBE-NODEPORTS -p tcp -m comment --comment "osm/ng-ui" -m tcp --dport 80 -j KUBE-EXT-YA74QX5VY2UAABIX
-A KUBE-NODEPORTS -p tcp -m comment --comment "osm/grafana:service" -m tcp --dport 3000 -j KUBE-EXT-PQIZCPF63EFIBBJY
-A KUBE-NODEPORTS -p tcp -m comment --comment "osm/airflow-webserver:airflow-ui" -m tcp --dport 18343 -j KUBE-EXT-BF2LB5WJRBPLA42J
-A KUBE-NODEPORTS -p tcp -m comment --comment "osm/webhook-translator" -m tcp --dport 9998 -j KUBE-EXT-XUD4OEZNIHB47KQL
-A KUBE-NODEPORTS -p tcp -m comment --comment "osm/alertmanager:http" -m tcp --dport 9093 -j KUBE-EXT-GZN4S7ND4PF6YXD6
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully
-A KUBE-SEP-3QXYW336TC6ED5Q7 -s 10.244.0.25/32 -m comment --comment "osm/airflow-statsd:statsd-ingest" -j KUBE-MARK-MASQ
-A KUBE-SEP-3QXYW336TC6ED5Q7 -p udp -m comment --comment "osm/airflow-statsd:statsd-ingest" -m udp -j DNAT --to-destination 10.244.0.25:9125
-A KUBE-SEP-5D2WEBEDBSLEFQZO -s 10.244.0.38/32 -m comment --comment "osm/airflow-postgresql:tcp-postgresql" -j KUBE-MARK-MASQ
-A KUBE-SEP-5D2WEBEDBSLEFQZO -p tcp -m comment --comment "osm/airflow-postgresql:tcp-postgresql" -m tcp -j DNAT --to-destination 10.244.0.38:5432
-A KUBE-SEP-6E7XQMQ4RAYOWTTM -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-6E7XQMQ4RAYOWTTM -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.3:53
-A KUBE-SEP-7HXLIXUMXGELTMFD -s 10.244.0.16/32 -m comment --comment "osm/webhook-translator" -j KUBE-MARK-MASQ
-A KUBE-SEP-7HXLIXUMXGELTMFD -p tcp -m comment --comment "osm/webhook-translator" -m tcp -j DNAT --to-destination 10.244.0.16:9998
-A KUBE-SEP-BGWLLMSLJMMH67BG -s 10.244.0.43/32 -m comment --comment "osm/alertmanager:http" -j KUBE-MARK-MASQ
-A KUBE-SEP-BGWLLMSLJMMH67BG -p tcp -m comment --comment "osm/alertmanager:http" -m tcp -j DNAT --to-destination 10.244.0.43:9093
-A KUBE-SEP-C523AIPH4Y2GJ7FW -s 172.21.23.11/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-C523AIPH4Y2GJ7FW -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 172.21.23.11:6443
-A KUBE-SEP-E6QZKDLTQFPYJ7WY -s 10.244.0.41/32 -m comment --comment "osm/pushgateway-prometheus-pushgateway:http" -j KUBE-MARK-MASQ
-A KUBE-SEP-E6QZKDLTQFPYJ7WY -p tcp -m comment --comment "osm/pushgateway-prometheus-pushgateway:http" -m tcp -j DNAT --to-destination 10.244.0.41:9091
-A KUBE-SEP-IASF5YUGAFRIWCER -s 10.244.0.34/32 -m comment --comment "osm/kafka:tcp-client" -j KUBE-MARK-MASQ
-A KUBE-SEP-IASF5YUGAFRIWCER -p tcp -m comment --comment "osm/kafka:tcp-client" -m tcp -j DNAT --to-destination 10.244.0.34:9092
-A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53
-A KUBE-SEP-JIWKU7LWBAE46CYF -s 10.244.0.37/32 -m comment --comment "osm/airflow-redis:redis-db" -j KUBE-MARK-MASQ
-A KUBE-SEP-JIWKU7LWBAE46CYF -p tcp -m comment --comment "osm/airflow-redis:redis-db" -m tcp -j DNAT --to-destination 10.244.0.37:6379
-A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ
-A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153
-A KUBE-SEP-O5ZLXNTEPTDGZTUZ -s 10.244.0.27/32 -m comment --comment "osm/airflow-webserver:airflow-ui" -j KUBE-MARK-MASQ
-A KUBE-SEP-O5ZLXNTEPTDGZTUZ -p tcp -m comment --comment "osm/airflow-webserver:airflow-ui" -m tcp -j DNAT --to-destination 10.244.0.27:8080
-A KUBE-SEP-QIH2WLTFEDC7N73V -s 10.244.0.10/32 -m comment --comment "metallb-system/metallb-webhook-service" -j KUBE-MARK-MASQ
-A KUBE-SEP-QIH2WLTFEDC7N73V -p tcp -m comment --comment "metallb-system/metallb-webhook-service" -m tcp -j DNAT --to-destination 10.244.0.10:9443
-A KUBE-SEP-UO726RQ525XORZLT -s 10.244.0.4/32 -m comment --comment "cert-manager/cert-manager-webhook:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-UO726RQ525XORZLT -p tcp -m comment --comment "cert-manager/cert-manager-webhook:https" -m tcp -j DNAT --to-destination 10.244.0.4:10250
-A KUBE-SEP-W6JILCJ5ZCZONVYH -s 10.244.0.25/32 -m comment --comment "osm/airflow-statsd:statsd-scrape" -j KUBE-MARK-MASQ
-A KUBE-SEP-W6JILCJ5ZCZONVYH -p tcp -m comment --comment "osm/airflow-statsd:statsd-scrape" -m tcp -j DNAT --to-destination 10.244.0.25:9102
-A KUBE-SEP-XNQ7MNHBFHAQLS4W -s 10.244.0.36/32 -m comment --comment "osm/kafka:tcp-client" -j KUBE-MARK-MASQ
-A KUBE-SEP-XNQ7MNHBFHAQLS4W -p tcp -m comment --comment "osm/kafka:tcp-client" -m tcp -j DNAT --to-destination 10.244.0.36:9092
-A KUBE-SEP-YHH7I2J6SYEPVH7R -s 10.244.0.20/32 -m comment --comment "osm/grafana:service" -j KUBE-MARK-MASQ
-A KUBE-SEP-YHH7I2J6SYEPVH7R -p tcp -m comment --comment "osm/grafana:service" -m tcp -j DNAT --to-destination 10.244.0.20:3000
-A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53
-A KUBE-SEP-YRMTI7Q4WZP6Y2KS -s 10.244.0.18/32 -m comment --comment "osm/ng-ui" -j KUBE-MARK-MASQ
-A KUBE-SEP-YRMTI7Q4WZP6Y2KS -p tcp -m comment --comment "osm/ng-ui" -m tcp -j DNAT --to-destination 10.244.0.18:80
-A KUBE-SEP-ZP3FB6NMPNCO4VBJ -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ
-A KUBE-SEP-ZP3FB6NMPNCO4VBJ -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.3:9153
-A KUBE-SEP-ZXMNUKOKXUTL2MK2 -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-ZXMNUKOKXUTL2MK2 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.3:53
-A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP
-A KUBE-SERVICES -d 10.101.235.117/32 -p tcp -m comment --comment "osm/kafka:tcp-client cluster IP" -m tcp --dport 9092 -j KUBE-SVC-QSOISDZI64RJ2IKG
-A KUBE-SERVICES -d 10.111.238.254/32 -p tcp -m comment --comment "osm/airflow-statsd:statsd-scrape cluster IP" -m tcp --dport 9102 -j KUBE-SVC-TTTQGL2HNUNQKPOG
-A KUBE-SERVICES -d 10.108.253.168/32 -p tcp -m comment --comment "osm/airflow-redis:redis-db cluster IP" -m tcp --dport 6379 -j KUBE-SVC-USIDOZAE2VTXK5OJ
-A KUBE-SERVICES -d 10.101.11.212/32 -p tcp -m comment --comment "osm/pushgateway-prometheus-pushgateway:http cluster IP" -m tcp --dport 9091 -j KUBE-SVC-5QOWUZVRO3UICSLI
-A KUBE-SERVICES -d 10.107.97.192/32 -p tcp -m comment --comment "cert-manager/cert-manager-webhook:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-ZUD4L6KQKCHD52W4
-A KUBE-SERVICES -d 10.107.33.9/32 -p tcp -m comment --comment "osm/ng-ui cluster IP" -m tcp --dport 80 -j KUBE-SVC-YA74QX5VY2UAABIX
-A KUBE-SERVICES -d 10.107.18.115/32 -p tcp -m comment --comment "metallb-system/metallb-webhook-service cluster IP" -m tcp --dport 443 -j KUBE-SVC-GZ25SP4UFGF7SAVL
-A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU
-A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4
-A KUBE-SERVICES -d 10.111.251.215/32 -p tcp -m comment --comment "osm/grafana:service cluster IP" -m tcp --dport 3000 -j KUBE-SVC-PQIZCPF63EFIBBJY
-A KUBE-SERVICES -d 10.111.238.254/32 -p udp -m comment --comment "osm/airflow-statsd:statsd-ingest cluster IP" -m udp --dport 9125 -j KUBE-SVC-O36IMWM6WEZJKHBK
-A KUBE-SERVICES -d 10.101.125.77/32 -p tcp -m comment --comment "osm/airflow-postgresql:tcp-postgresql cluster IP" -m tcp --dport 5432 -j KUBE-SVC-QE77U7R3P7AE7O5U
-A KUBE-SERVICES -d 10.105.157.65/32 -p tcp -m comment --comment "osm/airflow-webserver:airflow-ui cluster IP" -m tcp --dport 8080 -j KUBE-SVC-BF2LB5WJRBPLA42J
-A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES -d 10.104.136.237/32 -p tcp -m comment --comment "osm/webhook-translator cluster IP" -m tcp --dport 9998 -j KUBE-SVC-XUD4OEZNIHB47KQL
-A KUBE-SERVICES -d 10.105.211.58/32 -p tcp -m comment --comment "osm/alertmanager:http cluster IP" -m tcp --dport 9093 -j KUBE-SVC-GZN4S7ND4PF6YXD6
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-5QOWUZVRO3UICSLI ! -s 10.244.0.0/16 -d 10.101.11.212/32 -p tcp -m comment --comment "osm/pushgateway-prometheus-pushgateway:http cluster IP" -m tcp --dport 9091 -j KUBE-MARK-MASQ
-A KUBE-SVC-5QOWUZVRO3UICSLI -m comment --comment "osm/pushgateway-prometheus-pushgateway:http -> 10.244.0.41:9091" -j KUBE-SEP-E6QZKDLTQFPYJ7WY
-A KUBE-SVC-BF2LB5WJRBPLA42J ! -s 10.244.0.0/16 -d 10.105.157.65/32 -p tcp -m comment --comment "osm/airflow-webserver:airflow-ui cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ
-A KUBE-SVC-BF2LB5WJRBPLA42J -m comment --comment "osm/airflow-webserver:airflow-ui -> 10.244.0.27:8080" -j KUBE-SEP-O5ZLXNTEPTDGZTUZ
-A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ
-A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-IT2ZTR26TO4XFPTO
-A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.3:53" -j KUBE-SEP-ZXMNUKOKXUTL2MK2
-A KUBE-SVC-GZ25SP4UFGF7SAVL ! -s 10.244.0.0/16 -d 10.107.18.115/32 -p tcp -m comment --comment "metallb-system/metallb-webhook-service cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ
-A KUBE-SVC-GZ25SP4UFGF7SAVL -m comment --comment "metallb-system/metallb-webhook-service -> 10.244.0.10:9443" -j KUBE-SEP-QIH2WLTFEDC7N73V
-A KUBE-SVC-GZN4S7ND4PF6YXD6 ! -s 10.244.0.0/16 -d 10.105.211.58/32 -p tcp -m comment --comment "osm/alertmanager:http cluster IP" -m tcp --dport 9093 -j KUBE-MARK-MASQ
-A KUBE-SVC-GZN4S7ND4PF6YXD6 -m comment --comment "osm/alertmanager:http -> 10.244.0.43:9093" -j KUBE-SEP-BGWLLMSLJMMH67BG
-A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ
-A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-N4G2XR5TDX7PQE7P
-A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.3:9153" -j KUBE-SEP-ZP3FB6NMPNCO4VBJ
-A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 172.21.23.11:6443" -j KUBE-SEP-C523AIPH4Y2GJ7FW
-A KUBE-SVC-O36IMWM6WEZJKHBK ! -s 10.244.0.0/16 -d 10.111.238.254/32 -p udp -m comment --comment "osm/airflow-statsd:statsd-ingest cluster IP" -m udp --dport 9125 -j KUBE-MARK-MASQ
-A KUBE-SVC-O36IMWM6WEZJKHBK -m comment --comment "osm/airflow-statsd:statsd-ingest -> 10.244.0.25:9125" -j KUBE-SEP-3QXYW336TC6ED5Q7
-A KUBE-SVC-PQIZCPF63EFIBBJY ! -s 10.244.0.0/16 -d 10.111.251.215/32 -p tcp -m comment --comment "osm/grafana:service cluster IP" -m tcp --dport 3000 -j KUBE-MARK-MASQ
-A KUBE-SVC-PQIZCPF63EFIBBJY -m comment --comment "osm/grafana:service -> 10.244.0.20:3000" -j KUBE-SEP-YHH7I2J6SYEPVH7R
-A KUBE-SVC-QE77U7R3P7AE7O5U ! -s 10.244.0.0/16 -d 10.101.125.77/32 -p tcp -m comment --comment "osm/airflow-postgresql:tcp-postgresql cluster IP" -m tcp --dport 5432 -j KUBE-MARK-MASQ
-A KUBE-SVC-QE77U7R3P7AE7O5U -m comment --comment "osm/airflow-postgresql:tcp-postgresql -> 10.244.0.38:5432" -j KUBE-SEP-5D2WEBEDBSLEFQZO
-A KUBE-SVC-QSOISDZI64RJ2IKG ! -s 10.244.0.0/16 -d 10.101.235.117/32 -p tcp -m comment --comment "osm/kafka:tcp-client cluster IP" -m tcp --dport 9092 -j KUBE-MARK-MASQ
-A KUBE-SVC-QSOISDZI64RJ2IKG -m comment --comment "osm/kafka:tcp-client -> 10.244.0.34:9092" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-IASF5YUGAFRIWCER
-A KUBE-SVC-QSOISDZI64RJ2IKG -m comment --comment "osm/kafka:tcp-client -> 10.244.0.36:9092" -j KUBE-SEP-XNQ7MNHBFHAQLS4W
-A KUBE-SVC-TCOU7JCQXEZGVUNU ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ
-A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-YIL6JZP7A3QYXJU2
-A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.3:53" -j KUBE-SEP-6E7XQMQ4RAYOWTTM
-A KUBE-SVC-TTTQGL2HNUNQKPOG ! -s 10.244.0.0/16 -d 10.111.238.254/32 -p tcp -m comment --comment "osm/airflow-statsd:statsd-scrape cluster IP" -m tcp --dport 9102 -j KUBE-MARK-MASQ
-A KUBE-SVC-TTTQGL2HNUNQKPOG -m comment --comment "osm/airflow-statsd:statsd-scrape -> 10.244.0.25:9102" -j KUBE-SEP-W6JILCJ5ZCZONVYH
-A KUBE-SVC-USIDOZAE2VTXK5OJ ! -s 10.244.0.0/16 -d 10.108.253.168/32 -p tcp -m comment --comment "osm/airflow-redis:redis-db cluster IP" -m tcp --dport 6379 -j KUBE-MARK-MASQ
-A KUBE-SVC-USIDOZAE2VTXK5OJ -m comment --comment "osm/airflow-redis:redis-db -> 10.244.0.37:6379" -j KUBE-SEP-JIWKU7LWBAE46CYF
-A KUBE-SVC-XUD4OEZNIHB47KQL ! -s 10.244.0.0/16 -d 10.104.136.237/32 -p tcp -m comment --comment "osm/webhook-translator cluster IP" -m tcp --dport 9998 -j KUBE-MARK-MASQ
-A KUBE-SVC-XUD4OEZNIHB47KQL -m comment --comment "osm/webhook-translator -> 10.244.0.16:9998" -j KUBE-SEP-7HXLIXUMXGELTMFD
-A KUBE-SVC-YA74QX5VY2UAABIX ! -s 10.244.0.0/16 -d 10.107.33.9/32 -p tcp -m comment --comment "osm/ng-ui cluster IP" -m tcp --dport 80 -j KUBE-MARK-MASQ
-A KUBE-SVC-YA74QX5VY2UAABIX -m comment --comment "osm/ng-ui -> 10.244.0.18:80" -j KUBE-SEP-YRMTI7Q4WZP6Y2KS
-A KUBE-SVC-ZUD4L6KQKCHD52W4 ! -s 10.244.0.0/16 -d 10.107.97.192/32 -p tcp -m comment --comment "cert-manager/cert-manager-webhook:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ
-A KUBE-SVC-ZUD4L6KQKCHD52W4 -m comment --comment "cert-manager/cert-manager-webhook:https -> 10.244.0.4:10250" -j KUBE-SEP-UO726RQ525XORZLT
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
tee: /etc/iptables/rules.v6: No such file or directory
# Generated by ip6tables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:KUBE-IPTABLES-HINT - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-PROXY-CANARY - [0:0]
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
# Generated by ip6tables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-PROXY-CANARY - [0:0]
:KUBE-PROXY-FIREWALL - [0:0]
:KUBE-SERVICES - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
# Generated by ip6tables-save v1.8.7 on Sun Jan 21 12:32:42 2024
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-PROXY-CANARY - [0:0]
:KUBE-SERVICES - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully
-A KUBE-SERVICES ! -d ::1/128 -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
# Completed on Sun Jan 21 12:32:42 2024
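Editor's note: once /etc/iptables exists (see the note on the rules.v4 failure above), both rule sets can be re-saved in one step; a sketch assuming iptables-persistent is installed:

    sudo netfilter-persistent save   # writes rules.v4 and rules.v6 under /etc/iptables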
Track end end: https://osm.etsi.org/InstallLog.php?&installation_id=1705839460-vUbB28zRmF4t3OOb&local_ts=1705840362&event=end&operation=end&value=&comment=&tags=
/etc/osm
/etc/osm/kubeadm-config.yaml
/etc/osm/helm
/etc/osm/helm/mongodb-values.yaml
/etc/osm/helm/alertmanager-values.yaml
/etc/osm/helm/osm-values.yaml
/etc/osm/helm/airflow-values.yaml
/etc/osm/metallb-ipaddrpool.yaml

DONE
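Editor's note: the files listed above record the kubeadm configuration and Helm values this installer run used, which is useful for reproducing or upgrading the deployment later. A hypothetical re-use of the recorded chart values (the release name "osm" and <osm-chart-ref> are placeholders, not taken from this log):

    helm -n osm upgrade --install osm <osm-chart-ref> -f /etc/osm/helm/osm-values.yaml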
+ set +eux
Warning: Permanently added '172.21.23.11' (ED25519) to the list of known hosts.
Defaulted container "lcm" out of: lcm, kafka-ro-mongo-test (init)
Unable to use a TTY - input is not a terminal or the right kind of file
error: unable to upgrade connection: container not found ("lcm")
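Editor's note: kubectl exec defaulted to the "lcm" container, but since the lcm deployment stayed at 0/1 that container was never created, hence "container not found". A sketch of checking before exec-ing (the app=lcm label is assumed; adjust to the chart's actual labels):

    kubectl -n osm get deploy lcm                      # READY shows 0/1 in this run
    kubectl -n osm get pods -l app=lcm                 # pod phase and restart count
    kubectl -n osm exec -it deploy/lcm -c lcm -- bash  # works only once the pod is Running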
+ export JUJU_PASSWORD=
+ JUJU_PASSWORD=
+ cat
+ echo Environment was updated at /robot-systest/results/osm_environment.rc
Environment was updated at /robot-systest/results/osm_environment.rc
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Add VIM and K8s cluster to OSM)
[Pipeline] sh
[azure_robot_tests] Running shell script
+ . /robot-systest/results/osm_environment.rc
+ export CLOUD_TYPE=azure
+ export OSM_HOSTNAME=172.21.23.11
+ export OSM_IMAGE_NAME=osmtest202401211214
+ export JUJU_PASSWORD=
+ . /robot-systest/results/k8s_environment.rc
+ export CLOUD_TYPE=azure
+ export USE_PAAS_K8S=FALSE
+ export K8S_IP=172.21.23.10
+ export K8S_IMAGE_NAME=k8stest202401211208
+ export K8S_CREDENTIALS=/robot-systest/results/kubeconfig.yaml
+ osm version
HTTPSConnectionPool(host='172.21.23.11', port=9999): Max retries exceeded with url: /osm/version (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f72be4de500>: Failed to establish a new connection: [Errno 111] Connection refused'))
Maybe "--hostname" option or OSM_HOSTNAME environment variable needs to be specified
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Run Robot tests)
Stage 'Run Robot tests' skipped due to earlier failure(s)
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Declarative: Post Actions)
Retrieve container logs
[Pipeline] echo
[Pipeline] sh
[azure_robot_tests] Running shell script
+ . /robot-systest/results/osm_environment.rc
+ export CLOUD_TYPE=azure
+ export OSM_HOSTNAME=172.21.23.11
+ export OSM_IMAGE_NAME=osmtest202401211214
+ export JUJU_PASSWORD=
+ /robot-systest/cloud-scripts/remote-extract-logs.sh
Saving grafana logs...
Warning: Permanently added '172.21.23.11' (ED25519) to the list of known hosts.
Saving keystone logs...
Saving lcm logs...
Saving mon logs...
Saving nbi logs...
Saving pol logs...
Saving ro logs...
Saving ngui logs...
Saving airflow-scheduler logs...
Saving pushgateway-prometheus-pushgateway logs...
Saving webhook-translator logs...
Saving kafka logs...
Saving mongo logs...
Saving mysql logs...
Saving prometheus logs...
Saving zookeeper logs...
Saving alertmanager logs...

All logs saved to /robot-systest/results/
[Pipeline] echo
Save results
[Pipeline] sh
[azure_robot_tests] Running shell script
+ rm -rf results
[Pipeline] sh
[azure_robot_tests] Running shell script
+ cp -var /robot-systest/results /robot-systest/reports/* .
'/robot-systest/results' -> './results'
'/robot-systest/results/osm-deploy-grafana.log' -> './results/osm-deploy-grafana.log'
'/robot-systest/results/osm-deploy-keystone.log' -> './results/osm-deploy-keystone.log'
'/robot-systest/results/osm-deploy-lcm.log' -> './results/osm-deploy-lcm.log'
'/robot-systest/results/osm-deploy-mon.log' -> './results/osm-deploy-mon.log'
'/robot-systest/results/osm-deploy-nbi.log' -> './results/osm-deploy-nbi.log'
'/robot-systest/results/osm-deploy-pol.log' -> './results/osm-deploy-pol.log'
'/robot-systest/results/osm-deploy-ro.log' -> './results/osm-deploy-ro.log'
'/robot-systest/results/osm-deploy-ngui.log' -> './results/osm-deploy-ngui.log'
'/robot-systest/results/osm-deploy-airflow-scheduler.log' -> './results/osm-deploy-airflow-scheduler.log'
'/robot-systest/results/osm-deploy-pushgateway-prometheus-pushgateway.log' -> './results/osm-deploy-pushgateway-prometheus-pushgateway.log'
'/robot-systest/results/osm-deploy-webhook-translator.log' -> './results/osm-deploy-webhook-translator.log'
'/robot-systest/results/osm-sts-kafka.log' -> './results/osm-sts-kafka.log'
'/robot-systest/results/osm-sts-mongo.log' -> './results/osm-sts-mongo.log'
'/robot-systest/results/osm-sts-mysql.log' -> './results/osm-sts-mysql.log'
'/robot-systest/results/osm-sts-prometheus.log' -> './results/osm-sts-prometheus.log'
'/robot-systest/results/osm-sts-zookeeper.log' -> './results/osm-sts-zookeeper.log'
'/robot-systest/results/osm-sts-alertmanager.log' -> './results/osm-sts-alertmanager.log'
'/robot-systest/results/k8s_environment.rc' -> './results/k8s_environment.rc'
'/robot-systest/results/kubeconfig.yaml' -> './results/kubeconfig.yaml'
'/robot-systest/results/osm_environment.rc' -> './results/osm_environment.rc'
cp: cannot stat '/robot-systest/reports/*': No such file or directory
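Editor's note: the copy fails only on the reports glob because no Robot reports were produced (the test stage was skipped). A tolerant variant would keep the results while ignoring a missing reports directory; a sketch:

    cp -var /robot-systest/results .
    if [ -d /robot-systest/reports ]; then
        cp -var /robot-systest/reports/* .
    fi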
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
$ docker stop --time=1 11d681e3b94fd330adc948aafe13ea93f8c6f4a6d55131532961614f445bdcea
$ docker rm -f 11d681e3b94fd330adc948aafe13ea93f8c6f4a6d55131532961614f445bdcea
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // node
[Pipeline] End of Pipeline
ERROR: script returned exit code 1
Finished: FAILURE