diff --git a/hooks/playbooks/skmo/configure-leaf-listener.yaml b/hooks/playbooks/skmo/configure-leaf-listener.yaml new file mode 100644 index 0000000000..2bfa8aee05 --- /dev/null +++ b/hooks/playbooks/skmo/configure-leaf-listener.yaml @@ -0,0 +1,36 @@ +--- +- name: Patch leaf control plane with barbican-keystone-listener transport URL + hosts: localhost + gather_facts: false + vars: + central_namespace: openstack + leaf_namespace: openstack2 + leaf_transport_url_name: rabbitmq-transport-url-barbican-keystone-listener-regiontwo + tasks: + - name: Get transport URL secret from central namespace + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ central_namespace }}" + name: "{{ leaf_transport_url_name }}" + register: _transport_secret + + - name: Patch OpenStackControlPlane in leaf region with notifications transport_url + vars: + _transport_url: "{{ _transport_secret.resources[0].data['transport_url'] | b64decode }}" + kubernetes.core.k8s: + state: patched + api_version: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + name: controlplane + namespace: "{{ leaf_namespace }}" + definition: + spec: + barbican: + template: + barbicanKeystoneListener: + customServiceConfig: | + [DEFAULT] + transport_url = {{ _transport_url }} + [keystone_notifications] + pool_name = barbican-listener-regionTwo diff --git a/hooks/playbooks/skmo/prepare-leaf.yaml b/hooks/playbooks/skmo/prepare-leaf.yaml new file mode 100644 index 0000000000..d32fe2457a --- /dev/null +++ b/hooks/playbooks/skmo/prepare-leaf.yaml @@ -0,0 +1,209 @@ +--- +- name: Prepare SKMO leaf prerequisites in regionZero + hosts: localhost + gather_facts: false + vars: + skmo_values_file: "{{ cifmw_architecture_repo }}/examples/va/multi-namespace-skmo/control-plane2/skmo-values.yaml" + osp_secrets_env_file: "{{ cifmw_architecture_repo }}/lib/control-plane/base/osp-secrets.env" + central_namespace: openstack + leaf_namespace: openstack2 + leaf_secret_name: osp-secret + 
central_rootca_secret: rootca-public + central_rootca_internal_secret: rootca-internal + leaf_transport_url_name: barbican-keystone-listener-regiontwo + leaf_transport_url_name_secret: rabbitmq-transport-url-barbican-keystone-listener-regiontwo + leaf_transport_url_secret_copy: barbican-keystone-listener-regiontwo-transport + tasks: + - name: Wait for central Keystone API to be ready + kubernetes.core.k8s_info: + api_version: keystone.openstack.org/v1beta1 + kind: KeystoneAPI + namespace: "{{ central_namespace }}" + register: _keystoneapi_info + retries: 60 + delay: 10 + until: + - _keystoneapi_info.resources | length > 0 + - _keystoneapi_info.resources[0].status.conditions is defined + - _keystoneapi_info.resources[0].status.conditions | + selectattr('type', 'equalto', 'Ready') | + selectattr('status', 'equalto', 'True') | list | length > 0 + + - name: Wait for openstackclient pod to be ready in central region + kubernetes.core.k8s_info: + api_version: v1 + kind: Pod + namespace: "{{ central_namespace }}" + name: openstackclient + register: _osc_pod_info + retries: 30 + delay: 10 + until: + - _osc_pod_info.resources | length > 0 + - _osc_pod_info.resources[0].status.conditions is defined + - _osc_pod_info.resources[0].status.conditions | + selectattr('type', 'equalto', 'Ready') | + selectattr('status', 'equalto', 'True') | list | length > 0 + + - name: Load SKMO values + ansible.builtin.set_fact: + skmo_values: "{{ lookup('file', skmo_values_file) | from_yaml }}" + + - name: Set SKMO leaf facts + ansible.builtin.set_fact: + leaf_region: "{{ skmo_values.data.leafRegion }}" + leaf_admin_user: "{{ skmo_values.data.leafAdminUser }}" + leaf_admin_project: "{{ skmo_values.data.leafAdminProject }}" + leaf_admin_password_key: "{{ skmo_values.data.leafAdminPasswordKey }}" + keystone_internal_url: "{{ skmo_values.data.keystoneInternalURL }}" + keystone_public_url: "{{ skmo_values.data.keystonePublicURL }}" + ca_bundle_secret_name: "{{ skmo_values.data.leafCaBundleSecretName 
}}" + + - name: Ensure leaf osp-secret exists (pre-create from env file) + ansible.builtin.shell: | + set -euo pipefail + if ! oc -n {{ leaf_namespace }} get secret {{ leaf_secret_name }} >/dev/null 2>&1; then + oc -n {{ leaf_namespace }} create secret generic {{ leaf_secret_name }} \ + --from-env-file="{{ osp_secrets_env_file }}" \ + --dry-run=client -o yaml | oc apply -f - + fi + args: + executable: /bin/bash + + - name: Read leaf admin password from leaf secret + ansible.builtin.shell: | + set -euo pipefail + oc -n {{ leaf_namespace }} get secret {{ leaf_secret_name }} \ + -o jsonpath='{.data.{{ leaf_admin_password_key }}}' | base64 -d + args: + executable: /bin/bash + register: leaf_admin_password + changed_when: false + + - name: Ensure leaf region exists in central Keystone + ansible.builtin.shell: | + set -euo pipefail + oc -n {{ central_namespace }} rsh openstackclient \ + openstack region show {{ leaf_region }} >/dev/null 2>&1 || \ + oc -n {{ central_namespace }} rsh openstackclient \ + openstack region create {{ leaf_region }} + args: + executable: /bin/bash + + - name: Ensure keystone catalog endpoints exist for leaf region + ansible.builtin.shell: | + set -euo pipefail + if ! oc -n {{ central_namespace }} rsh openstackclient \ + openstack endpoint list --service keystone --interface public --region {{ leaf_region }} \ + -f value -c ID | head -1 | grep -q .; then + oc -n {{ central_namespace }} rsh openstackclient \ + openstack endpoint create --region {{ leaf_region }} identity public "{{ keystone_public_url }}" + fi + if ! 
oc -n {{ central_namespace }} rsh openstackclient \ + openstack endpoint list --service keystone --interface internal --region {{ leaf_region }} \ + -f value -c ID | head -1 | grep -q .; then + oc -n {{ central_namespace }} rsh openstackclient \ + openstack endpoint create --region {{ leaf_region }} identity internal "{{ keystone_internal_url }}" + fi + args: + executable: /bin/bash + + - name: Ensure leaf admin project exists in central Keystone + ansible.builtin.shell: | + set -euo pipefail + oc -n {{ central_namespace }} rsh openstackclient \ + openstack project show {{ leaf_admin_project }} >/dev/null 2>&1 || \ + oc -n {{ central_namespace }} rsh openstackclient \ + openstack project create {{ leaf_admin_project }} + args: + executable: /bin/bash + + - name: Ensure leaf admin user exists and has admin role + ansible.builtin.shell: | + set -euo pipefail + if ! oc -n {{ central_namespace }} rsh openstackclient \ + openstack user show {{ leaf_admin_user }} >/dev/null 2>&1; then + oc -n {{ central_namespace }} rsh openstackclient \ + openstack user create --domain Default --password "{{ leaf_admin_password.stdout | trim }}" {{ leaf_admin_user }} + fi + oc -n {{ central_namespace }} rsh openstackclient \ + openstack role add --project {{ leaf_admin_project }} --user {{ leaf_admin_user }} admin + args: + executable: /bin/bash + + - name: Get existing leaf CA bundle secret if present + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ leaf_namespace }}" + name: "{{ ca_bundle_secret_name }}" + register: _existing_bundle + + - name: Get central rootca certs + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ central_namespace }}" + name: "{{ item }}" + register: _central_certs + loop: + - "{{ central_rootca_secret }}" + - "{{ central_rootca_internal_secret }}" + + - name: Create or update leaf CA bundle secret + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: "{{ 
ca_bundle_secret_name }}" + namespace: "{{ leaf_namespace }}" + data: "{{ (_existing_bundle.resources[0].data | default({})) | combine({ + 'skmo-central-rootca.crt': _central_certs.results[0].resources[0].data['tls.crt'], + 'skmo-central-rootca-internal.crt': _central_certs.results[1].resources[0].data['tls.crt'] + }) }}" + + - name: Create TransportURL CR in central region for leaf listener + ansible.builtin.shell: | + set -euo pipefail + oc apply -f - <- + {{ + ((_central_oscp_info.resources | first).spec.tls | default({})).caBundleSecretName + | default(cifmw_custom_ca_certs_secret_name | default('custom-ca-certs', true), true) + | default('custom-ca-certs', true) + }} + _oscp_has_ca_bundle: >- + {{ + ( + ((_central_oscp_info.resources | first).spec.tls | default({})).caBundleSecretName + | default('') + ) | length > 0 + }} + + # ------------------------------------------------------------------------- + # Step 2 - fetch the leaf region CA certs + # ------------------------------------------------------------------------- + - name: Get leaf region rootca certs + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ leaf_namespace }}" + name: "{{ item }}" + register: _leaf_certs + loop: + - "{{ leaf_rootca_secret }}" + - "{{ leaf_rootca_internal_secret }}" + + # ------------------------------------------------------------------------- + # Step 3 - get existing central CA bundle data (if secret already exists) + # ------------------------------------------------------------------------- + - name: Look up existing central CA bundle secret + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ central_namespace }}" + name: "{{ _ca_bundle_secret_name }}" + register: _existing_bundle + + - name: Capture existing CA bundle secret data + ansible.builtin.set_fact: + _existing_bundle_data: >- + {{ + (_existing_bundle.resources | first).data + if _existing_bundle.resources | length > 0 + else {} + }} + + # 
------------------------------------------------------------------------- + # Step 4 - create or update the secret, merging in the leaf CAs + # ------------------------------------------------------------------------- + - name: Create or update central CA bundle secret with leaf region CAs + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: "{{ _ca_bundle_secret_name }}" + namespace: "{{ central_namespace }}" + data: >- + {{ + _existing_bundle_data | combine({ + 'skmo-leaf-rootca.crt': + _leaf_certs.results[0].resources[0].data['tls.crt'], + 'skmo-leaf-rootca-internal.crt': + _leaf_certs.results[1].resources[0].data['tls.crt'] + }) + }} + + # ------------------------------------------------------------------------- + # Step 5 - patch the OSCP to reference the secret when not already set + # ------------------------------------------------------------------------- + - name: Patch OpenStackControlPlane to set caBundleSecretName (when unset) + when: not _oscp_has_ca_bundle | bool + kubernetes.core.k8s: + state: patched + definition: + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: "{{ controlplane_name }}" + namespace: "{{ central_namespace }}" + spec: + tls: + caBundleSecretName: "{{ _ca_bundle_secret_name }}" + + # ------------------------------------------------------------------------- + # Step 6 - wait for RHOSO to reconcile combined-ca-bundle. + # + # We compare the fingerprint of the leaf rootca cert we just added against + # every cert in combined-ca-bundle, retrying until it appears. 
+ # ------------------------------------------------------------------------- + - name: Wait for leaf region CA to appear in combined-ca-bundle + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ central_namespace }}" + name: combined-ca-bundle + register: _combined_bundle + until: >- + (_combined_bundle.resources | length > 0) and + ( + _leaf_certs.results[0].resources[0].data['tls.crt'] | b64decode + in + (_combined_bundle.resources | first).data['tls-ca-bundle.pem'] | b64decode + ) + retries: 30 + delay: 10 + changed_when: false diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo b/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo new file mode 120000 index 0000000000..67c8e7f36c --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo @@ -0,0 +1 @@ +multi-namespace \ No newline at end of file diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 index f3316b92ff..492f2f3dce 100644 --- a/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 @@ -3,9 +3,9 @@ {% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} {% set instances_names = [] %} {% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} -{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_nodes = _original_nodeset.nodes if _original_nodeset.nodes else {} %} {% set _original_services = _original_nodeset['services'] | default([]) %} -{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% set _vm_type = ((_original_nodes.keys() | first).split('-')[1] | regex_replace('\\d+$', '')) if _original_nodes else 'compute' %} {{ '#vmtype: ' ~ _vm_type }} {% for 
_inst in cifmw_networking_env_definition.instances.keys() %} {% if _inst.startswith(_vm_type ~ "2-") %} diff --git a/roles/federation/defaults/main.yml b/roles/federation/defaults/main.yml index acab89258d..c691046fa8 100644 --- a/roles/federation/defaults/main.yml +++ b/roles/federation/defaults/main.yml @@ -39,6 +39,22 @@ cifmw_federation_keycloak_url_validate_certs: false # Deploy one realm by default. Add true to job vars for multirealm deploys. cifmw_federation_deploy_multirealm: false +# ============================================================================= +# CA CERTIFICATE HANDLING +# ============================================================================= +# When set to a non-empty string, the federation role will look for an existing +# Kubernetes Secret with this name in cifmw_federation_run_osp_cmd_namespace. +# If the secret exists, the Keycloak CA certificate is added as a new key +# (keycloak-ca.crt) without disturbing existing keys. If the secret does not +# exist it is created with just the Keycloak CA. In both cases the kustomization +# patch does NOT override spec.tls.caBundleSecretName, assuming the control plane +# CR already points to this secret. +# +# When left empty (the default) the original behaviour is preserved: a dedicated +# 'keycloakca' secret is created and the kustomization patch sets +# spec.tls.caBundleSecretName to 'keycloakca'. 
+cifmw_custom_ca_certs_secret_name: "" + # ============================================================================= # KEYCLOAK TEST USERS AND GROUPS - REALM 1 # ============================================================================= diff --git a/roles/federation/tasks/hook_controlplane_config.yml b/roles/federation/tasks/hook_controlplane_config.yml index eadc6f01ed..9ec02b056c 100644 --- a/roles/federation/tasks/hook_controlplane_config.yml +++ b/roles/federation/tasks/hook_controlplane_config.yml @@ -14,49 +14,76 @@ # License for the specific language governing permissions and limitations # under the License. -- name: Create file to customize keystone for Federation resources deployed in the control plane - ansible.builtin.copy: - dest: "{{ cifmw_manifests_dir }}/kustomizations/controlplane/keystone_federation.yaml" - content: |- - apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - resources: - - namespace: {{ cifmw_federation_run_osp_cmd_namespace }} - patches: - - target: - kind: OpenStackControlPlane - name: .* - patch: |- - - op: add - path: /spec/tls - value: {} - - op: add - path: /spec/tls/caBundleSecretName - value: keycloakca - - op: add - path: /spec/keystone/template/httpdCustomization - value: - customConfigSecret: keystone-httpd-override - - op: add - path: /spec/keystone/template/customServiceConfig - value: | - [DEFAULT] - insecure_debug=true - debug=true - [federation] - trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ - [openid] - remote_id_attribute=HTTP_OIDC_ISS - [auth] - methods = password,token,oauth1,mapped,application_credential,openid - mode: "0644" - +# --------------------------------------------------------------------------- +# Step 1 - read the Keycloak CA cert written by federation-pre-deploy +# --------------------------------------------------------------------------- - name: Get ingress operator CA cert ansible.builtin.slurp: - src: "{{ [ ansible_user_dir, 
'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + src: "{{ [ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" register: federation_sso_ca -- name: Add Keycloak CA secret +# --------------------------------------------------------------------------- +# Step 2 - read the live OSCP to determine where the CA bundle lives. +# +# Priority for the secret name: +# 1. spec.tls.caBundleSecretName already set on the OSCP (use it as-is). +# 2. cifmw_custom_ca_certs_secret_name variable (if set by caller). +# 3. Hard default: "custom-ca-certs". +# +# This makes the hook self-healing: it does not rely on the kustomize having +# correctly propagated caBundleSecretName, and it works on fresh installs +# where the secret does not yet exist. +# --------------------------------------------------------------------------- +- name: Read current OpenStackControlPlane state + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + name: controlplane + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + register: _federation_oscp_info + +- name: Resolve CA bundle secret name and check if OSCP already references one + ansible.builtin.set_fact: + _federation_ca_bundle_secret_name: >- + {{ + ((_federation_oscp_info.resources | first).spec.tls | default({})).caBundleSecretName + | default(cifmw_custom_ca_certs_secret_name | default('custom-ca-certs', true), true) + | default('custom-ca-certs', true) + }} + _federation_oscp_has_ca_bundle: >- + {{ + ( + ((_federation_oscp_info.resources | first).spec.tls | default({})).caBundleSecretName + | default('') + ) | length > 0 + }} + +# --------------------------------------------------------------------------- +# Step 3 - preserve any keys already in the target secret +# --------------------------------------------------------------------------- +- name: Look up existing CA bundle secret + 
kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: v1 + kind: Secret + name: "{{ _federation_ca_bundle_secret_name }}" + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + register: _federation_existing_ca_bundle + +- name: Capture existing CA bundle secret data + ansible.builtin.set_fact: + _federation_ca_bundle_existing_data: >- + {{ + (_federation_existing_ca_bundle.resources | first).data + if _federation_existing_ca_bundle.resources | length > 0 + else {} + }} + +# --------------------------------------------------------------------------- +# Step 4 - create / update the secret, adding keycloak-ca.crt +# --------------------------------------------------------------------------- +- name: Create or update CA bundle secret with Keycloak CA cert kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" state: present @@ -65,11 +92,80 @@ kind: Secret type: Opaque metadata: - name: keycloakca + name: "{{ _federation_ca_bundle_secret_name }}" + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + data: >- + {{ + _federation_ca_bundle_existing_data | + combine({'keycloak-ca.crt': federation_sso_ca.content}) + }} + +# --------------------------------------------------------------------------- +# Step 5 - patch the OSCP to reference the secret when not already set +# --------------------------------------------------------------------------- +- name: Patch OpenStackControlPlane to set caBundleSecretName (when unset) + when: not _federation_oscp_has_ca_bundle | bool + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: patched + definition: + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: controlplane namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" - data: - KeyCloakCA: "{{ federation_sso_ca.content }}" + spec: + tls: + caBundleSecretName: "{{ _federation_ca_bundle_secret_name }}" +# 
--------------------------------------------------------------------------- +# Step 6 - kustomization for CRC/devscripts flow (not consumed by kustomize_deploy) +# --------------------------------------------------------------------------- +- name: Ensure kustomization controlplane directory exists + ansible.builtin.file: + path: "{{ cifmw_manifests_dir }}/kustomizations/controlplane" + state: directory + mode: "0755" + +- name: Create Keystone federation kustomization + ansible.builtin.copy: + dest: "{{ cifmw_manifests_dir }}/kustomizations/controlplane/keystone_federation.yaml" + mode: "0644" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ cifmw_federation_run_osp_cmd_namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: controlplane + spec: + tls: + caBundleSecretName: {{ _federation_ca_bundle_secret_name }} + keystone: + template: + httpdCustomization: + customConfigSecret: keystone-httpd-override + customServiceConfig: | + [DEFAULT] + insecure_debug=true + debug=true + [federation] + trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ + [openid] + remote_id_attribute=HTTP_OIDC_ISS + [auth] + methods = password,token,oauth1,mapped,application_credential,openid + +# --------------------------------------------------------------------------- +# Step 7 - Keystone httpd override secret (always needed) +# --------------------------------------------------------------------------- - name: Create Keystone httpd override secret for Federation kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" @@ -83,3 +179,33 @@ type: Opaque stringData: federation.conf: "{{ lookup('template', 'federation-single.conf.j2') }}" + +# --------------------------------------------------------------------------- +# Step 8 - patch the OSCP for Keystone OIDC settings 
(kustomize_deploy flow) +# --------------------------------------------------------------------------- +- name: Patch OpenStackControlPlane with Keystone federation config + when: _federation_oscp_info.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: patched + definition: + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: controlplane + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + spec: + keystone: + template: + httpdCustomization: + customConfigSecret: keystone-httpd-override + customServiceConfig: | + [DEFAULT] + insecure_debug=true + debug=true + [federation] + trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ + [openid] + remote_id_attribute=HTTP_OIDC_ISS + [auth] + methods = password,token,oauth1,mapped,application_credential,openid diff --git a/roles/federation/tasks/hook_post_deploy.yml b/roles/federation/tasks/hook_post_deploy.yml index 7b49c46330..b6ea2c91df 100644 --- a/roles/federation/tasks/hook_post_deploy.yml +++ b/roles/federation/tasks/hook_post_deploy.yml @@ -14,6 +14,22 @@ # License for the specific language governing permissions and limitations # under the License. 
+- name: Wait for Keystone to be Ready with federation config applied + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: keystone.openstack.org/v1beta1 + kind: KeystoneAPI + name: keystone + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + register: _keystone_ready + until: >- + _keystone_ready.resources | length > 0 and + (_keystone_ready.resources[0].status.conditions | default([]) | + selectattr('type', 'equalto', 'Ready') | + selectattr('status', 'equalto', 'True') | list | length > 0) + retries: 30 + delay: 20 + - name: Build realm configurations for single realm OpenStack setup ansible.builtin.set_fact: _federation_openstack_realms_to_process: diff --git a/roles/federation/tasks/run_keycloak_setup.yml b/roles/federation/tasks/run_keycloak_setup.yml index 41cd8ef218..56db4e6b04 100644 --- a/roles/federation/tasks/run_keycloak_setup.yml +++ b/roles/federation/tasks/run_keycloak_setup.yml @@ -54,11 +54,13 @@ kind: InstallPlan register: ip_list until: >- - {{ - ip_list.resources | - map(attribute='metadata.labels') | - select('match', '.*rhsso-operator.*') - }} + ip_list.resources | + selectattr('metadata.labels', 'defined') | + map(attribute='metadata.labels') | + map('dict2items') | + flatten | + selectattr('key', 'match', '.*rhsso-operator.*') | + list | length > 0 retries: 30 delay: 40 diff --git a/roles/federation/tasks/run_openstack_auth_setup.yml b/roles/federation/tasks/run_openstack_auth_setup.yml index 55c2a30ce1..558558206a 100644 --- a/roles/federation/tasks/run_openstack_auth_setup.yml +++ b/roles/federation/tasks/run_openstack_auth_setup.yml @@ -53,9 +53,21 @@ remote_path: "/home/cloud-admin/{{ cifmw_federation_keycloak_testuser2_username }}" local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser2_username ] | path_join }}" -- name: Copy system CA bundle +# Build full-ca-list.crt starting from the openstackclient pod's own system CA +# bundle (which 
already trusts all RHOSO/OCP internal CAs including rootca-public), +# then append the ingress-operator CA so that Keycloak — accessed via the OCP +# ingress route — is also trusted. Using the pod's bundle as the base avoids +# any mismatch between what controller-0 trusts and what the pod trusts. +- name: Fetch system CA bundle from openstackclient pod + kubernetes.core.k8s_exec: + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + pod: openstackclient + command: cat /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + register: _pod_ca_bundle + +- name: Write pod CA bundle locally as base for full-ca-list.crt ansible.builtin.copy: - src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" + content: "{{ _pod_ca_bundle.stdout }}" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" mode: "0444" @@ -67,6 +79,7 @@ - name: Add ingress operator CA to bundle ansible.builtin.blockinfile: path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" + marker: "# {mark} ingress-operator CA" block: "{{ federation_sso_ca.content | b64decode }}" - name: Copy CA bundle to openstackclient pod diff --git a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index 07f40baba4..5752d102c2 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -21,12 +21,38 @@ mode: "0640" when: cifmw_federation_deploy_type == "crc" +- name: Check if federation domain already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack domain show {{ cifmw_federation_keystone_domain }} -f value -c id + register: _federation_domain_check + failed_when: false + changed_when: false + - name: Run federation create domain + when: 
_federation_domain_check.rc != 0 vars: _osp_cmd: "openstack domain create {{ cifmw_federation_keystone_domain }}" ansible.builtin.include_tasks: run_osp_cmd.yml +- name: Check if federation identity provider already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack identity provider show {{ cifmw_federation_IdpName }} -f value -c id + register: _federation_idp_check + failed_when: false + changed_when: false + - name: Run federation identity provider create + when: _federation_idp_check.rc != 0 vars: _osp_cmd: "openstack identity provider create --remote-id {{ cifmw_federation_remote_id }} @@ -47,38 +73,99 @@ remote_path: "/home/cloud-admin/{{ cifmw_federation_rules_file }}" local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_rules_file ] | path_join }}" +- name: Check if federation mapping already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack mapping show {{ cifmw_federation_mapping_name }} -f value -c id + register: _federation_mapping_check + failed_when: false + changed_when: false + - name: Run federation mapping create + when: _federation_mapping_check.rc != 0 vars: _osp_cmd: "openstack mapping create --rules {{ cifmw_federation_rules_file }} {{ cifmw_federation_mapping_name }}" ansible.builtin.include_tasks: run_osp_cmd.yml +- name: Check if federation group already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack group show --domain {{ cifmw_federation_keystone_domain }} + {{ cifmw_federation_group_name 
}} -f value -c id + register: _federation_group_check + failed_when: false + changed_when: false + - name: Run federation group create + when: _federation_group_check.rc != 0 vars: _osp_cmd: "openstack group create --domain {{ cifmw_federation_keystone_domain }} {{ cifmw_federation_group_name }}" ansible.builtin.include_tasks: run_osp_cmd.yml +- name: Check if federation project already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack project show --domain {{ cifmw_federation_keystone_domain }} + {{ cifmw_federation_project_name }} -f value -c id + register: _federation_project_check + failed_when: false + changed_when: false + - name: Run federation project create + when: _federation_project_check.rc != 0 vars: _osp_cmd: "openstack project create --domain {{ cifmw_federation_keystone_domain }} {{ cifmw_federation_project_name }}" ansible.builtin.include_tasks: run_osp_cmd.yml -- name: Run federation rule add - vars: - _osp_cmd: "openstack role add +- name: Run federation role add (safe to repeat - role add is idempotent) + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t openstackclient -- + openstack role add --group {{ cifmw_federation_group_name }} --group-domain {{ cifmw_federation_keystone_domain }} --project {{ cifmw_federation_project_name }} --project-domain {{ cifmw_federation_keystone_domain }} - member" - ansible.builtin.include_tasks: run_osp_cmd.yml + member + failed_when: false + changed_when: true + +- name: Check if federation protocol already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec -n {{ cifmw_federation_run_osp_cmd_namespace }} -t 
openstackclient -- + openstack federation protocol show openid + --identity-provider {{ cifmw_federation_IdpName }} -f value -c id + register: _federation_protocol_check + failed_when: false + changed_when: false - name: Run federation protocol create + when: _federation_protocol_check.rc != 0 vars: _osp_cmd: "openstack federation protocol create openid --mapping {{ cifmw_federation_mapping_name }} diff --git a/roles/kustomize_deploy/defaults/main.yml b/roles/kustomize_deploy/defaults/main.yml index 5236fe3467..2565d04bc6 100644 --- a/roles/kustomize_deploy/defaults/main.yml +++ b/roles/kustomize_deploy/defaults/main.yml @@ -218,6 +218,17 @@ cifmw_kustomize_deploy_dp_dest_file: >- # timeouts and retry configuration + +# Suffix appended to OpenStackDataPlaneDeployment resource names when applying +# a deployment stage. Each run produces a uniquely named OSDPD, preventing the +# "Already deployed" deadlock that occurs when an existing OSDPD with +# Status.Deployed=true is re-applied on subsequent runs. +# When empty (the default), a timestamp is auto-generated once at the start of +# the first deployment stage and reused for all subsequent stages in the same +# run, so all OSDPDs in a given run share the same suffix. +# Set explicitly to pin a known value (e.g. for idempotent re-runs). +cifmw_kustomize_deploy_osdpd_suffix: "" + cifmw_kustomize_deploy_delay: 10 cifmw_kustomize_deploy_retries_subscription: 90 cifmw_kustomize_deploy_retries_install_plan: 60 diff --git a/roles/kustomize_deploy/files/uniquify_osdpd.py b/roles/kustomize_deploy/files/uniquify_osdpd.py new file mode 100644 index 0000000000..a9bef41353 --- /dev/null +++ b/roles/kustomize_deploy/files/uniquify_osdpd.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +"""Append a run suffix to OpenStackDataPlaneDeployment resource names. 
+ +Usage: uniquify_osdpd.py <file> <suffix> + +Reads the multi-document YAML file at <file>, appends <suffix> +to the metadata.name of every OpenStackDataPlaneDeployment resource that +does not already end with that suffix, and writes the result back in place. +Prints a "Renamed: <old> -> <new>" line for each renamed resource so that +the calling Ansible task can use changed_when on stdout. +""" +import sys +import yaml + +path, suffix = sys.argv[1], sys.argv[2] + +with open(path) as f: + docs = [d for d in yaml.safe_load_all(f) if d is not None] + +for doc in docs: + if doc.get("kind") == "OpenStackDataPlaneDeployment": + name = doc["metadata"]["name"] + if not name.endswith("-" + suffix): + doc["metadata"]["name"] = name + "-" + suffix + print("Renamed: {} -> {}".format(name, doc["metadata"]["name"])) + +with open(path, "w") as f: + yaml.dump_all(docs, f, default_flow_style=False) diff --git a/roles/kustomize_deploy/tasks/execute_step.yml b/roles/kustomize_deploy/tasks/execute_step.yml index 76bd5b82bf..6ea15ea727 100644 --- a/roles/kustomize_deploy/tasks/execute_step.yml +++ b/roles/kustomize_deploy/tasks/execute_step.yml @@ -93,6 +93,16 @@ - _tag_name not in _skip_tags - _tag_name_id not in _skip_tags block: + - name: Generate OSDPD run suffix (once per play) + when: _cifmw_kustomize_deploy_run_suffix is not defined + ansible.builtin.set_fact: + _cifmw_kustomize_deploy_run_suffix: >- + {{ + cifmw_kustomize_deploy_osdpd_suffix | string + if (cifmw_kustomize_deploy_osdpd_suffix | default('') | string | length > 0) + else (lookup('pipe', 'date +%Y%m%d%H%M%S') | string) + }} + - name: Ensure source files exists register: _src when: @@ -241,6 +251,18 @@ content: "{{ _kustomize_output.stdout }}" mode: "0644" + + - name: "Uniquify OpenStackDataPlaneDeployment names in {{ stage.path }}" + when: _cifmw_kustomize_deploy_run_suffix | default('') | string | length > 0 + ansible.builtin.script: + executable: python3 + cmd: >- + {{ role_path }}/files/uniquify_osdpd.py + {{ _output | quote }} + {{ 
_cifmw_kustomize_deploy_run_suffix | string | quote }} + register: _rename_osdpd + changed_when: "'Renamed:' in _rename_osdpd.stdout" + - name: "Store kustomized content in artifacts for {{ stage.path }}" ansible.builtin.copy: remote_src: true diff --git a/roles/openshift_adm/tasks/wait_for_cluster.yml b/roles/openshift_adm/tasks/wait_for_cluster.yml index 8fd5838dde..cca0f83352 100644 --- a/roles/openshift_adm/tasks/wait_for_cluster.yml +++ b/roles/openshift_adm/tasks/wait_for_cluster.yml @@ -50,6 +50,67 @@ retries: "{{ cifmw_openshift_adm_retry_count }}" delay: 30 +# MachineConfigs applied during devscripts install (e.g. iSCSI, Cinder LVM) +# trigger an MCO update cycle that continues asynchronously after the cluster +# is first reachable. On compact (3-master) clusters the MCO controller can +# get stuck: all nodes reboot and report state=Done / desiredDrain=uncordon-*, +# but the controller never issues the final kubectl-uncordon, leaving every +# node SchedulingDisabled indefinitely. We handle this with a loop that: +# 1. Waits until no MCP is mid-update (unavailableMachineCount drops to 0) +# OR detects the stuck state (all updated, none ready). +# 2. If stuck, uncordons all nodes to break the deadlock. +# 3. Repeats until all MCPs report Updated=True. +- name: Wait for MachineConfigPools to complete, fixing stuck cordons if needed. + when: + - not cifmw_openshift_adm_dry_run + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.shell: | + set -eo pipefail + MCP_JSON=$(oc get mcp -o json) + + UPDATING=$(echo "$MCP_JSON" | jq -r ' + .items[] | + select( + .status.conditions // [] | + map(select(.type == "Updating" and .status == "True")) | + length > 0 + ) | + .metadata.name + ') + + if [ -z "$UPDATING" ]; then + echo "All MCPs are up to date." + exit 0 + fi + + # At least one MCP is still Updating. Check for the stuck-uncordon case: + # updatedMachineCount == machineCount but readyMachineCount == 0. 
+ STUCK=$(echo "$MCP_JSON" | jq -r ' + .items[] | + select( + .status.updatedMachineCount == .status.machineCount and + .status.readyMachineCount == 0 and + .status.machineCount > 0 + ) | + .metadata.name + ') + + if [ -n "$STUCK" ]; then + echo "Stuck MCPs detected: $STUCK -- uncordoning all nodes to break deadlock." + oc adm uncordon $(oc get nodes -o jsonpath='{.items[*].metadata.name}') + else + echo "MCPs still updating (normal progress): $UPDATING" + fi + exit 1 + register: _mcp_wait + until: _mcp_wait.rc == 0 + retries: 60 + delay: 30 + changed_when: "'uncordoning' in _mcp_wait.stdout" + failed_when: false + - name: Check for pending certificate approval. when: - _openshift_adm_check_cert_approve | default(false) | bool diff --git a/roles/pcp_metrics/tasks/coreos.yaml b/roles/pcp_metrics/tasks/coreos.yaml index c60b8a733a..2d99c35a33 100644 --- a/roles/pcp_metrics/tasks/coreos.yaml +++ b/roles/pcp_metrics/tasks/coreos.yaml @@ -15,6 +15,12 @@ become: true ansible.builtin.command: cmd: rpm-ostree usroverlay + register: _pcp_usroverlay + changed_when: _pcp_usroverlay.rc == 0 + failed_when: + - _pcp_usroverlay.rc != 0 + - >- + "already in unlocked state" not in _pcp_usroverlay.stderr - name: Create required directory become: true diff --git a/scenarios/reproducers/va-multi-skmo.yml b/scenarios/reproducers/va-multi-skmo.yml new file mode 100644 index 0000000000..6b19e135c5 --- /dev/null +++ b/scenarios/reproducers/va-multi-skmo.yml @@ -0,0 +1,412 @@ +--- +cifmw_architecture_scenario: multi-namespace-skmo + +# HERE if you want to override kustomization, you can uncomment this parameter +# and push the data structure you want to apply. +# cifmw_architecture_user_kustomize: +# stage_0: +# 'network-values': +# data: +# starwars: Obiwan + +# HERE, if you want to stop the deployment loop at any stage, you can uncomment +# the following parameter and update the value to match the stage you want to +# reach. 
Known stages are: +# pre_kustomize_stage_INDEX +# pre_apply_stage_INDEX +# post_apply_stage_INDEX +# +# cifmw_deploy_architecture_stopper: + +cifmw_arch_automation_file: multi-namespace-skmo.yaml +cifmw_os_must_gather_additional_namespaces: kuttl,openshift-storage,sushy-emulator,openstack2 +cifmw_reproducer_validate_network_host: "192.168.122.1" +cifmw_libvirt_manager_default_gw_nets: + - ocpbm + - osptrunk2 +cifmw_networking_mapper_interfaces_info_translations: + osp_trunk: + - controlplane + - ctlplane + osptrunk2: + - ctlplane2 + +# Override the default 3-compute VA setting, since 3 computes in both namespaces is too expensive +cifmw_libvirt_manager_compute_amount: 2 + +cifmw_libvirt_manager_configuration: + networks: + osp_trunk: | + + osp_trunk + + + + + + + osptrunk2: | + + osptrunk2 + + + + + + + ocpbm: | + + ocpbm + + + + + + + ocppr: | + + ocppr + + + + vms: + ocp: + amount: 3 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "100" + extra_disks_num: 4 + extra_disks_size: "50G" + cpus: 16 + memory: 32 + root_part_id: 4 + uefi: true + nets: + - ocppr + - ocpbm + - osp_trunk # ctlplane and isolated networks for openstack namespace cloud + - osptrunk2 # ctlplane and isolated networks for openstack2 namespace cloud + - osp_trunk # OVN datacentre for openstack namespace cloud + - osptrunk2 # OVN datacentre for openstack2 namespace cloud + compute: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" + memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" + cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" + nets: + - ocpbm + - 
osp_trunk + compute2: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" + memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" + cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" + nets: + - ocpbm + - osptrunk2 + controller: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + - osptrunk2 + +## devscript support for OCP deploy +cifmw_devscripts_config_overrides: + fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" + +# Set Logical Volume Manager Storage by default for local storage +cifmw_use_lvms: true +cifmw_lvms_disk_list: + - /dev/vda + - /dev/vdb + - /dev/vdc + +# /dev/vdd is reserved for Cinder LVM backend (set up via MachineConfig at install time) +cifmw_devscripts_create_logical_volume: true +cifmw_devscripts_cinder_volume_pvs: + - /dev/vdd +cifmw_devscripts_enable_iscsi_on_ocp_nodes: true + +cifmw_networking_definition: + networks: + ctlplane: + network: "192.168.122.0/24" + gateway: "192.168.122.1" + dns: + - "192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 120 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + ctlplane2: + network: "192.168.133.0/24" + gateway: "192.168.133.1" + dns: + - "192.168.133.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - 
start: 100 + end: 120 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + internalapi: + network: "172.17.0.0/24" + vlan: 20 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + internalapi2: + network: "172.17.10.0/24" + vlan: 30 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + storage: + network: "172.18.0.0/24" + vlan: 21 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + storage2: + network: "172.18.10.0/24" + vlan: 31 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + tenant: + network: "172.19.0.0/24" + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 22 + mtu: 1496 + tenant2: + network: "172.19.10.0/24" + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 32 + mtu: 1496 + external: + network: "10.0.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + vlan: 22 + mtu: 1500 + external2: + network: "10.10.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + vlan: 32 + mtu: 1500 + + group-templates: + ocps: + network-template: + range: + start: 10 + length: 10 + networks: &ocps_nets + ctlplane: {} + internalapi: + trunk-parent: ctlplane + tenant: + trunk-parent: ctlplane + storage: + trunk-parent: ctlplane + ctlplane2: {} + internalapi2: + trunk-parent: ctlplane2 + tenant2: + trunk-parent: ctlplane2 + storage2: + trunk-parent: ctlplane2 + ocp_workers: + network-template: + range: 
+ start: 20 + length: 10 + networks: *ocps_nets + computes: + network-template: + range: + start: 100 + length: 21 + networks: + ctlplane: {} + internalapi: + trunk-parent: ctlplane + tenant: + trunk-parent: ctlplane + storage: + trunk-parent: ctlplane + compute2s: + network-template: + range: + start: 200 + length: 21 + networks: + ctlplane2: {} + internalapi2: + trunk-parent: ctlplane2 + tenant2: + trunk-parent: ctlplane2 + storage2: + trunk-parent: ctlplane2 + instances: + controller-0: + networks: + ctlplane: + ip: "192.168.122.9" + ctlplane2: + ip: "192.168.133.9" + +# Hooks +post_deploy: + - name: Discover hypervisors for openstack2 namespace + type: playbook + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/hooks/playbooks/nova_manage_discover_hosts.yml" + extra_vars: + namespace: openstack2 + _cell_conductors: nova-cell0-conductor-0 + +pre_admin_setup: + - name: Prepare OSP networks in openstack2 namespace + type: playbook + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_osp_networks.yaml" + extra_vars: + cifmw_os_net_setup_namespace: openstack2 + cifmw_os_net_setup_public_cidr: "192.168.133.0/24" + cifmw_os_net_setup_public_start: "192.168.133.230" + cifmw_os_net_setup_public_end: "192.168.133.250" + cifmw_os_net_setup_public_gateway: "192.168.133.1" + +post_tests: + - name: Run tempest against openstack2 namespace + type: playbook + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_validation.yaml" + extra_vars: + cifmw_test_operator_tempest_name: tempest-tests2 + cifmw_test_operator_namespace: openstack2