diff --git a/.devcontainer/README.md b/.devcontainer/README.md index d49bda6dc5..771b5c12ec 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -69,8 +69,12 @@ Now you can use aws cli to interact with the S3 service ### Inspecting Codespace creation logs -You can inspect the logs of the Codespace creation this way: -1. Press `Ctrl+Shift+P` (or `Cmd+Shift+P` on Mac) -2. Type "Codespaces: Export Logs" and select it -3. A zip file will be downloaded to your local machine -4. In the zip, look at the `creation.log` file \ No newline at end of file +You can inspect the logs of the Codespace creation in 2 ways: +1. When Codespace creation is still running: +Use Cmd/Ctrl + Shift + P -> View Creation Log to see full logs + +2. When the setup is finished, dump the logs: +a. Press `Ctrl+Shift+P` (or `Cmd+Shift+P` on Mac) +b. Type "Codespaces: Export Logs" and select it +c. A zip file will be downloaded to your local machine +d. In the zip, look at the `creation.log` file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 6af23450aa..0b23ef0ce6 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,6 +13,7 @@ "ghcr.io/devcontainers/features/docker-in-docker": {}, "ghcr.io/devcontainers/features/github-cli:1": {}, "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/python:1": {}, "ghcr.io/devcontainers/features/sshd:1": {}, "ghcr.io/devcontainers-extra/features/kind:1": {}, "ghcr.io/devcontainers/features/aws-cli:1": {}, @@ -95,6 +96,7 @@ "vscode": { "extensions": [ "ms-kubernetes-tools.vscode-kubernetes-tools", + "cucumberopen.cucumber-official" ] } }, diff --git a/.github/scripts/end2end/common.sh b/.github/scripts/end2end/common.sh index eb490d7026..5752f84835 100644 --- a/.github/scripts/end2end/common.sh +++ b/.github/scripts/end2end/common.sh @@ -1,11 +1,11 @@ get_token() { curl -k -H "Host: keycloak.zenko.local" \ - -d "client_id=${OIDC_CLIENT_ID}" \ - 
-d "username=${OIDC_USERNAME}" \ - -d "password=${OIDC_PASSWORD}" \ + -d "client_id=${KEYCLOAK_TEST_CLIENT_ID}" \ + -d "username=${KEYCLOAK_TEST_USER}" \ + -d "password=${KEYCLOAK_TEST_PASSWORD}" \ -d "grant_type=password" \ -d "scope=openid" \ - https://localhost/auth/realms/${OIDC_REALM}/protocol/openid-connect/token | \ + https://localhost/auth/realms/${KEYCLOAK_TEST_REALM_NAME}/protocol/openid-connect/token | \ jq -cr '.id_token' } diff --git a/.github/scripts/end2end/configs/keycloak_config.json b/.github/scripts/end2end/configs/keycloak_config.json index 2966edcb7f..0f2c799df9 100644 --- a/.github/scripts/end2end/configs/keycloak_config.json +++ b/.github/scripts/end2end/configs/keycloak_config.json @@ -1,5 +1,5 @@ { - "realm" : "${OIDC_REALM}", + "realm" : "${KEYCLOAK_TEST_REALM_NAME}", "enabled" : true, "defaultRoles" : [ "uma_authorization", "offline_access" ], "roles": { @@ -9,7 +9,7 @@ "name": "StorageManager", "composite": false, "clientRole": false, - "containerId": "${OIDC_REALM}", + "containerId": "${KEYCLOAK_TEST_REALM_NAME}", "attributes": {} }, { @@ -17,7 +17,7 @@ "name": "AccountTest::DataAccessor", "composite": false, "clientRole": false, - "containerId": "${OIDC_REALM}", + "containerId": "${KEYCLOAK_TEST_REALM_NAME}", "attributes": {} }, { @@ -25,7 +25,7 @@ "name": "AccountTest::DataConsumer", "composite": false, "clientRole": false, - "containerId": "${OIDC_REALM}", + "containerId": "${KEYCLOAK_TEST_REALM_NAME}", "attributes": {} }, { @@ -33,7 +33,7 @@ "name": "AccountTest::StorageAccountOwner", "composite": false, "clientRole": false, - "containerId": "${OIDC_REALM}", + "containerId": "${KEYCLOAK_TEST_REALM_NAME}", "attributes": {} } ] @@ -41,14 +41,14 @@ "requiredCredentials" : [ "password" ], "users" : [ { - "username": "${OIDC_USERNAME}-norights", + "username": "${KEYCLOAK_TEST_USER}-norights", "enabled": true, "totp": false, "emailVerified": false, "email": "e2e-norights@zenko.local", "attributes": { "instanceIds": [ - "${INSTANCE_ID}" + 
"${ZENKO_INSTANCE_ID}" ], "role": [ "user" @@ -85,7 +85,7 @@ "email": "storage_manager@zenko.local", "attributes": { "instanceIds": [ - "${INSTANCE_ID}" + "${ZENKO_INSTANCE_ID}" ], "role": [ "user" @@ -123,7 +123,7 @@ "email": "data_consumer@zenko.local", "attributes": { "instanceIds": [ - "${INSTANCE_ID}" + "${ZENKO_INSTANCE_ID}" ], "role": [ "user" @@ -161,7 +161,7 @@ "email": "storage_account_owner@zenko.local", "attributes": { "instanceIds": [ - "${INSTANCE_ID}" + "${ZENKO_INSTANCE_ID}" ], "role": [ "user" @@ -193,7 +193,7 @@ } ], "clients" : [ { - "clientId" : "${OIDC_CLIENT_ID}", + "clientId" : "${KEYCLOAK_TEST_CLIENT_ID}", "rootUrl": "http://keycloack.zenko.local", "adminUrl": "/", "surrogateAuthRequired" : false, diff --git a/.github/scripts/end2end/configs/keycloak_user.json b/.github/scripts/end2end/configs/keycloak_user.json index 7dfe2e1126..a8a69169f9 100644 --- a/.github/scripts/end2end/configs/keycloak_user.json +++ b/.github/scripts/end2end/configs/keycloak_user.json @@ -1,5 +1,5 @@ { - "username": "${OIDC_USERNAME}", + "username": "${KEYCLOAK_TEST_USER}", "enabled": true, "totp": false, "emailVerified": true, @@ -8,7 +8,7 @@ "email": "${OIDC_EMAIL}", "attributes": { "instanceIds": [ - "${INSTANCE_ID}" + "${ZENKO_INSTANCE_ID}" ], "role": [ "user" diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml index e4a886bed4..808049a1b0 100644 --- a/.github/scripts/end2end/configs/zenko.yaml +++ b/.github/scripts/end2end/configs/zenko.yaml @@ -87,10 +87,10 @@ spec: management: provider: InCluster oidc: - provider: '${OIDC_ENDPOINT}/auth/realms/${OIDC_REALM}' + provider: '${KEYCLOAK_TEST_ENDPOINT}/auth/realms/${KEYCLOAK_TEST_REALM_NAME}' federatedProviders: - - '${OIDC_ENDPOINT}/auth/realms/${OIDC_REALM}' - vaultClientId: ${OIDC_CLIENT_ID} + - '${KEYCLOAK_TEST_ENDPOINT}/auth/realms/${KEYCLOAK_TEST_REALM_NAME}' + vaultClientId: ${KEYCLOAK_TEST_CLIENT_ID} api: ingress: hostname: ${ZENKO_MANAGEMENT_INGRESS} diff 
--git a/.github/scripts/end2end/enable-https.sh b/.github/scripts/end2end/enable-https.sh index a2dafc2488..3b91dab289 100755 --- a/.github/scripts/end2end/enable-https.sh +++ b/.github/scripts/end2end/enable-https.sh @@ -60,8 +60,8 @@ kubectl patch zenko/${ZENKO_NAME} --type=merge -p '{ kubectl wait --for condition=Available --timeout 5m zenko/${ZENKO_NAME} # Update environment variables to use HTTPS URLs -echo "OIDC_ENDPOINT=https://keycloak.zenko.local" >> $GITHUB_ENV -echo "OIDC_HOST=keycloak.zenko.local" >> $GITHUB_ENV +echo "KEYCLOAK_TEST_ENDPOINT=https://keycloak.zenko.local" >> $GITHUB_ENV +echo "KEYCLOAK_TEST_HOST=keycloak.zenko.local" >> $GITHUB_ENV echo "ENABLE_KEYCLOAK_HTTPS=true" >> $GITHUB_ENV # Set the HTTPS ingress options for Keycloak diff --git a/.github/scripts/end2end/keycloak-helper.sh b/.github/scripts/end2end/keycloak-helper.sh index ec0cde2f9f..1ecfee4d74 100755 --- a/.github/scripts/end2end/keycloak-helper.sh +++ b/.github/scripts/end2end/keycloak-helper.sh @@ -29,34 +29,34 @@ case $COMMAND in "add-user") refresh_creds - export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko ${ZENKO_NAME} -o jsonpath='{.status.instanceID}'` + export ZENKO_INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko ${ZENKO_NAME} -o jsonpath='{.status.instanceID}'` export OIDC_EMAIL=${OIDC_EMAIL:-"e2e@zenko.local"} envsubst < $DIR/configs/keycloak_user.json | \ - ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh create users -r ${OIDC_REALM} -f - + ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh create users -r ${KEYCLOAK_TEST_REALM_NAME} -f - ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh set-password \ - -r ${OIDC_REALM} \ - --username ${OIDC_USERNAME} \ - --new-password ${OIDC_PASSWORD} + -r ${KEYCLOAK_TEST_REALM_NAME} \ + --username ${KEYCLOAK_TEST_USER} \ + --new-password ${KEYCLOAK_TEST_PASSWORD} # attach StorageManager role to user ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh add-roles \ - -r ${OIDC_REALM} \ - --uusername ${OIDC_USERNAME} \ + -r 
${KEYCLOAK_TEST_REALM_NAME} \ + --uusername ${KEYCLOAK_TEST_USER} \ --rolename "StorageManager" ;; "set-user-instance-ids") refresh_creds - export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko -o jsonpath='{.items[0].status.instanceID}'` + export ZENKO_INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko -o jsonpath='{.items[0].status.instanceID}'` # get user id - USER_ID=$(${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh get users -r ${OIDC_REALM} -q "username=${OIDC_USERNAME}" | jq -r '.[0].id') + USER_ID=$(${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh get users -r ${KEYCLOAK_TEST_REALM_NAME} -q "username=${KEYCLOAK_TEST_USER}" | jq -r '.[0].id') # set instanceIds array attribute for user - ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh update users/${USER_ID} -r ${OIDC_REALM} -s 'attributes={"instanceIds":["'"${INSTANCE_ID}"'"],"role":"user"}' + ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh update users/${USER_ID} -r ${KEYCLOAK_TEST_REALM_NAME} -s 'attributes={"instanceIds":["'"${ZENKO_INSTANCE_ID}"'"],"role":"user"}' ;; diff --git a/.github/scripts/end2end/load-config.sh b/.github/scripts/end2end/load-config.sh new file mode 100755 index 0000000000..fb10319ef5 --- /dev/null +++ b/.github/scripts/end2end/load-config.sh @@ -0,0 +1,248 @@ +#!/bin/bash +# Unified configuration loader for Zenko end-to-end tests +# +# Usage: +# source load-config.sh ctst # Load config for CTST tests +# source load-config.sh e2e # Load config for zenko_tests (e2e) +# source load-config.sh common # Load only common config +# +# After sourcing, use the helpers: +# kubectl exec pod -- env $(env_for_kubectl_exec) command +# kubectl run pod $(env_for_kubectl_run) -- command + +set -e +x + +SUITE="${1:-common}" + + +# Extract a value from the top-level env block in end2end.yaml. +# Strips ${{ ... }} expressions (secrets/context refs) leaving an empty string. 
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORKFLOWS_END2END_YAML="${SCRIPT_DIR}/../../workflows/end2end.yaml" +get_env_var() { + local key="$1" + yq eval ".env.${key}" "$WORKFLOWS_END2END_YAML" | sed 's/\${{[^}]*}}//g' | tr -d '"' +} + +# Output helpers – both derived from the same ENV_VARS array. +# Usage: kubectl exec pod -- env $(env_for_kubectl_exec) command +env_for_kubectl_exec() { printf '%s ' "${ENV_VARS[@]}"; } +# Usage: kubectl run pod $(env_for_kubectl_run) -- command +env_for_kubectl_run() { printf -- '--env=%s ' "${ENV_VARS[@]}"; } + +# ============================================================================= +# COMMON - Used by both CTST and zenko_tests +# ============================================================================= +load_common() { + ENV_VARS=() + + # From end2end.yaml + ENV_VARS+=("KEYCLOAK_TEST_REALM_NAME=$(get_env_var KEYCLOAK_TEST_REALM_NAME)") + ENV_VARS+=("KEYCLOAK_TEST_CLIENT_ID=$(get_env_var KEYCLOAK_TEST_CLIENT_ID)") + if [[ "$SUITE" == "e2e" ]]; then + ENV_VARS+=("KEYCLOAK_TEST_USER=$(get_env_var KEYCLOAK_TEST_USER)-norights") + elif [[ "$SUITE" == "ctst" ]]; then + ENV_VARS+=("KEYCLOAK_TEST_USER=$(get_env_var KEYCLOAK_TEST_USER)") + fi + ENV_VARS+=("KEYCLOAK_TEST_PASSWORD=$(get_env_var KEYCLOAK_TEST_PASSWORD)") + ENV_VARS+=("KEYCLOAK_TEST_HOST=$(get_env_var KEYCLOAK_TEST_HOST)") + ENV_VARS+=("AZURE_ACCOUNT_NAME=$(get_env_var AZURE_ACCOUNT_NAME)") + ENV_VARS+=("AZURE_SECRET_KEY=$(get_env_var AZURE_SECRET_KEY)") + ENV_VARS+=("AZURE_BACKEND_ENDPOINT=$(get_env_var AZURE_BACKEND_ENDPOINT)") + + # From k8s: Zenko account credentials + ENV_VARS+=("ZENKO_ACCESS_KEY=$(kubectl get secret end2end-account-zenko -o jsonpath='{.data.AccessKeyId}' | base64 -d)") + ENV_VARS+=("ZENKO_SECRET_KEY=$(kubectl get secret end2end-account-zenko -o jsonpath='{.data.SecretAccessKey}' | base64 -d)") + + # From k8s: Admin vault credentials + ENV_VARS+=("ADMIN_ACCESS_KEY_ID=$(kubectl get secret 
end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d)") + ENV_VARS+=("ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d)") +} + +# ============================================================================= +# CTST ONLY - Variables specific to cucumber tests +# ============================================================================= +load_ctst() { + load_common + + # From end2end.yaml + ENV_VARS+=("SUBDOMAIN=$(get_env_var SUBDOMAIN)") + + # Hardcoded CTST values + ENV_VARS+=("SSL=false") + ENV_VARS+=("ZENKO_PORT=80") + ENV_VARS+=("AZURE_ARCHIVE_ACCESS_TIER=Hot") + ENV_VARS+=("AZURE_ARCHIVE_MANIFEST_ACCESS_TIER=Hot") + + # From end2end.yaml + ENV_VARS+=("ZENKO_ACCOUNT_NAME=$(get_env_var ZENKO_ACCOUNT_NAME)") + ENV_VARS+=("STORAGE_MANAGER_USER_NAME=$(get_env_var STORAGE_MANAGER_USER_NAME)") + ENV_VARS+=("STORAGE_ACCOUNT_OWNER_USER_NAME=$(get_env_var STORAGE_ACCOUNT_OWNER_USER_NAME)") + ENV_VARS+=("DATA_CONSUMER_USER_NAME=$(get_env_var DATA_CONSUMER_USER_NAME)") + ENV_VARS+=("DATA_ACCESSOR_USER_NAME=$(get_env_var DATA_ACCESSOR_USER_NAME)") + ENV_VARS+=("DR_SUBDOMAIN=$(get_env_var DR_SUBDOMAIN)") + ENV_VARS+=("PROMETHEUS_NAME=$(get_env_var PROMETHEUS_NAME)") + ENV_VARS+=("AZURE_BACKEND_QUEUE_ENDPOINT=$(get_env_var AZURE_BACKEND_QUEUE_ENDPOINT)") + ENV_VARS+=("AZURE_ARCHIVE_BUCKET_NAME=$(get_env_var AZURE_ARCHIVE_BUCKET_NAME)") + ENV_VARS+=("AZURE_ARCHIVE_BUCKET_NAME_2=$(get_env_var AZURE_ARCHIVE_BUCKET_NAME_2)") + ENV_VARS+=("AZURE_ARCHIVE_QUEUE_NAME=$(get_env_var AZURE_ARCHIVE_QUEUE_NAME)") + ENV_VARS+=("NOTIF_DEST_NAME=$(get_env_var NOTIF_DEST_NAME)") + ENV_VARS+=("NOTIF_DEST_TOPIC=$(get_env_var NOTIF_DEST_TOPIC)") + ENV_VARS+=("NOTIF_ALT_DEST_NAME=$(get_env_var NOTIF_ALT_DEST_NAME)") + ENV_VARS+=("NOTIF_ALT_DEST_TOPIC=$(get_env_var NOTIF_ALT_DEST_TOPIC)") + ENV_VARS+=("NOTIF_AUTH_DEST_NAME=$(get_env_var NOTIF_AUTH_DEST_NAME)") + 
ENV_VARS+=("NOTIF_AUTH_DEST_TOPIC=$(get_env_var NOTIF_AUTH_DEST_TOPIC)") + ENV_VARS+=("NOTIF_AUTH_DEST_USERNAME=$(get_env_var NOTIF_AUTH_DEST_USERNAME)") + ENV_VARS+=("NOTIF_AUTH_DEST_PASSWORD=$(get_env_var NOTIF_AUTH_DEST_PASSWORD)") + + # From k8s: DR admin credentials (only exists if PRA is deployed) + ENV_VARS+=("DR_ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' 2>/dev/null | base64 -d || true)") + ENV_VARS+=("DR_ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' 2>/dev/null | base64 -d || true)") + + # From k8s: Kafka config + local kafka_host_port kafka_port + kafka_host_port=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \ + -o jsonpath='{.items[0].data.config\.json}' | base64 -d | jq -r '.kafka.hosts') + kafka_port="${kafka_host_port#*:}" + ENV_VARS+=("KAFKA_HOST_PORT=${kafka_host_port}") + ENV_VARS+=("KAFKA_AUTH_HOST_PORT=end2end-base-queue-auth-0:${kafka_port}") + + # From k8s: Sorbet/kafka topics + local sorbet_config + sorbet_config=$(kubectl get secret -l "app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end" \ + -o jsonpath='{.items[0].data.config\.json}' | base64 -d) + ENV_VARS+=("KAFKA_DEAD_LETTER_TOPIC=$(echo "${sorbet_config}" | jq -r '."kafka-dead-letter-topic"')") + ENV_VARS+=("KAFKA_OBJECT_TASK_TOPIC=$(echo "${sorbet_config}" | jq -r '."kafka-object-task-topic"')") + ENV_VARS+=("KAFKA_GC_REQUEST_TOPIC=$(echo "${sorbet_config}" | jq -r '."kafka-gc-request-topic"')") + + # From k8s: Zenko resource values + # ENV_VARS+=("TIME_PROGRESSION_FACTOR=$(kubectl get zenko end2end -o jsonpath='{.metadata.annotations.zenko\.io/time-progression-factor}')") + ENV_VARS+=("ZENKO_INSTANCE_ID=$(kubectl get zenko end2end -o jsonpath='{.status.instanceID}')") + ENV_VARS+=("KAFKA_CLEANER_INTERVAL=$(kubectl get zenko end2end -o 
jsonpath='{.spec.kafkaCleaner.interval}')") + ENV_VARS+=("SORBETD_RESTORE_TIMEOUT=$(kubectl get zenko end2end -o jsonpath='{.spec.sorbet.server.azure.restoreTimeout}')") + ENV_VARS+=("UTILIZATION_SERVICE_HOST=$(kubectl get zenko end2end -o jsonpath='{.spec.scuba.api.ingress.hostname}')") + + # From k8s: Backbeat API config + local cloudserver_config + cloudserver_config=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end \ + -o jsonpath='{.items[0].data.config\.json}' | base64 -d) + ENV_VARS+=("BACKBEAT_API_HOST=$(echo "${cloudserver_config}" | jq -r '.backbeat.host')") + ENV_VARS+=("BACKBEAT_API_PORT=$(echo "${cloudserver_config}" | jq -r '.backbeat.port')") + + # From k8s: Service users credentials + local lcbp_creds lcc_creds lcop_creds qp_creds sorbet_ak sorbet_sk + lcbp_creds=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcbp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-bp-1\.json}' | base64 -d) + lcc_creds=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcc-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-conductor-1\.json}' | base64 -d) + lcop_creds=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcop-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-op-1\.json}' | base64 -d) + qp_creds=$(kubectl get secret -l app.kubernetes.io/name=backbeat-qp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-qp-1\.json}' | base64 -d) + sorbet_ak=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.accessKey}' | base64 -d) + sorbet_sk=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.secretKey}' | base64 -d) + + local service_users_creds + service_users_creds=$(echo 
'{"backbeat-lifecycle-bp-1":'"${lcbp_creds}"',"backbeat-lifecycle-conductor-1":'"${lcc_creds}"',"backbeat-lifecycle-op-1":'"${lcop_creds}"',"backbeat-qp-1":'"${qp_creds}"',"sorbet-fwd-2":{"accessKey":"'"${sorbet_ak}"'","secretKey":"'"${sorbet_sk}"'"}}') + ENV_VARS+=("SERVICE_USERS_CREDENTIALS=${service_users_creds}") +} + +# ============================================================================= +# E2E ONLY - Variables specific to zenko_tests (mocha) +# ============================================================================= +load_e2e() { + load_common + + # From end2end.yaml + ENV_VARS+=("AWS_BACKEND_SOURCE_LOCATION=$(get_env_var AWS_BACKEND_SOURCE_LOCATION)") + ENV_VARS+=("AWS_BACKEND_DESTINATION_LOCATION=$(get_env_var AWS_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("AWS_BACKEND_DESTINATION_FAIL_LOCATION=$(get_env_var AWS_BACKEND_DESTINATION_FAIL_LOCATION)") + ENV_VARS+=("GCP_BACKEND_DESTINATION_LOCATION=$(get_env_var GCP_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("AZURE_BACKEND_DESTINATION_LOCATION=$(get_env_var AZURE_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("COLD_BACKEND_DESTINATION_LOCATION=$(get_env_var COLD_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("AZURE_ARCHIVE_BACKEND_DESTINATION_LOCATION=$(get_env_var AZURE_ARCHIVE_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("MIRIA_BACKEND_DESTINATION_LOCATION=$(get_env_var MIRIA_BACKEND_DESTINATION_LOCATION)") + ENV_VARS+=("LOCATION_QUOTA_BACKEND=$(get_env_var LOCATION_QUOTA_BACKEND)") + ENV_VARS+=("AWS_BUCKET_NAME=$(get_env_var AWS_BUCKET_NAME)") + ENV_VARS+=("AWS_CRR_BUCKET_NAME=$(get_env_var AWS_CRR_BUCKET_NAME)") + ENV_VARS+=("AWS_FAIL_BUCKET_NAME=$(get_env_var AWS_FAIL_BUCKET_NAME)") + ENV_VARS+=("AZURE_CRR_BUCKET_NAME=$(get_env_var AZURE_CRR_BUCKET_NAME)") + ENV_VARS+=("AZURE_ARCHIVE_BUCKET_NAME=$(get_env_var AZURE_ARCHIVE_BUCKET_NAME)") + ENV_VARS+=("GCP_CRR_BUCKET_NAME=$(get_env_var GCP_CRR_BUCKET_NAME)") + ENV_VARS+=("GCP_CRR_MPU_BUCKET_NAME=$(get_env_var GCP_CRR_MPU_BUCKET_NAME)") + 
ENV_VARS+=("GCP_ACCESS_KEY=$(get_env_var GCP_ACCESS_KEY)") + ENV_VARS+=("GCP_SECRET_KEY=$(get_env_var GCP_SECRET_KEY)") + ENV_VARS+=("GCP_BACKEND_SERVICE_KEY=$(get_env_var GCP_BACKEND_SERVICE_KEY)") + ENV_VARS+=("GCP_BACKEND_SERVICE_EMAIL=$(get_env_var GCP_BACKEND_SERVICE_EMAIL)") + ENV_VARS+=("AWS_ENDPOINT=$(get_env_var AWS_ENDPOINT)") + ENV_VARS+=("AWS_ACCESS_KEY=$(get_env_var AWS_ACCESS_KEY)") + ENV_VARS+=("AWS_SECRET_KEY=$(get_env_var AWS_SECRET_KEY)") + ENV_VARS+=("VERIFY_CERTIFICATES=$(get_env_var VERIFY_CERTIFICATES)") + # ENV_VARS+=("ENABLE_RING_TESTS=$(get_env_var ENABLE_RING_TESTS)") + ENV_VARS+=("RING_S3C_ACCESS_KEY=$(get_env_var RING_S3C_ACCESS_KEY)") + ENV_VARS+=("RING_S3C_SECRET_KEY=$(get_env_var RING_S3C_SECRET_KEY)") + ENV_VARS+=("RING_S3C_ENDPOINT=$(get_env_var RING_S3C_ENDPOINT)") + ENV_VARS+=("RING_S3C_BACKEND_SOURCE_LOCATION=$(get_env_var RING_S3C_BACKEND_SOURCE_LOCATION)") + ENV_VARS+=("RING_S3C_INGESTION_SRC_BUCKET_NAME=$(get_env_var RING_S3C_INGESTION_SRC_BUCKET_NAME)") + ENV_VARS+=("RING_S3C_BACKEND_SOURCE_NON_VERSIONED_LOCATION=$(get_env_var RING_S3C_BACKEND_SOURCE_NON_VERSIONED_LOCATION)") + ENV_VARS+=("RING_S3C_INGESTION_SRC_NON_VERSIONED_BUCKET_NAME=$(get_env_var RING_S3C_INGESTION_SRC_NON_VERSIONED_BUCKET_NAME)") + ENV_VARS+=("RING_S3C_INGESTION_NON_VERSIONED_OBJECT_COUNT_PER_TYPE=$(get_env_var RING_S3C_INGESTION_NON_VERSIONED_OBJECT_COUNT_PER_TYPE)") + ENV_VARS+=("CRR_SOURCE_LOCATION_NAME=$(get_env_var CRR_SOURCE_LOCATION_NAME)") + ENV_VARS+=("CRR_DESTINATION_LOCATION_NAME=$(get_env_var CRR_DESTINATION_LOCATION_NAME)") + ENV_VARS+=("CRR_ROLE_NAME=$(get_env_var CRR_ROLE_NAME)") + ENV_VARS+=("BACKBEAT_BUCKET_CHECK_TIMEOUT_S=$(get_env_var BACKBEAT_BUCKET_CHECK_TIMEOUT_S)") + ENV_VARS+=("MOCHA_FILE=$(get_env_var MOCHA_FILE)") + + # Derived endpoints + ENV_VARS+=("CLOUDSERVER_HOST=end2end-connector-s3api.default.svc.cluster.local") + ENV_VARS+=("CLOUDSERVER_ENDPOINT=http://end2end-connector-s3api.default.svc.cluster.local:80") + 
ENV_VARS+=("VAULT_ENDPOINT=http://end2end-management-vault-iam-admin-api:80") + ENV_VARS+=("VAULT_STS_ENDPOINT=http://end2end-connector-vault-sts-api:80") + ENV_VARS+=("BACKBEAT_API_ENDPOINT=http://end2end-management-backbeat-api.default.svc.cluster.local:80") + + # From k8s: MongoDB config + local cloudserver_secret + cloudserver_secret=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end \ + -o jsonpath="{.items[0].data.config\.json}" | base64 -d) + ENV_VARS+=("MONGO_DATABASE=$(echo "${cloudserver_secret}" | jq -r '.mongodb.database')") + ENV_VARS+=("MONGO_READ_PREFERENCE=$(echo "${cloudserver_secret}" | jq -r '.mongodb.readPreference')") + ENV_VARS+=("MONGO_REPLICA_SET_HOSTS=$(echo "${cloudserver_secret}" | jq -r '.mongodb.replicaSetHosts')") + ENV_VARS+=("MONGO_SHARD_COLLECTION=$(echo "${cloudserver_secret}" | jq -r '.mongodb.shardCollections')") + ENV_VARS+=("MONGO_WRITE_CONCERN=$(echo "${cloudserver_secret}" | jq -r '.mongodb.writeConcern')") + ENV_VARS+=("MONGO_AUTH_USERNAME=$(echo "${cloudserver_secret}" | jq -r '.mongodb.authCredentials.username')") + ENV_VARS+=("MONGO_AUTH_PASSWORD=$(echo "${cloudserver_secret}" | jq -r '.mongodb.authCredentials.password')") + + # From k8s: CRR account credentials + local crr_src crr_dst + crr_src=$(get_env_var CRR_SOURCE_ACCOUNT_NAME) + crr_dst=$(get_env_var CRR_DESTINATION_ACCOUNT_NAME) + local src_ak src_sk src_st src_id dst_ak dst_sk dst_st dst_id + src_ak=$(kubectl get secret "end2end-account-${crr_src}" -o jsonpath='{.data.AccessKeyId}' | base64 -d) + src_sk=$(kubectl get secret "end2end-account-${crr_src}" -o jsonpath='{.data.SecretAccessKey}' | base64 -d) + src_st=$(kubectl get secret "end2end-account-${crr_src}" -o jsonpath='{.data.SessionToken}' | base64 -d) + src_id=$(kubectl get secret "end2end-account-${crr_src}" -o jsonpath='{.data.AccountId}' | base64 -d) + dst_ak=$(kubectl get secret "end2end-account-${crr_dst}" -o jsonpath='{.data.AccessKeyId}' | 
base64 -d) + dst_sk=$(kubectl get secret "end2end-account-${crr_dst}" -o jsonpath='{.data.SecretAccessKey}' | base64 -d) + dst_st=$(kubectl get secret "end2end-account-${crr_dst}" -o jsonpath='{.data.SessionToken}' | base64 -d) + dst_id=$(kubectl get secret "end2end-account-${crr_dst}" -o jsonpath='{.data.AccountId}' | base64 -d) + ENV_VARS+=("CRR_SOURCE_INFO={\"AccessKeyId\":\"${src_ak}\",\"SecretAccessKey\":\"${src_sk}\",\"SessionToken\":\"${src_st}\",\"AccountId\":\"${src_id}\"}") + ENV_VARS+=("CRR_DESTINATION_INFO={\"AccessKeyId\":\"${dst_ak}\",\"SecretAccessKey\":\"${dst_sk}\",\"SessionToken\":\"${dst_st}\",\"AccountId\":\"${dst_id}\"}") + + # From k8s: Zenko account session token + ENV_VARS+=("ZENKO_SESSION_TOKEN=$(kubectl get secret end2end-account-zenko -o jsonpath='{.data.SessionToken}' | base64 -d)") +} + +# ============================================================================= +# MAIN +# ============================================================================= +case "$SUITE" in + common) + load_common + ;; + ctst) + load_ctst + ;; + e2e) + load_e2e + ;; + *) + echo "Usage: source load-config.sh [common|ctst|e2e]" >&2 + exit 1 + ;; +esac diff --git a/.github/scripts/end2end/run-e2e-ctst.sh b/.github/scripts/end2end/run-e2e-ctst.sh index f69d9f59dc..f09650e1d8 100755 --- a/.github/scripts/end2end/run-e2e-ctst.sh +++ b/.github/scripts/end2end/run-e2e-ctst.sh @@ -7,6 +7,10 @@ set -exu # run-e2e-ctst.sh "@PreMerge and not @PRA" # run-e2e-ctst.sh "@PRA" +# Load environment configuration +DIR=$(dirname "$0") +source "$DIR/load-config.sh" ctst + TAGS=${1:?'Error: TAGS argument is required (e.g., "@PreMerge", "@PRA")'} ZENKO_NAME="end2end" PARALLEL_RUNS=${PARALLEL_RUNS:-$(( ( $(nproc) + 1 ) / 2 ))} @@ -14,131 +18,12 @@ PARALLEL_RUNS=${PARALLEL_RUNS:-$(( ( $(nproc) + 1 ) / 2 ))} # Zenko Version VERSION=$(cat ../../../VERSION | grep -Po 'VERSION="\K[^"]*') -# Zenko Environment -ZENKO_ACCOUNT_NAME="zenko-ctst" -ADMIN_ACCESS_KEY_ID=$(kubectl get secret 
end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) -ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) -ADMIN_PRA_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) -ADMIN_PRA_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) -STORAGE_MANAGER_USER_NAME="ctst_storage_manager" -STORAGE_ACCOUNT_OWNER_USER_NAME="ctst_storage_account_owner" -DATA_CONSUMER_USER_NAME="ctst_data_consumer" -DATA_ACCESSOR_USER_NAME="ctst_data_accessor" -VAULT_AUTH_HOST="${ZENKO_NAME}-connector-vault-auth-api.default.svc.cluster.local" -ZENKO_PORT="80" -KEYCLOAK_TEST_USER=${OIDC_USERNAME} -KEYCLOAK_TEST_PASSWORD=${OIDC_PASSWORD} -KEYCLOAK_TEST_HOST=${OIDC_HOST} -KEYCLOAK_TEST_PORT="80" -KEYCLOAK_TEST_REALM_NAME=${OIDC_REALM} -KEYCLOAK_TEST_CLIENT_ID=${OIDC_CLIENT_ID} -KEYCLOAK_TEST_GRANT_TYPE="password" - -# get Zenko service users credentials -BACKBEAT_LCBP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcbp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-bp-1\.json}' | base64 -d) -BACKBEAT_LCC_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcc-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-conductor-1\.json}' | base64 -d) -BACKBEAT_LCOP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcop-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-op-1\.json}' | base64 -d) -BACKBEAT_QP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-qp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-qp-1\.json}' | base64 -d) -SORBET_FWD_2_ACCESSKEY=$(kubectl get secret -l 
app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.accessKey}' | base64 -d) -SORBET_FWD_2_SECRETKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.secretKey}' | base64 -d) -SERVICE_USERS_CREDENTIALS=$(echo '{"backbeat-lifecycle-bp-1":'${BACKBEAT_LCBP_1_CREDS}',"backbeat-lifecycle-conductor-1":'${BACKBEAT_LCC_1_CREDS}',"backbeat-lifecycle-op-1":'${BACKBEAT_LCOP_1_CREDS}',"backbeat-qp-1":'${BACKBEAT_QP_1_CREDS}',"sorbet-fwd-2":{"accessKey":"'${SORBET_FWD_2_ACCESSKEY}'","secretKey":"'${SORBET_FWD_2_SECRETKEY}'"}}' | jq -R) - -# Get KAFKA topics for sorbet -KAFKA_DEAD_LETTER_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \ - -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-dead-letter-topic"' | cut -d "\"" -f 2) - -KAFKA_OBJECT_TASK_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \ - -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-object-task-topic"' | cut -d "\"" -f 2) - -KAFKA_GC_REQUEST_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \ - -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-gc-request-topic"' | cut -d "\"" -f 2) - -DR_ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) -DR_ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) - -# Extracting kafka host from bacbeat's config -KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \ - -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts) 
-KAFKA_HOST_PORT=${KAFKA_HOST_PORT:1:-1} -KAFKA_PORT=${KAFKA_HOST_PORT#*:} - -KAFKA_AUTH_HOST="end2end-base-queue-auth-0" -KAFKA_AUTH_HOST_PORT="$KAFKA_AUTH_HOST:$KAFKA_PORT" - -TIME_PROGRESSION_FACTOR=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath="{.metadata.annotations.zenko\.io/time-progression-factor}") -INSTANCE_ID=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.status.instanceID}') - -# Azure archive tests -AZURE_ARCHIVE_ACCESS_TIER="Hot" -AZURE_ARCHIVE_MANIFEST_ACCESS_TIER="Hot" - -BACKBEAT_API_HOST=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .backbeat.host) -BACKBEAT_API_HOST=${BACKBEAT_API_HOST:1:-1} -BACKBEAT_API_PORT=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .backbeat.port) - -KAFKA_CLEANER_INTERVAL=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.kafkaCleaner.interval}') -SORBETD_RESTORE_TIMEOUT=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.sorbet.server.azure.restoreTimeout}') - -# Utilization service -UTILIZATION_SERVICE_HOST=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.scuba.api.ingress.hostname}') -UTILIZATION_SERVICE_PORT="80" - # Setting CTST world params WORLD_PARAMETERS="$(jq -c <('accountName') || - world.parameters.AccountName); + process.env.ZENKO_ACCOUNT_NAME); world.resetCommand(); world.addCommandParameter({ bucket: bucketName }); const createdObjects = world.getCreatedObjects(); @@ -125,14 +125,14 @@ async function createBucket(world: Zenko, versioning: string, bucketName: string Given('a {string} bucket with dot', async function (this: Zenko, versioning: string) { const preName = this.getSaved('accountName') || - this.parameters.AccountName || Constants.ACCOUNT_NAME; + process.env.ZENKO_ACCOUNT_NAME || Constants.ACCOUNT_NAME; await createBucket(this, 
versioning, `${preName}.${Constants.BUCKET_NAME_TEST}${Utils.randomString()}`.toLocaleLowerCase()); }); Given('a {string} bucket', async function (this: Zenko, versioning: string) { const preName = this.getSaved('accountName') || - this.parameters.AccountName || Constants.ACCOUNT_NAME; + process.env.ZENKO_ACCOUNT_NAME || Constants.ACCOUNT_NAME; await createBucket(this, versioning, `${preName}${Constants.BUCKET_NAME_TEST}${Utils.randomString()}`.toLocaleLowerCase()); }); @@ -298,7 +298,7 @@ Then('i {string} be able to add user metadata to object {string}', Then('kafka consumed messages should not take too much place on disk', { timeout: -1 }, async function (this: Zenko) { - const kfkcIntervalSeconds = parseInt(this.parameters.KafkaCleanerInterval); + const kfkcIntervalSeconds = parseInt(process.env.KAFKA_CLEANER_INTERVAL); const checkInterval = kfkcIntervalSeconds * (1000 + 5000); const timeoutID = setTimeout(() => { @@ -307,9 +307,9 @@ Then('kafka consumed messages should not take too much place on disk', { timeout try { const ignoredTopics = ['dead-letter']; - const kafkaAdmin = new Kafka({ brokers: [this.parameters.KafkaHosts] }).admin(); + const kafkaAdmin = new Kafka({ brokers: [process.env.KAFKA_HOST_PORT] }).admin(); const topics: string[] = (await kafkaAdmin.listTopics()) - .filter(t => (t.includes(this.parameters.InstanceID) && + .filter(t => (t.includes(process.env.ZENKO_INSTANCE_ID) && !ignoredTopics.some(e => t.includes(e)))); const previousOffsets = await getTopicsOffsets(topics, kafkaAdmin); diff --git a/tests/ctst/common/hooks.ts b/tests/ctst/common/hooks.ts index acf621a8a1..feab9f709b 100644 --- a/tests/ctst/common/hooks.ts +++ b/tests/ctst/common/hooks.ts @@ -62,8 +62,8 @@ Before({ tags: '@UtilizationAPI', timeout: 1200000 }, async function (scenarioOp After(async function (this: Zenko, results) { // Reset any configuration set on the endpoint (ssl, port) - CacheHelper.parameters.ssl = this.parameters.ssl; - CacheHelper.parameters.port = 
this.parameters.port; + CacheHelper.parameters.ssl = process.env.SSL === 'true'; + CacheHelper.parameters.port = process.env.ZENKO_PORT; if (results.result?.status === 'FAILED') { this.logger.warn('bucket was not cleaned for test', { bucket: this.getSaved('bucketName'), diff --git a/tests/ctst/features/crrReplicationS3utils.feature b/tests/ctst/features/crrReplicationS3utils.feature index e9d1644068..399baab861 100644 --- a/tests/ctst/features/crrReplicationS3utils.feature +++ b/tests/ctst/features/crrReplicationS3utils.feature @@ -13,6 +13,7 @@ Feature: Replication @2.12.0 @PreMerge + @yaya @ReplicationTest Scenario Outline: Replicate objects created before creating the replication rule Given an existing bucket "source-bucket-1" "with" versioning, "without" ObjectLock "without" retention mode @@ -35,4 +36,4 @@ Feature: Replication When the destination bucket on the location is created again And the job to replicate existing objects with status "FAILED" is executed Then the object replication should "succeed" within 300 seconds - And the replicated object should be the same as the source object \ No newline at end of file + And the replicated object should be the same as the source object diff --git a/tests/ctst/run-ctst-locally.sh b/tests/ctst/run-ctst-locally.sh index c3b86264b8..105463bc12 100755 --- a/tests/ctst/run-ctst-locally.sh +++ b/tests/ctst/run-ctst-locally.sh @@ -10,21 +10,14 @@ set -exu CUCUMBER_TAGS="$1" IMAGE_NAME="${2:-ghcr.io/scality/zenko/zenko-e2e-ctst:ctst_codespace_setup}" +# Load unified test configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "${SCRIPT_DIR}/../../.github/scripts/end2end/load-config.sh" ctst + # Version is used with a Before Hook (in ctst/common/hooks.ts) to skip # certain tests based on their @version tag. 
VERSION=$(cat ../../VERSION | grep -Po 'VERSION="\K[^"]*') POD_NAME="ctst-end2end" -WORLD_PARAMETERS="$(jq -c < { // lisintg all blobs in the container const blobs = await AzureHelper.listBlobs( - world.parameters.AzureArchiveContainer, - getAzureCreds(world), + process.env.AZURE_ARCHIVE_BUCKET_NAME, + getAzureCreds(), ); // filtering the list of blobs only leaving the manifests const manifests = blobs.filter(blob => blob.name.includes('.json.')); for (let i = 0; i < manifests.length; i++) { // downloading the manifest const manifestBuffer = await AzureHelper.downloadBlob( - world.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, manifests[i].name, - getAzureCreds(world), + getAzureCreds(), ); const { ok, result } = safeJsonParse(manifestBuffer.toString()); if (!ok) { @@ -175,21 +169,21 @@ export async function cleanAzureContainer( ); if (tarName) { await AzureHelper.deleteBlob( - world.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, tarName, - getAzureCreds(world), + getAzureCreds(), ); await AzureHelper.deleteBlob( - world.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, `rehydrate/${tarName}`, - getAzureCreds(world), + getAzureCreds(), ); } if (manifestName) { await AzureHelper.deleteBlob( - world.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, manifestName, - getAzureCreds(world), + getAzureCreds(), ); } currentKey = iterator.next(); @@ -207,11 +201,11 @@ Then('manifest access tier should be valid for object {string}', async function assert(manifestName); // manifest access tier const manifestProperties = await AzureHelper.getBlobProperties( - this.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, manifestName, - getAzureCreds(this), + getAzureCreds(), ); - assert.strictEqual(manifestProperties.accessTier, this.parameters.AzureArchiveManifestTier); + assert.strictEqual(manifestProperties.accessTier, 
Zenko.AZURE_ARCHIVE_MANIFEST_ACCESS_TIER); }); Then('tar access tier should be valid for object {string}', async function (this: Zenko, objectName: string) { @@ -225,11 +219,11 @@ Then('tar access tier should be valid for object {string}', async function (this assert(tarName); // manifest access tier const packProperties = await AzureHelper.getBlobProperties( - this.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_BUCKET_NAME, tarName, - getAzureCreds(this), + getAzureCreds(), ); - assert.strictEqual(packProperties.accessTier, this.parameters.AzureArchiveAccessTier); + assert.strictEqual(packProperties.accessTier, Zenko.AZURE_ARCHIVE_ACCESS_TIER); }); Then('manifest and tar containing object {string} should exist', async function (this: Zenko, objectName: string) { @@ -303,10 +297,10 @@ Then('blob for object {string} must be rehydrated', const tarName = await isObjectRehydrated(this, objectName); assert(tarName); await AzureHelper.sendBlobCreatedEventToQueue( - this.parameters.AzureArchiveQueue, - this.parameters.AzureArchiveContainer, + process.env.AZURE_ARCHIVE_QUEUE_NAME, + process.env.AZURE_ARCHIVE_BUCKET_NAME, `rehydrate/${tarName}`, - getAzureCreds(this), + getAzureCreds(), ); }); @@ -321,7 +315,7 @@ Then('blob for object {string} fails to rehydrate', const tarName = await isObjectRehydrated(this, objectName); // wait for restore to fail and end up in dead letter queue - const restoreTimeoutSeconds = parseInt(this.parameters.SorbetdRestoreTimeout); + const restoreTimeoutSeconds = parseInt(process.env.SORBETD_RESTORE_TIMEOUT); await Utils.sleep(restoreTimeoutSeconds * 1000 + 1000); assert(tarName); // restoreTimeout is set to 30s in the config @@ -361,10 +355,10 @@ Then('the storage class of object {string} must stay {string} for {int} seconds' When('i run sorbetctl to retry failed restore for {string} location', { timeout: 10 * 60 * 1000 }, async function (this: Zenko, location: string) { const command = `/ctst/sorbetctl forward list failed 
--trigger-retry --skip-invalid \ - --kafka-dead-letter-topic=${this.parameters.KafkaDeadLetterQueueTopic} \ - --kafka-object-task-topic=${this.parameters.KafkaObjectTaskTopic} \ - --kafka-gc-request-topic=${this.parameters.KafkaGCRequestTopic} \ - --kafka-brokers ${this.parameters.KafkaHosts}`; + --kafka-dead-letter-topic=${process.env.KAFKA_DEAD_LETTER_TOPIC} \ + --kafka-object-task-topic=${process.env.KAFKA_OBJECT_TASK_TOPIC} \ + --kafka-gc-request-topic=${process.env.KAFKA_GC_REQUEST_TOPIC} \ + --kafka-brokers ${process.env.KAFKA_HOST_PORT}`; try { this.logger.debug('Running command', { command, location }); const result = await util.promisify(exec)(command); @@ -377,9 +371,9 @@ When('i run sorbetctl to retry failed restore for {string} location', } }); -When('i wait for {int} days', { timeout: 10 * 60 * 1000 }, async function (this: Zenko, days: number) { - const realTimeDay = days * 24 * 60 * 60 * 1000 / - (this.parameters.TimeProgressionFactor > 1 ? this.parameters.TimeProgressionFactor : 1); +When('i wait for {int} days', { timeout: 10 * 60 * 1000 }, async (days: number) => { + const factor = Math.max(1, Number(process.env.TIME_PROGRESSION_FACTOR) || 1); + const realTimeDay = days * 24 * 60 * 60 * 1000 / factor; await Utils.sleep(realTimeDay); }); @@ -402,13 +396,14 @@ Then('object {string} should expire in {int} days', async function (this: Zenko, const expiryDate = new Date(expireResDate[1]).getTime(); const lastModified = new Date(head.LastModified).getTime(); const diff = (expiryDate - lastModified) / 1000 / 86400; - const realTimeDays = days / (this.parameters.TimeProgressionFactor > 1 ? 
this.parameters.TimeProgressionFactor : 1); + const factor = Math.max(1, Number(process.env.TIME_PROGRESSION_FACTOR) || 1); + const realTimeDays = days / factor; assert.ok(diff >= realTimeDays && diff < realTimeDays + 0.005, - `Expected ${realTimeDays} but got ${diff} ; ${this.parameters.TimeProgressionFactor}`); + `Expected ${realTimeDays} but got ${diff} ; ${process.env.TIME_PROGRESSION_FACTOR}`); }); Given('that lifecycle is {string} for the {string} location', - async function (this: Zenko, status: string, location: string) { + async (_this: Zenko, status: string, location: string) => { let path: string; if (status === 'paused') { path = `/_/lifecycle/pause/${location}`; @@ -416,8 +411,8 @@ Given('that lifecycle is {string} for the {string} location', path = `/_/lifecycle/resume/${location}`; } const options = { - hostname: this.parameters.BackbeatApiHost, - port: this.parameters.BackbeatApiPort, + hostname: process.env.BACKBEAT_API_HOST, + port: process.env.BACKBEAT_API_PORT, method: 'POST', path, }; @@ -430,21 +425,21 @@ Given('an azure archive location {string}', { timeout: 15 * 60 * 1000 }, name: locationName, locationType: 'location-azure-archive-v1', details: { - endpoint: AZURE_STORAGE_BLOB_URL, - bucketName: this.parameters.AzureArchiveContainer, + endpoint: process.env.AZURE_BACKEND_ENDPOINT || 'http://127.0.0.1:10000/devstoreaccount1', + bucketName: process.env.AZURE_ARCHIVE_BUCKET_NAME, queue: { type: 'location-azure-storage-queue-v1', - queueName: this.parameters.AzureArchiveQueue, - endpoint: AZURE_STORAGE_QUEUE_URL, + queueName: process.env.AZURE_ARCHIVE_QUEUE_NAME, + endpoint: process.env.AZURE_BACKEND_QUEUE_ENDPOINT || 'http://127.0.0.1:10001/devstoreaccount1', }, auth: { type: 'location-azure-shared-key', - accountName: this.parameters.AzureAccountName, - accountKey: this.parameters.AzureAccountKey, + accountName: process.env.AZURE_ACCOUNT_NAME, + accountKey: process.env.AZURE_SECRET_KEY, }, }, }; - const result = await 
this.managementAPIRequest('POST', `/config/${this.parameters.InstanceID}/location`, {}, + const result = await this.managementAPIRequest('POST', `/config/${process.env.ZENKO_INSTANCE_ID}/location`, {}, locationConfig); assert.strictEqual(result.statusCode, 201); this.addToSaved('locationName', locationName); @@ -454,7 +449,7 @@ Given('an azure archive location {string}', { timeout: 15 * 60 * 1000 }, When('i change azure archive location {string} container target', { timeout: 15 * 60 * 1000 }, async function (this: Zenko, locationName: string) { - const result = await this.managementAPIRequest('GET', `/config/overlay/view/${this.parameters.InstanceID}`); + const result = await this.managementAPIRequest('GET', `/config/overlay/view/${process.env.ZENKO_INSTANCE_ID}`); if ('err' in result) { assert.ifError(result.err); } else { @@ -463,17 +458,17 @@ When('i change azure archive location {string} container target', { timeout: 15 const locationConfig = locations[locationName] as Record; const details = locationConfig.details as { bucketName: string, auth: { accountKey: string } }; const auth = details.auth; - details.bucketName = this.parameters.AzureArchiveContainer2; - auth.accountKey = this.parameters.AzureAccountKey; + details.bucketName = process.env.AZURE_ARCHIVE_BUCKET_NAME_2; + auth.accountKey = process.env.AZURE_SECRET_KEY; const putResult = await this.managementAPIRequest('PUT', - `/config/${this.parameters.InstanceID}/location/${locationName}`, + `/config/${process.env.ZENKO_INSTANCE_ID}/location/${locationName}`, {}, locationConfig); if ('err' in putResult) { assert.ifError(putResult.err); } else { assert.strictEqual((putResult.data as { details: { bucketName: string } }).details.bucketName, - this.parameters.AzureArchiveContainer2); + process.env.AZURE_ARCHIVE_BUCKET_NAME_2); assert.strictEqual(putResult.statusCode, 200); } } @@ -482,7 +477,7 @@ When('i change azure archive location {string} container target', { timeout: 15 }); Then('i can get the {string} 
location details', async function (this: Zenko, locationName: string) { - const result = await this.managementAPIRequest('GET', `/config/overlay/view/${this.parameters.InstanceID}`); + const result = await this.managementAPIRequest('GET', `/config/overlay/view/${process.env.ZENKO_INSTANCE_ID}`); if ('err' in result) { assert.ifError(result.err); } diff --git a/tests/ctst/steps/cloudserverAuth.ts b/tests/ctst/steps/cloudserverAuth.ts index a75a104be4..c658fcb989 100644 --- a/tests/ctst/steps/cloudserverAuth.ts +++ b/tests/ctst/steps/cloudserverAuth.ts @@ -36,7 +36,7 @@ When('the user tries to perform CreateBucket', async function (this: Zenko) { this.resetCommand(); this.useSavedIdentity(); const preName = this.getSaved('accountName') || - this.parameters.AccountName || Constants.ACCOUNT_NAME; + process.env.ZENKO_ACCOUNT_NAME || Constants.ACCOUNT_NAME; const usedBucketName = `${preName}${Constants.BUCKET_NAME_TEST}${Utils.randomString()}`.toLocaleLowerCase(); this.addToSaved('bucketName', usedBucketName); this.addCommandParameter({ bucket: usedBucketName }); diff --git a/tests/ctst/steps/iam-policies/common.ts b/tests/ctst/steps/iam-policies/common.ts index 24dc31cfb5..45a5b9ec7d 100644 --- a/tests/ctst/steps/iam-policies/common.ts +++ b/tests/ctst/steps/iam-policies/common.ts @@ -20,22 +20,18 @@ When('the user tries to perform vault auth {string}', async function (this: Zenk + 'Make sure the `IAMSession` and `AssumedSession` world parameter are defined.'); } - if (!this.parameters.VaultAuthHost) { - throw new Error('Vault auth endpoint is not set. Make sure the `VaultAuthHost` world parameter is defined.'); - } - const vaultAuthClientOptions: ClientOptions = { AccessKey: userCredentials.accessKeyId, SecretKey: userCredentials.secretAccessKey, SessionToken: userCredentials.sessionToken, - ip: this.parameters.VaultAuthHost, - ssl: CacheHelper.parameters ? 
CacheHelper.parameters.ssl as boolean : undefined, + ip: 'end2end-connector-vault-auth-api.default.svc.cluster.local', + ssl: CacheHelper.parameters.ssl, }; switch (action) { case 'GetAccountInfo': this.setResult(await VaultAuth.getAccounts([ - this.getSaved('accountNameForScenario') || this.parameters.AccountName, + this.getSaved('accountNameForScenario') || process.env.ZENKO_ACCOUNT_NAME, ], null, null, { // @ts-expect-error accountNames is not generated by CTST yet accountNames: true, diff --git a/tests/ctst/steps/notifications.ts b/tests/ctst/steps/notifications.ts index 69f1176c11..6ba77e9457 100644 --- a/tests/ctst/steps/notifications.ts +++ b/tests/ctst/steps/notifications.ts @@ -116,32 +116,32 @@ function setNotificationDestination(world: Zenko, destination: string, topic: st Given('one notification destination', function (this: Zenko) { setNotificationDestination( this, - this.parameters.NotificationDestination, - this.parameters.NotificationDestinationTopic, - this.parameters.KafkaHosts, + process.env.NOTIF_DEST_NAME, + process.env.NOTIF_DEST_TOPIC, + process.env.KAFKA_HOST_PORT, ); }); Given('one authenticated notification destination', function (this: Zenko) { setNotificationDestination( this, - this.parameters.NotificationDestinationAuth, - this.parameters.NotificationDestinationTopicAuth, - this.parameters.KafkaAuthHosts, + process.env.NOTIF_AUTH_DEST_NAME, + process.env.NOTIF_AUTH_DEST_TOPIC, + process.env.KAFKA_AUTH_HOST_PORT, ); }); Given('two notification destinations', function (this: Zenko) { const notificationDestinations = []; notificationDestinations.push({ - destinationName: this.parameters.NotificationDestination, - topic: this.parameters.NotificationDestinationTopic, - hosts: this.parameters.KafkaHosts, + destinationName: process.env.NOTIF_DEST_NAME, + topic: process.env.NOTIF_DEST_TOPIC, + hosts: process.env.KAFKA_HOST_PORT, }); notificationDestinations.push({ - destinationName: this.parameters.NotificationDestinationAlt, - topic: 
this.parameters.NotificationDestinationTopicAlt, - hosts: this.parameters.KafkaHosts, + destinationName: process.env.NOTIF_ALT_DEST_NAME, + topic: process.env.NOTIF_ALT_DEST_TOPIC, + hosts: process.env.KAFKA_HOST_PORT, }); this.addToSaved('notificationDestinations', notificationDestinations); }); diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts index 4542658ce0..7a2f846cd0 100644 --- a/tests/ctst/steps/pra.ts +++ b/tests/ctst/steps/pra.ts @@ -70,7 +70,7 @@ async function installPRA(world: Zenko, sinkS3Endpoint = 'http://s3.zenko.local' sourceZenkoNamespace: 'default', sourceS3Endpoint: 'http://s3.zenko.local', sinkS3Endpoint, - prometheusService: world.parameters.PrometheusService, + prometheusService: Zenko.PROMETHEUS_SERVICE, prometheusHostname: 'prom.dr.zenko.local', prometheusExternalIpsDiscovery: true, forceRotateServiceCredentials: (CacheHelper.savedAcrossTests[Zenko.PRA_INSTALL_COUNT_KEY] as number) > 0, @@ -328,9 +328,9 @@ Then('the kafka DR volume exists', { timeout: volumeTimeout + 2000 }, async func assert(volumeParsed.result!['volume phase'] === 'Bound'); }); -Then('prometheus should scrap federated metrics from DR sink', { timeout: 180000 }, async function (this: Zenko) { +Then('prometheus should scrap federated metrics from DR sink', { timeout: 180000 }, async () => { const prom = new PrometheusDriver({ - endpoint: `http://${this.parameters.PrometheusService}:9090`, + endpoint: `http://${Zenko.PROMETHEUS_SERVICE}:9090`, baseURL: '/api/v1', }); diff --git a/tests/ctst/steps/replication.ts b/tests/ctst/steps/replication.ts index 7e0311355a..56af088d3f 100644 --- a/tests/ctst/steps/replication.ts +++ b/tests/ctst/steps/replication.ts @@ -26,7 +26,7 @@ When('the job to replicate existing objects with status {string} is executed', const s3utilsVersion = zenkoVersion.spec.versions.s3utils; const credentials = Identity.getCredentialsForIdentity( IdentityEnum.ACCOUNT, - this.parameters.AccountName + process.env.ZENKO_ACCOUNT_NAME ); const 
podManifest = { apiVersion: 'v1', diff --git a/tests/ctst/steps/utilization/utilizationAPI.ts b/tests/ctst/steps/utilization/utilizationAPI.ts index f76cef9a72..467a9ba541 100644 --- a/tests/ctst/steps/utilization/utilizationAPI.ts +++ b/tests/ctst/steps/utilization/utilizationAPI.ts @@ -27,8 +27,8 @@ When('the user retrieves utilization metrics using scubaclient for metric type { this.addToSaved('metricType', metricType); const client = new ScubaClient({ - port: parseInt(this.parameters.UtilizationServicePort), - host: this.parameters.UtilizationServiceHost, + port: Zenko.UTILIZATION_SERVICE_PORT, + host: process.env.UTILIZATION_SERVICE_HOST, useHttps: false, auth: { awsV4: { diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts index ceef07ece0..053fbf8a89 100644 --- a/tests/ctst/steps/utils/utils.ts +++ b/tests/ctst/steps/utils/utils.ts @@ -170,7 +170,7 @@ async function createBucketWithConfiguration( retentionMode?: string) { world.resetCommand(); const preName = world.getSaved('accountName') || - world.parameters.AccountName || Constants.ACCOUNT_NAME; + process.env.ZENKO_ACCOUNT_NAME || Constants.ACCOUNT_NAME; const usedBucketName = bucketName || `${preName}${Constants.BUCKET_NAME_TEST}${Utils.randomString()}`.toLocaleLowerCase(); world.addToSaved('bucketName', usedBucketName); diff --git a/tests/ctst/steps/website/website.ts b/tests/ctst/steps/website/website.ts index 023ec014d2..9047b20f43 100644 --- a/tests/ctst/steps/website/website.ts +++ b/tests/ctst/steps/website/website.ts @@ -58,7 +58,7 @@ When('the user creates an S3 Bucket policy granting public read access', async f Then('the user should be able to load the index.html file from the {string} endpoint', async function (this: Zenko, endpoint: string) { - const baseUrl = this.parameters.ssl === false ? 'http://' : 'https://'; + const baseUrl = process.env.SSL === 'false' ? 
'http://' : 'https://'; // The ingress may take some time to be ready (<60s) const uri = `${baseUrl}${this.getSaved('bucketName')}.${endpoint}`; let response; diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index f66a04158a..c9085ce628 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -44,56 +44,56 @@ export enum EntityType { } export interface ZenkoWorldParameters extends ClientOptions { - AccountName: string; - AccountAccessKey: string; - AccountSecretKey: string; - DRAdminAccessKey?: string; - DRAdminSecretKey?: string; - DRSubdomain?: string; - VaultAuthHost: string; - NotificationDestination: string; - NotificationDestinationTopic: string; - NotificationDestinationAlt: string; - NotificationDestinationTopicAlt: string; - NotificationDestinationAuth: string; - NotificationDestinationTopicAuth: string; - NotificationDestinationAuthUsername: string; + // AccountName: string; + // AccountAccessKey: string; + // AccountSecretKey: string; + // DRAdminAccessKey?: string; + // DRAdminSecretKey?: string; + // DRSubdomain?: string; + // VaultAuthHost: string; + // NotificationDestination: string; + // NotificationDestinationTopic: string; + // NotificationDestinationAlt: string; + // NotificationDestinationTopicAlt: string; + // NotificationDestinationAuth: string; + // NotificationDestinationTopicAuth: string; + // NotificationDestinationAuthUsername: string; NotificationDestinationAuthPassword: string; KafkaExternalIps: string; - KafkaHosts: string; - KafkaAuthHosts: string; - PrometheusService: string; - KeycloakUsername: string; - KeycloakPassword: string; - KeycloakHost: string; - KeycloakPort: string; - KeycloakRealm: string; - KeycloakClientId: string; - KeycloakGrantType: string; - StorageManagerUsername: string; - StorageAccountOwnerUsername: string; - DataConsumerUsername: string; - DataAccessorUsername: string; - ServiceUsersCredentials: string; - KeycloakTestPassword: string; - AzureAccountName: string; - 
AzureAccountKey: string; - AzureArchiveContainer: string; - AzureArchiveContainer2: string; - AzureArchiveAccessTier: string; - AzureArchiveManifestTier: string; - AzureArchiveQueue: string; - TimeProgressionFactor: number; - KafkaDeadLetterQueueTopic: string; - KafkaObjectTaskTopic: string; - KafkaGCRequestTopic: string; - InstanceID: string; - BackbeatApiHost: string; - BackbeatApiPort: string; - KafkaCleanerInterval: string; - SorbetdRestoreTimeout: string; - UtilizationServiceHost: string; - UtilizationServicePort: string; + // KafkaHosts: string; + // KafkaAuthHosts: string; + // PrometheusService: string; + // KeycloakUsername: string; + // KeycloakPassword: string; + // KeycloakHost: string; + // KeycloakPort: string; + // KeycloakRealm: string; + // KeycloakClientId: string; + // KeycloakGrantType: string; + // KeycloakTestPassword: string; + // StorageManagerUsername: string; + // StorageAccountOwnerUsername: string; + // DataConsumerUsername: string; + // DataAccessorUsername: string; + // ServiceUsersCredentials: string; + // AzureAccountName: string; + // AzureAccountKey: string; + // AzureArchiveContainer: string; + // AzureArchiveContainer2: string; + // AzureArchiveAccessTier: string; + // AzureArchiveManifestTier: string; + // AzureArchiveQueue: string; + // TimeProgressionFactor: number; + // KafkaDeadLetterQueueTopic: string; + // KafkaObjectTaskTopic: string; + // KafkaGCRequestTopic: string; + // InstanceID: string; + // BackbeatApiHost: string; + // BackbeatApiPort: string; + // KafkaCleanerInterval: string; + // SorbetdRestoreTimeout: string; + // UtilizationServiceHost: string; + // UtilizationServicePort: string; [key: string]: unknown; } @@ -127,6 +127,10 @@ export default class Zenko extends World { static readonly PRIMARY_SITE_NAME = 'admin'; static readonly SECONDARY_SITE_NAME = 'dradmin'; static readonly PRA_INSTALL_COUNT_KEY = 'praInstallCount'; + static readonly AZURE_ARCHIVE_ACCESS_TIER = 'Hot'; + static readonly 
AZURE_ARCHIVE_MANIFEST_ACCESS_TIER = 'Hot'; + static readonly UTILIZATION_SERVICE_PORT = 80; + static readonly PROMETHEUS_SERVICE = `${process.env.PROMETHEUS_NAME}-operated.default.svc.cluster.local`; /** * @constructor @@ -134,47 +138,52 @@ export default class Zenko extends World { */ constructor(options: IWorldOptions) { super(options); + Logger.createLogger(this); // store service users credentials from world parameters - if (this.parameters.ServiceUsersCredentials) { + if (process.env.SERVICE_USERS_CREDENTIALS) { const serviceUserCredentials = - JSON.parse(this.parameters.ServiceUsersCredentials) as Record; + JSON.parse(process.env.SERVICE_USERS_CREDENTIALS) as Record; for (const serviceUserName in serviceUserCredentials) { - if (!Identity.hasIdentity(IdentityEnum.SERVICE_USER, serviceUserName, this.parameters.AccountName)) { + if (!Identity.hasIdentity(IdentityEnum.SERVICE_USER, serviceUserName, process.env.ZENKO_ACCOUNT_NAME)) { Identity.addIdentity(IdentityEnum.SERVICE_USER, serviceUserName, { accessKeyId: serviceUserCredentials[serviceUserName].accessKey, secretAccessKey: serviceUserCredentials[serviceUserName].secretKey, - }, this.parameters.AccountName); + }, process.env.ZENKO_ACCOUNT_NAME); } } } // Workaround to be able to access global parameters in BeforeAll/AfterAll hooks + // Only cache specific parameters needed by cli-testing that aren't available as env vars CacheHelper.cacheParameters({ - ...this.parameters, + ssl: process.env.SSL === 'true', + port: process.env.ZENKO_PORT, + subdomain: process.env.SUBDOMAIN, }); CacheHelper.savedAcrossTests[Zenko.PRA_INSTALL_COUNT_KEY] = 0; - if (this.parameters.AccountName && !Identity.hasIdentity(IdentityEnum.ACCOUNT, this.parameters.AccountName)) { - Identity.addIdentity(IdentityEnum.ACCOUNT, this.parameters.AccountName, { - accessKeyId: this.parameters.AccountAccessKey, - secretAccessKey: this.parameters.AccountSecretKey, + if (process.env.ZENKO_ACCOUNT_NAME && + !Identity.hasIdentity(IdentityEnum.ACCOUNT, 
process.env.ZENKO_ACCOUNT_NAME)) { + Identity.addIdentity(IdentityEnum.ACCOUNT, process.env.ZENKO_ACCOUNT_NAME, { + accessKeyId: process.env.ACCOUNT_ACCESS_KEY, + secretAccessKey: process.env.ACCOUNT_SECRET_KEY, }); } - if (this.parameters.AccountName) { - Identity.useIdentity(IdentityEnum.ACCOUNT, this.parameters.AccountName); - Identity.defaultAccountName = this.parameters.AccountName; + if (process.env.ZENKO_ACCOUNT_NAME) { + Identity.useIdentity(IdentityEnum.ACCOUNT, process.env.ZENKO_ACCOUNT_NAME); + Identity.defaultAccountName = process.env.ZENKO_ACCOUNT_NAME; } - if (this.parameters.AdminAccessKey && this.parameters.AdminSecretKey && + if (process.env.ADMIN_ACCESS_KEY_ID && process.env.ADMIN_SECRET_ACCESS_KEY && !Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME)) { Identity.addIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME, { - accessKeyId: this.parameters.AdminAccessKey, - secretAccessKey: this.parameters.AdminSecretKey, - }, undefined, undefined, undefined, this.parameters.subdomain); + accessKeyId: process.env.ADMIN_ACCESS_KEY_ID, + secretAccessKey: process.env.ADMIN_SECRET_ACCESS_KEY, + }, undefined, undefined, undefined, process.env.SUBDOMAIN); Zenko.sites['source'] = { accountName: Identity.defaultAccountName, @@ -185,13 +194,13 @@ export default class Zenko extends World { if (this.needsSecondarySite()) { if (!Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME)) { Identity.addIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME, { - accessKeyId: this.parameters.DRAdminAccessKey!, - secretAccessKey: this.parameters.DRAdminSecretKey!, - }, undefined, undefined, undefined, this.parameters.DRSubdomain); + accessKeyId: process.env.DR_ADMIN_ACCESS_KEY_ID!, + secretAccessKey: process.env.DR_ADMIN_SECRET_ACCESS_KEY!, + }, undefined, undefined, undefined, process.env.DR_SUBDOMAIN); } Zenko.sites['sink'] = { - accountName: `dr${this.parameters.AccountName}`, + accountName: `dr${process.env.ZENKO_ACCOUNT_NAME}`, 
adminIdentityName: Zenko.SECONDARY_SITE_NAME, }; } @@ -202,7 +211,9 @@ export default class Zenko extends World { } private needsSecondarySite() { - return this.parameters.DRAdminAccessKey && this.parameters.DRAdminSecretKey && this.parameters.DRSubdomain; + return process.env.DR_ADMIN_ACCESS_KEY_ID && + process.env.DR_ADMIN_SECRET_ACCESS_KEY && + process.env.DR_SUBDOMAIN; } /** @@ -257,20 +268,20 @@ export default class Zenko extends World { await this.prepareIamUser(); break; case EntityType.STORAGE_MANAGER: - await this.prepareARWWI(this.parameters.StorageManagerUsername || 'storage_manager', - 'storage-manager-role', this.parameters.KeycloakTestPassword); + await this.prepareARWWI(process.env.STORAGE_MANAGER_USER_NAME, + 'storage-manager-role', process.env.KEYCLOAK_TEST_PASSWORD); break; case EntityType.STORAGE_ACCOUNT_OWNER: - await this.prepareARWWI(this.parameters.StorageAccountOwnerUsername || 'storage_account_owner', - 'storage-account-owner-role', this.parameters.KeycloakTestPassword); + await this.prepareARWWI(process.env.STORAGE_ACCOUNT_OWNER_USER_NAME, + 'storage-account-owner-role', process.env.KEYCLOAK_TEST_PASSWORD); break; case EntityType.DATA_CONSUMER: - await this.prepareARWWI(this.parameters.DataConsumerUsername || 'data_consumer', - 'data-consumer-role', this.parameters.KeycloakTestPassword); + await this.prepareARWWI(process.env.DATA_CONSUMER_USER_NAME, + 'data-consumer-role', process.env.KEYCLOAK_TEST_PASSWORD); break; case EntityType.DATA_ACCESSOR: - await this.prepareARWWI(this.parameters.DataAccessorUsername || 'data_accessor', - 'data-accessor-role', this.parameters.KeycloakTestPassword); + await this.prepareARWWI(process.env.DATA_ACCESSOR_USER_NAME, + 'data-accessor-role', process.env.KEYCLOAK_TEST_PASSWORD); break; case EntityType.ASSUME_ROLE_USER: await this.prepareAssumeRole(false); @@ -307,11 +318,11 @@ export default class Zenko extends World { const webIdentityToken = await this.getWebIdentityToken( ARWWIName, ARWWIPassword || 
'123', - this.parameters.KeycloakHost || 'keycloak.zenko.local', - this.parameters.KeycloakPort || '80', - `/auth/realms/${this.parameters.KeycloakRealm || 'zenko'}/protocol/openid-connect/token`, - this.parameters.KeycloakClientId || Constants.K_CLIENT, - this.parameters.KeycloakGrantType || 'password', + process.env.KEYCLOAK_TEST_HOST, + '80', + `/auth/realms/${process.env.KEYCLOAK_TEST_REALM_NAME}/protocol/openid-connect/token`, + process.env.KEYCLOAK_TEST_CLIENT_ID || Constants.K_CLIENT, + 'password', ); if (!webIdentityToken) { throw new Error('Error when trying to get a WebIdentity token.'); @@ -410,7 +421,7 @@ export default class Zenko extends World { clientId: string, grantType: string, ): Promise { - const baseUrl = this.parameters.ssl === false ? 'http://' : 'https://'; + const baseUrl = process.env.SSL === 'false' ? 'http://' : 'https://'; const data = qs.stringify({ username, password, @@ -712,10 +723,10 @@ export default class Zenko extends World { } } - const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!; + const accountName = this.sites['source']?.accountName || process.env.ZENKO_ACCOUNT_NAME!; const accountAccessKeys = Identity.getCredentialsForIdentity( IdentityEnum.ACCOUNT, this.sites['source']?.accountName - || CacheHelper.parameters.AccountName!) || { + || process.env.ZENKO_ACCOUNT_NAME!) || { accessKeyId: '', secretAccessKey: '', }; @@ -898,10 +909,10 @@ export default class Zenko extends World { const axiosInstance = axios.create(); axiosInstance.interceptors.request.use(interceptor); - const protocol = this.parameters.ssl === false ? 'http://' : 'https://'; + const protocol = process.env.SSL === 'false' ? 
'http://' : 'https://'; const axiosConfig: AxiosRequestConfig = { method, - url: `${protocol}s3.${this.parameters.subdomain + url: `${protocol}s3.${process.env.SUBDOMAIN || Constants.DEFAULT_SUBDOMAIN}${path}`, headers, data: payload, @@ -939,16 +950,16 @@ export default class Zenko extends World { payload: object | string = {}, ): Promise<{ statusCode: number; data: object } | { statusCode: number; err: unknown }> { const token = await this.getWebIdentityToken( - this.parameters.KeycloakUsername || 'storage_manager', - this.parameters.KeycloakPassword || '123', - this.parameters.KeycloakHost || 'keycloak.zenko.local', - this.parameters.KeycloakPort || '80', - `/auth/realms/${this.parameters.KeycloakRealm || 'zenko'}/protocol/openid-connect/token`, - this.parameters.KeycloakClientId || Constants.K_CLIENT, - this.parameters.KeycloakGrantType || 'password', + process.env.KEYCLOAK_TEST_USER, + process.env.KEYCLOAK_TEST_PASSWORD, + process.env.KEYCLOAK_TEST_HOST, + '80', + `/auth/realms/${process.env.KEYCLOAK_TEST_REALM_NAME || 'zenko'}/protocol/openid-connect/token`, + process.env.KEYCLOAK_TEST_CLIENT_ID || Constants.K_CLIENT, + 'password', ); const axiosInstance = axios.create(); - const protocol = this.parameters.ssl === false ? 'http://' : 'https://'; + const protocol = process.env.SSL === 'false' ? 
'http://' : 'https://'; // eslint-disable-next-line no-param-reassign headers = { ...headers, @@ -956,7 +967,7 @@ export default class Zenko extends World { }; const axiosConfig: AxiosRequestConfig = { method, - url: `${protocol}management.${this.parameters.subdomain || Constants.DEFAULT_SUBDOMAIN}/api/v1${path}`, + url: `${protocol}management.${process.env.SUBDOMAIN || Constants.DEFAULT_SUBDOMAIN}/api/v1${path}`, headers, data: payload, }; @@ -992,7 +1003,7 @@ export default class Zenko extends World { async addWebsiteEndpoint(this: Zenko, endpoint: string): Promise<{ statusCode: number; data: object } | { statusCode: number; err: unknown }> { return await this.managementAPIRequest('POST', - `/config/${this.parameters.InstanceID}/website/endpoint`, + `/config/${process.env.ZENKO_INSTANCE_ID}/website/endpoint`, { 'Content-Type': 'application/json', }, @@ -1002,7 +1013,7 @@ export default class Zenko extends World { async deleteLocation(this: Zenko, locationName: string): Promise<{ statusCode: number; data: object } | { statusCode: number; err: unknown }> { return await this.managementAPIRequest('DELETE', - `/config/${this.parameters.InstanceID}/location/${locationName}`); + `/config/${process.env.ZENKO_INSTANCE_ID}/location/${locationName}`); } saveCreatedObject(objectName: string, versionId: string) { diff --git a/tests/ctst/yarn.lock b/tests/ctst/yarn.lock index 40a3ee3251..9aa758d86d 100644 --- a/tests/ctst/yarn.lock +++ b/tests/ctst/yarn.lock @@ -4398,6 +4398,13 @@ js-yaml@^4.1.0: dependencies: argparse "^2.0.1" +js-yaml@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== + dependencies: + argparse "^2.0.1" + jsbn@~0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" @@ -5903,3 +5910,9 @@ yup@1.2.0: 
tiny-case "^1.0.3" toposort "^2.0.2" type-fest "^2.19.0" + +"zenko-config-loader@file:../config-loader": + version "1.0.0" + dependencies: + "@kubernetes/client-node" "^0.21.0" + js-yaml "^4.1.1" diff --git a/tests/zenko_tests/Dockerfile b/tests/zenko_tests/Dockerfile index 6c7e80f244..c1f3be52b7 100644 --- a/tests/zenko_tests/Dockerfile +++ b/tests/zenko_tests/Dockerfile @@ -43,12 +43,7 @@ RUN python3 -m pip install -r /tmp/requirements.txt tox && \ rm -rf /tmp/npm-* && \ rm -rf /var/cache/apk/* -COPY ./node_tests/npm_chain.sh ./docker-entrypoint.sh ./wait_for_ceph.sh /usr/local/bin/ -RUN chmod +x /usr/local/bin/npm_chain.sh /usr/local/bin/docker-entrypoint.sh /usr/local/bin/wait_for_ceph.sh - # Copy Tests COPY . /usr/local/bin/tests/ WORKDIR /usr/local/bin/tests - -CMD [ "docker-entrypoint.sh" ] diff --git a/tests/zenko_tests/README.md b/tests/zenko_tests/README.md index 5932fa1d40..9223ea7868 100644 --- a/tests/zenko_tests/README.md +++ b/tests/zenko_tests/README.md @@ -1,9 +1,28 @@ # Table of contents +- [How to run node tests in a Codespace](#how-to-run-node-tests-locally-in-a-codespace) - [How to write iam policy e2e tests](#how-to-write-iam-policy-e2e-tests) - [How to run zenko end2end test locally with zenko-operator](#how-to-run-zenko-end2end-test-locally-with-zenko-operator) - [How to run zenko end2end test locally with cloudserver and vault](#how-to-run-zenko-end2end-test-locally-with-cloudserver-and-vault) +# How to run node tests locally in a Codespace + +```bash +cd tests/zenko_tests/node_tests + +# Run a specific test with grep +./run-node-tests-locally.sh "should list objects in V2 format" "cloudserver/bucketGetV2" + +# Run all tests in a folder +./run-node-tests-locally.sh "" "cloudserver" + +# Build and use a custom image +docker build -t my-e2e:local ../ +./run-node-tests-locally.sh "should list objects" "cloudserver" my-e2e:local +``` + +To switch images, delete the pod first: `kubectl delete pod node-tests-local` + # How to write iam policy e2e 
tests All iam policy controlled tests go under `node_tests/iam_policies`, diff --git a/tests/zenko_tests/docker-entrypoint.sh b/tests/zenko_tests/docker-entrypoint.sh deleted file mode 100644 index 11da706a8b..0000000000 --- a/tests/zenko_tests/docker-entrypoint.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -EXIT_STATUS="0" - -enter_and_run() { - local old_cwd="$(pwd)" - cd "$1" - sh -c "$2" - if [ "$?" -ne "0" ]; then - EXIT_STATUS="1" - echo "$2 have failed" - fi - cd "$old_cwd" -} - -echo 'Waiting for ceph' -sh wait_for_ceph.sh zenko-ceph-ceph-in-a-box - -# Setup our environment -python3 create_buckets.py -if [ "$?" -ne "0" ]; then - exit 1 -fi - -# Run the tests -echo "Running test stage: $STAGE" -if [ "$STAGE" = 'python-tests' ]; then - enter_and_run python_tests "./run.sh $PYTHON_ARGS" -elif [ "$STAGE" = 'node-tests-01' ]; then - enter_and_run node_tests "npm_chain.sh test_aws_crr test_api test_location_quota test_bucket_get_v2 test_ingestion_oob_s3c test_bucket_policy" -elif [ "$STAGE" = 'node-tests-02' ]; then - enter_and_run node_tests "npm_chain.sh test_gcp_crr test_azure_crr test_one_to_many test_lifecycle test_crr_pause_resume" -else - enter_and_run python_tests "./run.sh $PYTHON_ARGS" - # test_crr runs "test_aws_crr test_gcp_crr test_azure_crr test_one_to_many" - enter_and_run node_tests "npm_chain.sh test_crr test_api test_crr_pause_resume test_location_quota test_bucket_get_v2 test_bucket_policy" -fi - -python3 cleans3c.py - -exit "$EXIT_STATUS" diff --git a/tests/zenko_tests/node_tests/backbeat/tests/retry/pendingMetrics.js b/tests/zenko_tests/node_tests/backbeat/tests/retry/pendingMetrics.js index 14295d9738..4822bc4d63 100644 --- a/tests/zenko_tests/node_tests/backbeat/tests/retry/pendingMetrics.js +++ b/tests/zenko_tests/node_tests/backbeat/tests/retry/pendingMetrics.js @@ -15,7 +15,7 @@ const awsUtils = new ReplicationUtility(awsS3Client); const srcBucket = `source-bucket-${Date.now()}`; const awsDestBucket = process.env.AWS_CRR_BUCKET_NAME; 
const destAWSLocation = process.env.AWS_BACKEND_DESTINATION_LOCATION; -const destFailBucket = process.env.AWS_S3_FAIL_BACKBEAT_BUCKET_NAME; +const destFailBucket = process.env.AWS_FAIL_BUCKET_NAME; const destFailLocation = process.env.AWS_S3_FAIL_BACKEND_DESTINATION_LOCATION; const hex = crypto.createHash('md5') .update(Math.random().toString()) diff --git a/tests/zenko_tests/node_tests/backbeat/tests/retry/retry.js b/tests/zenko_tests/node_tests/backbeat/tests/retry/retry.js index d27c434847..65e96082d4 100644 --- a/tests/zenko_tests/node_tests/backbeat/tests/retry/retry.js +++ b/tests/zenko_tests/node_tests/backbeat/tests/retry/retry.js @@ -11,7 +11,7 @@ const { makeGETRequest, makeUpdateRequest, getResponseBody } = require('../../.. const scalityUtils = new ReplicationUtility(scalityS3Client); const awsUtils = new ReplicationUtility(awsS3Client); const srcBucket = `source-bucket-${Date.now()}`; -const destFailBucket = process.env.AWS_S3_FAIL_BACKBEAT_BUCKET_NAME; +const destFailBucket = process.env.AWS_FAIL_BUCKET_NAME; const destFailLocation = process.env.AWS_S3_FAIL_BACKEND_DESTINATION_LOCATION; const hex = crypto.createHash('md5') diff --git a/tests/zenko_tests/node_tests/npm_chain.sh b/tests/zenko_tests/node_tests/npm_chain.sh deleted file mode 100755 index 768d558a15..0000000000 --- a/tests/zenko_tests/node_tests/npm_chain.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -EXIT_STATUS="0" - -while [ ! -z "$1" ]; do - echo "npm run $1" - npm run "$1" - status="$?" 
- if [ "$status" -gt 0 ]; then - echo "command: npm run $1 failed with exit code $status" >&2 - EXIT_STATUS="1" - fi - shift -done -exit "$EXIT_STATUS" diff --git a/tests/zenko_tests/node_tests/run-node-tests-locally.sh b/tests/zenko_tests/node_tests/run-node-tests-locally.sh new file mode 100755 index 0000000000..036761ba73 --- /dev/null +++ b/tests/zenko_tests/node_tests/run-node-tests-locally.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Script to run mocha node tests from a GitHub Codespace +# Uses a persistent pod with kubectl cp to sync local changes +# +# Usage: ./run-node-tests-locally.sh [image] +# +# Examples: +# ./run-node-tests-locally.sh "should list objects" "cloudserver/bucketGetV2" +# ./run-node-tests-locally.sh "should list objects" "cloudserver" my-e2e:local +# ./run-node-tests-locally.sh "" "smoke_tests" # Run all smoke tests +# ./run-node-tests-locally.sh "" "cloudserver" # Run all cloudserver tests +# +# If you need to use a different image, delete pod first: +# kubectl delete pod node-tests-local + +set -ex + +ZENKO_NAME=${ZENKO_NAME:-end2end} +IMAGE_NAME="${3:-ghcr.io/scality/zenko/zenko-e2e-ctst:ctst_codespace_setup}" +POD_NAME="node-tests-local" + +TEST_GREP="${1:-should list objects in V2 format}" +TEST_PATH="${2:-cloudserver}" + +ZENKO_ACCESS_KEY=$(kubectl get secret end2end-management-account-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) +ZENKO_SECRET_KEY=$(kubectl get secret end2end-management-account-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) + +CLOUDSERVER_ENDPOINT="http://${ZENKO_NAME}-connector-s3api.default.svc.cluster.local:80" +VAULT_ENDPOINT="http://${ZENKO_NAME}-management-vault-iam-admin-api:80" +VAULT_STS_ENDPOINT="http://${ZENKO_NAME}-connector-vault-sts-api:80" +BACKBEAT_API_ENDPOINT="http://${ZENKO_NAME}-management-backbeat-api.default.svc.cluster.local:80" + +# Path inside the zenko-e2e container +CONTAINER_PATH="/usr/local/bin/tests/node_tests" + +if ! 
kubectl get pod "$POD_NAME" &>/dev/null; then + echo "Loading image into kind cluster, can take 1~3 minutes" + kind load docker-image "$IMAGE_NAME" --name kind || true + + kubectl run "$POD_NAME" \ + --image="$IMAGE_NAME" \ + --restart=Never \ + --image-pull-policy=IfNotPresent \ + --command -- sleep infinity + kubectl wait --for=condition=Ready pod/"$POD_NAME" --timeout=5m +fi + +# Copy local test files so that ongoing changes are included +kubectl exec "$POD_NAME" -- rm -rf "$CONTAINER_PATH"/cloudserver "$CONTAINER_PATH"/backbeat "$CONTAINER_PATH"/smoke_tests "$CONTAINER_PATH"/iam_policies "$CONTAINER_PATH"/utils +kubectl cp ./cloudserver "$POD_NAME":"$CONTAINER_PATH"/cloudserver +kubectl cp ./backbeat "$POD_NAME":"$CONTAINER_PATH"/backbeat +kubectl cp ./smoke_tests "$POD_NAME":"$CONTAINER_PATH"/smoke_tests +kubectl cp ./iam_policies "$POD_NAME":"$CONTAINER_PATH"/iam_policies +kubectl cp ./utils "$POD_NAME":"$CONTAINER_PATH"/utils +kubectl cp ./s3SDK.js "$POD_NAME":"$CONTAINER_PATH"/s3SDK.js +kubectl cp ./stsSDK.js "$POD_NAME":"$CONTAINER_PATH"/stsSDK.js +kubectl cp ./VaultClient.js "$POD_NAME":"$CONTAINER_PATH"/VaultClient.js +kubectl cp ./init_test.js "$POD_NAME":"$CONTAINER_PATH"/init_test.js + +kubectl exec "$POD_NAME" -- env \ + ZENKO_ACCESS_KEY="$ZENKO_ACCESS_KEY" \ + ZENKO_SECRET_KEY="$ZENKO_SECRET_KEY" \ + CLOUDSERVER_ENDPOINT="$CLOUDSERVER_ENDPOINT" \ + VAULT_ENDPOINT="$VAULT_ENDPOINT" \ + VAULT_STS_ENDPOINT="$VAULT_STS_ENDPOINT" \ + BACKBEAT_API_ENDPOINT="$BACKBEAT_API_ENDPOINT" \ + sh -c "cd $CONTAINER_PATH && \ + npx mocha \ + --exit \ + --timeout 10000 \ + --recursive \ + --grep '$TEST_GREP' \ + '$TEST_PATH'" + diff --git a/tests/zenko_tests/node_tests/utils/getWebIdentityToken.js b/tests/zenko_tests/node_tests/utils/getWebIdentityToken.js index 8a02914c44..cefba2b323 100644 --- a/tests/zenko_tests/node_tests/utils/getWebIdentityToken.js +++ b/tests/zenko_tests/node_tests/utils/getWebIdentityToken.js @@ -2,13 +2,12 @@ const querystring = 
require('querystring'); const http = require('http'); const assert = require('assert'); -const USER_1_PASSWORD = process.env.KEYCLOAK_TEST_PASSWORD || '123'; -const HOST_1_URL = process.env.KEYCLOAK_TEST_HOST || 'http://keycloak.zenko.local'; -const HOST_1_PORT = parseInt(process.env.KEYCLOAK_TEST_PORT, 10) || 80; +const HOST_1 = process.env.KEYCLOAK_TEST_HOST; +const HOST_1_PORT = 80; const REALM_NAME = process.env.KEYCLOAK_TEST_REALM_NAME || 'zenko'; const KEYCLOAK_PATH = `/auth/realms/${REALM_NAME}/protocol/openid-connect/token`; const CLIENT_ID = process.env.KEYCLOAK_TEST_CLIENT_ID || 'zenko-ui'; -const GRANT_TYPE = process.env.KEYCLOAK_TEST_GRANT_TYPE || 'password'; +const GRANT_TYPE = 'password'; /** @@ -34,9 +33,6 @@ function getWebIdentityToken( grandType, callback, ) { - // In Zenko, we are using an endpoint as the `KEYCLOAK_TEST_HOST` env variable - // So we should remove any existing http of https prefix in HOST_1_URL. - host = host.replace('https://', '').replace('http://', ''); const userData = querystring.stringify({ username, password, @@ -82,8 +78,8 @@ function getWebIdentityToken( function getTokenForIdentity(identity, callback) { getWebIdentityToken( identity, - USER_1_PASSWORD, - HOST_1_URL, + process.env.KEYCLOAK_TEST_PASSWORD, + HOST_1, HOST_1_PORT, KEYCLOAK_PATH, CLIENT_ID, diff --git a/tests/zenko_tests/wait_for_ceph.sh b/tests/zenko_tests/wait_for_ceph.sh deleted file mode 100644 index 0037cd9492..0000000000 --- a/tests/zenko_tests/wait_for_ceph.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -# This script is needed because RADOS Gateway -# will open the port before beginning to serve traffic -# causing wait_for_local_port.bash to exit immediately - -EP="zenko-ceph-ceph-in-a-box" -echo "Waiting for ceph at $EP" -while [ -z "$(curl $EP 2>/dev/null)" ]; do - sleep 1 -done