Skip to content

Commit 9181d8b

Browse files
last var
1 parent f5de72a commit 9181d8b

8 files changed

Lines changed: 54 additions & 117 deletions

File tree

.github/scripts/end2end/load-config.sh

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -70,16 +70,16 @@ load_ctst() {
7070

7171
# Hardcoded CTST values
7272
ENV_VARS+=("SSL=false")
73-
ENV_VARS+=("ZENKO_ACCOUNT_NAME=zenko-ctst")
74-
ENV_VARS+=("STORAGE_MANAGER_USER_NAME=ctst_storage_manager")
75-
ENV_VARS+=("STORAGE_ACCOUNT_OWNER_USER_NAME=ctst_storage_account_owner")
76-
ENV_VARS+=("DATA_CONSUMER_USER_NAME=ctst_data_consumer")
77-
ENV_VARS+=("DATA_ACCESSOR_USER_NAME=ctst_data_accessor")
7873
ENV_VARS+=("ZENKO_PORT=80")
7974
ENV_VARS+=("AZURE_ARCHIVE_ACCESS_TIER=Hot")
8075
ENV_VARS+=("AZURE_ARCHIVE_MANIFEST_ACCESS_TIER=Hot")
8176

8277
# From end2end.yaml
78+
ENV_VARS+=("ZENKO_ACCOUNT_NAME=$(get_env_var ZENKO_ACCOUNT_NAME)")
79+
ENV_VARS+=("STORAGE_MANAGER_USER_NAME=$(get_env_var STORAGE_MANAGER_USER_NAME)")
80+
ENV_VARS+=("STORAGE_ACCOUNT_OWNER_USER_NAME=$(get_env_var STORAGE_ACCOUNT_OWNER_USER_NAME)")
81+
ENV_VARS+=("DATA_CONSUMER_USER_NAME=$(get_env_var DATA_CONSUMER_USER_NAME)")
82+
ENV_VARS+=("DATA_ACCESSOR_USER_NAME=$(get_env_var DATA_ACCESSOR_USER_NAME)")
8383
ENV_VARS+=("DR_SUBDOMAIN=$(get_env_var DR_SUBDOMAIN)")
8484
ENV_VARS+=("PROMETHEUS_NAME=$(get_env_var PROMETHEUS_NAME)")
8585
ENV_VARS+=("AZURE_BACKEND_QUEUE_ENDPOINT=$(get_env_var AZURE_BACKEND_QUEUE_ENDPOINT)")

.github/scripts/end2end/run-e2e-ctst.sh

Lines changed: 15 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -18,80 +18,12 @@ PARALLEL_RUNS=${PARALLEL_RUNS:-$(( ( $(nproc) + 1 ) / 2 ))}
1818
# Zenko Version
1919
VERSION=$(cat ../../../VERSION | grep -Po 'VERSION="\K[^"]*')
2020

21-
# Zenko Environment
22-
ZENKO_ACCOUNT_NAME="zenko-ctst"
23-
# ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d)
24-
# ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d)
25-
26-
# STORAGE_MANAGER_USER_NAME="ctst_storage_manager"
27-
# STORAGE_ACCOUNT_OWNER_USER_NAME="ctst_storage_account_owner"
28-
# DATA_CONSUMER_USER_NAME="ctst_data_consumer"
29-
# DATA_ACCESSOR_USER_NAME="ctst_data_accessor"
30-
31-
# VAULT_AUTH_HOST="${ZENKO_NAME}-connector-vault-auth-api.default.svc.cluster.local"
32-
# ZENKO_PORT="80"
33-
# KEYCLOAK_TEST_USER=${OIDC_USERNAME}
34-
# KEYCLOAK_TEST_PASSWORD=${OIDC_PASSWORD}
35-
# KEYCLOAK_TEST_HOST=${OIDC_HOST}
36-
# KEYCLOAK_TEST_PORT="80"
37-
# KEYCLOAK_TEST_REALM_NAME=${OIDC_REALM}
38-
# KEYCLOAK_TEST_CLIENT_ID=${OIDC_CLIENT_ID}
39-
# KEYCLOAK_TEST_GRANT_TYPE="password"
40-
41-
# get Zenko service users credentials
42-
# BACKBEAT_LCBP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcbp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-bp-1\.json}' | base64 -d)
43-
# BACKBEAT_LCC_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcc-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-conductor-1\.json}' | base64 -d)
44-
# BACKBEAT_LCOP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-lcop-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-lifecycle-op-1\.json}' | base64 -d)
45-
# BACKBEAT_QP_1_CREDS=$(kubectl get secret -l app.kubernetes.io/name=backbeat-qp-user-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.backbeat-qp-1\.json}' | base64 -d)
46-
# SORBET_FWD_2_ACCESSKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.accessKey}' | base64 -d)
47-
# SORBET_FWD_2_SECRETKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.secretKey}' | base64 -d)
48-
# SERVICE_USERS_CREDENTIALS=$(echo '{"backbeat-lifecycle-bp-1":'${BACKBEAT_LCBP_1_CREDS}',"backbeat-lifecycle-conductor-1":'${BACKBEAT_LCC_1_CREDS}',"backbeat-lifecycle-op-1":'${BACKBEAT_LCOP_1_CREDS}',"backbeat-qp-1":'${BACKBEAT_QP_1_CREDS}',"sorbet-fwd-2":{"accessKey":"'${SORBET_FWD_2_ACCESSKEY}'","secretKey":"'${SORBET_FWD_2_SECRETKEY}'"}}' | jq -R)
49-
50-
# Get KAFKA topics for sorbet
51-
# KAFKA_DEAD_LETTER_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \
52-
# -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-dead-letter-topic"' | cut -d "\"" -f 2)
53-
54-
# KAFKA_OBJECT_TASK_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \
55-
# -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-object-task-topic"' | cut -d "\"" -f 2)
56-
57-
# KAFKA_GC_REQUEST_TOPIC=$(kubectl get secret -l app.kubernetes.io/name=cold-sorbet-config-e2e-azure-archive,app.kubernetes.io/instance=end2end \
58-
# -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq '."kafka-gc-request-topic"' | cut -d "\"" -f 2)
59-
60-
# Extracting kafka host from backbeat's config
61-
# KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
62-
# -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq -r .kafka.hosts)
63-
# KAFKA_PORT=${KAFKA_HOST_PORT#*:}
64-
65-
# KAFKA_AUTH_HOST="end2end-base-queue-auth-0"
66-
# KAFKA_AUTH_HOST_PORT="$KAFKA_AUTH_HOST:$KAFKA_PORT"
67-
68-
# TIME_PROGRESSION_FACTOR=$(kubectl get zenko end2end -o jsonpath="{.metadata.annotations.zenko\.io/time-progression-factor}")
69-
# ZENKO_INSTANCE_ID=$(kubectl get zenko end2end -o jsonpath='{.status.instanceID}')
70-
71-
# Azure archive tests
72-
# AZURE_ARCHIVE_ACCESS_TIER="Hot"
73-
# AZURE_ARCHIVE_MANIFEST_ACCESS_TIER="Hot"
74-
75-
# BACKBEAT_API_HOST=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq -r .backbeat.host)
76-
# BACKBEAT_API_PORT=$(kubectl get secret -l app.kubernetes.io/name=connector-cloudserver-config,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq -r .backbeat.port)
77-
78-
KAFKA_CLEANER_INTERVAL=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.kafkaCleaner.interval}')
79-
# SORBETD_RESTORE_TIMEOUT=$(kubectl get zenko end2end -o jsonpath='{.spec.sorbet.server.azure.restoreTimeout}')
80-
81-
# Utilization service
82-
# UTILIZATION_SERVICE_HOST=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.scuba.api.ingress.hostname}')
83-
# UTILIZATION_SERVICE_PORT="80"
84-
8521
# Setting CTST world params
8622
WORLD_PARAMETERS="$(jq -c <<EOF
8723
{
8824
"NotificationDestinationAuthUsername":"${NOTIF_AUTH_DEST_USERNAME}",
8925
"NotificationDestinationAuthPassword":"${NOTIF_AUTH_DEST_PASSWORD}",
90-
"KafkaExternalIps": "${KAFKA_EXTERNAL_IP:-}",
91-
"StorageManagerUsername":"ctst_storage_manager",
92-
"StorageAccountOwnerUsername":"ctst_storage_account_owner",
93-
"DataConsumerUsername":"ctst_data_consumer",
94-
"DataAccessorUsername":"ctst_data_accessor",
26+
"KafkaExternalIps": "${KAFKA_EXTERNAL_IP:-}"
9527
}
9628
EOF
9729
)"
@@ -101,15 +33,23 @@ kubectl set env deployment end2end-connector-cloudserver SCUBA_HEALTHCHECK_FREQU
10133
kubectl rollout status deployment end2end-connector-cloudserver
10234

10335
E2E_IMAGE=$E2E_CTST_IMAGE_NAME:$E2E_IMAGE_TAG
104-
POD_NAME="${ZENKO_NAME}-ctst-tests"
105-
CTST_VERSION=$(sed 's/.*"cli-testing": ".*#\(.*\)".*/\1/;t;d' ../../../tests/ctst/package.json)
36+
POD_NAME="end2end-ctst-tests"
10637

10738
# Configure keycloak
10839
docker run \
109-
--rm \
110-
--network=host \
111-
"${E2E_IMAGE}" /bin/bash \
112-
-c "SUBDOMAIN=${SUBDOMAIN} CONTROL_PLANE_INGRESS_ENDPOINT=${KEYCLOAK_TEST_ENDPOINT} ACCOUNT=${ZENKO_ACCOUNT_NAME} KEYCLOAK_REALM=${KEYCLOAK_TEST_REALM_NAME} STORAGE_MANAGER=ctst_storage_manager STORAGE_ACCOUNT_OWNER=ctst_storage_account_owner DATA_CONSUMER=ctst_data_consumer DATA_ACCESSOR=ctst_data_accessor /ctst/node_modules/cli-testing/bin/seedKeycloak.sh"; [[ $? -eq 1 ]] && exit 1 || echo 'Keycloak Configured!'
40+
--rm \
41+
--network=host \
42+
"${E2E_IMAGE}" /bin/bash \
43+
-c "SUBDOMAIN=${SUBDOMAIN} \
44+
CONTROL_PLANE_INGRESS_ENDPOINT=${KEYCLOAK_TEST_ENDPOINT} \
45+
ACCOUNT=${ZENKO_ACCOUNT_NAME} \
46+
KEYCLOAK_REALM=${KEYCLOAK_TEST_REALM_NAME} \
47+
STORAGE_MANAGER=${STORAGE_MANAGER_USER_NAME} \
48+
STORAGE_ACCOUNT_OWNER=${STORAGE_ACCOUNT_OWNER_USER_NAME} \
49+
DATA_CONSUMER=${DATA_CONSUMER_USER_NAME} \
50+
DATA_ACCESSOR=${DATA_ACCESSOR_USER_NAME} \
51+
/ctst/node_modules/cli-testing/bin/seedKeycloak.sh"
52+
[[ $? -eq 1 ]] && exit 1 || echo 'Keycloak Configured!'
11353

11454
# Grant access to Kube API (insecure, only for testing)
11555
kubectl create clusterrolebinding serviceaccounts-cluster-admin \
@@ -127,8 +67,6 @@ kubectl run $POD_NAME \
12767
--image-pull-policy=IfNotPresent \
12868
$(env_for_kubectl_run) \
12969
--env=TARGET_VERSION=$VERSION \
130-
--env=AZURE_BLOB_URL=$AZURE_BACKEND_ENDPOINT \
131-
--env=AZURE_QUEUE_URL=$AZURE_BACKEND_QUEUE_ENDPOINT \
13270
--env=VERBOSE=1 \
13371
--env=SDK=true \
13472
--override-type strategic \

.github/workflows/end2end.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,11 @@ env:
9191
RING_S3C_INGESTION_SRC_NON_VERSIONED_BUCKET_NAME: ingestion-test-src-non-versioned-bucket
9292
RING_S3C_INGESTION_NON_VERSIONED_OBJECT_COUNT_PER_TYPE: 2
9393
# CTST end2end tests
94+
ZENKO_ACCOUNT_NAME: 'zenko-ctst'
95+
STORAGE_MANAGER_USER_NAME: "ctst_storage_manager"
96+
STORAGE_ACCOUNT_OWNER_USER_NAME: "ctst_storage_account_owner"
97+
DATA_CONSUMER_USER_NAME: "ctst_data_consumer"
98+
DATA_ACCESSOR_USER_NAME: "ctst_data_accessor"
9499
NOTIF_DEST_NAME: "destination1"
95100
NOTIF_DEST_TOPIC: "destination-topic-1"
96101
NOTIF_ALT_DEST_NAME: "destination2"

tests/ctst/features/crrReplicationS3utils.feature

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ Feature: Replication
1313

1414
@2.12.0
1515
@PreMerge
16-
@yoyo
16+
@yaya
1717
@ReplicationTest
1818
Scenario Outline: Replicate objects created before creating the replication rule
1919
Given an existing bucket "source-bucket-1" "with" versioning, "without" ObjectLock "without" retention mode

tests/ctst/steps/azureArchive.ts

Lines changed: 14 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -32,17 +32,11 @@ type manifest = {
3232
'entries': manifestEntry[],
3333
}
3434

35-
const AZURE_STORAGE_BLOB_URL = process.env.AZURE_BLOB_URL || 'http://127.0.0.1:10000/devstoreaccount1';
36-
const AZURE_STORAGE_QUEUE_URL = process.env.AZURE_QUEUE_URL || 'http://127.0.0.1:10001/devstoreaccount1';
37-
3835
/**
3936
* Returns an object containing azure credentials
40-
* @param {Zenko} world world object
4137
* @returns {object} azure creds
4238
*/
43-
function getAzureCreds(
44-
world: Zenko,
45-
): {accountName: string, accountKey: string } {
39+
function getAzureCreds(): {accountName: string, accountKey: string } {
4640
return {
4741
accountName: process.env.AZURE_ACCOUNT_NAME,
4842
accountKey: process.env.AZURE_SECRET_KEY,
@@ -69,7 +63,7 @@ async function isObjectRehydrated(zenko: Zenko, objectName: string) {
6963
found = await AzureHelper.blobExists(
7064
process.env.AZURE_ARCHIVE_BUCKET_NAME,
7165
`rehydrate/${tarName}`,
72-
getAzureCreds(zenko),
66+
getAzureCreds(),
7367
);
7468
await Utils.sleep(1000);
7569

@@ -97,7 +91,7 @@ async function findObjectPackAndManifest(
9791
// listing all blobs in the container
9892
const blobs = await AzureHelper.listBlobs(
9993
process.env.AZURE_ARCHIVE_BUCKET_NAME,
100-
getAzureCreds(world),
94+
getAzureCreds(),
10195
);
10296
// filtering the list of blobs only leaving the manifests
10397
const manifests = blobs.filter(blob => blob.name.includes('.json.'));
@@ -106,7 +100,7 @@ async function findObjectPackAndManifest(
106100
const manifestBuffer = await AzureHelper.downloadBlob(
107101
process.env.AZURE_ARCHIVE_BUCKET_NAME,
108102
manifests[i].name,
109-
getAzureCreds(world),
103+
getAzureCreds(),
110104
);
111105
const { ok, result } = safeJsonParse(manifestBuffer.toString());
112106
if (!ok) {
@@ -177,19 +171,19 @@ export async function cleanAzureContainer(
177171
await AzureHelper.deleteBlob(
178172
process.env.AZURE_ARCHIVE_BUCKET_NAME,
179173
tarName,
180-
getAzureCreds(world),
174+
getAzureCreds(),
181175
);
182176
await AzureHelper.deleteBlob(
183177
process.env.AZURE_ARCHIVE_BUCKET_NAME,
184178
`rehydrate/${tarName}`,
185-
getAzureCreds(world),
179+
getAzureCreds(),
186180
);
187181
}
188182
if (manifestName) {
189183
await AzureHelper.deleteBlob(
190184
process.env.AZURE_ARCHIVE_BUCKET_NAME,
191185
manifestName,
192-
getAzureCreds(world),
186+
getAzureCreds(),
193187
);
194188
}
195189
currentKey = iterator.next();
@@ -209,7 +203,7 @@ Then('manifest access tier should be valid for object {string}', async function
209203
const manifestProperties = await AzureHelper.getBlobProperties(
210204
process.env.AZURE_ARCHIVE_BUCKET_NAME,
211205
manifestName,
212-
getAzureCreds(this),
206+
getAzureCreds(),
213207
);
214208
assert.strictEqual(manifestProperties.accessTier, Zenko.AZURE_ARCHIVE_MANIFEST_ACCESS_TIER);
215209
});
@@ -227,7 +221,7 @@ Then('tar access tier should be valid for object {string}', async function (this
227221
const packProperties = await AzureHelper.getBlobProperties(
228222
process.env.AZURE_ARCHIVE_BUCKET_NAME,
229223
tarName,
230-
getAzureCreds(this),
224+
getAzureCreds(),
231225
);
232226
assert.strictEqual(packProperties.accessTier, Zenko.AZURE_ARCHIVE_ACCESS_TIER);
233227
});
@@ -306,7 +300,7 @@ Then('blob for object {string} must be rehydrated',
306300
process.env.AZURE_ARCHIVE_QUEUE_NAME,
307301
process.env.AZURE_ARCHIVE_BUCKET_NAME,
308302
`rehydrate/${tarName}`,
309-
getAzureCreds(this),
303+
getAzureCreds(),
310304
);
311305
});
312306

@@ -377,7 +371,7 @@ When('i run sorbetctl to retry failed restore for {string} location',
377371
}
378372
});
379373

380-
When('i wait for {int} days', { timeout: 10 * 60 * 1000 }, async function (this: Zenko, days: number) {
374+
When('i wait for {int} days', { timeout: 10 * 60 * 1000 }, async (days: number) => {
381375
const factor = Math.max(1, Number(process.env.TIME_PROGRESSION_FACTOR) || 1);
382376
const realTimeDay = days * 24 * 60 * 60 * 1000 / factor;
383377
await Utils.sleep(realTimeDay);
@@ -409,7 +403,7 @@ Then('object {string} should expire in {int} days', async function (this: Zenko,
409403
});
410404

411405
Given('that lifecycle is {string} for the {string} location',
412-
async function (this: Zenko, status: string, location: string) {
406+
async (_this: Zenko, status: string, location: string) => {
413407
let path: string;
414408
if (status === 'paused') {
415409
path = `/_/lifecycle/pause/${location}`;
@@ -431,12 +425,12 @@ Given('an azure archive location {string}', { timeout: 15 * 60 * 1000 },
431425
name: locationName,
432426
locationType: 'location-azure-archive-v1',
433427
details: {
434-
endpoint: AZURE_STORAGE_BLOB_URL,
428+
endpoint: process.env.AZURE_BACKEND_ENDPOINT || 'http://127.0.0.1:10000/devstoreaccount1',
435429
bucketName: process.env.AZURE_ARCHIVE_BUCKET_NAME,
436430
queue: {
437431
type: 'location-azure-storage-queue-v1',
438432
queueName: process.env.AZURE_ARCHIVE_QUEUE_NAME,
439-
endpoint: AZURE_STORAGE_QUEUE_URL,
433+
endpoint: process.env.AZURE_BACKEND_QUEUE_ENDPOINT || 'http://127.0.0.1:10001/devstoreaccount1',
440434
},
441435
auth: {
442436
type: 'location-azure-shared-key',

tests/ctst/steps/iam-policies/common.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ When('the user tries to perform vault auth {string}', async function (this: Zenk
2424
AccessKey: userCredentials.accessKeyId,
2525
SecretKey: userCredentials.secretAccessKey,
2626
SessionToken: userCredentials.sessionToken,
27-
ip: "end2end-connector-vault-auth-api.default.svc.cluster.local",
27+
ip: 'end2end-connector-vault-auth-api.default.svc.cluster.local',
2828
ssl: CacheHelper.parameters.ssl,
2929
};
3030

tests/ctst/steps/pra.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -328,7 +328,7 @@ Then('the kafka DR volume exists', { timeout: volumeTimeout + 2000 }, async func
328328
assert(volumeParsed.result!['volume phase'] === 'Bound');
329329
});
330330

331-
Then('prometheus should scrap federated metrics from DR sink', { timeout: 180000 }, async function (this: Zenko) {
331+
Then('prometheus should scrap federated metrics from DR sink', { timeout: 180000 }, async () => {
332332
const prom = new PrometheusDriver({
333333
endpoint: `http://${Zenko.PROMETHEUS_SERVICE}:9090`,
334334
baseURL: '/api/v1',

tests/ctst/world/Zenko.ts

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// import { World, IWorldOptions, setWorldConstructor } from '@cucumber/cucumber';
1+
import { World, IWorldOptions, setWorldConstructor } from '@cucumber/cucumber';
22
import axios, { AxiosRequestConfig, AxiosResponse, Method } from 'axios';
33
import { AccessKey } from '@aws-sdk/client-iam';
44
import { Credentials } from '@aws-sdk/client-sts';
@@ -71,17 +71,17 @@ export interface ZenkoWorldParameters extends ClientOptions {
7171
// KeycloakClientId: string;
7272
// KeycloakGrantType: string;
7373
// KeycloakTestPassword: string;
74-
StorageManagerUsername: string;
75-
StorageAccountOwnerUsername: string;
76-
DataConsumerUsername: string;
77-
DataAccessorUsername: string;
78-
ServiceUsersCredentials: string;
74+
// StorageManagerUsername: string;
75+
// StorageAccountOwnerUsername: string;
76+
// DataConsumerUsername: string;
77+
// DataAccessorUsername: string;
78+
// ServiceUsersCredentials: string;
7979
// AzureAccountName: string;
8080
// AzureAccountKey: string;
8181
// AzureArchiveContainer: string;
8282
// AzureArchiveContainer2: string;
83-
AzureArchiveAccessTier: string;
84-
AzureArchiveManifestTier: string;
83+
// AzureArchiveAccessTier: string;
84+
// AzureArchiveManifestTier: string;
8585
// AzureArchiveQueue: string;
8686
// TimeProgressionFactor: number;
8787
// KafkaDeadLetterQueueTopic: string;
@@ -268,19 +268,19 @@ export default class Zenko extends World<ZenkoWorldParameters> {
268268
await this.prepareIamUser();
269269
break;
270270
case EntityType.STORAGE_MANAGER:
271-
await this.prepareARWWI(this.parameters.StorageManagerUsername || 'storage_manager',
271+
await this.prepareARWWI(process.env.STORAGE_MANAGER_USER_NAME,
272272
'storage-manager-role', process.env.KEYCLOAK_TEST_PASSWORD);
273273
break;
274274
case EntityType.STORAGE_ACCOUNT_OWNER:
275-
await this.prepareARWWI(this.parameters.StorageAccountOwnerUsername || 'storage_account_owner',
275+
await this.prepareARWWI(process.env.STORAGE_ACCOUNT_OWNER_USER_NAME,
276276
'storage-account-owner-role', process.env.KEYCLOAK_TEST_PASSWORD);
277277
break;
278278
case EntityType.DATA_CONSUMER:
279-
await this.prepareARWWI(this.parameters.DataConsumerUsername || 'data_consumer',
279+
await this.prepareARWWI(process.env.DATA_CONSUMER_USER_NAME,
280280
'data-consumer-role', process.env.KEYCLOAK_TEST_PASSWORD);
281281
break;
282282
case EntityType.DATA_ACCESSOR:
283-
await this.prepareARWWI(this.parameters.DataAccessorUsername || 'data_accessor',
283+
await this.prepareARWWI(process.env.DATA_ACCESSOR_USER_NAME,
284284
'data-accessor-role', process.env.KEYCLOAK_TEST_PASSWORD);
285285
break;
286286
case EntityType.ASSUME_ROLE_USER:

0 commit comments

Comments
 (0)