diff --git a/lib/api/apiUtils/integrity/validateChecksums.js b/lib/api/apiUtils/integrity/validateChecksums.js
index 64f945b2ae..c3ce6018c1 100644
--- a/lib/api/apiUtils/integrity/validateChecksums.js
+++ b/lib/api/apiUtils/integrity/validateChecksums.js
@@ -5,52 +5,59 @@ const { CrtCrc64Nvme } = require('@aws-sdk/crc64-nvme-crt');
const { errors: ArsenalErrors, errorInstances } = require('arsenal');
const { config } = require('../../../Config');
-const defaultChecksumData = Object.freeze(
- { algorithm: 'crc64nvme', isTrailer: false, expected: undefined });
+const defaultChecksumData = Object.freeze({ algorithm: 'crc64nvme', isTrailer: false, expected: undefined });
const errAlgoNotSupported = errorInstances.InvalidRequest.customizeDescription(
- 'The algorithm type you specified in x-amz-checksum- header is invalid.');
+ 'The algorithm type you specified in x-amz-checksum- header is invalid.',
+);
const errAlgoNotSupportedSDK = errorInstances.InvalidRequest.customizeDescription(
- 'Value for x-amz-sdk-checksum-algorithm header is invalid.');
+ 'Value for x-amz-sdk-checksum-algorithm header is invalid.',
+);
const errMissingCorresponding = errorInstances.InvalidRequest.customizeDescription(
'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' +
- 'or x-amz-trailer headers were found.');
+ 'or x-amz-trailer headers were found.',
+);
const errMultipleChecksumTypes = errorInstances.InvalidRequest.customizeDescription(
- 'Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.');
+ 'Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.',
+);
const errTrailerAndChecksum = errorInstances.InvalidRequest.customizeDescription(
- 'Expecting a single x-amz-checksum- header');
+ 'Expecting a single x-amz-checksum- header',
+);
const errTrailerNotSupported = errorInstances.InvalidRequest.customizeDescription(
- 'The value specified in the x-amz-trailer header is not supported');
+ 'The value specified in the x-amz-trailer header is not supported',
+);
const errMPUAlgoNotSupported = errorInstances.InvalidRequest.customizeDescription(
'Checksum algorithm provided is unsupported. ' +
- 'Please try again with any of the valid types: ' +
- '[CRC32, CRC32C, CRC64NVME, SHA1, SHA256]');
+ 'Please try again with any of the valid types: ' +
+ '[CRC32, CRC32C, CRC64NVME, SHA1, SHA256]',
+);
const errMPUTypeInvalid = errorInstances.InvalidRequest.customizeDescription(
- 'Value for x-amz-checksum-type header is invalid.');
+ 'Value for x-amz-checksum-type header is invalid.',
+);
const errMPUTypeWithoutAlgo = errorInstances.InvalidRequest.customizeDescription(
- 'The x-amz-checksum-type header can only be used ' +
- 'with the x-amz-checksum-algorithm header.');
+    'The x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header.',
+);
const checksumedMethods = Object.freeze({
- 'completeMultipartUpload': true,
- 'multiObjectDelete': true,
- 'bucketPutACL': true,
- 'bucketPutCors': true,
- 'bucketPutEncryption': true,
- 'bucketPutLifecycle': true,
- 'bucketPutLogging': true,
- 'bucketPutNotification': true,
- 'bucketPutPolicy': true,
- 'bucketPutReplication': true,
- 'bucketPutTagging': true,
- 'bucketPutVersioning': true,
- 'bucketPutWebsite': true,
- 'objectPutACL': true,
- 'objectPutLegalHold': true,
- 'bucketPutObjectLock': true, // PutObjectLockConfiguration
- 'objectPutRetention': true,
- 'objectPutTagging': true,
- 'objectRestore': true,
+ completeMultipartUpload: true,
+ multiObjectDelete: true,
+ bucketPutACL: true,
+ bucketPutCors: true,
+ bucketPutEncryption: true,
+ bucketPutLifecycle: true,
+ bucketPutLogging: true,
+ bucketPutNotification: true,
+ bucketPutPolicy: true,
+ bucketPutReplication: true,
+ bucketPutTagging: true,
+ bucketPutVersioning: true,
+ bucketPutWebsite: true,
+ objectPutACL: true,
+ objectPutLegalHold: true,
+ bucketPutObjectLock: true, // PutObjectLockConfiguration
+ objectPutRetention: true,
+ objectPutTagging: true,
+ objectRestore: true,
});
const ChecksumError = Object.freeze({
@@ -85,7 +92,7 @@ function uint32ToBase64(num) {
const algorithms = Object.freeze({
crc64nvme: {
- getObjectAttributesXMLTag: 'ChecksumCRC64NVME',
+ xmlTag: 'ChecksumCRC64NVME',
digest: async data => {
const input = Buffer.isBuffer(data) ? data : Buffer.from(data);
const crc = new CrtCrc64Nvme();
@@ -98,10 +105,10 @@ const algorithms = Object.freeze({
return Buffer.from(result).toString('base64');
},
isValidDigest: expected => typeof expected === 'string' && expected.length === 12 && base64Regex.test(expected),
- createHash: () => new CrtCrc64Nvme()
+ createHash: () => new CrtCrc64Nvme(),
},
crc32: {
- getObjectAttributesXMLTag: 'ChecksumCRC32',
+ xmlTag: 'ChecksumCRC32',
digest: data => {
const input = Buffer.isBuffer(data) ? data : Buffer.from(data);
return uint32ToBase64(new Crc32().update(input).digest() >>> 0); // >>> 0 coerce number to uint32
@@ -111,38 +118,38 @@ const algorithms = Object.freeze({
return uint32ToBase64(result >>> 0);
},
isValidDigest: expected => typeof expected === 'string' && expected.length === 8 && base64Regex.test(expected),
- createHash: () => new Crc32()
+ createHash: () => new Crc32(),
},
crc32c: {
- getObjectAttributesXMLTag: 'ChecksumCRC32C',
+ xmlTag: 'ChecksumCRC32C',
digest: data => {
const input = Buffer.isBuffer(data) ? data : Buffer.from(data);
return uint32ToBase64(new Crc32c().update(input).digest() >>> 0); // >>> 0 coerce number to uint32
},
digestFromHash: hash => uint32ToBase64(hash.digest() >>> 0),
isValidDigest: expected => typeof expected === 'string' && expected.length === 8 && base64Regex.test(expected),
- createHash: () => new Crc32c()
+ createHash: () => new Crc32c(),
},
sha1: {
- getObjectAttributesXMLTag: 'ChecksumSHA1',
+ xmlTag: 'ChecksumSHA1',
digest: data => {
const input = Buffer.isBuffer(data) ? data : Buffer.from(data);
return crypto.createHash('sha1').update(input).digest('base64');
},
digestFromHash: hash => hash.digest('base64'),
isValidDigest: expected => typeof expected === 'string' && expected.length === 28 && base64Regex.test(expected),
- createHash: () => crypto.createHash('sha1')
+ createHash: () => crypto.createHash('sha1'),
},
sha256: {
- getObjectAttributesXMLTag: 'ChecksumSHA256',
+ xmlTag: 'ChecksumSHA256',
digest: data => {
const input = Buffer.isBuffer(data) ? data : Buffer.from(data);
return crypto.createHash('sha256').update(input).digest('base64');
},
digestFromHash: hash => hash.digest('base64'),
isValidDigest: expected => typeof expected === 'string' && expected.length === 44 && base64Regex.test(expected),
- createHash: () => crypto.createHash('sha256')
- }
+ createHash: () => crypto.createHash('sha256'),
+ },
});
async function validateXAmzChecksums(headers, body) {
@@ -155,7 +162,7 @@ async function validateXAmzChecksums(headers, body) {
if (xAmzChecksumCnt === 0 && 'x-amz-sdk-checksum-algorithm' in headers) {
return {
error: ChecksumError.MissingCorresponding,
- details: { expected: headers['x-amz-sdk-checksum-algorithm'] }
+ details: { expected: headers['x-amz-sdk-checksum-algorithm'] },
};
} else if (xAmzChecksumCnt === 0) {
return { error: ChecksumError.MissingChecksum, details: null };
@@ -164,7 +171,7 @@ async function validateXAmzChecksums(headers, body) {
// No x-amz-sdk-checksum-algorithm we expect one x-amz-checksum-[crc64nvme, crc32, crc32C, sha1, sha256].
const algo = checksumHeaders[0].slice('x-amz-checksum-'.length);
if (!(algo in algorithms)) {
- return { error: ChecksumError.AlgoNotSupported, details: { algorithm: algo } };;
+ return { error: ChecksumError.AlgoNotSupported, details: { algorithm: algo } };
}
const expected = headers[`x-amz-checksum-${algo}`];
@@ -232,7 +239,7 @@ function getChecksumDataFromHeaders(headers) {
if (checksumHeader === undefined && !('x-amz-trailer' in headers) && 'x-amz-sdk-checksum-algorithm' in headers) {
return {
error: ChecksumError.MissingCorresponding,
- details: { expected: headers['x-amz-sdk-checksum-algorithm'] }
+ details: { expected: headers['x-amz-sdk-checksum-algorithm'] },
};
}
@@ -335,7 +342,8 @@ function arsenalErrorFromChecksumError(err) {
case ChecksumError.XAmzMismatch: {
const algoUpper = err.details.algorithm.toUpperCase();
return errorInstances.BadDigest.customizeDescription(
- `The ${algoUpper} you specified did not match the calculated checksum.`);
+ `The ${algoUpper} you specified did not match the calculated checksum.`,
+ );
}
case ChecksumError.AlgoNotSupported:
return errAlgoNotSupported;
@@ -347,7 +355,8 @@ function arsenalErrorFromChecksumError(err) {
return errMultipleChecksumTypes;
case ChecksumError.MalformedChecksum:
return errorInstances.InvalidRequest.customizeDescription(
- `Value for x-amz-checksum-${err.details.algorithm} header is invalid.`);
+ `Value for x-amz-checksum-${err.details.algorithm} header is invalid.`,
+ );
case ChecksumError.MD5Invalid:
return ArsenalErrors.InvalidDigest;
case ChecksumError.TrailerAlgoMismatch:
@@ -358,7 +367,8 @@ function arsenalErrorFromChecksumError(err) {
return ArsenalErrors.MalformedTrailerError;
case ChecksumError.TrailerChecksumMalformed:
return errorInstances.InvalidRequest.customizeDescription(
- `Value for x-amz-checksum-${err.details.algorithm} trailing header is invalid.`);
+ `Value for x-amz-checksum-${err.details.algorithm} trailing header is invalid.`,
+ );
case ChecksumError.TrailerAndChecksum:
return errTrailerAndChecksum;
case ChecksumError.TrailerNotSupported:
@@ -372,7 +382,8 @@ function arsenalErrorFromChecksumError(err) {
case ChecksumError.MPUInvalidCombination:
return errorInstances.InvalidRequest.customizeDescription(
`The ${err.details.type} checksum type cannot be used ` +
- `with the ${err.details.algorithm.toUpperCase()} checksum algorithm.`);
+ `with the ${err.details.algorithm.toUpperCase()} checksum algorithm.`,
+ );
default:
return ArsenalErrors.BadDigest;
}
@@ -464,8 +475,10 @@ function getChecksumDataFromMPUHeaders(headers) {
}
// Validate algorithm + type combination
- if ((type === 'FULL_OBJECT' && !fullObjectAlgorithms.has(algo)) ||
- (type === 'COMPOSITE' && !compositeAlgorithms.has(algo))) {
+ if (
+ (type === 'FULL_OBJECT' && !fullObjectAlgorithms.has(algo)) ||
+ (type === 'COMPOSITE' && !compositeAlgorithms.has(algo))
+ ) {
return { error: ChecksumError.MPUInvalidCombination, details: { algorithm: algo, type } };
}
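
Reviewer note: a minimal sketch of how the algorithms table above is consumed when checking a client-supplied checksum header. The helper and its inputs are hypothetical (the real flow lives in validateXAmzChecksums); only Node's built-in crypto is used, and the length check mirrors isValidDigest for sha256.

const crypto = require('crypto');

// Sketch: verify an x-amz-checksum-sha256 header against a request body.
function checkSha256Header(headers, body) {
    const expected = headers['x-amz-checksum-sha256'];
    // 32 raw bytes encode to 44 base64 characters, as isValidDigest expects
    if (typeof expected !== 'string' || expected.length !== 44) {
        return { ok: false, reason: 'MalformedChecksum' };
    }
    const actual = crypto.createHash('sha256').update(body).digest('base64');
    return actual === expected ? { ok: true } : { ok: false, reason: 'XAmzMismatch' };
}
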
diff --git a/lib/api/apiUtils/object/objectAttributes.js b/lib/api/apiUtils/object/objectAttributes.js
index ad5bf2e51e..2582ca9f25 100644
--- a/lib/api/apiUtils/object/objectAttributes.js
+++ b/lib/api/apiUtils/object/objectAttributes.js
@@ -69,11 +69,7 @@ function buildAttributesXml(objectMD, userMetadata, requestedAttrs, xml, log) {
case 'ObjectParts': {
const partCount = getPartCountFromMd5(objectMD);
if (partCount) {
-                        xml.push(
-                            '<ObjectParts>',
-                            `<PartsCount>${partCount}</PartsCount>`,
-                            '</ObjectParts>',
-                        );
+                        xml.push('<ObjectParts>', `<PartsCount>${partCount}</PartsCount>`, '</ObjectParts>');
}
break;
}
@@ -93,7 +89,7 @@ function buildAttributesXml(objectMD, userMetadata, requestedAttrs, xml, log) {
});
break;
}
- const tag = algo.getObjectAttributesXMLTag;
+ const tag = algo.xmlTag;
xml.push(
             '<Checksum>',
             `<${tag}>${checksum.checksumValue}</${tag}>`,
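
Reviewer note: to make the xmlTag rename concrete, this is the fragment the branch above emits for, say, CRC32 (element names follow the S3 GetObjectAttributes response; the checksum value is a made-up example, and the closing </Checksum> is assumed to be pushed just after this hunk).

// With algorithms.crc32.xmlTag === 'ChecksumCRC32' and a stored value of 'AAAAAA==':
const tag = 'ChecksumCRC32';
const fragment = ['<Checksum>', `<${tag}>AAAAAA==</${tag}>`, '</Checksum>'].join('');
// fragment === '<Checksum><ChecksumCRC32>AAAAAA==</ChecksumCRC32></Checksum>'
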
diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js
index 83df03fbe8..581344223b 100644
--- a/lib/api/completeMultipartUpload.js
+++ b/lib/api/completeMultipartUpload.js
@@ -10,17 +10,18 @@ const { data } = require('../data/wrapper');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const { config } = require('../Config');
-const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
- = require('./apiUtils/object/versioning');
+const {
+ versioningPreprocessing,
+ checkQueryVersionId,
+ decodeVID,
+ overwritingVersioning,
+} = require('./apiUtils/object/versioning');
const services = require('../services');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
-const locationConstraintCheck
- = require('./apiUtils/object/locationConstraintCheck');
+const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck');
const { skipMpuPartProcessing } = storage.data.external.backendUtils;
-const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
- s3middleware.processMpuParts;
-const locationKeysHaveChanged
- = require('./apiUtils/object/locationKeysHaveChanged');
+const { validateAndFilterMpuParts, generateMpuPartStorageInfo } = s3middleware.processMpuParts;
+const locationKeysHaveChanged = require('./apiUtils/object/locationKeysHaveChanged');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
@@ -49,8 +50,7 @@ const REPLICATION_ACTION = 'MPU';
*/
-
- /*
+/*
 Format of xml response:
     <?xml version='1.0' encoding='UTF-8'?>
    <CompleteMultipartUploadResult
    xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
    <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
- if (err || !result || !result.CompleteMultipartUpload
- || !result.CompleteMultipartUpload.Part) {
+ if (err || !result || !result.CompleteMultipartUpload || !result.CompleteMultipartUpload.Part) {
return next(errors.MalformedXML);
}
const jsonList = result.CompleteMultipartUpload;
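
Reviewer note: the guard above assumes an xml2js-style parse where repeated elements become arrays. A hedged sketch of the shape being checked (the parser and its options are not shown in this diff, so treat this as an assumption):

// Hypothetical parse result for:
//   <CompleteMultipartUpload>
//     <Part><PartNumber>1</PartNumber><ETag>"a54357..."</ETag></Part>
//   </CompleteMultipartUpload>
const result = {
    CompleteMultipartUpload: {
        Part: [{ PartNumber: ['1'], ETag: ['"a54357..."'] }],
    },
};
// A body with no <Part> elements fails the guard and yields MalformedXML.
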
@@ -126,288 +125,472 @@ function completeMultipartUpload(authInfo, request, log, callback) {
});
}
- return async.waterfall([
- function validateDestBucket(next) {
- const metadataValParams = {
- objectKey,
- authInfo,
- bucketName,
- // Required permissions for this action
- // at the destinationBucket level are same as objectPut
- requestType: request.apiMethods || 'completeMultipartUpload',
- versionId,
- request,
- };
- standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
- },
- function validateMultipart(destBucket, objMD, next) {
- if (objMD) {
- oldByteLength = objMD['content-length'];
- }
-
- if (isPutVersion) {
- const error = validatePutVersionId(objMD, putVersionId, log);
- if (error) {
- return next(error, destBucket);
+ return async.waterfall(
+ [
+ function validateDestBucket(next) {
+ const metadataValParams = {
+ objectKey,
+ authInfo,
+ bucketName,
+ // Required permissions for this action
+ // at the destinationBucket level are same as objectPut
+ requestType: request.apiMethods || 'completeMultipartUpload',
+ versionId,
+ request,
+ };
+ standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
+ },
+ function validateMultipart(destBucket, objMD, next) {
+ if (objMD) {
+ oldByteLength = objMD['content-length'];
}
- }
- return services.metadataValidateMultipart(metadataValParams,
- (err, mpuBucket, mpuOverview, storedMetadata) => {
- if (err) {
- log.error('error validating request', { error: err });
- return next(err, destBucket);
- }
- return next(null, destBucket, objMD, mpuBucket,
- storedMetadata);
- });
- },
- function parsePartsList(destBucket, objMD, mpuBucket,
- storedMetadata, next) {
- const location = storedMetadata.controllingLocationConstraint;
- // BACKWARD: Remove to remove the old splitter
- if (mpuBucket.getMdBucketModelVersion() < 2) {
- splitter = constants.oldSplitter;
- }
- // Reconstruct mpuOverviewKey to point to metadata
- // originally stored when mpu initiated
- const mpuOverviewKey =
- `overview${splitter}${objectKey}${splitter}${uploadId}`;
- if (request.post) {
- return parseXml(request.post, (err, jsonList) => {
- if (err) {
- log.error('error parsing XML', { error: err });
- return next(err, destBucket);
+ if (isPutVersion) {
+ const error = validatePutVersionId(objMD, putVersionId, log);
+ if (error) {
+ return next(error, destBucket);
}
- return next(null, destBucket, objMD, mpuBucket,
- jsonList, storedMetadata, location, mpuOverviewKey);
- });
- }
- return next(errors.MalformedXML, destBucket);
- },
- function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
- storedMetadata, location, mpuOverviewKey, next) {
- return services.metadataMarkMPObjectForCompletion({
- bucketName: mpuBucket.getName(),
- objectKey,
- uploadId,
- splitter,
+ }
+
+ return services.metadataValidateMultipart(
+ metadataValParams,
+ (err, mpuBucket, mpuOverview, storedMetadata) => {
+ if (err) {
+ log.error('error validating request', { error: err });
+ return next(err, destBucket);
+ }
+ return next(null, destBucket, objMD, mpuBucket, storedMetadata);
+ },
+ );
+ },
+ function parsePartsList(destBucket, objMD, mpuBucket, storedMetadata, next) {
+ const location = storedMetadata.controllingLocationConstraint;
+            // BACKWARD: remove this block to drop support for the old splitter
+ if (mpuBucket.getMdBucketModelVersion() < 2) {
+ splitter = constants.oldSplitter;
+ }
+ // Reconstruct mpuOverviewKey to point to metadata
+ // originally stored when mpu initiated
+ const mpuOverviewKey = `overview${splitter}${objectKey}${splitter}${uploadId}`;
+ if (request.post) {
+ return parseXml(request.post, (err, jsonList) => {
+ if (err) {
+ log.error('error parsing XML', { error: err });
+ return next(err, destBucket);
+ }
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ jsonList,
+ storedMetadata,
+ location,
+ mpuOverviewKey,
+ );
+ });
+ }
+ return next(errors.MalformedXML, destBucket);
+ },
+ function markOverviewForCompletion(
+ destBucket,
+ objMD,
+ mpuBucket,
+ jsonList,
storedMetadata,
- }, log, err => {
- if (err) {
- log.error('error marking MPU object for completion', {
+ location,
+ mpuOverviewKey,
+ next,
+ ) {
+ return services.metadataMarkMPObjectForCompletion(
+ {
bucketName: mpuBucket.getName(),
objectKey,
uploadId,
- error: err,
- });
- return next(err);
- }
- return next(null, destBucket, objMD, mpuBucket,
- jsonList, storedMetadata, location, mpuOverviewKey);
- });
- },
- function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
- storedMetadata, location, mpuOverviewKey, next) {
- return services.getMPUparts(mpuBucket.getName(), uploadId, log,
- (err, result) => {
+ splitter,
+ storedMetadata,
+ },
+ log,
+ err => {
+ if (err) {
+ log.error('error marking MPU object for completion', {
+ bucketName: mpuBucket.getName(),
+ objectKey,
+ uploadId,
+ error: err,
+ });
+ return next(err);
+ }
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ jsonList,
+ storedMetadata,
+ location,
+ mpuOverviewKey,
+ );
+ },
+ );
+ },
+ function retrieveParts(
+ destBucket,
+ objMD,
+ mpuBucket,
+ jsonList,
+ storedMetadata,
+ location,
+ mpuOverviewKey,
+ next,
+ ) {
+ return services.getMPUparts(mpuBucket.getName(), uploadId, log, (err, result) => {
if (err) {
log.error('error getting parts', { error: err });
return next(err, destBucket);
}
const storedParts = result.Contents;
const totalMPUSize = storedParts.reduce((acc, part) => acc + part.value.Size, 0);
- return next(null, destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize);
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ location,
+ mpuOverviewKey,
+ totalMPUSize,
+ );
});
- },
- function completeExternalMpu(destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize, next) {
- const mdInfo = { storedParts, mpuOverviewKey, splitter };
- const mpuInfo =
- { objectKey, uploadId, jsonList, bucketName, destBucket };
- const originalIdentityImpDenies = request.actionImplicitDenies;
- // eslint-disable-next-line no-param-reassign
- delete request.actionImplicitDenies;
- return data.completeMPU(request, mpuInfo, mdInfo, location,
- null, null, null, locationConstraintCheck, log,
- (err, completeObjData) => {
+ },
+ function completeExternalMpu(
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ location,
+ mpuOverviewKey,
+ totalMPUSize,
+ next,
+ ) {
+ const mdInfo = { storedParts, mpuOverviewKey, splitter };
+ const mpuInfo = { objectKey, uploadId, jsonList, bucketName, destBucket };
+ const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
- request.actionImplicitDenies = originalIdentityImpDenies;
- if (err) {
- log.error('error completing MPU externally', { error: err });
- return next(err, destBucket);
- }
- // if mpu not handled externally, completeObjData will be null
- return next(null, destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, completeObjData, mpuOverviewKey,
- totalMPUSize);
- });
- },
- function validateAndFilterParts(destBucket, objMD, mpuBucket,
- storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey,
- totalMPUSize, next) {
- if (completeObjData) {
- return next(null, destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, completeObjData, mpuOverviewKey,
- completeObjData.filteredPartsObj, totalMPUSize);
- }
- const filteredPartsObj = validateAndFilterMpuParts(storedParts,
- jsonList, mpuOverviewKey, splitter, log);
- if (filteredPartsObj.error) {
- return next(filteredPartsObj.error, destBucket);
- }
- return next(null, destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, completeObjData, mpuOverviewKey,
- filteredPartsObj, totalMPUSize);
- },
- function processParts(destBucket, objMD, mpuBucket, storedParts,
- jsonList, storedMetadata, completeObjData, mpuOverviewKey,
- filteredPartsObj, totalMPUSize, next) {
- // if mpu was completed on backend that stored mpu MD externally,
- // skip MD processing steps
- if (completeObjData && skipMpuPartProcessing(completeObjData)) {
- const dataLocations = [
- {
- key: completeObjData.key,
- size: completeObjData.contentLength,
- start: 0,
- dataStoreVersionId: completeObjData.dataStoreVersionId,
- dataStoreName: storedMetadata.dataStoreName,
- dataStoreETag: completeObjData.eTag,
- dataStoreType: completeObjData.dataStoreType,
+ delete request.actionImplicitDenies;
+ return data.completeMPU(
+ request,
+ mpuInfo,
+ mdInfo,
+ location,
+ null,
+ null,
+ null,
+ locationConstraintCheck,
+ log,
+ (err, completeObjData) => {
+ // eslint-disable-next-line no-param-reassign
+ request.actionImplicitDenies = originalIdentityImpDenies;
+ if (err) {
+ log.error('error completing MPU externally', { error: err });
+ return next(err, destBucket);
+ }
+ // if mpu not handled externally, completeObjData will be null
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ completeObjData,
+ mpuOverviewKey,
+ totalMPUSize,
+ );
},
- ];
- const calculatedSize = completeObjData.contentLength;
- return next(null, destBucket, objMD, mpuBucket, storedMetadata,
- completeObjData.eTag, calculatedSize, dataLocations,
- [mpuOverviewKey], null, completeObjData, totalMPUSize);
- }
-
- const partsInfo =
- generateMpuPartStorageInfo(filteredPartsObj.partList);
- if (partsInfo.error) {
- return next(partsInfo.error, destBucket);
- }
- const { keysToDelete, extraPartLocations } = filteredPartsObj;
- const { aggregateETag, dataLocations, calculatedSize } = partsInfo;
+ );
+ },
+ function validateAndFilterParts(
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ completeObjData,
+ mpuOverviewKey,
+ totalMPUSize,
+ next,
+ ) {
+ if (completeObjData) {
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ completeObjData,
+ mpuOverviewKey,
+ completeObjData.filteredPartsObj,
+ totalMPUSize,
+ );
+ }
+ const filteredPartsObj = validateAndFilterMpuParts(
+ storedParts,
+ jsonList,
+ mpuOverviewKey,
+ splitter,
+ log,
+ );
+ if (filteredPartsObj.error) {
+ return next(filteredPartsObj.error, destBucket);
+ }
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ completeObjData,
+ mpuOverviewKey,
+ filteredPartsObj,
+ totalMPUSize,
+ );
+ },
+ function processParts(
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedParts,
+ jsonList,
+ storedMetadata,
+ completeObjData,
+ mpuOverviewKey,
+ filteredPartsObj,
+ totalMPUSize,
+ next,
+ ) {
+ // if mpu was completed on backend that stored mpu MD externally,
+ // skip MD processing steps
+ if (completeObjData && skipMpuPartProcessing(completeObjData)) {
+ const dataLocations = [
+ {
+ key: completeObjData.key,
+ size: completeObjData.contentLength,
+ start: 0,
+ dataStoreVersionId: completeObjData.dataStoreVersionId,
+ dataStoreName: storedMetadata.dataStoreName,
+ dataStoreETag: completeObjData.eTag,
+ dataStoreType: completeObjData.dataStoreType,
+ },
+ ];
+ const calculatedSize = completeObjData.contentLength;
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedMetadata,
+ completeObjData.eTag,
+ calculatedSize,
+ dataLocations,
+ [mpuOverviewKey],
+ null,
+ completeObjData,
+ totalMPUSize,
+ );
+ }
- if (completeObjData) {
- const dataLocations = [
- {
- key: completeObjData.key,
- size: calculatedSize,
- start: 0,
- dataStoreName: storedMetadata.dataStoreName,
- dataStoreETag: aggregateETag,
- dataStoreType: completeObjData.dataStoreType,
- },
+ const partsInfo = generateMpuPartStorageInfo(filteredPartsObj.partList);
+ if (partsInfo.error) {
+ return next(partsInfo.error, destBucket);
+ }
+ const { keysToDelete, extraPartLocations } = filteredPartsObj;
+ const { aggregateETag, dataLocations, calculatedSize } = partsInfo;
+
+ if (completeObjData) {
+ const dataLocations = [
+ {
+ key: completeObjData.key,
+ size: calculatedSize,
+ start: 0,
+ dataStoreName: storedMetadata.dataStoreName,
+ dataStoreETag: aggregateETag,
+ dataStoreType: completeObjData.dataStoreType,
+ },
+ ];
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedMetadata,
+ aggregateETag,
+ calculatedSize,
+ dataLocations,
+ keysToDelete,
+ extraPartLocations,
+ completeObjData,
+ totalMPUSize,
+ );
+ }
+ return next(
+ null,
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedMetadata,
+ aggregateETag,
+ calculatedSize,
+ dataLocations,
+ keysToDelete,
+ extraPartLocations,
+ null,
+ totalMPUSize,
+ );
+ },
+ function prepForStoring(
+ destBucket,
+ objMD,
+ mpuBucket,
+ storedMetadata,
+ aggregateETag,
+ calculatedSize,
+ dataLocations,
+ keysToDelete,
+ extraPartLocations,
+ completeObjData,
+ totalMPUSize,
+ next,
+ ) {
+ // Store full object size for server access logs
+ if (request.serverAccessLog) {
+ // eslint-disable-next-line no-param-reassign
+ request.serverAccessLog.objectSize = calculatedSize;
+ }
+ const metaHeaders = {};
+ const keysNotNeeded = [
+ 'initiator',
+ 'partLocations',
+ 'key',
+ 'initiated',
+ 'uploadId',
+ 'content-type',
+ 'expires',
+ 'eventualStorageBucket',
+ 'dataStoreName',
];
- return next(null, destBucket, objMD, mpuBucket, storedMetadata,
- aggregateETag, calculatedSize, dataLocations, keysToDelete,
- extraPartLocations, completeObjData, totalMPUSize);
- }
- return next(null, destBucket, objMD, mpuBucket, storedMetadata,
- aggregateETag, calculatedSize, dataLocations, keysToDelete,
- extraPartLocations, null, totalMPUSize);
- },
- function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata,
- aggregateETag, calculatedSize, dataLocations, keysToDelete,
- extraPartLocations, completeObjData, totalMPUSize, next) {
- // Store full object size for server access logs
- if (request.serverAccessLog) {
- // eslint-disable-next-line no-param-reassign
- request.serverAccessLog.objectSize = calculatedSize;
- }
- const metaHeaders = {};
- const keysNotNeeded =
- ['initiator', 'partLocations', 'key',
- 'initiated', 'uploadId', 'content-type', 'expires',
- 'eventualStorageBucket', 'dataStoreName'];
- const metadataKeysToPull =
- Object.keys(storedMetadata).filter(item =>
- keysNotNeeded.indexOf(item) === -1);
- metadataKeysToPull.forEach(item => {
- metaHeaders[item] = storedMetadata[item];
- });
-
- const droppedMPUSize = totalMPUSize - calculatedSize;
+ const metadataKeysToPull = Object.keys(storedMetadata).filter(
+ item => keysNotNeeded.indexOf(item) === -1,
+ );
+ metadataKeysToPull.forEach(item => {
+ metaHeaders[item] = storedMetadata[item];
+ });
- const metaStoreParams = {
- authInfo,
- objectKey,
- metaHeaders,
- uploadId,
- dataStoreName: storedMetadata.dataStoreName,
- contentType: storedMetadata['content-type'],
- cacheControl: storedMetadata['cache-control'],
- contentDisposition: storedMetadata['content-disposition'],
- contentEncoding: storedMetadata['content-encoding'],
- expires: storedMetadata.expires,
- contentMD5: aggregateETag,
- size: calculatedSize,
- multipart: true,
- isDeleteMarker: false,
- replicationInfo: getReplicationInfo(config,
- objectKey, destBucket, false, calculatedSize, REPLICATION_ACTION),
- originOp: 's3:ObjectCreated:CompleteMultipartUpload',
- overheadField: constants.overheadField,
- log,
- };
- // If key already exists
- if (objMD) {
- // Re-use creation-time if we can
- if (objMD['creation-time']) {
- metaStoreParams.creationTime = objMD['creation-time'];
- // Otherwise fallback to last-modified
+ const droppedMPUSize = totalMPUSize - calculatedSize;
+
+ const metaStoreParams = {
+ authInfo,
+ objectKey,
+ metaHeaders,
+ uploadId,
+ dataStoreName: storedMetadata.dataStoreName,
+ contentType: storedMetadata['content-type'],
+ cacheControl: storedMetadata['cache-control'],
+ contentDisposition: storedMetadata['content-disposition'],
+ contentEncoding: storedMetadata['content-encoding'],
+ expires: storedMetadata.expires,
+ contentMD5: aggregateETag,
+ size: calculatedSize,
+ multipart: true,
+ isDeleteMarker: false,
+ replicationInfo: getReplicationInfo(
+ config,
+ objectKey,
+ destBucket,
+ false,
+ calculatedSize,
+ REPLICATION_ACTION,
+ ),
+ originOp: 's3:ObjectCreated:CompleteMultipartUpload',
+ overheadField: constants.overheadField,
+ log,
+ };
+ // If key already exists
+ if (objMD) {
+ // Re-use creation-time if we can
+ if (objMD['creation-time']) {
+ metaStoreParams.creationTime = objMD['creation-time'];
+ // Otherwise fallback to last-modified
+ } else {
+ metaStoreParams.creationTime = objMD['last-modified'];
+ }
+                    // If it's a new key, create a new timestamp
} else {
- metaStoreParams.creationTime = objMD['last-modified'];
+ metaStoreParams.creationTime = new Date().toJSON();
+ }
+ if (storedMetadata['x-amz-tagging']) {
+ metaStoreParams.tagging = storedMetadata['x-amz-tagging'];
+ }
+ if (storedMetadata.retentionMode && storedMetadata.retentionDate) {
+ metaStoreParams.retentionMode = storedMetadata.retentionMode;
+ metaStoreParams.retentionDate = storedMetadata.retentionDate;
+ }
+ if (storedMetadata.legalHold) {
+ metaStoreParams.legalHold = storedMetadata.legalHold;
+ }
+ const serverSideEncryption = storedMetadata['x-amz-server-side-encryption'];
+ let pseudoCipherBundle = null;
+ if (serverSideEncryption) {
+ const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id'];
+ pseudoCipherBundle = {
+ algorithm: serverSideEncryption,
+ masterKeyId: kmsKey,
+ };
+ setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey);
}
- // If its a new key, create a new timestamp
- } else {
- metaStoreParams.creationTime = new Date().toJSON();
- }
- if (storedMetadata['x-amz-tagging']) {
- metaStoreParams.tagging = storedMetadata['x-amz-tagging'];
- }
- if (storedMetadata.retentionMode && storedMetadata.retentionDate) {
- metaStoreParams.retentionMode = storedMetadata.retentionMode;
- metaStoreParams.retentionDate = storedMetadata.retentionDate;
- }
- if (storedMetadata.legalHold) {
- metaStoreParams.legalHold = storedMetadata.legalHold;
- }
- const serverSideEncryption = storedMetadata['x-amz-server-side-encryption'];
- let pseudoCipherBundle = null;
- if (serverSideEncryption) {
- const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id'];
- pseudoCipherBundle = {
- algorithm: serverSideEncryption,
- masterKeyId: kmsKey,
- };
- setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey);
- }
- if (authInfo.getCanonicalID() !== destBucket.getOwner()) {
- metaStoreParams.bucketOwnerId = destBucket.getOwner();
- }
+ if (authInfo.getCanonicalID() !== destBucket.getOwner()) {
+ metaStoreParams.bucketOwnerId = destBucket.getOwner();
+ }
- // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
- if (isPutVersion) {
- const options = overwritingVersioning(objMD, metaStoreParams);
- return process.nextTick(() => next(null, destBucket, dataLocations,
- metaStoreParams, mpuBucket, keysToDelete, aggregateETag,
- objMD, extraPartLocations, pseudoCipherBundle,
- completeObjData, options, droppedMPUSize));
- }
+ // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
+ if (isPutVersion) {
+ const options = overwritingVersioning(objMD, metaStoreParams);
+ return process.nextTick(() =>
+ next(
+ null,
+ destBucket,
+ dataLocations,
+ metaStoreParams,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ objMD,
+ extraPartLocations,
+ pseudoCipherBundle,
+ completeObjData,
+ options,
+ droppedMPUSize,
+ ),
+ );
+ }
- if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) {
- // Ensure we trigger a "delete" event in the oplog for the previously archived object
- metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject';
- }
+ if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) {
+ // Ensure we trigger a "delete" event in the oplog for the previously archived object
+ metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject';
+ }
- return versioningPreprocessing(bucketName,
- destBucket, objectKey, objMD, log, (err, options) => {
+ return versioningPreprocessing(bucketName, destBucket, objectKey, objMD, log, (err, options) => {
if (err) {
// TODO: check AWS error when user requested a specific
// version before any versions have been put
@@ -430,206 +613,287 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
}
- return next(null, destBucket, dataLocations,
- metaStoreParams, mpuBucket, keysToDelete, aggregateETag,
- objMD, extraPartLocations, pseudoCipherBundle,
- completeObjData, options, droppedMPUSize);
+ return next(
+ null,
+ destBucket,
+ dataLocations,
+ metaStoreParams,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ objMD,
+ extraPartLocations,
+ pseudoCipherBundle,
+ completeObjData,
+ options,
+ droppedMPUSize,
+ );
});
- },
- function storeAsNewObj(destinationBucket, dataLocations,
- metaStoreParams, mpuBucket, keysToDelete, aggregateETag, objMD,
- extraPartLocations, pseudoCipherBundle,
- completeObjData, options, droppedMPUSize, next) {
- const dataToDelete = options.dataToDelete;
- /* eslint-disable no-param-reassign */
- metaStoreParams.versionId = options.versionId;
- metaStoreParams.versioning = options.versioning;
- metaStoreParams.isNull = options.isNull;
- metaStoreParams.deleteNullKey = options.deleteNullKey;
- if (options.extraMD) {
- Object.assign(metaStoreParams, options.extraMD);
- }
- /* eslint-enable no-param-reassign */
-
- // For external backends (where completeObjData is not
- // null), the backend key does not change for new versions
- // of the same object (or rewrites for nonversioned
- // buckets), hence the deduplication sanity check does not
- // make sense for external backends.
- if (objMD && !completeObjData) {
- // An object with the same key already exists, check
- // if it has been created by the same MPU upload by
- // checking if any of its internal location keys match
- // the new keys. In such case, it must be a duplicate
- // from a retry of a previous failed completion
- // attempt, hence do the following:
- //
- // - skip writing the new metadata key to avoid
- // creating a new version pointing to the same data
- // keys
- //
- // - skip old data locations deletion since the old
- // data location keys overlap the new ones (in
- // principle they should be fully identical as there
- // is no reuse of previous versions' data keys in
- // the normal process) - note that the previous
- // failed completion attempt may have left orphan
- // data keys but we lost track of them so we cannot
- // delete them now
- //
- // - proceed to the deletion of overview and part
- // metadata keys, which are likely to have failed in
- // the previous MPU completion attempt
- //
- if (!locationKeysHaveChanged(objMD.location, dataLocations)) {
- log.info('MPU complete request replay detected', {
- method: 'completeMultipartUpload.storeAsNewObj',
- bucketName: destinationBucket.getName(),
- objectKey: metaStoreParams.objectKey,
- uploadId: metaStoreParams.uploadId,
- });
- return next(null, mpuBucket, keysToDelete, aggregateETag,
- extraPartLocations, destinationBucket,
- // pass the original version ID as generatedVersionId
- objMD.versionId, droppedMPUSize);
+ },
+ function storeAsNewObj(
+ destinationBucket,
+ dataLocations,
+ metaStoreParams,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ objMD,
+ extraPartLocations,
+ pseudoCipherBundle,
+ completeObjData,
+ options,
+ droppedMPUSize,
+ next,
+ ) {
+ const dataToDelete = options.dataToDelete;
+ /* eslint-disable no-param-reassign */
+ metaStoreParams.versionId = options.versionId;
+ metaStoreParams.versioning = options.versioning;
+ metaStoreParams.isNull = options.isNull;
+ metaStoreParams.deleteNullKey = options.deleteNullKey;
+ if (options.extraMD) {
+ Object.assign(metaStoreParams, options.extraMD);
}
- }
- return services.metadataStoreObject(destinationBucket.getName(),
- dataLocations, pseudoCipherBundle, metaStoreParams,
- (err, res) => {
- if (err) {
- log.error('error storing object metadata', { error: err });
- return next(err, destinationBucket);
+ /* eslint-enable no-param-reassign */
+
+ // For external backends (where completeObjData is not
+ // null), the backend key does not change for new versions
+ // of the same object (or rewrites for nonversioned
+ // buckets), hence the deduplication sanity check does not
+ // make sense for external backends.
+ if (objMD && !completeObjData) {
+ // An object with the same key already exists, check
+ // if it has been created by the same MPU upload by
+ // checking if any of its internal location keys match
+ // the new keys. In such case, it must be a duplicate
+ // from a retry of a previous failed completion
+ // attempt, hence do the following:
+ //
+ // - skip writing the new metadata key to avoid
+ // creating a new version pointing to the same data
+ // keys
+ //
+ // - skip old data locations deletion since the old
+ // data location keys overlap the new ones (in
+ // principle they should be fully identical as there
+ // is no reuse of previous versions' data keys in
+ // the normal process) - note that the previous
+ // failed completion attempt may have left orphan
+ // data keys but we lost track of them so we cannot
+ // delete them now
+ //
+ // - proceed to the deletion of overview and part
+ // metadata keys, which are likely to have failed in
+ // the previous MPU completion attempt
+ //
+ if (!locationKeysHaveChanged(objMD.location, dataLocations)) {
+ log.info('MPU complete request replay detected', {
+ method: 'completeMultipartUpload.storeAsNewObj',
+ bucketName: destinationBucket.getName(),
+ objectKey: metaStoreParams.objectKey,
+ uploadId: metaStoreParams.uploadId,
+ });
+ return next(
+ null,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ extraPartLocations,
+ destinationBucket,
+ // pass the original version ID as generatedVersionId
+ objMD.versionId,
+ droppedMPUSize,
+ );
}
+ }
+ return services.metadataStoreObject(
+ destinationBucket.getName(),
+ dataLocations,
+ pseudoCipherBundle,
+ metaStoreParams,
+ (err, res) => {
+ if (err) {
+ log.error('error storing object metadata', { error: err });
+ return next(err, destinationBucket);
+ }
- setExpirationHeaders(responseHeaders, {
- lifecycleConfig: destinationBucket.getLifecycleConfiguration(),
- objectParams: {
- key: objectKey,
- date: res.lastModified,
- tags: res.tags,
- },
- });
+ setExpirationHeaders(responseHeaders, {
+ lifecycleConfig: destinationBucket.getLifecycleConfiguration(),
+ objectParams: {
+ key: objectKey,
+ date: res.lastModified,
+ tags: res.tags,
+ },
+ });
- const generatedVersionId = res ? res.versionId : undefined;
- // in cases where completing mpu overwrites a previous
- // null version when versioning is suspended or versioning
- // is not enabled, need to delete pre-existing data
- // unless the preexisting object and the completed mpu
- // are on external backends
- if (dataToDelete) {
- const newDataStoreName =
- Array.isArray(dataLocations) && dataLocations[0] ?
- dataLocations[0].dataStoreName : null;
- return data.batchDelete(dataToDelete,
- request.method,
- newDataStoreName, log, err => {
+ const generatedVersionId = res ? res.versionId : undefined;
+ // in cases where completing mpu overwrites a previous
+ // null version when versioning is suspended or versioning
+ // is not enabled, need to delete pre-existing data
+ // unless the preexisting object and the completed mpu
+ // are on external backends
+ if (dataToDelete) {
+ const newDataStoreName =
+ Array.isArray(dataLocations) && dataLocations[0]
+ ? dataLocations[0].dataStoreName
+ : null;
+ return data.batchDelete(dataToDelete, request.method, newDataStoreName, log, err => {
if (err) {
return next(err);
}
- return next(null, mpuBucket, keysToDelete,
- aggregateETag, extraPartLocations,
- destinationBucket, generatedVersionId,
- droppedMPUSize);
+ return next(
+ null,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ extraPartLocations,
+ destinationBucket,
+ generatedVersionId,
+ droppedMPUSize,
+ );
});
- }
- return next(null, mpuBucket, keysToDelete, aggregateETag,
- extraPartLocations, destinationBucket,
- generatedVersionId, droppedMPUSize);
- });
- },
- function deletePartsMetadata(mpuBucket, keysToDelete, aggregateETag,
- extraPartLocations, destinationBucket, generatedVersionId, droppedMPUSize, next) {
- services.batchDeleteObjectMetadata(mpuBucket.getName(),
- keysToDelete, log, err => {
+ }
+ return next(
+ null,
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ extraPartLocations,
+ destinationBucket,
+ generatedVersionId,
+ droppedMPUSize,
+ );
+ },
+ );
+ },
+ function deletePartsMetadata(
+ mpuBucket,
+ keysToDelete,
+ aggregateETag,
+ extraPartLocations,
+ destinationBucket,
+ generatedVersionId,
+ droppedMPUSize,
+ next,
+ ) {
+ services.batchDeleteObjectMetadata(mpuBucket.getName(), keysToDelete, log, err => {
if (err) {
if (err.is?.DeleteConflict) {
// DeleteConflict should trigger automatic retry
// Convert to InternalError to make it retryable
const customErr = errorInstances.InternalError.customizeDescription(
- 'conflict deleting MPU parts metadata'
+ 'conflict deleting MPU parts metadata',
);
- return next(customErr, extraPartLocations,
- destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize);
+ return next(
+ customErr,
+ extraPartLocations,
+ destinationBucket,
+ aggregateETag,
+ generatedVersionId,
+ droppedMPUSize,
+ );
}
// For NoSuchKey and other errors, return them as-is
// NoSuchKey is non-retryable, InternalError and others are retryable
- return next(err, extraPartLocations,
- destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize);
+ return next(
+ err,
+ extraPartLocations,
+ destinationBucket,
+ aggregateETag,
+ generatedVersionId,
+ droppedMPUSize,
+ );
}
- return next(null, extraPartLocations,
- destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize);
- });
- },
- function batchDeleteExtraParts(extraPartLocations, destinationBucket,
- aggregateETag, generatedVersionId, droppedMPUSize, next) {
- if (extraPartLocations && extraPartLocations.length > 0) {
- return data.batchDelete(extraPartLocations, request.method, null, log, err => {
- if (err) {
- // Extra part deletion failure should not fail the operation
- // The S3 object was created successfully and MPU metadata was cleaned up
- // Orphaned extra parts are acceptable since the main operation succeeded
- log.warn('failed to delete extra parts, keeping orphan but returning success', {
- method: 'completeMultipartUpload',
- extraPartLocationsCount: extraPartLocations.length,
- error: err,
- });
- }
- return next(null, destinationBucket, aggregateETag,
- generatedVersionId, droppedMPUSize);
+ return next(
+ null,
+ extraPartLocations,
+ destinationBucket,
+ aggregateETag,
+ generatedVersionId,
+ droppedMPUSize,
+ );
});
+ },
+ function batchDeleteExtraParts(
+ extraPartLocations,
+ destinationBucket,
+ aggregateETag,
+ generatedVersionId,
+ droppedMPUSize,
+ next,
+ ) {
+ if (extraPartLocations && extraPartLocations.length > 0) {
+ return data.batchDelete(extraPartLocations, request.method, null, log, err => {
+ if (err) {
+ // Extra part deletion failure should not fail the operation
+ // The S3 object was created successfully and MPU metadata was cleaned up
+ // Orphaned extra parts are acceptable since the main operation succeeded
+ log.warn('failed to delete extra parts, keeping orphan but returning success', {
+ method: 'completeMultipartUpload',
+ extraPartLocationsCount: extraPartLocations.length,
+ error: err,
+ });
+ }
+ return next(null, destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize);
+ });
+ }
+ return next(null, destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize);
+ },
+ function updateQuotas(destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize, next) {
+ return validateQuotas(
+ request,
+ destinationBucket,
+ request.accountQuotas,
+ ['objectDelete'],
+ 'objectDelete',
+ -droppedMPUSize,
+ false,
+ log,
+ err => {
+ if (err) {
+ // Ignore error, as the data has been deleted already: only inflight count
+ // has not been updated, and will be eventually consistent anyway
+ log.warn('failed to update inflights', {
+ method: 'completeMultipartUpload',
+ error: err,
+ });
+ }
+ return next(null, destinationBucket, aggregateETag, generatedVersionId);
+ },
+ );
+ },
+ ],
+ (err, destinationBucket, aggregateETag, generatedVersionId) => {
+ const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
+ if (err) {
+ return callback(err, null, corsHeaders);
}
- return next(null, destinationBucket, aggregateETag,
- generatedVersionId, droppedMPUSize);
- },
- function updateQuotas(destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize, next) {
- return validateQuotas(request, destinationBucket, request.accountQuotas,
- ['objectDelete'], 'objectDelete', -droppedMPUSize, false, log, err => {
- if (err) {
- // Ignore error, as the data has been deleted already: only inflight count
- // has not been updated, and will be eventually consistent anyway
- log.warn('failed to update inflights', {
- method: 'completeMultipartUpload',
- error: err,
- });
- }
- return next(null, destinationBucket, aggregateETag,
- generatedVersionId);
+ if (generatedVersionId) {
+ corsHeaders['x-amz-version-id'] = versionIdUtils.encode(generatedVersionId);
+ }
+ Object.assign(responseHeaders, corsHeaders);
+
+ const vcfg = destinationBucket.getVersioningConfiguration();
+ const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
+
+ xmlParams.eTag = `"${aggregateETag}"`;
+ const xml = convertToXml('completeMultipartUpload', xmlParams);
+ pushMetric('completeMultipartUpload', log, {
+ oldByteLength: isVersionedObj ? null : oldByteLength,
+ authInfo,
+ canonicalID: destinationBucket.getOwner(),
+ bucket: bucketName,
+ keys: [objectKey],
+ versionId: generatedVersionId,
+ numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1,
+ location: destinationBucket.getLocationConstraint(),
});
+ return callback(null, xml, responseHeaders);
},
- ], (err, destinationBucket, aggregateETag, generatedVersionId) => {
- const corsHeaders =
- collectCorsHeaders(request.headers.origin, request.method,
- destinationBucket);
- if (err) {
- return callback(err, null, corsHeaders);
- }
- if (generatedVersionId) {
- corsHeaders['x-amz-version-id'] =
- versionIdUtils.encode(generatedVersionId);
- }
- Object.assign(responseHeaders, corsHeaders);
-
- const vcfg = destinationBucket.getVersioningConfiguration();
- const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
-
- xmlParams.eTag = `"${aggregateETag}"`;
- const xml = convertToXml('completeMultipartUpload', xmlParams);
- pushMetric('completeMultipartUpload', log, {
- oldByteLength: isVersionedObj ? null : oldByteLength,
- authInfo,
- canonicalID: destinationBucket.getOwner(),
- bucket: bucketName,
- keys: [objectKey],
- versionId: generatedVersionId,
- numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1,
- location: destinationBucket.getLocationConstraint(),
- });
- return callback(null, xml, responseHeaders);
- });
+ );
}
module.exports = completeMultipartUpload;
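
Reviewer note: the reformatting above mostly exposes how async.waterfall threads values positionally, so each step must re-emit everything later steps still need; that is why the argument lists keep growing. A self-contained sketch of the pattern (names and values are illustrative, not the handlers' real signatures):

const async = require('async');

async.waterfall(
    [
        function fetchBucket(next) {
            next(null, 'demo-bucket', 42);
        },
        function computeEtag(bucket, size, next) {
            // re-emit bucket and size so the final callback still sees them
            next(null, bucket, size, `etag-${size}`);
        },
    ],
    (err, bucket, size, etag) => {
        if (err) {
            throw err;
        }
        console.log(bucket, size, etag); // demo-bucket 42 etag-42
    },
);
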
diff --git a/lib/api/listParts.js b/lib/api/listParts.js
index ffff63ed0b..f5c7b5faaf 100644
--- a/lib/api/listParts.js
+++ b/lib/api/listParts.js
@@ -5,8 +5,7 @@ const { errors, s3middleware } = require('arsenal');
const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const locationConstraintCheck =
- require('./apiUtils/object/locationConstraintCheck');
+const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck');
const services = require('../services');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
@@ -58,8 +57,7 @@ function buildXML(xmlParams, xml, encodingFn) {
xmlParams.forEach(param => {
if (param.value !== undefined) {
             xml.push(`<${param.tag}>${encodingFn(param.value)}</${param.tag}>`);
- } else if (param.tag !== 'NextPartNumberMarker' &&
- param.tag !== 'PartNumberMarker') {
+ } else if (param.tag !== 'NextPartNumberMarker' && param.tag !== 'PartNumberMarker') {
xml.push(`<${param.tag}/>`);
}
});
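
Reviewer note: a quick usage sketch of the buildXML contract above (renamed here to avoid clashing with the real helper; the closing-tag template follows the reconstructed line above). Marker tags are dropped entirely when their value is undefined; every other undefined value becomes an empty element.

function buildXMLSketch(xmlParams, xml, encodingFn) {
    xmlParams.forEach(param => {
        if (param.value !== undefined) {
            xml.push(`<${param.tag}>${encodingFn(param.value)}</${param.tag}>`);
        } else if (param.tag !== 'NextPartNumberMarker' && param.tag !== 'PartNumberMarker') {
            xml.push(`<${param.tag}/>`);
        }
    });
}

const out = [];
buildXMLSketch(
    [
        { tag: 'Bucket', value: 'demo-bucket' },
        { tag: 'PartNumberMarker', value: undefined }, // omitted entirely
        { tag: 'StorageClass', value: undefined }, // rendered as <StorageClass/>
    ],
    out,
    s => String(s),
);
console.log(out.join('')); // <Bucket>demo-bucket</Bucket><StorageClass/>
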
@@ -91,8 +89,7 @@ function getPartChecksumXML(checksumAlgorithm, checksumValue) {
return undefined;
}
const algorithm = checksumAlgorithm.toLowerCase();
- const xmlTag = algorithms[algorithm] &&
- algorithms[algorithm].getObjectAttributesXMLTag;
+ const xmlTag = algorithms[algorithm] && algorithms[algorithm].xmlTag;
if (!xmlTag) {
return undefined;
}
@@ -114,19 +111,19 @@ function listParts(authInfo, request, log, callback) {
const objectKey = request.objectKey;
const uploadId = request.query.uploadId;
const encoding = request.query['encoding-type'];
- let maxParts = Number.parseInt(request.query['max-parts'], 10) ?
- Number.parseInt(request.query['max-parts'], 10) : 1000;
+ let maxParts = Number.parseInt(request.query['max-parts'], 10)
+ ? Number.parseInt(request.query['max-parts'], 10)
+ : 1000;
if (maxParts < 0) {
- monitoring.promMetrics('GET', bucketName, 400,
- 'listMultipartUploadParts');
+ monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts');
return callback(errors.InvalidArgument);
}
if (maxParts > constants.listingHardLimit) {
maxParts = constants.listingHardLimit;
}
- const partNumberMarker =
- Number.parseInt(request.query['part-number-marker'], 10) ?
- Number.parseInt(request.query['part-number-marker'], 10) : 0;
+ const partNumberMarker = Number.parseInt(request.query['part-number-marker'], 10)
+ ? Number.parseInt(request.query['part-number-marker'], 10)
+ : 0;
const metadataValMPUparams = {
authInfo,
bucketName,
@@ -147,199 +144,217 @@ function listParts(authInfo, request, log, callback) {
let splitter = constants.splitter;
const responseHeaders = {};
- async.waterfall([
- function checkDestBucketVal(next) {
- standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
- (err, destinationBucket) => {
- if (err) {
- return next(err, destinationBucket, null);
- }
- if (destinationBucket.policies) {
- // TODO: Check bucket policies to see if user is granted
- // permission or forbidden permission to take
- // given action.
- // If permitted, add 'bucketPolicyGoAhead'
- // attribute to params for validating at MPU level.
- // This is GH Issue#76
- metadataValMPUparams.requestType =
- 'bucketPolicyGoAhead';
- }
- return next(null, destinationBucket);
- });
- },
- function waterfall2(destBucket, next) {
- metadataValMPUparams.log = log;
- services.metadataValidateMultipart(metadataValMPUparams,
- (err, mpuBucket, mpuOverviewObj) => {
+ async.waterfall(
+ [
+ function checkDestBucketVal(next) {
+ standardMetadataValidateBucketAndObj(
+ metadataValParams,
+ request.actionImplicitDenies,
+ log,
+ (err, destinationBucket) => {
+ if (err) {
+ return next(err, destinationBucket, null);
+ }
+ if (destinationBucket.policies) {
+ // TODO: Check bucket policies to see if user is granted
+ // permission or forbidden permission to take
+ // given action.
+ // If permitted, add 'bucketPolicyGoAhead'
+ // attribute to params for validating at MPU level.
+ // This is GH Issue#76
+ metadataValMPUparams.requestType = 'bucketPolicyGoAhead';
+ }
+ return next(null, destinationBucket);
+ },
+ );
+ },
+ function waterfall2(destBucket, next) {
+ metadataValMPUparams.log = log;
+ services.metadataValidateMultipart(metadataValMPUparams, (err, mpuBucket, mpuOverviewObj) => {
if (err) {
return next(err, destBucket, null);
}
return next(null, destBucket, mpuBucket, mpuOverviewObj);
});
- },
- function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) {
- const mpuInfo = {
- objectKey,
- uploadId,
- bucketName,
- partNumberMarker,
- maxParts,
- mpuOverviewObj,
- destBucket,
- };
- const originalIdentityImpDenies = request.actionImplicitDenies;
- // eslint-disable-next-line no-param-reassign
- delete request.actionImplicitDenies;
- return data.listParts(mpuInfo, request, locationConstraintCheck,
- log, (err, backendPartList) => {
+ },
+ function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) {
+ const mpuInfo = {
+ objectKey,
+ uploadId,
+ bucketName,
+ partNumberMarker,
+ maxParts,
+ mpuOverviewObj,
+ destBucket,
+ };
+ const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
- request.actionImplicitDenies = originalIdentityImpDenies;
- if (err) {
- return next(err, destBucket);
+ delete request.actionImplicitDenies;
+ return data.listParts(mpuInfo, request, locationConstraintCheck, log, (err, backendPartList) => {
+ // eslint-disable-next-line no-param-reassign
+ request.actionImplicitDenies = originalIdentityImpDenies;
+ if (err) {
+ return next(err, destBucket);
+ }
+ // if external backend doesn't handle mpu, backendPartList
+ // will be null
+ return next(null, destBucket, mpuBucket, mpuOverviewObj, backendPartList);
+ });
+ },
+ function waterfall4(destBucket, mpuBucket, mpuOverviewObj, backendPartList, next) {
+ // if parts were returned from cloud backend, they were not
+ // stored in Scality S3 metadata, so this step can be skipped
+ if (backendPartList) {
+ return next(null, destBucket, mpuBucket, backendPartList, mpuOverviewObj);
}
- // if external backend doesn't handle mpu, backendPartList
- // will be null
- return next(null, destBucket, mpuBucket, mpuOverviewObj,
- backendPartList);
- });
- },
- function waterfall4(destBucket, mpuBucket, mpuOverviewObj,
- backendPartList, next) {
- // if parts were returned from cloud backend, they were not
- // stored in Scality S3 metadata, so this step can be skipped
- if (backendPartList) {
- return next(null, destBucket, mpuBucket, backendPartList,
- mpuOverviewObj);
- }
- // BACKWARD: Remove to remove the old splitter
- if (mpuBucket.getMdBucketModelVersion() < 2) {
- splitter = constants.oldSplitter;
- }
- const getPartsParams = {
- uploadId,
- mpuBucketName: mpuBucket.getName(),
- maxParts,
- partNumberMarker,
- log,
- splitter,
- };
- return services.getSomeMPUparts(getPartsParams,
- (err, storedParts) => {
- if (err) {
- return next(err, destBucket, null);
+                // BACKWARD: remove this block to drop support for the old splitter
+ if (mpuBucket.getMdBucketModelVersion() < 2) {
+ splitter = constants.oldSplitter;
}
- return next(null, destBucket, mpuBucket, storedParts,
- mpuOverviewObj);
- });
- }, function waterfall5(destBucket, mpuBucket, storedParts,
- mpuOverviewObj, next) {
- const encodingFn = encoding === 'url'
- ? querystring.escape : escapeForXml;
- const isTruncated = storedParts.IsTruncated;
- const splitterLen = splitter.length;
- const partListing = storedParts.Contents.map(item => {
- const value = item.value;
- const partChecksum = getPartChecksum(item);
- return {
- partNumber: getPartNumber(item, splitter, splitterLen),
- lastModified: value.LastModified,
- ETag: value.ETag,
- size: value.Size,
- checksumAlgorithm: partChecksum.checksumAlgorithm,
- checksumValue: partChecksum.checksumValue,
+ const getPartsParams = {
+ uploadId,
+ mpuBucketName: mpuBucket.getName(),
+ maxParts,
+ partNumberMarker,
+ log,
+ splitter,
};
- });
- const lastPartShown = partListing.length > 0 ?
- partListing[partListing.length - 1].partNumber : undefined;
+ return services.getSomeMPUparts(getPartsParams, (err, storedParts) => {
+ if (err) {
+ return next(err, destBucket, null);
+ }
+ return next(null, destBucket, mpuBucket, storedParts, mpuOverviewObj);
+ });
+ },
+ function waterfall5(destBucket, mpuBucket, storedParts, mpuOverviewObj, next) {
+ const encodingFn = encoding === 'url' ? querystring.escape : escapeForXml;
+ const isTruncated = storedParts.IsTruncated;
+ const splitterLen = splitter.length;
+ const partListing = storedParts.Contents.map(item => {
+ const value = item.value;
+ const partChecksum = getPartChecksum(item);
+ return {
+ partNumber: getPartNumber(item, splitter, splitterLen),
+ lastModified: value.LastModified,
+ ETag: value.ETag,
+ size: value.Size,
+ checksumAlgorithm: partChecksum.checksumAlgorithm,
+ checksumValue: partChecksum.checksumValue,
+ };
+ });
+ const lastPartShown =
+ partListing.length > 0 ? partListing[partListing.length - 1].partNumber : undefined;
- setExpirationHeaders(responseHeaders, {
- lifecycleConfig: destBucket.getLifecycleConfiguration(),
- mpuParams: {
- key: mpuOverviewObj.key,
- date: mpuOverviewObj.initiated,
- },
- });
+ setExpirationHeaders(responseHeaders, {
+ lifecycleConfig: destBucket.getLifecycleConfiguration(),
+ mpuParams: {
+ key: mpuOverviewObj.key,
+ date: mpuOverviewObj.initiated,
+ },
+ });
+
+ const xml = [];
+ xml.push(
+                        '<?xml version="1.0" encoding="UTF-8"?>',
+                        '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
+ );
+ buildXML(
+ [
+ { tag: 'Bucket', value: bucketName },
+ { tag: 'Key', value: objectKey },
+ { tag: 'UploadId', value: uploadId },
+ ],
+ xml,
+ encodingFn,
+ );
+ const showChecksum =
+ !mpuOverviewObj.checksumIsDefault &&
+ mpuOverviewObj.checksumAlgorithm &&
+ mpuOverviewObj.checksumType;
+ if (showChecksum) {
+ buildXML(
+ [
+ { tag: 'ChecksumAlgorithm', value: mpuOverviewObj.checksumAlgorithm.toUpperCase() },
+ { tag: 'ChecksumType', value: mpuOverviewObj.checksumType },
+ ],
+ xml,
+ encodingFn,
+ );
+ }
+                xml.push('<Initiator>');
+ buildXML(
+ [
+ { tag: 'ID', value: mpuOverviewObj.initiatorID },
+ { tag: 'DisplayName', value: mpuOverviewObj.initiatorDisplayName },
+ ],
+ xml,
+ encodingFn,
+ );
+                xml.push('</Initiator>');
+                xml.push('<Owner>');
+ buildXML(
+ [
+ { tag: 'ID', value: mpuOverviewObj.ownerID },
+ { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName },
+ ],
+ xml,
+ encodingFn,
+ );
+                xml.push('</Owner>');
+ buildXML(
+ [
+ { tag: 'StorageClass', value: mpuOverviewObj.storageClass },
+ { tag: 'PartNumberMarker', value: partNumberMarker || undefined },
+ // print only if it's truncated
+ { tag: 'NextPartNumberMarker', value: isTruncated ? parseInt(lastPartShown, 10) : undefined },
+ { tag: 'MaxParts', value: maxParts },
+ { tag: 'IsTruncated', value: isTruncated ? 'true' : 'false' },
+ ],
+ xml,
+ encodingFn,
+ );
- const xml = [];
- xml.push(
-                '<?xml version="1.0" encoding="UTF-8"?>',
-                '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
- );
- buildXML([
- { tag: 'Bucket', value: bucketName },
- { tag: 'Key', value: objectKey },
- { tag: 'UploadId', value: uploadId },
- ], xml, encodingFn);
- const showChecksum = !mpuOverviewObj.checksumIsDefault &&
- mpuOverviewObj.checksumAlgorithm &&
- mpuOverviewObj.checksumType;
- if (showChecksum) {
- buildXML([
- { tag: 'ChecksumAlgorithm',
- value: mpuOverviewObj.checksumAlgorithm.toUpperCase() },
- { tag: 'ChecksumType', value: mpuOverviewObj.checksumType },
- ], xml, encodingFn);
+ partListing.forEach(part => {
+ const partChecksumXML = showChecksum
+ ? getPartChecksumXML(part.checksumAlgorithm, part.checksumValue)
+ : undefined;
+                    xml.push('<Part>');
+ buildXML(
+ [
+ { tag: 'PartNumber', value: part.partNumber },
+ { tag: 'LastModified', value: part.lastModified },
+ { tag: 'ETag', value: `"${part.ETag}"` },
+ { tag: 'Size', value: part.size },
+ ],
+ xml,
+ encodingFn,
+ );
+ if (partChecksumXML) {
+ buildXML([partChecksumXML], xml, encodingFn);
+ }
+                    xml.push('</Part>');
+ });
+                xml.push('</ListPartsResult>');
+ pushMetric('listMultipartUploadParts', log, {
+ authInfo,
+ bucket: bucketName,
+ });
+ monitoring.promMetrics('GET', bucketName, '200', 'listMultipartUploadParts');
+ next(null, destBucket, xml.join(''));
+ },
+ ],
+ (err, destinationBucket, xml) => {
+ const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
+ if (err) {
+ // The 200 metric is emitted on success in the final waterfall
+ // step; only count failures here to avoid double-counting.
+ monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts');
}
-            xml.push('<Initiator>');
- buildXML([
- { tag: 'ID', value: mpuOverviewObj.initiatorID },
- { tag: 'DisplayName',
- value: mpuOverviewObj.initiatorDisplayName },
- ], xml, encodingFn);
-            xml.push('</Initiator>');
-            xml.push('<Owner>');
- buildXML([
- { tag: 'ID', value: mpuOverviewObj.ownerID },
- { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName },
- ], xml, encodingFn);
-            xml.push('</Owner>');
- buildXML([
- { tag: 'StorageClass', value: mpuOverviewObj.storageClass },
- { tag: 'PartNumberMarker', value: partNumberMarker ||
- undefined },
- // print only if it's truncated
- { tag: 'NextPartNumberMarker', value: isTruncated ?
- parseInt(lastPartShown, 10) : undefined },
- { tag: 'MaxParts', value: maxParts },
- { tag: 'IsTruncated', value: isTruncated ? 'true' : 'false' },
- ], xml, encodingFn);
+ Object.assign(responseHeaders, corsHeaders);
- partListing.forEach(part => {
- const partChecksumXML = showChecksum ?
- getPartChecksumXML(
- part.checksumAlgorithm, part.checksumValue) :
- undefined;
-                xml.push('<Part>');
- buildXML([
- { tag: 'PartNumber', value: part.partNumber },
- { tag: 'LastModified', value: part.lastModified },
- { tag: 'ETag', value: `"${part.ETag}"` },
- { tag: 'Size', value: part.size },
- ], xml, encodingFn);
- if (partChecksumXML) {
- buildXML([partChecksumXML], xml, encodingFn);
- }
-                xml.push('</Part>');
- });
-            xml.push('</ListPartsResult>');
- pushMetric('listMultipartUploadParts', log, {
- authInfo,
- bucket: bucketName,
- });
- monitoring.promMetrics(
- 'GET', bucketName, '200', 'listMultipartUploadParts');
- next(null, destBucket, xml.join(''));
+ return callback(err, xml, responseHeaders);
},
- ], (err, destinationBucket, xml) => {
- const corsHeaders = collectCorsHeaders(request.headers.origin,
- request.method, destinationBucket);
- monitoring.promMetrics('GET', bucketName, 400,
- 'listMultipartUploadParts');
- Object.assign(responseHeaders, corsHeaders);
-
- return callback(err, xml, responseHeaders);
- });
+ );
return undefined;
}
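
Note on the hunk above: every XML element is funneled through the `buildXML(entries, xml, encodingFn)` helper, which is defined earlier in objectListParts.js and does not appear in this diff. A minimal sketch of the behavior its call sites imply — an assumption, not the file's actual implementation:

```js
// Hypothetical reconstruction from the call sites above: each entry is a
// { tag, value } pair, and entries whose value is undefined (for example
// NextPartNumberMarker on a non-truncated listing) are skipped entirely.
function buildXML(entries, xml, encodingFn) {
    entries.forEach(({ tag, value }) => {
        if (value !== undefined) {
            xml.push(`<${tag}>${encodingFn(`${value}`)}</${tag}>`);
        }
    });
}
```

Skipping undefined values is what lets call sites list optional tags such as PartNumberMarker and NextPartNumberMarker unconditionally instead of wrapping each one in its own conditional.
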
diff --git a/package.json b/package.json
index d512509634..e8626e7e78 100644
--- a/package.json
+++ b/package.json
@@ -33,7 +33,7 @@
"@azure/storage-blob": "^12.28.0",
"@hapi/joi": "^17.1.1",
"@smithy/node-http-handler": "^3.0.0",
- "arsenal": "git+https://github.com/scality/Arsenal#8.4.1",
+ "arsenal": "git+https://github.com/scality/Arsenal#8.4.2",
"async": "2.6.4",
"bucketclient": "scality/bucketclient#8.2.7",
"bufferutil": "^4.0.8",
diff --git a/tests/functional/aws-node-sdk/test/object/mpuVersion.js b/tests/functional/aws-node-sdk/test/object/mpuVersion.js
index 0cdeee830f..3bb476ecb8 100644
--- a/tests/functional/aws-node-sdk/test/object/mpuVersion.js
+++ b/tests/functional/aws-node-sdk/test/object/mpuVersion.js
@@ -25,9 +25,7 @@ const checkError = require('../../lib/utility/checkError');
const { getMetadata, fakeMetadataArchive, isNullKeyMetadataV1 } = require('../utils/init');
const { hasColdStorage } = require('../../lib/utility/test-utils');
-const {
- LOCATION_NAME_DMF,
-} = require('../../../../constants');
+const { LOCATION_NAME_DMF } = require('../../../../constants');
const log = new DummyRequestLogger();
@@ -59,11 +57,11 @@ async function putMPUVersion(s3, bucketName, objectName, vId) {
args.request.headers['x-scal-s3-version-id'] = vId;
return next(args);
},
- { step: 'build' }
+ { step: 'build' },
);
}
const resCreation = await s3.send(command);
-
+
const uploadId = resCreation.UploadId;
const uploadParams = {
Body: 'okok',
@@ -80,11 +78,11 @@ async function putMPUVersion(s3, bucketName, objectName, vId) {
args.request.headers['x-scal-s3-version-id'] = vId;
return next(args);
},
- { step: 'build' }
+ { step: 'build' },
);
}
const uploadRes = await s3.send(uploadCommand);
-
+
const completeParams = {
Bucket: bucketName,
Key: objectName,
@@ -92,9 +90,9 @@ async function putMPUVersion(s3, bucketName, objectName, vId) {
Parts: [
{
ETag: uploadRes.ETag,
- PartNumber: 1
+ PartNumber: 1,
},
- ]
+ ],
},
UploadId: uploadId,
};
@@ -106,7 +104,7 @@ async function putMPUVersion(s3, bucketName, objectName, vId) {
args.request.headers['x-scal-s3-version-id'] = vId;
return next(args);
},
- { step: 'build' }
+ { step: 'build' },
);
}
return await s3.send(completeCommand);
@@ -123,8 +121,11 @@ function checkVersionsAndUpdate(versionsBefore, versionsAfter, indexes) {
/* eslint-disable no-param-reassign */
versionsBefore[i].value.Size = versionsAfter[i].value.Size;
// Also update uploadId if it exists and is different since now aws sdk returns it as well
- if (versionsAfter[i].value.uploadId && versionsBefore[i].value.uploadId &&
- versionsAfter[i].value.uploadId !== versionsBefore[i].value.uploadId) {
+ if (
+ versionsAfter[i].value.uploadId &&
+ versionsBefore[i].value.uploadId &&
+ versionsAfter[i].value.uploadId !== versionsBefore[i].value.uploadId
+ ) {
versionsBefore[i].value.uploadId = versionsAfter[i].value.uploadId;
}
/* eslint-enable no-param-reassign */
@@ -179,13 +180,15 @@ describe('MPU with x-scal-s3-version-id header', () => {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
await new Promise((resolve, reject) => {
- metadata.setup(err => err ? reject(err) : resolve());
+ metadata.setup(err => (err ? reject(err) : resolve()));
});
await s3.send(new CreateBucketCommand({ Bucket: bucketName }));
- await s3.send(new CreateBucketCommand({
- Bucket: bucketNameMD,
- ObjectLockEnabledForBucket: true
- }));
+ await s3.send(
+ new CreateBucketCommand({
+ Bucket: bucketNameMD,
+ ObjectLockEnabledForBucket: true,
+ }),
+ );
});
afterEach(async () => {
@@ -199,14 +202,14 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
try {
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
try {
await putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P');
throw new Error('Expected InvalidArgument error');
@@ -235,17 +238,21 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
try {
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
try {
- await putMPUVersion(s3, bucketName, objectName,
- '393833343735313131383832343239393939393952473030312020313031');
+ await putMPUVersion(
+ s3,
+ bucketName,
+ objectName,
+ '393833343735313131383832343239393939393952473030312020313031',
+ );
throw new Error('Expected NoSuchVersion error');
} catch (err) {
checkError(err, 'NoSuchVersion', 404);
@@ -263,7 +270,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
try {
await s3.send(new PutObjectCommand(params));
-
+
try {
await putMPUVersion(s3, bucketName, objectName, '');
throw new Error('Expected InvalidObjectState error');
@@ -284,7 +291,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
let vId;
@@ -294,12 +301,14 @@ describe('MPU with x-scal-s3-version-id header', () => {
const deleteRes = await s3.send(new DeleteObjectCommand(params));
vId = deleteRes.VersionId;
-
- putMPUVersion(s3, bucketName, objectName, vId).then(() => {
- throw new Error('Expected MethodNotAllowed error');
- }).catch(err => {
- checkError(err, 'MethodNotAllowed', 405);
- });
+
+                await putMPUVersion(s3, bucketName, objectName, vId)
+ .then(() => {
+ throw new Error('Expected MethodNotAllowed error');
+ })
+ .catch(err => {
+ checkError(err, 'MethodNotAllowed', 405);
+ });
} catch (err) {
if (err.message === 'Expected MethodNotAllowed error') {
throw err;
@@ -317,28 +326,34 @@ describe('MPU with x-scal-s3-version-id header', () => {
try {
await putMPU(s3, bucketName, objectName);
-
+
await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
-
+
objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
versionsBefore = versionRes1.Versions;
await putMPUVersion(s3, bucketName, objectName, '');
-
+
objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);
-
+
const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsAfter = versionRes2.Versions;
-
+
clearUploadIdAndRestoreStatusFromVersions(versionsBefore);
clearUploadIdAndRestoreStatusFromVersions(versionsAfter);
-
+
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'uploadId', 'microVersionId', 'x-amz-restore',
- 'archive', 'dataStoreName', 'originOp']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ 'originOp',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
} catch (err) {
@@ -347,33 +362,40 @@ describe('MPU with x-scal-s3-version-id header', () => {
});
it('should overwrite an object', async () => {
- const params = { Bucket: bucketName, Key: objectName };
+ const params = { Bucket: bucketName, Key: objectName };
- await s3.send(new PutObjectCommand(params));
+ await s3.send(new PutObjectCommand(params));
- await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
-
- const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
-
- const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
- const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
+ await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
- await putMPUVersion(s3, bucketName, objectName, '');
+ const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
- const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);
+ const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+ const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
- const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
- const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions);
+ await putMPUVersion(s3, bucketName, objectName, '');
- checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
+ const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);
- assert.deepStrictEqual(versionsAfter, versionsBefore);
+ const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+ const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
-
- assert.deepStrictEqual(objMDAfter, objMDBefore);
+ checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
+
+ assert.deepStrictEqual(versionsAfter, versionsBefore);
+
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
+
+ assert.deepStrictEqual(objMDAfter, objMDBefore);
});
it('should overwrite a version', async () => {
@@ -381,17 +403,17 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
await s3.send(new PutBucketVersioningCommand(vParams));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
@@ -400,16 +422,23 @@ describe('MPU with x-scal-s3-version-id header', () => {
await putMPUVersion(s3, bucketName, objectName, vId);
const objMDAfter = await getMetadataPromise(bucketName, objectName, vId);
-
+
const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions);
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -418,17 +447,17 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
await s3.send(new PutBucketVersioningCommand(vParams));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
@@ -437,16 +466,23 @@ describe('MPU with x-scal-s3-version-id header', () => {
await putMPUVersion(s3, bucketName, objectName, '');
const objMDAfter = await getMetadataPromise(bucketName, objectName, vId);
-
+
const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions);
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -455,14 +491,14 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
-
+
await s3.send(new PutObjectCommand(params));
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, 'null', archive);
const objMDBefore = await getMetadataPromise(bucketName, objectName, 'null');
@@ -479,9 +515,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -490,13 +533,13 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
-
+
await s3.send(new PutObjectCommand(params));
await s3.send(new PutBucketVersioningCommand(vParams));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
@@ -517,9 +560,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -528,21 +578,21 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const sParams = {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Suspended',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
-
+
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
await s3.send(new PutBucketVersioningCommand(sParams));
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
@@ -560,9 +610,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -571,22 +628,22 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
-
+
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);
const objMDBefore = await getMetadataPromise(bucketName, objectName, vId);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
@@ -600,9 +657,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -611,18 +675,18 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
@@ -631,16 +695,23 @@ describe('MPU with x-scal-s3-version-id header', () => {
await putMPUVersion(s3, bucketName, objectName, vId);
const objMDAfter = await getMetadataPromise(bucketName, objectName, vId);
-
+
const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions);
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -649,31 +720,31 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const sParams = {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Suspended',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
await s3.send(new PutBucketVersioningCommand(vParams));
await s3.send(new PutObjectCommand(params));
-
+
const putRes = await s3.send(new PutObjectCommand(params));
const vId = putRes.VersionId;
await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
const objMDBefore = await getMetadataPromise(bucketName, objectName, vId);
-
+
await s3.send(new PutBucketVersioningCommand(sParams));
-
+
await putMPUVersion(s3, bucketName, objectName, vId);
const objMDAfter = await getMetadataPromise(bucketName, objectName, vId);
@@ -684,9 +755,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert.deepStrictEqual(objMDAfter, objMDBefore);
});
@@ -695,21 +773,21 @@ describe('MPU with x-scal-s3-version-id header', () => {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Enabled',
- }
+ },
};
const params = { Bucket: bucketName, Key: objectName };
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
-
+
const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions);
-
+
const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
-
+
await s3.send(new PutBucketVersioningCommand(vParams));
-
+
await putMPUVersion(s3, bucketName, objectName, 'null');
const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);
@@ -720,9 +798,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
assert.deepStrictEqual(versionsAfter, versionsBefore);
- checkObjMdAndUpdate(objMDBefore, objMDAfter,
- ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
- 'x-amz-restore', 'archive', 'dataStoreName']);
+ checkObjMdAndUpdate(objMDBefore, objMDAfter, [
+ 'location',
+ 'content-length',
+ 'originOp',
+ 'uploadId',
+ 'microVersionId',
+ 'x-amz-restore',
+ 'archive',
+ 'dataStoreName',
+ ]);
assert(isDeepStrictEqual(objMDAfter, objMDBefore), 'Objects should be deeply equal');
});
@@ -734,12 +819,12 @@ describe('MPU with x-scal-s3-version-id header', () => {
restoreRequestedAt: new Date(0),
restoreRequestedDays: 5,
restoreCompletedAt: new Date(10),
- restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
+ restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000),
};
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, undefined, archiveCompleted);
-
+
try {
await putMPUVersion(s3, bucketName, objectName, '');
throw new Error('Expected InvalidObjectState error');
@@ -748,45 +833,45 @@ describe('MPU with x-scal-s3-version-id header', () => {
}
});
- [
- 'non versioned',
- 'versioned',
- 'suspended'
- ].forEach(versioning => {
+ ['non versioned', 'versioned', 'suspended'].forEach(versioning => {
it(`should update restore metadata while keeping storage class (${versioning})`, async () => {
const params = { Bucket: bucketName, Key: objectName };
if (versioning === 'versioned') {
- await s3.send(new PutBucketVersioningCommand({
- Bucket: bucketName,
- VersioningConfiguration: { Status: 'Enabled' }
- }));
+ await s3.send(
+ new PutBucketVersioningCommand({
+ Bucket: bucketName,
+ VersioningConfiguration: { Status: 'Enabled' },
+ }),
+ );
} else if (versioning === 'suspended') {
- await s3.send(new PutBucketVersioningCommand({
- Bucket: bucketName,
- VersioningConfiguration: { Status: 'Suspended' }
- }));
+ await s3.send(
+ new PutBucketVersioningCommand({
+ Bucket: bucketName,
+ VersioningConfiguration: { Status: 'Suspended' },
+ }),
+ );
}
-
+
await s3.send(new PutObjectCommand(params));
-
+
await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
await metadataListObjectPromise(bucketName, mdListingParams, log);
-
+
await putMPUVersion(s3, bucketName, objectName, '');
const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);
-
+
const listRes = await s3.send(new ListObjectsCommand({ Bucket: bucketName }));
assert.strictEqual(listRes.Contents.length, 1);
assert.strictEqual(listRes.Contents[0].StorageClass, LOCATION_NAME_DMF);
-
+
const headRes = await s3.send(new HeadObjectCommand(params));
assert.strictEqual(headRes.StorageClass, LOCATION_NAME_DMF);
-
+
const getRes = await s3.send(new GetObjectCommand(params));
assert.strictEqual(getRes.StorageClass, LOCATION_NAME_DMF);
@@ -794,10 +879,14 @@ describe('MPU with x-scal-s3-version-id header', () => {
assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1');
assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo);
- assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt,
- objMDBefore.archive.restoreRequestedAt);
- assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays,
- objMDBefore.archive.restoreRequestedDays);
+ assert.deepStrictEqual(
+ objMDAfter.archive.restoreRequestedAt,
+ objMDBefore.archive.restoreRequestedAt,
+ );
+ assert.deepStrictEqual(
+ objMDAfter.archive.restoreRequestedDays,
+ objMDBefore.archive.restoreRequestedDays,
+ );
assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false);
assert(objMDAfter.archive.restoreCompletedAt);
@@ -806,18 +895,17 @@ describe('MPU with x-scal-s3-version-id header', () => {
});
});
-
it('should "copy" all but non data-related metadata (data encryption, data size...)', async () => {
const params = {
Bucket: bucketNameMD,
- Key: objectName
+ Key: objectName,
};
const putParams = {
...params,
Metadata: {
'custom-user-md': 'custom-md',
},
- WebsiteRedirectLocation: 'http://custom-redirect'
+ WebsiteRedirectLocation: 'http://custom-redirect',
};
const aclParams = {
...params,
@@ -827,51 +915,51 @@ describe('MPU with x-scal-s3-version-id header', () => {
const tagParams = {
...params,
Tagging: {
- TagSet: [{
- Key: 'tag1',
- Value: 'value1'
- }, {
- Key: 'tag2',
- Value: 'value2'
- }]
- }
+ TagSet: [
+ {
+ Key: 'tag1',
+ Value: 'value1',
+ },
+ {
+ Key: 'tag2',
+ Value: 'value2',
+ },
+ ],
+ },
};
const legalHoldParams = {
...params,
LegalHold: {
- Status: 'ON'
+ Status: 'ON',
},
};
const acl = {
- 'Canned': '',
- 'FULL_CONTROL': [
+ Canned: '',
+ FULL_CONTROL: [
// canonicalID of user Bart
'79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
],
- 'WRITE_ACP': [],
- 'READ': [],
- 'READ_ACP': [],
+ WRITE_ACP: [],
+ READ: [],
+ READ_ACP: [],
};
const tags = { tag1: 'value1', tag2: 'value2' };
const replicationInfo = {
- 'status': 'COMPLETED',
- 'backends': [
- {
- 'site': 'azure-normal',
- 'status': 'COMPLETED',
- 'dataStoreVersionId': '',
- },
- ],
- 'content': [
- 'DATA',
- 'METADATA',
+ status: 'COMPLETED',
+ backends: [
+ {
+ site: 'azure-normal',
+ status: 'COMPLETED',
+ dataStoreVersionId: '',
+ },
],
- 'destination': 'arn:aws:s3:::versioned',
- 'storageClass': 'azure-normal',
- 'role': 'arn:aws:iam::root:role/s3-replication-role',
- 'storageType': 'azure',
- 'dataStoreVersionId': '',
- 'isNFS': null,
+ content: ['DATA', 'METADATA'],
+ destination: 'arn:aws:s3:::versioned',
+ storageClass: 'azure-normal',
+ role: 'arn:aws:iam::root:role/s3-replication-role',
+ storageType: 'azure',
+ dataStoreVersionId: '',
+ isNFS: null,
};
await s3.send(new PutObjectCommand(putParams));
await s3.send(new PutObjectAclCommand(aclParams));
@@ -890,7 +978,6 @@ describe('MPU with x-scal-s3-version-id header', () => {
objMD['content-encoding'] = 'testencoding';
objMD['x-amz-server-side-encryption'] = 'aws:kms';
-
await metadataPutObjectMDPromise(bucketNameMD, objectName, objMD, undefined, log);
await putMPUVersion(s3, bucketNameMD, objectName, '');
@@ -911,7 +998,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
// data's etag inside x-amz-restore
assert.strictEqual(finalObjMD['content-md5'], 'testmd5');
assert.strictEqual(typeof finalObjMD['x-amz-restore']['content-md5'], 'string');
-
+
// removing legal hold to be able to clean the bucket after the test
legalHoldParams.LegalHold.Status = 'OFF';
await s3.send(new PutObjectLegalHoldCommand(legalHoldParams));
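
Note on putMPUVersion above: each of the three MPU commands injects the non-standard x-scal-s3-version-id header through the AWS SDK v3 middleware stack at the 'build' step. The same pattern, pulled out as a sketch (the helper name is illustrative; `middlewareStack.add` and the `step` option are the SDK's own API):

```js
// Attach a custom header to one command before it is signed and sent,
// exactly as the test does inline for CreateMultipartUploadCommand,
// UploadPartCommand, and CompleteMultipartUploadCommand.
function withScalVersionId(command, vId) {
    command.middlewareStack.add(
        next => args => {
            // eslint-disable-next-line no-param-reassign
            args.request.headers['x-scal-s3-version-id'] = vId;
            return next(args);
        },
        { step: 'build' },
    );
    return command;
}

// Usage: await s3.send(withScalVersionId(new CreateMultipartUploadCommand(params), vId));
```
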
diff --git a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js
index 4af27c76e7..520decd4b6 100644
--- a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js
+++ b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js
@@ -31,9 +31,14 @@ describe('objectGetAttributes', () => {
beforeEach(async () => {
await s3.send(new CreateBucketCommand({ Bucket: bucket }));
- await s3.send(new PutObjectCommand({
- Bucket: bucket, Key: key, Body: body, ChecksumAlgorithm: 'CRC64NVME',
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ ChecksumAlgorithm: 'CRC64NVME',
+ }),
+ );
});
afterEach(async () => {
@@ -43,12 +48,14 @@ describe('objectGetAttributes', () => {
it('should fail with a wrong bucket owner header', async () => {
try {
- await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag'],
- ExpectedBucketOwner: 'wrongAccountId',
- }));
+ await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag'],
+ ExpectedBucketOwner: 'wrongAccountId',
+ }),
+ );
assert.fail('Expected AccessDenied error');
} catch (err) {
assert.strictEqual(err.name, 'AccessDenied');
@@ -58,11 +65,13 @@ describe('objectGetAttributes', () => {
it('should fail because attributes header is missing', async () => {
try {
- await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: [],
- }));
+ await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: [],
+ }),
+ );
assert.fail('Expected InvalidArgument error');
} catch (err) {
assert.strictEqual(err.name, 'InvalidArgument');
@@ -72,11 +81,13 @@ describe('objectGetAttributes', () => {
it('should fail because attribute name is invalid', async () => {
try {
- await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['InvalidAttribute'],
- }));
+ await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['InvalidAttribute'],
+ }),
+ );
assert.fail('Expected InvalidArgument error');
} catch (err) {
assert.strictEqual(err.name, 'InvalidArgument');
@@ -86,11 +97,13 @@ describe('objectGetAttributes', () => {
it('should return NoSuchKey for non-existent object', async () => {
try {
- await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: 'nonexistent',
- ObjectAttributes: ['ETag'],
- }));
+ await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: 'nonexistent',
+ ObjectAttributes: ['ETag'],
+ }),
+ );
assert.fail('Expected NoSuchKey error');
} catch (err) {
assert.strictEqual(err.name, 'NoSuchKey');
@@ -99,11 +112,13 @@ describe('objectGetAttributes', () => {
});
it('should return all attributes', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize'],
+ }),
+ );
assert.strictEqual(data.ETag, expectedMD5);
assert.strictEqual(data.StorageClass, 'STANDARD');
@@ -113,21 +128,25 @@ describe('objectGetAttributes', () => {
});
it('should return ETag', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag'],
+ }),
+ );
assert.strictEqual(data.ETag, expectedMD5);
});
it('should return ChecksumCRC64NVME for object', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['Checksum'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['Checksum'],
+ }),
+ );
assert(data.Checksum, 'Checksum should be present');
assert(data.Checksum.ChecksumCRC64NVME, 'ChecksumCRC64NVME should be present');
@@ -135,11 +154,13 @@ describe('objectGetAttributes', () => {
});
it('should not return Checksum when not requested', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag', 'ObjectSize'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag', 'ObjectSize'],
+ }),
+ );
assert(data.ETag, 'ETag should be present');
assert(data.ObjectSize, 'ObjectSize should be present');
@@ -148,42 +169,50 @@ describe('objectGetAttributes', () => {
it("shouldn't return ObjectParts for non-MPU objects", async () => {
            // Requesting only ObjectParts for a non-MPU object breaks AWS SDK v3
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ObjectParts', 'ETag'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ObjectParts', 'ETag'],
+ }),
+ );
assert.strictEqual(data.ObjectParts, undefined, "ObjectParts shouldn't be present");
assert.strictEqual(data.ETag, expectedMD5);
});
it('should return StorageClass', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['StorageClass'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['StorageClass'],
+ }),
+ );
assert.strictEqual(data.StorageClass, 'STANDARD');
});
it('should return ObjectSize', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ObjectSize'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ObjectSize'],
+ }),
+ );
assert.strictEqual(data.ObjectSize, body.length);
});
it('should return LastModified', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag'],
+ }),
+ );
assert(data.LastModified, 'LastModified should be present');
assert(data.LastModified instanceof Date, 'LastModified should be a Date');
@@ -206,31 +235,37 @@ describe('Test get object attributes with multipart upload', () => {
await s3.send(new CreateBucketCommand({ Bucket: bucket }));
- const createResult = await s3.send(new CreateMultipartUploadCommand({
- Bucket: bucket,
- Key: mpuKey,
- }));
+ const createResult = await s3.send(
+ new CreateMultipartUploadCommand({
+ Bucket: bucket,
+ Key: mpuKey,
+ }),
+ );
const uploadId = createResult.UploadId;
const partData = Buffer.alloc(partSize, 'a');
const parts = [];
for (let i = 1; i <= partCount; i++) {
- const uploadResult = await s3.send(new UploadPartCommand({
- Bucket: bucket,
- Key: mpuKey,
- PartNumber: i,
- UploadId: uploadId,
- Body: partData,
- }));
+ const uploadResult = await s3.send(
+ new UploadPartCommand({
+ Bucket: bucket,
+ Key: mpuKey,
+ PartNumber: i,
+ UploadId: uploadId,
+ Body: partData,
+ }),
+ );
parts.push({ PartNumber: i, ETag: uploadResult.ETag });
}
- await s3.send(new CompleteMultipartUploadCommand({
- Bucket: bucket,
- Key: mpuKey,
- UploadId: uploadId,
- MultipartUpload: { Parts: parts },
- }));
+ await s3.send(
+ new CompleteMultipartUploadCommand({
+ Bucket: bucket,
+ Key: mpuKey,
+ UploadId: uploadId,
+ MultipartUpload: { Parts: parts },
+ }),
+ );
});
after(async () => {
@@ -239,22 +274,26 @@ describe('Test get object attributes with multipart upload', () => {
});
it('should return TotalPartsCount for MPU object', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: mpuKey,
- ObjectAttributes: ['ObjectParts'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: mpuKey,
+ ObjectAttributes: ['ObjectParts'],
+ }),
+ );
assert(data.ObjectParts, 'ObjectParts should be present');
assert.strictEqual(data.ObjectParts.TotalPartsCount, partCount);
});
it('should return TotalPartsCount along with other attributes for MPU object', async () => {
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: bucket,
- Key: mpuKey,
- ObjectAttributes: ['ETag', 'ObjectParts', 'ObjectSize', 'StorageClass'],
- }));
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: bucket,
+ Key: mpuKey,
+ ObjectAttributes: ['ETag', 'ObjectParts', 'ObjectSize', 'StorageClass'],
+ }),
+ );
assert(data.ETag, 'ETag should be present');
assert(data.ETag.includes(`-${partCount}`), `ETag should indicate MPU with ${partCount} parts`);
@@ -286,64 +325,76 @@ describe('objectGetAttributes with user metadata', () => {
});
it('should return specific user metadata when requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- 'custom-key': 'custom-value',
- 'another-key': 'another-value',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-custom-key'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ 'custom-key': 'custom-value',
+ 'another-key': 'another-value',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-custom-key'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-custom-key'], 'custom-value');
});
it('should return multiple user metadata when requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- foo: 'foo-value',
- bar: 'bar-value',
- baz: 'baz-value',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-foo', 'x-amz-meta-bar'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ foo: 'foo-value',
+ bar: 'bar-value',
+ baz: 'baz-value',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-foo', 'x-amz-meta-bar'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-foo'], 'foo-value');
assert.strictEqual(response['x-amz-meta-bar'], 'bar-value');
});
it('should return only all user metadata when x-amz-meta-* is requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- key1: 'value1',
- key2: 'value2',
- key3: 'value3',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-*'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ key1: 'value1',
+ key2: 'value2',
+ key3: 'value3',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-*'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-key1'], 'value1');
assert.strictEqual(response['x-amz-meta-key2'], 'value2');
@@ -352,75 +403,91 @@ describe('objectGetAttributes with user metadata', () => {
});
it('should return empty response when object has no user metadata and x-amz-meta-* is requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag', 'x-amz-meta-*'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag', 'x-amz-meta-*'],
+ }),
+ );
const metadataKeys = Object.keys(response).filter(k => k.startsWith('x-amz-meta-'));
assert.strictEqual(metadataKeys.length, 0);
});
it('should return empty response when requested metadata key does not exist', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- existing: 'value',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag', 'x-amz-meta-nonexistent'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ existing: 'value',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag', 'x-amz-meta-nonexistent'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-nonexistent'], undefined);
});
it('should return empty response when only a non-existing metadata key is requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- existing: 'value',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-nonexistent'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ existing: 'value',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-nonexistent'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-nonexistent'], undefined);
});
it('should return user metadata along with standard attributes', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- custom: 'custom-value',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['ETag', 'x-amz-meta-custom', 'ObjectSize'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ custom: 'custom-value',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['ETag', 'x-amz-meta-custom', 'ObjectSize'],
+ }),
+ );
assert.strictEqual(response.ETag, expectedMD5);
assert.strictEqual(response.ObjectSize, body.length);
@@ -428,22 +495,26 @@ describe('objectGetAttributes with user metadata', () => {
});
it('should return all metadata once wildcard is provided', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- key1: 'value1',
- key2: 'value2',
- key3: 'value3',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-key1'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ key1: 'value1',
+ key2: 'value2',
+ key3: 'value3',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-key1'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-key1'], 'value1');
assert.strictEqual(response['x-amz-meta-key2'], 'value2');
@@ -451,42 +522,50 @@ describe('objectGetAttributes with user metadata', () => {
});
it('should handle duplicate wildcard requests without duplicating results', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- key1: 'value1',
- key2: 'value2',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-*'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ key1: 'value1',
+ key2: 'value2',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-*'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-key1'], 'value1');
assert.strictEqual(response['x-amz-meta-key2'], 'value2');
});
it('should handle duplicate specific metadata requests without duplicating results', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: bucket,
- Key: key,
- Body: body,
- Metadata: {
- key1: 'value1',
- key2: 'value2',
- },
- }));
-
- const response = await s3.send(new GetObjectAttributesExtendedCommand({
- Bucket: bucket,
- Key: key,
- ObjectAttributes: ['x-amz-meta-key1', 'x-amz-meta-key1'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: body,
+ Metadata: {
+ key1: 'value1',
+ key2: 'value2',
+ },
+ }),
+ );
+
+ const response = await s3.send(
+ new GetObjectAttributesExtendedCommand({
+ Bucket: bucket,
+ Key: key,
+ ObjectAttributes: ['x-amz-meta-key1', 'x-amz-meta-key1'],
+ }),
+ );
assert.strictEqual(response['x-amz-meta-key1'], 'value1');
assert.strictEqual(response['x-amz-meta-key2'], undefined);
@@ -519,63 +598,75 @@ describe('objectGetAttributes with checksum', () => {
await s3.send(new DeleteBucketCommand({ Bucket: checksumBucket }));
});
- Object.entries(algorithms).forEach(([name, { getObjectAttributesXMLTag }]) => {
+ Object.entries(algorithms).forEach(([name, { xmlTag }]) => {
const sdkAlgorithm = name.toUpperCase();
- it(`should return ${getObjectAttributesXMLTag} when object has ${name} checksum`, async () => {
- await s3.send(new PutObjectCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- Body: checksumBody,
- ChecksumAlgorithm: sdkAlgorithm,
- }));
-
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- ObjectAttributes: ['Checksum'],
- }));
+ it(`should return ${xmlTag} when object has ${name} checksum`, async () => {
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ Body: checksumBody,
+ ChecksumAlgorithm: sdkAlgorithm,
+ }),
+ );
+
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ ObjectAttributes: ['Checksum'],
+ }),
+ );
assert(data.Checksum, 'Checksum should be present');
- assert.strictEqual(data.Checksum[getObjectAttributesXMLTag], expectedDigests[name]);
+ assert.strictEqual(data.Checksum[xmlTag], expectedDigests[name]);
assert.strictEqual(data.Checksum.ChecksumType, 'FULL_OBJECT');
});
- it(`should return ${getObjectAttributesXMLTag} along with other attributes`, async () => {
- await s3.send(new PutObjectCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- Body: checksumBody,
- ChecksumAlgorithm: sdkAlgorithm,
- }));
-
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- ObjectAttributes: ['ETag', 'Checksum', 'ObjectSize'],
- }));
+ it(`should return ${xmlTag} along with other attributes`, async () => {
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ Body: checksumBody,
+ ChecksumAlgorithm: sdkAlgorithm,
+ }),
+ );
+
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ ObjectAttributes: ['ETag', 'Checksum', 'ObjectSize'],
+ }),
+ );
assert(data.ETag, 'ETag should be present');
assert(data.ObjectSize, 'ObjectSize should be present');
assert(data.Checksum, 'Checksum should be present');
- assert.strictEqual(data.Checksum[getObjectAttributesXMLTag], expectedDigests[name]);
+ assert.strictEqual(data.Checksum[xmlTag], expectedDigests[name]);
assert.strictEqual(data.Checksum.ChecksumType, 'FULL_OBJECT');
});
});
it('should not return Checksum when not requested', async () => {
- await s3.send(new PutObjectCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- Body: checksumBody,
- ChecksumAlgorithm: 'CRC64NVME',
- }));
-
- const data = await s3.send(new GetObjectAttributesCommand({
- Bucket: checksumBucket,
- Key: checksumKey,
- ObjectAttributes: ['ETag', 'ObjectSize'],
- }));
+ await s3.send(
+ new PutObjectCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ Body: checksumBody,
+ ChecksumAlgorithm: 'CRC64NVME',
+ }),
+ );
+
+ const data = await s3.send(
+ new GetObjectAttributesCommand({
+ Bucket: checksumBucket,
+ Key: checksumKey,
+ ObjectAttributes: ['ETag', 'ObjectSize'],
+ }),
+ );
assert(data.ETag, 'ETag should be present');
assert(data.ObjectSize, 'ObjectSize should be present');
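
Note: GetObjectAttributesExtendedCommand is a test-local helper whose definition sits outside this diff. On the wire, GetObjectAttributes is a GET on the ?attributes subresource with the requested names carried in the x-amz-object-attributes request header, which is presumably how the helper slips x-amz-meta-* names past the stock SDK command's validation. A raw, unauthenticated sketch (real requests need SigV4; host, port, and names are illustrative):

```js
const http = require('http');

// GET /<bucket>/<key>?attributes, attribute names in a request header.
const req = http.request(
    {
        host: 'localhost',
        port: 8000,
        path: '/my-bucket/my-key?attributes',
        method: 'GET',
        headers: { 'x-amz-object-attributes': 'ETag,ObjectSize,x-amz-meta-custom' },
    },
    res => res.pipe(process.stdout),
);
req.end();
```
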
diff --git a/tests/functional/raw-node/test/xAmzChecksum.js b/tests/functional/raw-node/test/xAmzChecksum.js
index b9fa5579e1..691aedcd98 100644
--- a/tests/functional/raw-node/test/xAmzChecksum.js
+++ b/tests/functional/raw-node/test/xAmzChecksum.js
@@ -19,8 +19,9 @@ describe('Test x-amz-checksums', () => {
{ name: 'CRC64NVME', objDataDigest: 'jC+ERbTL/Dw=', validWrong: 'AAAAAAAAAAA=' },
{ name: 'SHA1', objDataDigest: 'hvfkN/qlp/zhXR3cuerq6jd2Z7g=', validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAA=' },
{
- name: 'SHA256', objDataDigest: 'ypeBEsobvcr6wjGzmiPcTaeG7/gUfE5yuYB3ha/uSLs=',
- validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='
+ name: 'SHA256',
+ objDataDigest: 'ypeBEsobvcr6wjGzmiPcTaeG7/gUfE5yuYB3ha/uSLs=',
+ validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=',
},
];
const methods = [
@@ -153,7 +154,7 @@ describe('Test x-amz-checksums', () => {
...headers,
},
},
- authCredentials
+ authCredentials,
),
res => {
let data = '';
@@ -167,14 +168,13 @@ describe('Test x-amz-checksums', () => {
}
done();
});
- }
+ },
);
req.on('error', err => {
assert.ifError(err);
});
-
req.once('drain', () => {
req.end();
});
@@ -188,13 +188,14 @@ describe('Test x-amz-checksums', () => {
for (const algo of algos) {
for (const method of methods) {
itSkipIfAWS(
- `${method.Name} should respond BadDigest ` +
- `with invalid x-amz-checksum-${algo.name.toLowerCase()}`, done => {
+ `${method.Name} should respond BadDigest ` + `with invalid x-amz-checksum-${algo.name.toLowerCase()}`,
+ done => {
const headers = {
[`x-amz-checksum-${algo.name.toLowerCase()}`]: algo.validWrong,
};
doTest(headers, method, 400, ['BadDigest'], done);
- });
+ },
+ );
}
}
@@ -225,20 +226,19 @@ describe('Test x-amz-checksums', () => {
);
});
- itSkipIfAWS(
- 'should respond InvalidRequest if the value of x-amz-sdk-checksum-algorithm is invalid', done => {
- const headers = {
- 'x-amz-sdk-checksum-algorithm': 'BAD',
- [`x-amz-checksum-${algos[0].name.toLowerCase()}`]: algos[0].objDataDigest,
- };
- doTest(
- headers,
- methods[0],
- 400,
- ['InvalidRequest', 'Value for x-amz-sdk-checksum-algorithm header is invalid.'],
- done,
- );
- });
+ itSkipIfAWS('should respond InvalidRequest if the value of x-amz-sdk-checksum-algorithm is invalid', done => {
+ const headers = {
+ 'x-amz-sdk-checksum-algorithm': 'BAD',
+ [`x-amz-checksum-${algos[0].name.toLowerCase()}`]: algos[0].objDataDigest,
+ };
+ doTest(
+ headers,
+ methods[0],
+ 400,
+ ['InvalidRequest', 'Value for x-amz-sdk-checksum-algorithm header is invalid.'],
+ done,
+ );
+ });
itSkipIfAWS('should respond InvalidRequest with if invalid x-amz-checksum- value', done => {
const headers = {
@@ -254,7 +254,8 @@ describe('Test x-amz-checksums', () => {
});
itSkipIfAWS(
- 'should respond InvalidRequest with if missing x-amz-checksum- for x-amz-sdk-checksum-algorithm ', done => {
+ 'should respond InvalidRequest if missing x-amz-checksum- for x-amz-sdk-checksum-algorithm',
+ done => {
const headers = {
'x-amz-sdk-checksum-algorithm': 'SHA1',
};
@@ -262,17 +263,22 @@ describe('Test x-amz-checksums', () => {
headers,
methods[0],
400,
- ['InvalidRequest', 'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' +
- 'or x-amz-trailer headers were found.'],
+ [
+ 'InvalidRequest',
+ 'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' +
+ 'or x-amz-trailer headers were found.',
+ ],
done,
);
- });
+ },
+ );
for (const algo of algos) {
for (const method of methods) {
itSkipIfAWS(
`${method.Name} should not respond BadDigest if ` +
- `x-amz-checksum-${algo.name.toLowerCase()} is correct`, done => {
+ `x-amz-checksum-${algo.name.toLowerCase()} is correct`,
+ done => {
const url = `http://localhost:8000/${bucket}/${method.Key}?${method.Query}`;
const req = new HttpRequestAuthV4(
url,
@@ -286,7 +292,7 @@ describe('Test x-amz-checksums', () => {
[`x-amz-checksum-${algo.name.toLowerCase()}`]: algo.objDataDigest,
},
},
- authCredentials
+ authCredentials,
),
res => {
let data = '';
@@ -301,14 +307,13 @@ describe('Test x-amz-checksums', () => {
assert(!data.includes('did not match the calculated checksum'));
done();
});
- }
+ },
);
req.on('error', err => {
assert.ifError(err);
});
-
req.once('drain', () => {
req.end();
});
@@ -317,7 +322,8 @@ describe('Test x-amz-checksums', () => {
assert.ifError(err);
req.end();
});
- });
+ },
+ );
}
}
});
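Note on the fixtures above: the objDataDigest values are plain base64-encoded hashes of the test object's body, and the validWrong values are all-zero digests of the correct length, so they pass format validation and fail only the content comparison (BadDigest). A minimal sketch for regenerating them with Node's built-in crypto, assuming the object body is the one-byte string 'a' (the SHA1 and SHA256 fixtures decode to exactly that):

    const crypto = require('crypto');

    const body = 'a'; // assumption: the test object's data
    console.log(crypto.createHash('sha1').update(body).digest('base64'));
    // hvfkN/qlp/zhXR3cuerq6jd2Z7g=
    console.log(crypto.createHash('sha256').update(body).digest('base64'));
    // ypeBEsobvcr6wjGzmiPcTaeG7/gUfE5yuYB3ha/uSLs=
    console.log(Buffer.alloc(20).toString('base64')); // zero-filled, SHA1-sized
    // AAAAAAAAAAAAAAAAAAAAAAAAAAA=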
diff --git a/tests/unit/api/apiUtils/integrity/validateChecksums.js b/tests/unit/api/apiUtils/integrity/validateChecksums.js
index cfd0253714..01bebac4a5 100644
--- a/tests/unit/api/apiUtils/integrity/validateChecksums.js
+++ b/tests/unit/api/apiUtils/integrity/validateChecksums.js
@@ -20,7 +20,7 @@ describe('validateChecksumsNoChunking MD5', () => {
const body = 'Hello, World!';
const expectedMd5 = crypto.createHash('md5').update(body, 'utf8').digest('base64');
const headers = {
- 'content-md5': expectedMd5
+ 'content-md5': expectedMd5,
};
const result = await validateChecksumsNoChunking(headers, body);
@@ -34,7 +34,7 @@ describe('validateChecksumsNoChunking MD5', () => {
const wrongMd5 = '1B2M2Y8AsgTpgAmY7PhCfg==';
const expectedMd5 = crypto.createHash('md5').update(body, 'utf8').digest('base64');
const headers = {
- 'content-md5': wrongMd5
+ 'content-md5': wrongMd5,
};
const result = await validateChecksumsNoChunking(headers, body);
@@ -67,7 +67,7 @@ describe('validateChecksumsNoChunking MD5', () => {
const body = 'Hello, World!';
const headers = {
'content-type': 'application/json',
- 'content-md5': undefined
+ 'content-md5': undefined,
};
const result = await validateChecksumsNoChunking(headers, body);
@@ -79,7 +79,7 @@ describe('validateChecksumsNoChunking MD5', () => {
const body = 'Hello, World!';
const headers = {
'content-type': 'application/json',
- 'content-md5': null
+ 'content-md5': null,
};
const result = await validateChecksumsNoChunking(headers, body);
@@ -91,7 +91,7 @@ describe('validateChecksumsNoChunking MD5', () => {
const body = 'Hello, World!';
const headers = {
'content-type': 'application/json',
- 'content-md5': ''
+ 'content-md5': '',
};
const result = await validateChecksumsNoChunking(headers, body);
@@ -106,17 +106,25 @@ describe('validateChecksumsNoChunking CRC32, CRC32C, SHA1, SHA256, CRC64NVME', (
{ name: 'crc32', data: 'crc32 data', digest: 'xCSBHA==', invalid: 'x', validWrong: 'AAAAAA==' },
{ name: 'crc32c', data: 'crc32c data', digest: 'oEjFGQ==', invalid: 'x', validWrong: 'AAAAAA==' },
{
- name: 'sha1', data: 'sha1 data', digest: 'roREeoJPb6jNZz8PPT+/KtdXm0o=', invalid: 'x',
- validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='
+ name: 'sha1',
+ data: 'sha1 data',
+ digest: 'roREeoJPb6jNZz8PPT+/KtdXm0o=',
+ invalid: 'x',
+ validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAA=',
},
{
- name: 'sha256', data: 'sha256 data',
- digest: 'jS/UevcoKxbM33kmPFujS72ior/9/i374VmGvbTAwAc=', invalid: 'x',
- validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='
+ name: 'sha256',
+ data: 'sha256 data',
+ digest: 'jS/UevcoKxbM33kmPFujS72ior/9/i374VmGvbTAwAc=',
+ invalid: 'x',
+ validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=',
},
{
- name: 'crc64nvme', data: 'crc64nvme data', digest: 'Tpz+dGVqyhg=', invalid: 'x',
- validWrong: 'AAAAAAAAAAA='
+ name: 'crc64nvme',
+ data: 'crc64nvme data',
+ digest: 'Tpz+dGVqyhg=',
+ invalid: 'x',
+ validWrong: 'AAAAAAAAAAA=',
},
];
@@ -305,8 +313,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: method,
headers: {
- 'content-md5': wrongMd5
- }
+ 'content-md5': wrongMd5,
+ },
};
const log = { debug: sandbox.stub() };
@@ -328,8 +336,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: method,
headers: {
- 'content-md5': wrongMd5
- }
+ 'content-md5': wrongMd5,
+ },
};
const log = { debug: sandbox.stub() };
@@ -349,7 +357,7 @@ describe('validateMethodChecksumNoChunking', () => {
const body = 'Hello, World!';
const request = {
apiMethod: method,
- headers: {}
+ headers: {},
};
const log = { debug: sandbox.stub() };
@@ -371,8 +379,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: method,
headers: {
- 'content-md5': correctMd5
- }
+ 'content-md5': correctMd5,
+ },
};
const log = { debug: sandbox.stub() };
@@ -394,8 +402,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: method,
headers: {
- 'content-md5': wrongMd5
- }
+ 'content-md5': wrongMd5,
+ },
};
const log = { debug: sandbox.stub() };
@@ -417,8 +425,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: unsupportedMethod,
headers: {
- 'content-md5': wrongMd5
- }
+ 'content-md5': wrongMd5,
+ },
};
const log = { debug: sandbox.stub() };
@@ -434,8 +442,8 @@ describe('validateMethodChecksumNoChunking', () => {
const body = 'Hello, World!';
const request = {
headers: {
- 'content-md5': 'wrongchecksum123='
- }
+ 'content-md5': 'wrongchecksum123=',
+ },
};
const log = { debug: sandbox.stub() };
@@ -449,8 +457,8 @@ describe('validateMethodChecksumNoChunking', () => {
const request = {
apiMethod: 'nonExistentMethod',
headers: {
- 'content-md5': 'wrongchecksum123='
- }
+ 'content-md5': 'wrongchecksum123=',
+ },
};
const log = { debug: sandbox.stub() };
@@ -464,8 +472,8 @@ describe('validateMethodChecksumNoChunking', () => {
describe('getChecksumDataFromHeaders', () => {
// Valid-format digests (correct length and base64, content not verified by getChecksumDataFromHeaders)
const validDigests = {
- crc32: 'AAAAAA==', // 8 chars
- crc32c: 'AAAAAA==', // 8 chars
+ crc32: 'AAAAAA==', // 8 chars
+ crc32c: 'AAAAAA==', // 8 chars
sha1: 'AAAAAAAAAAAAAAAAAAAAAAAAAAA=', // 28 chars
sha256: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=', // 44 chars
crc64nvme: 'AAAAAAAAAAA=', // 12 chars
@@ -514,12 +522,11 @@ describe('getChecksumDataFromHeaders', () => {
assert.strictEqual(result.error, ChecksumError.MultipleChecksumTypes);
});
- it('should return MissingCorresponding when x-amz-sdk-checksum-algorithm has no x-amz-checksum- or x-amz-trailer',
- () => {
- const result = getChecksumDataFromHeaders({ 'x-amz-sdk-checksum-algorithm': 'crc32' });
- assert.strictEqual(result.error, ChecksumError.MissingCorresponding);
- assert.strictEqual(result.details.expected, 'crc32');
- });
+ it('should return MissingCorresponding when x-amz-sdk-checksum-algorithm has no x-amz-checksum- or trailer', () => {
+ const result = getChecksumDataFromHeaders({ 'x-amz-sdk-checksum-algorithm': 'crc32' });
+ assert.strictEqual(result.error, ChecksumError.MissingCorresponding);
+ assert.strictEqual(result.details.expected, 'crc32');
+ });
it('should return success for x-amz-checksum-crc32 with matching x-amz-sdk-checksum-algorithm CRC32', () => {
const result = getChecksumDataFromHeaders({
@@ -529,25 +536,23 @@ describe('getChecksumDataFromHeaders', () => {
assert.deepStrictEqual(result, { algorithm: 'crc32', isTrailer: false, expected: validDigests.crc32 });
});
- it('should return AlgoNotSupportedSDK for x-amz-checksum-crc32 with mismatched x-amz-sdk-checksum-algorithm SHA256',
- () => {
- const result = getChecksumDataFromHeaders({
- 'x-amz-checksum-crc32': validDigests.crc32,
- 'x-amz-sdk-checksum-algorithm': 'sha256',
- });
- assert.strictEqual(result.error, ChecksumError.AlgoNotSupportedSDK);
- assert.strictEqual(result.details.algorithm, 'sha256');
+ it('should return AlgoNotSupportedSDK on mismatched x-amz-sdk-checksum-algorithm (SHA256 vs CRC32)', () => {
+ const result = getChecksumDataFromHeaders({
+ 'x-amz-checksum-crc32': validDigests.crc32,
+ 'x-amz-sdk-checksum-algorithm': 'sha256',
});
+ assert.strictEqual(result.error, ChecksumError.AlgoNotSupportedSDK);
+ assert.strictEqual(result.details.algorithm, 'sha256');
+ });
- it('should return AlgoNotSupportedSDK for x-amz-checksum-crc32 with non-string x-amz-sdk-checksum-algorithm',
- () => {
- const result = getChecksumDataFromHeaders({
- 'x-amz-checksum-crc32': validDigests.crc32,
- 'x-amz-sdk-checksum-algorithm': 1234,
- });
- assert.strictEqual(result.error, ChecksumError.AlgoNotSupportedSDK);
- assert.strictEqual(result.details.algorithm, 1234);
+ it('should return AlgoNotSupportedSDK for non-string x-amz-sdk-checksum-algorithm', () => {
+ const result = getChecksumDataFromHeaders({
+ 'x-amz-checksum-crc32': validDigests.crc32,
+ 'x-amz-sdk-checksum-algorithm': 1234,
});
+ assert.strictEqual(result.error, ChecksumError.AlgoNotSupportedSDK);
+ assert.strictEqual(result.details.algorithm, 1234);
+ });
it('should return AlgoNotSupportedSDK for x-amz-checksum-crc32 with unknown x-amz-sdk-checksum-algorithm', () => {
const result = getChecksumDataFromHeaders({
@@ -568,12 +573,11 @@ describe('getChecksumDataFromHeaders', () => {
assert.deepStrictEqual(result, { algorithm: 'crc64nvme', isTrailer: true, expected: undefined });
});
- it('should return TrailerNotSupported for x-amz-trailer with unsupported value (not x-amz-checksum- prefix)',
- () => {
- const result = getChecksumDataFromHeaders({ 'x-amz-trailer': 'x-custom-header' });
- assert.strictEqual(result.error, ChecksumError.TrailerNotSupported);
- assert.strictEqual(result.details.value, 'x-custom-header');
- });
+ it('should return TrailerNotSupported for x-amz-trailer value not prefixed by x-amz-checksum-', () => {
+ const result = getChecksumDataFromHeaders({ 'x-amz-trailer': 'x-custom-header' });
+ assert.strictEqual(result.error, ChecksumError.TrailerNotSupported);
+ assert.strictEqual(result.details.value, 'x-custom-header');
+ });
it('should return TrailerNotSupported for x-amz-trailer: x-amz-checksum-unknown-algo', () => {
const result = getChecksumDataFromHeaders({ 'x-amz-trailer': 'x-amz-checksum-md4' });
@@ -749,8 +753,7 @@ describe('arsenalErrorFromChecksumError', () => {
details: { type: 'BADTYPE' },
});
assert.strictEqual(result.message, 'InvalidRequest');
- assert.strictEqual(result.description,
- 'Value for x-amz-checksum-type header is invalid.');
+ assert.strictEqual(result.description, 'Value for x-amz-checksum-type header is invalid.');
});
it('should return InvalidRequest for MPUTypeWithoutAlgo', () => {
@@ -759,8 +762,10 @@ describe('arsenalErrorFromChecksumError', () => {
details: { type: 'COMPOSITE' },
});
assert.strictEqual(result.message, 'InvalidRequest');
- assert.match(result.description,
- /x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header/);
+ assert.match(
+ result.description,
+ /x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header/,
+ );
});
it('should return InvalidRequest for MPUInvalidCombination mentioning type and algorithm', () => {
@@ -769,8 +774,10 @@ describe('arsenalErrorFromChecksumError', () => {
details: { algorithm: 'sha256', type: 'FULL_OBJECT' },
});
assert.strictEqual(result.message, 'InvalidRequest');
- assert.strictEqual(result.description,
- 'The FULL_OBJECT checksum type cannot be used with the SHA256 checksum algorithm.');
+ assert.strictEqual(
+ result.description,
+ 'The FULL_OBJECT checksum type cannot be used with the SHA256 checksum algorithm.',
+ );
});
});
@@ -779,7 +786,9 @@ describe('getChecksumDataFromMPUHeaders', () => {
it('should return crc64nvme/FULL_OBJECT with isDefault=true when no headers', () => {
const result = getChecksumDataFromMPUHeaders({});
assert.deepStrictEqual(result, {
- algorithm: 'crc64nvme', type: 'FULL_OBJECT', isDefault: true,
+ algorithm: 'crc64nvme',
+ type: 'FULL_OBJECT',
+ isDefault: true,
});
});
});
@@ -799,7 +808,9 @@ describe('getChecksumDataFromMPUHeaders', () => {
'x-amz-checksum-algorithm': algo,
});
assert.deepStrictEqual(result, {
- algorithm: algo, type: expectedType, isDefault: false,
+ algorithm: algo,
+ type: expectedType,
+ isDefault: false,
});
});
}
@@ -809,7 +820,9 @@ describe('getChecksumDataFromMPUHeaders', () => {
'x-amz-checksum-algorithm': 'CRC32',
});
assert.deepStrictEqual(result, {
- algorithm: 'crc32', type: 'COMPOSITE', isDefault: false,
+ algorithm: 'crc32',
+ type: 'COMPOSITE',
+ isDefault: false,
});
});
@@ -818,7 +831,9 @@ describe('getChecksumDataFromMPUHeaders', () => {
'x-amz-checksum-algorithm': 'Sha256',
});
assert.deepStrictEqual(result, {
- algorithm: 'sha256', type: 'COMPOSITE', isDefault: false,
+ algorithm: 'sha256',
+ type: 'COMPOSITE',
+ isDefault: false,
});
});
});
@@ -841,7 +856,9 @@ describe('getChecksumDataFromMPUHeaders', () => {
'x-amz-checksum-type': type,
});
assert.deepStrictEqual(result, {
- algorithm: algo, type, isDefault: false,
+ algorithm: algo,
+ type,
+ isDefault: false,
});
});
}
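The length comments on the digest fixtures in this file (8, 12, 28, and 44 characters) follow directly from base64 arithmetic: an n-byte digest encodes to 4 * ceil(n / 3) characters. A quick check:

    const b64len = n => 4 * Math.ceil(n / 3);
    console.log(b64len(4));  // 8  -> crc32, crc32c (32-bit)
    console.log(b64len(8));  // 12 -> crc64nvme (64-bit)
    console.log(b64len(20)); // 28 -> sha1 (160-bit)
    console.log(b64len(32)); // 44 -> sha256 (256-bit)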
diff --git a/tests/unit/api/apiUtils/object/objectAttributes.js b/tests/unit/api/apiUtils/object/objectAttributes.js
index 151d56d0aa..a8ce74e197 100644
--- a/tests/unit/api/apiUtils/object/objectAttributes.js
+++ b/tests/unit/api/apiUtils/object/objectAttributes.js
@@ -1,7 +1,7 @@
const assert = require('assert');
const {
parseAttributesHeaders,
- buildAttributesXml
+ buildAttributesXml,
} = require('../../../../../lib/api/apiUtils/object/objectAttributes');
const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums');
const { DummyRequestLogger } = require('../../../helpers');
@@ -51,16 +51,15 @@ describe('parseAttributesHeaders', () => {
});
});
-
describe('buildXmlAttributes', () => {
const objectMD = {
'content-md5': '16e37e19194511993498801d4692795f',
'content-length': 5000,
'x-amz-storage-class': 'STANDARD',
- 'restoreStatus': {
+ restoreStatus: {
inProgress: false,
- expiryDate: 'Fri, 20 Feb 2026 12:00:00 GMT'
- }
+ expiryDate: 'Fri, 20 Feb 2026 12:00:00 GMT',
+ },
};
const userMetadata = {
@@ -164,9 +163,11 @@ describe('buildXmlAttributes', () => {
const expectedDigests = {};
before(async () => {
- await Promise.all(Object.keys(algorithms).map(async name => {
- expectedDigests[name] = await algorithms[name].digest(testData);
- }));
+ await Promise.all(
+ Object.keys(algorithms).map(async name => {
+ expectedDigests[name] = await algorithms[name].digest(testData);
+ }),
+ );
});
it('should not generate Checksum XML when checksumAlgorithm is unknown', () => {
@@ -190,7 +191,7 @@ describe('buildXmlAttributes', () => {
assert.strictEqual(result.length, 0);
});
- Object.entries(algorithms).forEach(([algo, { getObjectAttributesXMLTag }]) => {
+ Object.entries(algorithms).forEach(([algo, { xmlTag }]) => {
it(`should generate correct Checksum XML for ${algo}`, () => {
const digest = expectedDigests[algo];
const result = [];
@@ -205,7 +206,7 @@ describe('buildXmlAttributes', () => {
assert.strictEqual(result.length, 4);
assert.strictEqual(result[0], '<Checksum>');
- assert.strictEqual(result[1], `<${getObjectAttributesXMLTag}>${digest}</${getObjectAttributesXMLTag}>`);
+ assert.strictEqual(result[1], `<${xmlTag}>${digest}</${xmlTag}>`);
assert.strictEqual(result[2], '<ChecksumType>FULL_OBJECT</ChecksumType>');
assert.strictEqual(result[3], '</Checksum>');
});
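The getObjectAttributesXMLTag -> xmlTag rename is the mechanical part of this hunk; the tag itself is the per-algorithm element name emitted in the GetObjectAttributes response body. A sketch of the Checksum block these asserts expect, assuming xmlTag carries the AWS-style element names (the real map lives in lib/api/apiUtils/integrity/validateChecksums.js):

    // Assumed values, matching the AWS response shape (e.g. ChecksumSHA256)
    const xmlTags = { crc32: 'ChecksumCRC32', sha256: 'ChecksumSHA256' /* ... */ };

    const checksumXml = (algo, digest, type) => [
        '<Checksum>',
        `<${xmlTags[algo]}>${digest}</${xmlTags[algo]}>`,
        `<ChecksumType>${type}</ChecksumType>`,
        '</Checksum>',
    ].join('');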
diff --git a/tests/unit/api/objectGetAttributes.js b/tests/unit/api/objectGetAttributes.js
index 2155731560..7244efd0ec 100644
--- a/tests/unit/api/objectGetAttributes.js
+++ b/tests/unit/api/objectGetAttributes.js
@@ -25,15 +25,18 @@ const postBody = Buffer.from(body, 'utf8');
const expectedMD5 = 'fc3ff98e8c6a0d3087d515c0473f8677';
// Promisify helper for functions with non-standard callback signatures
-const promisify = fn => (...args) => new Promise((resolve, reject) => {
- fn(...args, (err, ...results) => {
- if (err) {
- reject(err);
- } else {
- resolve(results);
- }
- });
-});
+const promisify =
+ fn =>
+ (...args) =>
+ new Promise((resolve, reject) => {
+ fn(...args, (err, ...results) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(results);
+ }
+ });
+ });
const bucketPutAsync = promisify(bucketPut);
const bucketPutVersioningAsync = promisify(bucketPutVersioning);
@@ -106,7 +109,7 @@ describe('objectGetAttributes API', () => {
assert.strictEqual(
err.description,
'The x-amz-object-attributes header specifying the attributes ' +
- 'to be retrieved is either missing or empty',
+ 'to be retrieved is either missing or empty',
);
}
});
@@ -176,12 +179,7 @@ describe('objectGetAttributes API', () => {
});
it('should return all attributes', async () => {
- const testGetRequest = createGetAttributesRequest([
- 'ETag',
- 'ObjectParts',
- 'StorageClass',
- 'ObjectSize',
- ]);
+ const testGetRequest = createGetAttributesRequest(['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize']);
const { xml, responseHeaders } = await objectGetAttributes(authInfo, testGetRequest, log);
assert(xml, 'Response XML should be present');
@@ -298,8 +296,7 @@ describe('objectGetAttributes API with multipart upload', () => {
completeParts.push(`<Part><PartNumber>${i}</PartNumber><ETag>"${partHash}"</ETag></Part>`);
}
- const completeBody =
- `<CompleteMultipartUpload>${completeParts.join('')}</CompleteMultipartUpload>`;
+ const completeBody = `<CompleteMultipartUpload>${completeParts.join('')}</CompleteMultipartUpload>`;
const completeRequest = {
bucketName,
@@ -654,9 +651,11 @@ describe('objectGetAttributes API with checksum', () => {
const expectedDigests = {};
before(async () => {
- await Promise.all(Object.keys(algorithms).map(async name => {
- expectedDigests[name] = await algorithms[name].digest(postBody);
- }));
+ await Promise.all(
+ Object.keys(algorithms).map(async name => {
+ expectedDigests[name] = await algorithms[name].digest(postBody);
+ }),
+ );
});
beforeEach(async () => {
@@ -664,8 +663,8 @@ describe('objectGetAttributes API with checksum', () => {
await bucketPutAsync(authInfo, testPutBucketRequest, log);
});
- Object.entries(algorithms).forEach(([name, { getObjectAttributesXMLTag }]) => {
- it(`should return ${getObjectAttributesXMLTag} when object has ${name} checksum`, async () => {
+ Object.entries(algorithms).forEach(([name, { xmlTag }]) => {
+ it(`should return ${xmlTag} when object has ${name} checksum`, async () => {
const testPutObjectRequest = new DummyRequest(
{
bucketName,
@@ -688,11 +687,11 @@ describe('objectGetAttributes API with checksum', () => {
const response = result.GetObjectAttributesResponse;
assert(response.Checksum, 'Checksum should be present');
- assert.strictEqual(response.Checksum[0][getObjectAttributesXMLTag][0], expectedDigests[name]);
+ assert.strictEqual(response.Checksum[0][xmlTag][0], expectedDigests[name]);
assert.strictEqual(response.Checksum[0].ChecksumType[0], 'FULL_OBJECT');
});
- it(`should return ${getObjectAttributesXMLTag} along with other attributes`, async () => {
+ it(`should return ${xmlTag} along with other attributes`, async () => {
const testPutObjectRequest = new DummyRequest(
{
bucketName,
@@ -717,7 +716,7 @@ describe('objectGetAttributes API with checksum', () => {
assert(response.ETag, 'ETag should be present');
assert(response.ObjectSize, 'ObjectSize should be present');
assert(response.Checksum, 'Checksum should be present');
- assert.strictEqual(response.Checksum[0][getObjectAttributesXMLTag][0], expectedDigests[name]);
+ assert.strictEqual(response.Checksum[0][xmlTag][0], expectedDigests[name]);
assert.strictEqual(response.Checksum[0].ChecksumType[0], 'FULL_OBJECT');
});
});
@@ -730,11 +729,7 @@ describe('objectGetAttributes API with checksum', () => {
const { xml } = await objectGetAttributes(authInfo, testGetRequest, log);
const result = await parseStringPromise(xml);
- assert.strictEqual(
- result.GetObjectAttributesResponse.Checksum,
- undefined,
- 'Checksum should not be present',
- );
+ assert.strictEqual(result.GetObjectAttributesResponse.Checksum, undefined, 'Checksum should not be present');
});
it('should not return Checksum when not requested', async () => {
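As in the functional test earlier, the 'not requested' cases rely on objectGetAttributes serializing only the attributes named in the x-amz-object-attributes header. A sketch of the request shape, assuming createGetAttributesRequest is the same local helper used throughout this file:

    // Checksum deliberately omitted from the attribute list
    const testGetRequest = createGetAttributesRequest(['ETag', 'ObjectSize']);
    // i.e. the header sent is: x-amz-object-attributes: ETag,ObjectSize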
diff --git a/yarn.lock b/yarn.lock
index 0c6ce95e2a..66231c1951 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -6143,9 +6143,9 @@ arraybuffer.prototype.slice@^1.0.4:
optionalDependencies:
ioctl "^2.0.2"
-"arsenal@git+https://github.com/scality/Arsenal#8.4.1":
- version "8.4.1"
- resolved "git+https://github.com/scality/Arsenal#6b3b58b152ac23d29176ab1f24f49f8eda3145b2"
+"arsenal@git+https://github.com/scality/Arsenal#8.4.2":
+ version "8.4.2"
+ resolved "git+https://github.com/scality/Arsenal#2312744c5e6bdd4d0cc9d60bf4cdcf10f32461e6"
dependencies:
"@aws-sdk/client-kms" "^3.975.0"
"@aws-sdk/client-s3" "^3.975.0"