@@ -1473,6 +1473,8 @@ message PurgePathRequest {
optional string deletedDir = 3;
repeated KeyInfo deletedSubFiles = 4;
repeated KeyInfo markDeletedSubDirs = 5;
repeated hadoop.hdds.KeyValue deleteRangeSubFiles = 6;
repeated hadoop.hdds.KeyValue deleteRangeSubDirs = 7;
}

message DeleteOpenKeysRequest {
@@ -27,12 +27,13 @@
public class DeleteKeysResult {

private List<OmKeyInfo> keysToDelete;

private boolean processedKeys;
private List<ExclusiveRange> keyRanges;

public DeleteKeysResult(List<OmKeyInfo> keysToDelete, boolean processedKeys) {
public DeleteKeysResult(List<OmKeyInfo> keysToDelete, List<ExclusiveRange> keyRanges, boolean processedKeys) {
this.keysToDelete = keysToDelete;
this.processedKeys = processedKeys;
this.keyRanges = keyRanges;
}

public List<OmKeyInfo> getKeysToDelete() {
@@ -43,4 +44,30 @@ public boolean isProcessedKeys() {
return processedKeys;
}

public List<ExclusiveRange> getKeyRanges() {
return keyRanges;
}

/**
* Represents a half-open key range {@code [startKey, exclusiveEndKey)} used
* for RocksDB deleteRange operations.
*/
public static class ExclusiveRange {
private final String startKey;
private final String exclusiveEndKey;

public ExclusiveRange(String startKey, String exclusiveEndKey) {
this.startKey = startKey;
this.exclusiveEndKey = exclusiveEndKey;
}

public String getExclusiveEndKey() {
return exclusiveEndKey;
}

public String getStartKey() {
return startKey;
}
}

}
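
For context, a minimal sketch of how such a half-open range maps onto RocksDB's deleteRange, which removes keys in [beginKey, endKey) — the end key itself survives. This assumes the rocksdbjni dependency on the classpath; the keys and DB path are purely illustrative:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class DeleteRangeDemo {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options opts = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(opts, "/tmp/delete-range-demo")) {
      db.put("dir1/a".getBytes(), "v".getBytes());
      db.put("dir1/b".getBytes(), "v".getBytes());
      db.put("dir1/c".getBytes(), "v".getBytes());

      // Half-open range [dir1/a, dir1/c): removes dir1/a and dir1/b,
      // leaves dir1/c (the exclusive end key) untouched.
      db.deleteRange("dir1/a".getBytes(), "dir1/c".getBytes());

      System.out.println(db.get("dir1/a".getBytes()));          // null -> deleted
      System.out.println(db.get("dir1/c".getBytes()) != null);  // true -> kept
    }
  }
}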
@@ -21,6 +21,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ;
import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT;
import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
@@ -2295,15 +2296,33 @@ private <T extends WithParentObjectId> DeleteKeysResult gatherSubPathsWithIterat
List<OmKeyInfo> keyInfos = new ArrayList<>();
String seekFileInDB = metadataManager.getOzonePathKey(volumeId, bucketId, parentInfo.getObjectID(), "");
try (TableIterator<String, ? extends KeyValue<String, T>> iterator = table.iterator(seekFileInDB)) {
while (iterator.hasNext() && remainingNum > 0) {
String startKey = null;
String lastLoopExclusiveKey = getLexicographicallyHigherString(seekFileInDB);
List<DeleteKeysResult.ExclusiveRange> keyRanges = new ArrayList<>();
while (iterator.hasNext()) {
KeyValue<String, T> entry = iterator.next();
KeyValue<String, OmKeyInfo> keyInfo = deleteKeyTransformer.apply(entry);
if (remainingNum <= 0) {
lastLoopExclusiveKey = keyInfo.getKey();
break;
}
if (deleteKeyFilter.apply(keyInfo)) {
keyInfos.add(keyInfo.getValue());
remainingNum--;
if (startKey == null) {
startKey = keyInfo.getKey();
}
} else {
if (startKey != null) {
keyRanges.add(new DeleteKeysResult.ExclusiveRange(startKey, keyInfo.getKey()));
}
startKey = null;
}
}
return new DeleteKeysResult(keyInfos, !iterator.hasNext());
if (startKey != null) {
keyRanges.add(new DeleteKeysResult.ExclusiveRange(startKey, lastLoopExclusiveKey));
}
return new DeleteKeysResult(keyInfos, keyRanges, !iterator.hasNext());
}
}
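
The loop above collapses contiguous runs of filtered keys into half-open ranges. Below is a minimal, self-contained sketch of that accumulation pattern, simplified to drop the quota (remainingNum) handling; the upperBound parameter plays the role of lastLoopExclusiveKey, the exclusive bound used to close a run that survives to the end of the scan:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;

public class RangeAccumulator {
  // Collapse contiguous runs of matching keys in a sorted stream into
  // half-open [start, end) ranges. A non-matching key closes the current
  // run; a run still open when the stream ends is closed with the
  // caller-supplied exclusive upper bound.
  static List<String[]> collectRanges(List<String> sortedKeys,
      Predicate<String> filter, String upperBound) {
    List<String[]> ranges = new ArrayList<>();
    String start = null;
    for (String key : sortedKeys) {
      if (filter.test(key)) {
        if (start == null) {
          start = key; // open a new run at the first matching key
        }
      } else if (start != null) {
        ranges.add(new String[] {start, key}); // non-match closes the run
        start = null;
      }
    }
    if (start != null) {
      ranges.add(new String[] {start, upperBound}); // close the trailing run
    }
    return ranges;
  }

  public static void main(String[] args) {
    List<String> keys = Arrays.asList("k1", "k2", "k3", "k4", "k5");
    // Filter out k3: expect [k1, k3) and [k4, k6).
    for (String[] r : collectRanges(keys, k -> !k.equals("k3"), "k6")) {
      System.out.println("[" + r[0] + ", " + r[1] + ")");
    }
  }
}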

@@ -31,6 +31,7 @@
import java.util.Map;
import java.util.UUID;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.ozone.OmUtils;
@@ -147,22 +148,22 @@ public void processPaths(
deletedSpaceOmMetadataManager.getDeletedDirTable().putWithBatch(deletedSpaceBatchOperation,
ozoneDeleteKey, keyInfo);

keySpaceOmMetadataManager.getDirectoryTable().deleteWithBatch(keySpaceBatchOperation,
    ozoneDbKey);

Contributor (review comment): @aryangupta1998 We should keep the individual deletes to keep the change backward compatible: followers can have a few unflushed transactions, and that can lead to split brain. I would suggest creating a new OmDirectoriesPurgeRangeRequest and Response that extend the old implementation, plus a new proto request type enum, so the new and old implementations run in parallel. We should also bump the OM server version for this.
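
As an aside, a minimal, self-contained sketch of the compatibility gate the reviewer describes — all names and the version constant are hypothetical, not the project's actual API:

public class VersionGatedPurge {
  // Assumed version constant; the real value would come from whichever
  // OM layout feature introduces range-based purging.
  static final int OM_VERSION_DELETE_RANGE = 7;

  // Choose the request type from the lowest version in the quorum, so a
  // follower that has not finalized the new version never receives a
  // range request it cannot apply.
  static String choosePurgeRequest(int minQuorumVersion) {
    return minQuorumVersion >= OM_VERSION_DELETE_RANGE
        ? "OmDirectoriesPurgeRangeRequest" // reviewer's proposed new request
        : "PurgePathRequest";              // existing per-key request
  }

  public static void main(String[] args) {
    System.out.println(choosePurgeRequest(6)); // PurgePathRequest
    System.out.println(choosePurgeRequest(7)); // OmDirectoriesPurgeRangeRequest
  }
}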

if (LOG.isDebugEnabled()) {
LOG.debug("markDeletedDirList KeyName: {}, DBKey: {}",
keyInfo.getKeyName(), ozoneDbKey);
}
}

for (HddsProtos.KeyValue keyRanges : path.getDeleteRangeSubDirsList()) {
keySpaceOmMetadataManager.getDirectoryTable()
.deleteRangeWithBatch(keySpaceBatchOperation, keyRanges.getKey(), keyRanges.getValue());
}

for (OzoneManagerProtocolProtos.KeyInfo key : deletedSubFilesList) {
OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key)
.withCommittedKeyDeletedFlag(true);
String ozoneDbKey = keySpaceOmMetadataManager.getOzonePathKey(volumeId,
bucketId, keyInfo.getParentObjectID(), keyInfo.getFileName());
keySpaceOmMetadataManager.getKeyTable(getBucketLayout())
.deleteWithBatch(keySpaceBatchOperation, ozoneDbKey);

if (LOG.isDebugEnabled()) {
LOG.info("Move keyName:{} to DeletedTable DBKey: {}",
@@ -182,6 +183,11 @@ public void processPaths(
deletedKey, repeatedOmKeyInfo);
}

for (HddsProtos.KeyValue keyRanges : path.getDeleteRangeSubFilesList()) {
keySpaceOmMetadataManager.getKeyTable(getBucketLayout())
.deleteRangeWithBatch(keySpaceBatchOperation, keyRanges.getKey(), keyRanges.getValue());
}

if (!openKeyInfoMap.isEmpty()) {
for (Map.Entry<String, OmKeyInfo> entry : openKeyInfoMap.entrySet()) {
keySpaceOmMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch(
@@ -56,6 +56,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.BackgroundTask;
import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -436,16 +437,19 @@ private Optional<PurgePathRequest> prepareDeleteDirRequest(
if (purgeDeletedDir != null) {
remainNum.addAndGet(-1);
}
return Optional.of(wrapPurgeRequest(volumeBucketId.getVolumeId(), volumeBucketId.getBucketId(),
purgeDeletedDir, subFiles, subDirs));
return Optional.of(
wrapPurgeRequest(volumeBucketId.getVolumeId(), volumeBucketId.getBucketId(), purgeDeletedDir, subFiles, subDirs,
subDirDeleteResult.getKeyRanges(), subFileDeleteResult.getKeyRanges()));
}

private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest(
final long volumeId,
final long bucketId,
final String purgeDeletedDir,
final List<OmKeyInfo> purgeDeletedFiles,
final List<OmKeyInfo> markDirsAsDeleted) {
final List<OmKeyInfo> markDirsAsDeleted,
List<DeleteKeysResult.ExclusiveRange> dirExclusiveRanges,
List<DeleteKeysResult.ExclusiveRange> fileExclusiveRanges) {
// Put all keys to be purged in a list
PurgePathRequest.Builder purgePathsRequest = PurgePathRequest.newBuilder();
purgePathsRequest.setVolumeId(volumeId);
@@ -467,6 +471,20 @@ private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest(
dir.getProtobuf(ClientVersion.CURRENT_VERSION));
}

if (dirExclusiveRanges != null) {
for (DeleteKeysResult.ExclusiveRange range : dirExclusiveRanges) {
purgePathsRequest.addDeleteRangeSubDirs(
HddsProtos.KeyValue.newBuilder().setKey(range.getStartKey()).setValue(range.getExclusiveEndKey()).build());
}
}

if (fileExclusiveRanges != null) {
for (DeleteKeysResult.ExclusiveRange range : fileExclusiveRanges) {
purgePathsRequest.addDeleteRangeSubFiles(
HddsProtos.KeyValue.newBuilder().setKey(range.getStartKey()).setValue(range.getExclusiveEndKey()).build());
}
}

return purgePathsRequest.build();
}
