TestCleanerWithReplication.java
@@ -37,16 +37,35 @@
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;

import static org.junit.Assert.assertEquals;

@RunWith(Parameterized.class)
public class TestCleanerWithReplication extends CompactorTest {

  private Path cmRootDirectory;
  private static MiniDFSCluster miniDFSCluster;
  private final String dbName = "TestCleanerWithReplication";

  private final boolean useMinHistoryWriteId;

  public TestCleanerWithReplication(boolean useMinHistoryWriteId) {
    this.useMinHistoryWriteId = useMinHistoryWriteId;
  }

  @Parameters(name = "useMinHistoryWriteId={0}")
  public static Collection<Object[]> parameters() {
    return Arrays.asList(
        new Object[][]{{true}, {false}});
  }

  @Before
  public void setup() throws Exception {
    HiveConf conf = new HiveConf();
@@ -63,6 +82,11 @@ public void setup() throws Exception {
    ms.createDatabase(db);
  }

  @Override
  protected boolean useMinHistoryWriteId() {
    return useMinHistoryWriteId;
  }

  @BeforeClass
  public static void classLevelSetup() throws IOException {
    Configuration hadoopConf = new Configuration();
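For context, a minimal sketch of how the base class could consume this new hook when it builds the test's configuration. CompactorTest's actual plumbing is not part of this diff, and the MetastoreConf variable named below (TXN_USE_MIN_HISTORY_WRITE_ID) is an assumption, not something this change confirms:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public abstract class CompactorTest {

  // Parameterized subclasses, like TestCleanerWithReplication above, override this.
  protected boolean useMinHistoryWriteId() {
    return false;
  }

  // Hypothetical wiring: push the flag into the metastore config during setup.
  // The conf var name is an assumption, not taken from this diff.
  protected void applyMinHistorySetting(HiveConf conf) {
    MetastoreConf.setBoolVar(conf,
        MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_WRITE_ID, useMinHistoryWriteId());
  }
}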
ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java (1 addition, 1 deletion)
@@ -1380,7 +1380,7 @@ public static AcidDirectory getAcidState(FileSystem fileSystem, Path candidateDi
     // Filter out all delta directories that are shadowed by others
     findBestWorkingDeltas(writeIdList, directory);

-    if(directory.getOldestBase() != null && directory.getBase() == null &&
+    if (directory.getOldestBase() != null && directory.getBase() == null &&
         isCompactedBase(directory.getOldestBase(), fs, dirSnapshots)) {
       /*
        * If here, it means there was a base_x (> 1 perhaps) but none were suitable for given
(next file; path not rendered in this view)
@@ -163,7 +163,9 @@ fs, path, getConf(), validWriteIdList, Ref.from(false), false,

     // Make sure there are no leftovers below the compacted watermark
     boolean success = false;
-    getConf().set(ValidTxnList.VALID_TXNS_KEY, new ValidReadTxnList().toString());
+    if (info.minOpenWriteId < 0) {
+      getConf().set(ValidTxnList.VALID_TXNS_KEY, new ValidReadTxnList().toString());
+    }

     dir = AcidUtils.getAcidState(
         fs, path, getConf(),
(next file; path not rendered in this view)
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor.service;

-import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -196,9 +195,6 @@ public Boolean compact(Table table, CompactionInfo ci) throws Exception {
     txnWriteIds.addTableValidWriteIdList(tblValidWriteIds);
     conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString());

-    msc.addWriteIdsToMinHistory(compactionTxn.getTxnId(),
-        ImmutableMap.of(fullTableName, txnWriteIds.getMinOpenWriteId(fullTableName)));
-
     ci.highestWriteId = tblValidWriteIds.getHighWatermark();
     //this writes TXN_COMPONENTS to ensure that if compactorTxnId fails, we keep metadata about
     //it until after any data written by it are physically removed
(next file; path not rendered in this view)
@@ -693,9 +693,10 @@ public void testRevokeTimedOutWorkers() throws Exception {
     rqst = new CompactionRequest("foo", "baz", CompactionType.MINOR);
     txnHandler.compact(rqst);

-    assertNotNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred-193892", WORKER_VERSION)));
+    FindNextCompactRequest nextCompactRqst = aFindNextCompactRequest("fred-193892", WORKER_VERSION);
+    assertNotNull(txnHandler.findNextToCompact(nextCompactRqst));
     Thread.sleep(200);
-    assertNotNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred-193892", WORKER_VERSION)));
+    assertNotNull(txnHandler.findNextToCompact(nextCompactRqst));
     txnHandler.revokeTimedoutWorkers(100);

     ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java (125 additions, 130 deletions)

Large diffs are not rendered by default.

ql/src/test/org/apache/hadoop/hive/ql/testutil/TxnStoreHelper.java (new file, 73 additions)
@@ -0,0 +1,73 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.testutil;

import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

import java.util.Collections;

import static org.apache.hadoop.hive.metastore.txn.TxnHandler.ConfVars;

public final class TxnStoreHelper {

  private final TxnStore txnHandler;

  private TxnStoreHelper(TxnStore txnHandler) {
    this.txnHandler = txnHandler;
  }

  public static TxnStoreHelper wrap(TxnStore txnHandler) {
    return new TxnStoreHelper(txnHandler);
  }

  /**
   * Allocates a new write ID for the table in the given transaction.
   */
  public long allocateTableWriteId(String dbName, String tblName, long txnId)
      throws TxnAbortedException, NoSuchTxnException, MetaException {
    AllocateTableWriteIdsRequest request = new AllocateTableWriteIdsRequest(dbName, tblName.toLowerCase());
    request.setTxnIds(Collections.singletonList(txnId));

    AllocateTableWriteIdsResponse response = txnHandler.allocateTableWriteIds(request);
    return response.getTxnToWriteIds().getFirst().getWriteId();
  }

  /**
   * Registers the min open write ID for the table in the given transaction.
   */
  public void registerMinOpenWriteId(String dbName, String tblName, long txnId) throws MetaException {
    if (!ConfVars.useMinHistoryWriteId()) {
      return;
    }
    long maxWriteId = txnHandler.getMaxAllocatedTableWriteId(
        new MaxAllocatedTableWriteIdRequest(dbName, tblName.toLowerCase()))
        .getMaxWriteId();

    txnHandler.addWriteIdsToMinHistory(txnId,
        Collections.singletonMap(
            TxnUtils.getFullTableName(dbName, tblName), maxWriteId + 1));
  }
}
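A minimal usage sketch for the new helper, assuming a test that already holds a TxnStore backed by a HiveConf; the user, host, database, and table names below are illustrative, and the call order reflects the maxWriteId + 1 arithmetic above rather than anything this diff prescribes:

TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
long txnId = txnHandler.openTxns(new OpenTxnRequest(1, "hiveuser", "localhost"))
    .getTxn_ids().get(0);

TxnStoreHelper helper = TxnStoreHelper.wrap(txnHandler);
// Registering before allocating means maxWriteId + 1 lines up with the write ID
// this txn is about to receive (a no-op unless min-history-write-id is enabled).
helper.registerMinOpenWriteId("default", "acid_tbl", txnId);
long writeId = helper.allocateTableWriteId("default", "acid_tbl", txnId);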