diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java index 23048e708c00..0f34115ee1b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver; -import static junit.framework.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; @@ -46,23 +45,19 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ RegionServerTests.class, MediumTests.class }) /* * This test verifies that the scenarios illustrated by HBASE-10850 work w.r.t. 
essential column * family optimization */ +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestSCVFWithMiniCluster { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCVFWithMiniCluster.class); - private static final TableName HBASE_TABLE_NAME = TableName.valueOf("TestSCVFWithMiniCluster"); private static final byte[] FAMILY_A = Bytes.toBytes("a"); @@ -77,7 +72,7 @@ public class TestSCVFWithMiniCluster { private int expected = 1; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { HBaseTestingUtil util = new HBaseTestingUtil(); @@ -128,7 +123,7 @@ public static void setUp() throws Exception { ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { htable.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index e693ef4a3776..335e49758263 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -17,24 +17,24 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; @@ -48,13 +48,10 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,14 +59,11 @@ * Test a multi-column scanner when there is a Bloom filter false-positive. This is needed for the * multi-column Bloom filter optimization. 
*/ -@RunWith(Parameterized.class) -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: bloomType={0}") public class TestScanWithBloomError { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScanWithBloomError.class); - private static final Logger LOG = LoggerFactory.getLogger(TestScanWithBloomError.class); private static final String TABLE_NAME = "ScanWithBloomError"; @@ -86,27 +80,26 @@ public class TestScanWithBloomError { private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Parameters - public static final Collection parameters() { - List configurations = new ArrayList<>(); + public static final Stream parameters() { + List configurations = new ArrayList<>(); for (BloomType bloomType : BloomType.values()) { - configurations.add(new Object[] { bloomType }); + configurations.add(Arguments.of(bloomType)); } - return configurations; + return configurations.stream(); } public TestScanWithBloomError(BloomType bloomType) { this.bloomType = bloomType; } - @Before + @BeforeEach public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); fs = FileSystem.get(conf); conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); } - @Test + @TestTemplate public void testThreeStoreFiles() throws IOException { ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder .newBuilder(Bytes.toBytes(FAMILY)).setCompressionType(Compression.Algorithm.GZ) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 97ae16624225..a5078c129597 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -20,18 +20,17 @@ import static 
org.apache.hadoop.hbase.HBaseTestingUtil.START_KEY_BYTES; import static org.apache.hadoop.hbase.HBaseTestingUtil.fam1; import static org.apache.hadoop.hbase.HBaseTestingUtil.fam2; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTestConst; @@ -57,27 +56,19 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Test of a long-lived scanner validating as we go. 
*/ -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScanner.class); - - @Rule - public TestName name = new TestName(); - private static final Logger LOG = LoggerFactory.getLogger(TestScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -492,10 +483,11 @@ public void testScanAndRealConcurrentFlush() throws Exception { * Make sure scanner returns correct result when we run a major compaction with deletes. */ @Test - public void testScanAndConcurrentMajorCompact() throws Exception { - TableDescriptor htd = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + public void testScanAndConcurrentMajorCompact(TestInfo testInfo) throws Exception { + TableDescriptor htd = + TEST_UTIL.createTableDescriptor(TableName.valueOf(testInfo.getTestMethod().get().getName()), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); this.region = TEST_UTIL.createLocalHRegion(htd, null, null); Table hri = new RegionAsTable(region); @@ -525,7 +517,7 @@ public void testScanAndConcurrentMajorCompact() throws Exception { s.next(results); // make sure returns column2 of firstRow - assertTrue("result is not correct, keyValues : " + results, results.size() == 1); + assertEquals(1, results.size(), "result is not correct, keyValues : " + results); assertTrue(CellUtil.matchingRows(results.get(0), firstRowBytes)); assertTrue(CellUtil.matchingFamily(results.get(0), fam2)); @@ -533,7 +525,7 @@ public void testScanAndConcurrentMajorCompact() throws Exception { s.next(results); // get secondRow - assertTrue(results.size() == 2); + assertEquals(2, 
results.size()); assertTrue(CellUtil.matchingRows(results.get(0), secondRowBytes)); assertTrue(CellUtil.matchingFamily(results.get(0), fam1)); assertTrue(CellUtil.matchingFamily(results.get(1), fam2)); @@ -544,7 +536,7 @@ public void testScanAndConcurrentMajorCompact() throws Exception { /** * Count table. - * @param hri Region + * @param countTable Table * @param flushIndex At what row we start the flush. * @param concurrent if the flush should be concurrent or sync. * @return Count of rows found. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerBlockSizeLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerBlockSizeLimits.java index c70c7a35133a..dc8808bab92a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerBlockSizeLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerBlockSizeLimits.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.HashSet; @@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -46,19 +45,14 @@ import org.apache.hadoop.hbase.filter.SkipFilter; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ LargeTests.class }) +@Tag(LargeTests.TAG) public class TestScannerBlockSizeLimits { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerBlockSizeLimits.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE = TableName.valueOf("TestScannerBlockSizeLimits"); private static final byte[] FAMILY1 = Bytes.toBytes("0"); @@ -72,7 +66,7 @@ public class TestScannerBlockSizeLimits { private static final byte[] COLUMN3 = Bytes.toBytes(2); private static final byte[] COLUMN5 = Bytes.toBytes(5); - @BeforeClass + @BeforeAll public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, 4200); @@ -81,7 +75,7 @@ public static void setUp() throws Exception { createTestData(); } - @Before + @BeforeEach public void setupEach() throws Exception { HRegionServer regionServer = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); for (HRegion region : regionServer.getRegions(TABLE)) { @@ -169,8 +163,8 @@ public void testCheckLimitAfterFilterRowKey() throws IOException { rows.add(Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } if (rows.contains(3)) { - assertFalse("expected row3 to come all in one result, but found it in two results", - foundRow3); + assertFalse(foundRow3, + "expected row3 to come all in one result, but found it in two results"); assertEquals(1, rows.size()); foundRow3 = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index 
ce07308c2e79..e1bc29d8ff94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.RSRpcServices.DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.when; import java.io.IOException; @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTestConst; @@ -63,13 +62,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -87,13 +85,9 @@ * the time limit is reached, the server will return to the Client whatever Results it has * accumulated (potentially empty). 
*/ -@Category(LargeTests.class) +@Tag(LargeTests.TAG) public class TestScannerHeartbeatMessages { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerHeartbeatMessages.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static AsyncConnection CONN; @@ -134,7 +128,7 @@ public class TestScannerHeartbeatMessages { // Similar with row sleep time. private static int DEFAULT_CF_SLEEP_TIME = 300; - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -219,18 +213,18 @@ static ArrayList createPuts(byte[][] rows, byte[][] families, byte[][] qual return puts; } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void setupBeforeTest() throws Exception { disableSleeping(); } - @After + @AfterEach public void teardownAfterTest() throws Exception { disableSleeping(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerLeaseCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerLeaseCount.java index 307c5733be20..da5be1f32f93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerLeaseCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerLeaseCount.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -40,22 +39,18 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestScannerLeaseCount { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerLeaseCount.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("ScannerLeaseCount"); private static final byte[] FAM = Bytes.toBytes("Fam"); @@ -70,7 +65,7 @@ public class TestScannerLeaseCount { private static Connection CONN; private static Table TABLE; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { StartTestingClusterOption option = StartTestingClusterOption.builder().rsClass(MockedQuotaManagerRegionServer.class).build(); @@ -83,7 +78,7 @@ public static void setUp() throws Exception { UTIL.loadTable(TABLE, FAM); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { try { TABLE.close(); @@ -96,7 +91,7 @@ public static void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } - @Before + @BeforeEach public void before() { SHOULD_THROW = false; SCAN_SEEN.set(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java index cca8d0273444..c615c7cd8dd1 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer; @@ -34,47 +33,39 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScannerRPCScanMetrics { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerRPCScanMetrics.class); - private static final Logger LOG = LoggerFactory.getLogger(TestScannerRPCScanMetrics.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAMILY = Bytes.toBytes("testFamily"); private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier"); private static final byte[] VALUE = Bytes.toBytes("testValue"); 
- @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithScanMetrics.class.getName()); TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test - public void testScannerRPCScanMetrics() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + public void testScannerRPCScanMetrics(TestInfo testInfo) throws Exception { + final TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); byte[][] splits = new byte[1][]; splits[0] = Bytes.toBytes("row-4"); Table ht = TEST_UTIL.createTable(tableName, FAMILY, splits); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java index 93f6d595688e..db6808438fc3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -27,10 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; import 
org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -45,22 +43,18 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScannerRetriableFailure { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerRetriableFailure.class); - private static final Logger LOG = LoggerFactory.getLogger(TestScannerRetriableFailure.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -68,9 +62,6 @@ public class TestScannerRetriableFailure { private static final String FAMILY_NAME_STR = "f"; private static final byte[] FAMILY_NAME = Bytes.toBytes(FAMILY_NAME_STR); - @Rule - public TableNameTestRule testTable = new TableNameTestRule(); - public static class FaultyScannerObserver implements RegionCoprocessor, RegionObserver { private int faults = 0; @@ -100,13 +91,13 @@ private static void setupConf(Configuration conf) { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FaultyScannerObserver.class.getName()); } - @BeforeClass + @BeforeAll public static void setup() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { try { 
UTIL.shutdownMiniCluster(); @@ -116,8 +107,8 @@ public static void tearDown() throws Exception { } @Test - public void testFaultyScanner() throws Exception { - TableName tableName = testTable.getTableName(); + public void testFaultyScanner(TestInfo testInfo) throws Exception { + TableName tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); Table table = UTIL.createTable(tableName, FAMILY_NAME); try { final int NUM_ROWS = 100; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerTimeoutHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerTimeoutHandling.java index 8c37239812a7..74ec826a204e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerTimeoutHandling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerTimeoutHandling.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -26,7 +26,6 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -45,13 +44,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; 
-import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,15 +56,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScannerTimeoutHandling { private static final Logger LOG = LoggerFactory.getLogger(TestScannerTimeoutHandling.class); - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerTimeoutHandling.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final MetricsAssertHelper METRICS_ASSERT = CompatibilityFactory.getInstance(MetricsAssertHelper.class); @@ -76,10 +68,7 @@ public class TestScannerTimeoutHandling { private static final TableName TABLE_NAME = TableName.valueOf("foo"); private static Connection CONN; - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Don't report so often so easier to see other rpcs @@ -94,7 +83,7 @@ public static void setUpBeforeClass() throws Exception { CONN = ConnectionFactory.createConnection(conf); } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { CONN.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java index bb43d4447423..276033c5c580 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java @@ -19,6 +19,8 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAX_SEQ_ID_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import java.io.IOException; import java.util.List; @@ -27,7 +29,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -49,28 +50,27 @@ import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestScannerWithBulkload { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerWithBulkload.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setTestName(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } - @BeforeClass + 
@BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } @@ -85,7 +85,7 @@ private static void createTable(Admin admin, TableName tableName) throws IOExcep @Test public void testBulkLoad() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); long l = EnvironmentEdgeManager.currentTime(); Admin admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); @@ -117,7 +117,7 @@ public void testBulkLoad() throws Exception { _c.getQualifierLength())); System.out .println(Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); - Assert.assertEquals("version3", + assertEquals("version3", Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); } } @@ -139,7 +139,7 @@ private Result scanAfterBulkLoad(ResultScanner scanner, Result result, String ex _c.getQualifierLength())); System.out .println(Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); - Assert.assertEquals(expctedVal, + assertEquals(expctedVal, Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); } } @@ -157,7 +157,7 @@ private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nati fs.mkdirs(hfilePath); Path path = new Path(pathStr); HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration()); - Assert.assertNotNull(wf); + assertNotNull(wf); HFileContext context = new HFileContextBuilder().build(); HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create(); KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, @@ -207,9 +207,9 @@ private Table init(Admin admin, long l, Scan scan, TableName tableName) throws E ResultScanner scanner = table.getScanner(scan); Result result = scanner.next(); List cells = result.getColumnCells(Bytes.toBytes("col"), Bytes.toBytes("q")); - 
Assert.assertEquals(1, cells.size()); + assertEquals(1, cells.size()); Cell _c = cells.get(0); - Assert.assertEquals("version1", + assertEquals("version1", Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); scanner.close(); return table; @@ -217,7 +217,7 @@ private Table init(Admin admin, long l, Scan scan, TableName tableName) throws E @Test public void testBulkLoadWithParallelScan() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final long l = EnvironmentEdgeManager.currentTime(); final Admin admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); @@ -259,7 +259,7 @@ public void run() { @Test public void testBulkLoadNativeHFile() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); long l = EnvironmentEdgeManager.currentTime(); Admin admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); @@ -293,7 +293,7 @@ public void testBulkLoadNativeHFile() throws Exception { _c.getQualifierLength())); System.out .println(Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); - Assert.assertEquals("version3", + assertEquals("version3", Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength())); } } @@ -309,7 +309,7 @@ private Scan createScan() { return scan; } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java index d1fd841d8422..d72ad2ec7814 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.jupiter.api.Assertions.assertThrows; + import java.io.IOException; import java.util.List; import java.util.Optional; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -40,35 +41,27 @@ import org.apache.hadoop.hbase.io.hfile.CorruptHFileException; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Tests a scanner on a corrupt hfile. 
*/ -@Category(MediumTests.class) +@Tag(MediumTests.TAG) public class TestScannerWithCorruptHFile { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerWithCorruptHFile.class); - - @Rule - public TestName name = new TestName(); private static final byte[] FAMILY_NAME = Bytes.toBytes("f"); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @BeforeClass + @BeforeAll public static void setup() throws Exception { TEST_UTIL.startMiniCluster(1); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -86,16 +79,16 @@ public boolean preScannerNext(ObserverContext scan(table)); } finally { table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 26d5cadd97b0..0cf6b758f4d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -17,20 +17,23 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collection; import java.util.Deque; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; 
+import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; @@ -54,27 +57,21 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; -@RunWith(Parameterized.class) -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: useFileBasedSFT={0}") public class TestSecureBulkLoadManager { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureBulkLoadManager.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSecureBulkLoadManager.class); private static TableName TABLE = TableName.valueOf(Bytes.toBytes("TestSecureBulkLoadManager")); @@ -99,13 +96,11 @@ public TestSecureBulkLoadManager(Boolean useFileBasedSFT) { this.useFileBasedSFT = useFileBasedSFT; } - @Parameterized.Parameters - public static Collection data() { - Boolean[] data = { false, true }; - return Arrays.asList(data); + public static Stream parameters() { + return Stream.of(Arguments.of(false), Arguments.of(true)); } - @Before + @BeforeEach public void setUp() throws Exception { if (useFileBasedSFT) { 
conf.set(StoreFileTrackerFactory.TRACKER_IMPL, @@ -116,7 +111,7 @@ public void setUp() throws Exception { testUtil.startMiniCluster(); } - @After + @AfterEach public void tearDown() throws Exception { testUtil.shutdownMiniCluster(); testUtil.cleanupTestDir(); @@ -132,7 +127,7 @@ public void tearDown() throws Exception { * finishes earlier when the other bulkload still needs its FileSystems, checks that both * bulkloads succeed. */ - @Test + @TestTemplate public void testForRaceCondition() throws Exception { Consumer fsCreatedListener = new Consumer() { @Override @@ -184,17 +179,17 @@ public void run() { laterBulkload.start(); Threads.shutdown(ealierBulkload); Threads.shutdown(laterBulkload); - Assert.assertNull(t1Exception.get()); - Assert.assertNull(t2Exception.get()); + assertNull(t1Exception.get()); + assertNull(t2Exception.get()); /// check bulkload ok Get get1 = new Get(key1); Get get3 = new Get(key3); Table t = testUtil.getConnection().getTable(TABLE); Result r = t.get(get1); - Assert.assertArrayEquals(r.getValue(FAMILY, COLUMN), value1); + assertArrayEquals(r.getValue(FAMILY, COLUMN), value1); r = t.get(get3); - Assert.assertArrayEquals(r.getValue(FAMILY, COLUMN), value3); + assertArrayEquals(r.getValue(FAMILY, COLUMN), value3); } @@ -224,11 +219,8 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, throw new MyExceptionToAvoidRetry(); // throw exception to avoid retry } }; - try { - h.bulkLoad(TABLE, dir); - Assert.fail("MyExceptionToAvoidRetry is expected"); - } catch (MyExceptionToAvoidRetry e) { // expected - } + assertThrows(MyExceptionToAvoidRetry.class, () -> h.bulkLoad(TABLE, dir), + "MyExceptionToAvoidRetry is expected"); } private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java index 
f5972424c5e6..03d7dfe3b682 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + import java.io.IOException; import java.util.UUID; import org.apache.hadoop.conf.Configuration; @@ -25,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -34,32 +37,21 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Tests for failedBulkLoad logic to make sure staged files are returned to their original location * if the bulkload have failed. 
*/ -@Category({ MiscTests.class, LargeTests.class }) +@Tag(MiscTests.TAG) +@Tag(LargeTests.TAG) public class TestSecureBulkloadListener { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureBulkloadListener.class); - - @ClassRule - public static TemporaryFolder testFolder = new TemporaryFolder(); private Configuration conf; - private MiniDFSCluster cluster; private HBaseTestingUtil htu; private DistributedFileSystem dfs; private final byte[] randomBytes = new byte[100]; @@ -69,12 +61,11 @@ public class TestSecureBulkloadListener { private static byte[] FAMILY = Bytes.toBytes("family"); private static final String STAGING_DIR = "staging"; private static final String CUSTOM_STAGING_DIR = "customStaging"; + private String methodName; - @Rule - public TestName name = new TestName(); - - @Before - public void setUp() throws Exception { + @BeforeEach + public void setUp(TestInfo testInfo) throws Exception { + this.methodName = testInfo.getTestMethod().get().getName(); Bytes.random(randomBytes); htu = new HBaseTestingUtil(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks @@ -83,19 +74,17 @@ public void setUp() throws Exception { new String[] { host1, host2, host3 }); conf = htu.getConfiguration(); - cluster = htu.getDFSCluster(); dfs = (DistributedFileSystem) FileSystem.get(conf); } - @After - public void tearDownAfterClass() throws Exception { + @AfterEach + public void tearDown() throws Exception { htu.shutdownMiniCluster(); } @Test public void testMovingStagedFile() throws Exception { - Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + Path stagingDirPath = new Path(dfs.getWorkingDirectory(), new Path(methodName, STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } @@ -105,7 +94,7 @@ public void testMovingStagedFile() throws Exception { // creating file to load String srcFile = 
createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(srcPath)); Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY))); if (!dfs.exists(stagedFamily)) { @@ -115,19 +104,18 @@ public void testMovingStagedFile() throws Exception { // moving file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null); Path stagedPath = new Path(stagedFile); - Assert.assertTrue(dfs.exists(stagedPath)); - Assert.assertFalse(dfs.exists(srcPath)); + assertTrue(dfs.exists(stagedPath)); + assertFalse(dfs.exists(srcPath)); // moving files back to original location after a failed bulkload listener.failedBulkLoad(FAMILY, stagedFile); - Assert.assertFalse(dfs.exists(stagedPath)); - Assert.assertTrue(dfs.exists(srcPath)); + assertFalse(dfs.exists(stagedPath)); + assertTrue(dfs.exists(srcPath)); } @Test public void testMovingStagedFileWithCustomStageDir() throws Exception { - Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + Path stagingDirPath = new Path(dfs.getWorkingDirectory(), new Path(methodName, STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } @@ -137,7 +125,7 @@ public void testMovingStagedFileWithCustomStageDir() throws Exception { // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(srcPath)); Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY))); if (!dfs.exists(stagedFamily)) { @@ -145,7 +133,7 @@ public void testMovingStagedFileWithCustomStageDir() throws Exception { } Path customStagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), CUSTOM_STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(methodName, CUSTOM_STAGING_DIR)); Path customStagedFamily = new Path(customStagingDirPath, 
new Path(Bytes.toString(FAMILY))); if (!dfs.exists(customStagedFamily)) { dfs.mkdirs(customStagedFamily); @@ -155,19 +143,18 @@ public void testMovingStagedFileWithCustomStageDir() throws Exception { String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, customStagingDirPath.toString()); Path stagedPath = new Path(stagedFile); - Assert.assertTrue(dfs.exists(stagedPath)); - Assert.assertFalse(dfs.exists(srcPath)); + assertTrue(dfs.exists(stagedPath)); + assertFalse(dfs.exists(srcPath)); // moving files back to original location after a failed bulkload listener.failedBulkLoad(FAMILY, stagedFile); - Assert.assertFalse(dfs.exists(stagedPath)); - Assert.assertTrue(dfs.exists(srcPath)); + assertFalse(dfs.exists(stagedPath)); + assertTrue(dfs.exists(srcPath)); } @Test public void testCopiedStagedFile() throws Exception { - Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + Path stagingDirPath = new Path(dfs.getWorkingDirectory(), new Path(methodName, STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } @@ -177,7 +164,7 @@ public void testCopiedStagedFile() throws Exception { // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(srcPath)); Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY))); if (!dfs.exists(stagedFamily)) { @@ -187,19 +174,18 @@ public void testCopiedStagedFile() throws Exception { // copying file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, true, null); Path stagedPath = new Path(stagedFile); - Assert.assertTrue(dfs.exists(stagedPath)); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(stagedPath)); + assertTrue(dfs.exists(srcPath)); // should do nothing because the original file was copied to staging listener.failedBulkLoad(FAMILY, stagedFile); - 
Assert.assertTrue(dfs.exists(stagedPath)); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(stagedPath)); + assertTrue(dfs.exists(srcPath)); } - @Test(expected = IOException.class) + @Test public void testDeletedStagedFile() throws Exception { - Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + Path stagingDirPath = new Path(dfs.getWorkingDirectory(), new Path(methodName, STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } @@ -209,7 +195,7 @@ public void testDeletedStagedFile() throws Exception { // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); - Assert.assertTrue(dfs.exists(srcPath)); + assertTrue(dfs.exists(srcPath)); Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY))); if (!dfs.exists(stagedFamily)) { @@ -219,19 +205,19 @@ public void testDeletedStagedFile() throws Exception { // moving file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null); Path stagedPath = new Path(stagedFile); - Assert.assertTrue(dfs.exists(stagedPath)); - Assert.assertFalse(dfs.exists(srcPath)); + assertTrue(dfs.exists(stagedPath)); + assertFalse(dfs.exists(srcPath)); dfs.delete(stagedPath, false); // moving files back to original location after a failed bulkload - listener.failedBulkLoad(FAMILY, stagedFile); + assertThrows(IOException.class, () -> listener.failedBulkLoad(FAMILY, stagedFile)); } private String createHFileForFamilies(byte[] family) throws IOException { HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf); Path testDir = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), Bytes.toString(family))); + new Path(dfs.getWorkingDirectory(), new Path(methodName, Bytes.toString(family))); if (!dfs.exists(testDir)) { dfs.mkdirs(testDir); } @@ -259,5 +245,4 @@ private static String generateUniqueName(final String suffix) { if 
(suffix != null) name += suffix; return name; } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index 66ee4ca72368..3225f4a2b7f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -30,10 +29,11 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.stream.Stream; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -48,14 +48,11 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import 
org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,14 +60,11 @@ * Test various seek optimizations for correctness and check if they are actually saving I/O * operations. */ -@RunWith(Parameterized.class) -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: comprAlgo={0}, bloomType={1}") public class TestSeekOptimizations { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeekOptimizations.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSeekOptimizations.class); // Constants @@ -116,9 +110,8 @@ public class TestSeekOptimizations { private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Random RNG = new Random(); // This test depends on Random#setSeed - @Parameters - public static final Collection parameters() { - return HBaseTestingUtil.BLOOM_AND_COMPRESSION_COMBINATIONS; + public static Stream parameters() { + return HBaseTestingUtil.BLOOM_AND_COMPRESSION_COMBINATIONS.stream().map(Arguments::of); } public TestSeekOptimizations(Compression.Algorithm comprAlgo, BloomType bloomType) { @@ -126,14 +119,14 @@ public TestSeekOptimizations(Compression.Algorithm comprAlgo, BloomType bloomTyp this.bloomType = bloomType; } - @Before + @BeforeEach public void setUp() { RNG.setSeed(91238123L); expectedKVs.clear(); TEST_UTIL.getConfiguration().setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); } - @Test + @TestTemplate public void testMultipleTimestampRanges() throws IOException { // enable seek counting StoreFileScanner.instrument(); @@ -177,9 +170,10 @@ public void testMultipleTimestampRanges() throws IOException { // Test that lazy seeks are buying us something. Without the actual // implementation of the lazy seek optimization this will be 0. 
final double expectedSeekSavings = 0.0; - assertTrue("Lazy seek is only saving " + String.format("%.2f%%", seekSavings * 100) - + " seeks but should " + "save at least " - + String.format("%.2f%%", expectedSeekSavings * 100), seekSavings >= expectedSeekSavings); + assertTrue(seekSavings >= expectedSeekSavings, + "Lazy seek is only saving " + String.format("%.2f%%", seekSavings * 100) + + " seeks but should " + "save at least " + + String.format("%.2f%%", expectedSeekSavings * 100)); } private void testScan(final int[] columnArr, final boolean lazySeekEnabled, final int startRow, @@ -419,7 +413,7 @@ public void createTimestampRange(long minTS, long maxTS, long deleteUpToTS) thro region.flush(true); } - @After + @AfterEach public void tearDown() throws IOException { if (region != null) { HBaseTestingUtil.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java index 896776a18313..a10cb89e2ecd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java @@ -18,15 +18,14 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.HConstants.NO_NONCE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; @@ -35,20 +34,16 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Threads; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestServerNonceManager { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerNonceManager.class); - @Test public void testMvcc() throws Exception { ServerNonceManager nm = createManager(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java index 068334ce1ba7..618e71339724 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.jupiter.api.Assertions.fail; + import java.io.IOException; import java.util.Optional; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -36,31 +37,21 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.Threads; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; -@Category({ MediumTests.class }) +@Tag(MediumTests.TAG) public class TestSettingTimeoutOnBlockingPoint { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSettingTimeoutOnBlockingPoint.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAM = Bytes.toBytes("f"); private static final byte[] ROW1 = Bytes.toBytes("row1"); private static final byte[] ROW2 = Bytes.toBytes("row2"); - @Rule - public TestName testName = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); @@ -69,8 +60,8 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(2); } - @AfterClass - public static void setUpAfterClass() throws Exception { + @AfterAll + public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -92,9 +83,10 @@ public Result preIncrementAfterRowLock( } @Test - public void testRowLock() throws IOException { - TableDescriptor hdt = TEST_UTIL.createModifyableTableDescriptor(testName.getMethodName()) - .setCoprocessor(SleepCoprocessor.class.getName()).build(); + public void testRowLock(TestInfo testInfo) throws IOException { + TableDescriptor hdt = + TEST_UTIL.createModifyableTableDescriptor(testInfo.getTestMethod().get().getName()) 
+ .setCoprocessor(SleepCoprocessor.class.getName()).build(); TEST_UTIL.createTable(hdt, new byte[][] { FAM }, TEST_UTIL.getConfiguration()); TableName tableName = hdt.getTableName(); Thread incrementThread = new Thread(() -> { @@ -103,7 +95,7 @@ public void testRowLock() throws IOException { table.incrementColumnValue(ROW1, FAM, FAM, 1); } } catch (IOException e) { - Assert.fail(e.getMessage()); + fail(e.getMessage()); } }); Thread getThread = new Thread(() -> { @@ -112,7 +104,7 @@ public void testRowLock() throws IOException { Delete delete = new Delete(ROW1); table.delete(delete); } catch (IOException e) { - Assert.fail(e.getMessage()); + fail(e.getMessage()); } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java index 0bc7deccc121..a915c3c92262 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import java.util.Arrays; -import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -37,29 +36,22 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.zookeeper.KeeperException.SessionExpiredException; -import org.junit.After; -import 
org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * See HBASE-19929 for more details. */ -@RunWith(Parameterized.class) -@Category({ RegionServerTests.class, LargeTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: WAL={0}") public class TestShutdownWhileWALBroken { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestShutdownWhileWALBroken.class); - private static final Logger LOG = LoggerFactory.getLogger(TestShutdownWhileWALBroken.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -68,12 +60,14 @@ public class TestShutdownWhileWALBroken { private static byte[] CF = Bytes.toBytes("CF"); - @Parameter public String walType; - @Parameters(name = "{index}: WAL={0}") - public static List params() { - return Arrays.asList(new Object[] { "asyncfs" }, new Object[] { "filesystem" }); + public static Stream parameters() { + return Stream.of(Arguments.of("asyncfs"), Arguments.of("filesystem")); + } + + public TestShutdownWhileWALBroken(String walType) { + this.walType = walType; } public static final class MyRegionServer extends HRegionServer { @@ -111,7 +105,7 @@ public void abort(String reason, Throwable cause) { } } - @Before + @BeforeEach public void setUp() throws Exception { UTIL.getConfiguration().setClass(HConstants.REGION_SERVER_IMPL, MyRegionServer.class, HRegionServer.class); @@ -119,12 +113,12 @@ public 
void setUp() throws Exception { UTIL.startMiniCluster(2); } - @After + @AfterEach public void tearDown() throws Exception { UTIL.shutdownMiniCluster(); } - @Test + @TestTemplate public void test() throws Exception { UTIL.createMultiRegionTable(TABLE_NAME, CF); try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java index 64032c1436ea..2f6be533b539 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java @@ -17,29 +17,24 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.DataOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestSimpleTimeRangeTracker { - @ClassRule - public static final 
HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSimpleTimeRangeTracker.class); - protected TimeRangeTracker getTimeRangeTracker() { return TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); } @@ -144,5 +139,4 @@ public void testRangeConstruction() throws IOException { assertEquals(Long.MAX_VALUE, twoArgRange3.getMax()); assertFalse(twoArgRange3.isAllTime()); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 7c8530901e32..8611e0646963 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -21,8 +21,8 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -31,7 +31,6 @@ import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; @@ -51,21 +50,17 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs.Ids; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestSplitLogWorker { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitLogWorker.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSplitLogWorker.class); private static final int WAIT_TIME = 15000; private final ServerName MANAGER = ServerName.valueOf("manager,1,1"); @@ -104,8 +99,8 @@ public CoordinatedStateManager getCoordinatedStateManager() { private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems) throws Exception { - assertTrue("ctr=" + ctr.sum() + ", oldval=" + oldval + ", newval=" + newval, - waitForCounterBoolean(ctr, oldval, newval, timems)); + assertTrue(waitForCounterBoolean(ctr, oldval, newval, timems), + "ctr=" + ctr.sum() + ", oldval=" + oldval + ", newval=" + newval); } private boolean waitForCounterBoolean(final LongAdder ctr, final long oldval, long newval, @@ -132,7 +127,7 @@ public boolean evaluate() throws Exception { return true; } - @Before + @BeforeEach public void setup() throws Exception { TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); @@ -155,7 +150,7 @@ public void setup() throws Exception { .setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS).setCorePoolSize(10)); } - @After + @AfterEach public void teardown() throws Exception { if (executorService != null) { executorService.shutdown(); @@ -370,7 +365,7 @@ public void testRescan() throws Exception { byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, fn)); slt = SplitLogTask.parseFrom(data); - assertTrue(slt.toString(), slt.isDone(SRV)); + assertTrue(slt.isDone(SRV), slt.toString()); } } assertEquals(2, num); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 32f88ea8dc6f..6aa195123062 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -18,14 +18,14 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.client.TableDescriptorBuilder.SPLIT_POLICY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.lang.reflect.Field; @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MasterNotRunningException; @@ -104,16 +103,13 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; import 
org.apache.zookeeper.KeeperException.NodeExistsException; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,13 +126,10 @@ /** * The below tests are testing split region against a running cluster */ -@Category({ RegionServerTests.class, LargeTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(LargeTests.TAG) public class TestSplitTransactionOnCluster { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitTransactionOnCluster.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSplitTransactionOnCluster.class); private Admin admin = null; private SingleProcessHBaseCluster cluster = null; @@ -144,10 +137,9 @@ public class TestSplitTransactionOnCluster { static final HBaseTestingUtil TESTING_UTIL = new HBaseTestingUtil(); - @Rule - public TestName name = new TestName(); + private String methodName; - @BeforeClass + @BeforeAll public static void before() throws Exception { TESTING_UTIL.getConfiguration().setInt(HConstants.HBASE_BALANCER_PERIOD, 60000); StartTestingClusterOption option = StartTestingClusterOption.builder() @@ -155,19 +147,20 @@ public static void before() throws Exception { TESTING_UTIL.startMiniCluster(option); } - @AfterClass + @AfterAll public static void after() throws Exception { TESTING_UTIL.shutdownMiniCluster(); } - @Before - public void setup() throws IOException { + 
@BeforeEach + public void setup(TestInfo testInfo) throws IOException { + this.methodName = testInfo.getTestMethod().get().getName(); TESTING_UTIL.ensureSomeNonStoppedRegionServersAvailable(NB_SERVERS); this.admin = TESTING_UTIL.getAdmin(); this.cluster = TESTING_UTIL.getMiniHBaseCluster(); } - @After + @AfterEach public void tearDown() throws Exception { this.admin.close(); for (TableDescriptor htd : this.admin.listTableDescriptors()) { @@ -194,7 +187,7 @@ private void requestSplitRegion(final HRegionServer rsServer, final Region regio @Test public void testRITStateForRollback() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); final HMaster master = cluster.getMaster(); try { // Create table then get the single region for our new table. @@ -211,7 +204,7 @@ public void testRITStateForRollback() throws Exception { // find a splittable region final HRegion region = findSplittableRegion(regions); - assertTrue("not able to find a splittable region", region != null); + assertNotNull(region, "not able to find a splittable region"); // install master co-processor to fail splits master.getMasterCoprocessorHost().load(FailingSplitMasterObserver.class, @@ -241,7 +234,7 @@ public void testRITStateForRollback() throws Exception { @Test public void testSplitFailedCompactionAndSplit() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. 
byte[] cf = Bytes.toBytes("cf"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) @@ -285,15 +278,16 @@ public void testSplitFailedCompactionAndSplit() throws Exception { @Test public void testSplitCompactWithPriority() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. byte[] cf = Bytes.toBytes("cf"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); admin.createTable(htd); - assertNotEquals("Unable to retrieve regions of the table", -1, - TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1)); + assertNotEquals(-1, + TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1), + "Unable to retrieve regions of the table"); HRegion region = cluster.getRegions(tableName).get(0); HStore store = region.getStore(cf); @@ -311,7 +305,7 @@ public void testSplitCompactWithPriority() throws Exception { Optional compactionContext = store.requestCompaction(); assertTrue(compactionContext.isPresent()); assertFalse(compactionContext.get().getRequest().isAfterSplit()); - assertEquals(compactionContext.get().getRequest().getPriority(), 13); + assertEquals(13, compactionContext.get().getRequest().getPriority()); // Split long procId = @@ -321,8 +315,8 @@ public void testSplitCompactWithPriority() throws Exception { // the procedure will return true; if the split fails, the procedure would throw exception. 
ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), procId); Thread.sleep(3000); - assertNotEquals("Table is not split properly?", -1, - TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2)); + assertNotEquals(-1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2), + "Table is not split properly?"); // we have 2 daughter regions HRegion hRegion1 = cluster.getRegions(tableName).get(0); HRegion hRegion2 = cluster.getRegions(tableName).get(1); @@ -344,7 +338,7 @@ public void testSplitCompactWithPriority() throws Exception { // since we set mock reference to one of the storeFiles, we will get isAfterSplit=true && // highest priority for hStore1's compactionContext assertTrue(compactionContext.get().getRequest().isAfterSplit()); - assertEquals(compactionContext.get().getRequest().getPriority(), Integer.MIN_VALUE + 1000); + assertEquals(Integer.MIN_VALUE + 1000, compactionContext.get().getRequest().getPriority()); compactionContext = hStore2.requestCompaction(Integer.MIN_VALUE + 10, CompactionLifeCycleTracker.DUMMY, null); @@ -353,14 +347,14 @@ public void testSplitCompactWithPriority() throws Exception { // compaction (Integer.MIN_VALUE + 1000), hence we are expecting request priority to // be accepted. assertTrue(compactionContext.get().getRequest().isAfterSplit()); - assertEquals(compactionContext.get().getRequest().getPriority(), Integer.MIN_VALUE + 10); + assertEquals(Integer.MIN_VALUE + 10, compactionContext.get().getRequest().getPriority()); admin.disableTable(tableName); admin.deleteTable(tableName); } @Test public void testContinuousSplitUsingLinkFile() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. 
byte[] cf = Bytes.toBytes("cf"); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) @@ -371,8 +365,9 @@ public void testContinuousSplitUsingLinkFile() throws Exception { admin.createTable(builder.build()); admin.compactionSwitch(false, new ArrayList<>()); - assertNotEquals("Unable to retrieve regions of the table", -1, - TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1)); + assertNotEquals(-1, + TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1), + "Unable to retrieve regions of the table"); Table table = TESTING_UTIL.getConnection().getTable(tableName); // insert data insertData(tableName, admin, table, 10); @@ -388,8 +383,8 @@ public void testContinuousSplitUsingLinkFile() throws Exception { // wait for the split to complete or get interrupted. If the split completes successfully, // the procedure will return true; if the split fails, the procedure would throw exception. Thread.sleep(3000); - assertNotEquals("Table is not split properly?", -1, - TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2)); + assertNotEquals(-1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2), + "Table is not split properly?"); // we have 2 daughter regions HRegion hRegion1 = cluster.getRegions(tableName).get(0); HRegion hRegion2 = cluster.getRegions(tableName).get(1); @@ -411,8 +406,8 @@ public void testContinuousSplitUsingLinkFile() throws Exception { // Continuous Split findRegionToSplit(tableName, "row24"); Thread.sleep(3000); - assertNotEquals("Table is not split properly?", -1, - TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 3)); + assertNotEquals(-1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 3), + "Table is not split properly?"); // now table has 3 region, each region should have one link file for (HRegion newRegion : cluster.getRegions(tableName)) { assertEquals(1, 
newRegion.getStore(cf).getStorefilesCount()); @@ -427,8 +422,8 @@ public void testContinuousSplitUsingLinkFile() throws Exception { // After this, can not continuous split, because there are reference files. findRegionToSplit(tableName, "row11"); Thread.sleep(3000); - assertNotEquals("Table is not split properly?", -1, - TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 4)); + assertNotEquals(-1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 4), + "Table is not split properly?"); scan = new Scan(); scanValidate(scan, rowCount, table); @@ -491,7 +486,7 @@ public void preSplitRegionBeforeMETAAction( @Test public void testSplitRollbackOnRegionClosing() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY); @@ -549,7 +544,7 @@ public void testSplitRollbackOnRegionClosing() throws Exception { */ @Test public void testShutdownFixupWhenDaughterHasSplit() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. 
Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY); @@ -599,7 +594,7 @@ public void testShutdownFixupWhenDaughterHasSplit() throws Exception { assertEquals(daughters.size(), regions.size()); for (HRegion r : regions) { LOG.info("Regions post crash " + r + ", contains=" + daughters.contains(r)); - assertTrue("Missing region post crash " + r, daughters.contains(r)); + assertTrue(daughters.contains(r), "Missing region post crash " + r); } } finally { LOG.info("EXITING"); @@ -625,7 +620,7 @@ private void clearReferences(HRegion region) throws IOException { @Test public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception { - TableName userTableName = TableName.valueOf(name.getMethodName()); + TableName userTableName = TableName.valueOf(methodName); TableDescriptor htd = TableDescriptorBuilder.newBuilder(userTableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); admin.createTable(htd); @@ -670,7 +665,7 @@ public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exc + regionsOfTable); } - Assert.assertEquals(2, regionsOfTable.size()); + assertEquals(2, regionsOfTable.size()); Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); @@ -678,7 +673,7 @@ public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exc for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { mainTableCount++; } - Assert.assertEquals(3, mainTableCount); + assertEquals(3, mainTableCount); } finally { table.close(); } @@ -692,7 +687,7 @@ public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exc public void testMasterRestartAtRegionSplitPendingCatalogJanitor() throws IOException, InterruptedException, NodeExistsException, KeeperException, ServiceException, ExecutionException, TimeoutException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the 
single region for our new table. try (Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY)) { List regions = cluster.getRegions(tableName); @@ -746,9 +741,9 @@ public void testMasterRestartAtRegionSplitPendingCatalogJanitor() @Test public void testSplitWithRegionReplicas() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); TableDescriptor htd = TESTING_UTIL - .createModifyableTableDescriptor(TableName.valueOf(name.getMethodName()), + .createModifyableTableDescriptor(TableName.valueOf(methodName), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) .setRegionReplication(2).setCoprocessor(SlowMeCopro.class.getName()).build(); @@ -771,11 +766,11 @@ public void testSplitWithRegionReplicas() throws Exception { // Turn off the meta scanner so it don't remove parent on us. cluster.getMaster().setCatalogJanitorEnabled(false); boolean tableExists = TESTING_UTIL.getAdmin().tableExists(tableName); - assertEquals("The specified table should be present.", true, tableExists); + assertEquals(true, tableExists, "The specified table should be present."); final HRegion region = findSplittableRegion(oldRegions); regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); regionServer = cluster.getRegionServer(regionServerIndex); - assertTrue("not able to find a splittable region", region != null); + assertTrue(region != null, "not able to find a splittable region"); try { requestSplitRegion(regionServer, region, Bytes.toBytes("row2")); } catch (IOException e) { @@ -794,7 +789,7 @@ public void testSplitWithRegionReplicas() throws Exception { || newRegions.size() != 4 ); tableExists = TESTING_UTIL.getAdmin().tableExists(tableName); - assertEquals("The specified table should be present.", true, tableExists); + assertEquals(true, tableExists, "The specified table should be 
present."); // exists works on stale and we see the put after the flush byte[] b1 = Bytes.toBytes("row1"); Get g = new Get(b1); @@ -803,7 +798,7 @@ public void testSplitWithRegionReplicas() throws Exception { // In the process it will also get the location of the replica of the daughter (initially // pointing to the parent's replica) Result r = t.get(g); - Assert.assertFalse(r.isStale()); + assertFalse(r.isStale()); LOG.info("exists stale after flush done"); SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1)); @@ -811,7 +806,7 @@ public void testSplitWithRegionReplicas() throws Exception { g.setConsistency(Consistency.TIMELINE); // This will succeed because in the previous GET we get the location of the replica r = t.get(g); - Assert.assertTrue(r.isStale()); + assertTrue(r.isStale()); SlowMeCopro.getPrimaryCdl().get().countDown(); } finally { SlowMeCopro.getPrimaryCdl().get().countDown(); @@ -847,7 +842,7 @@ private void insertData(TableName tableName, Admin admin, Table t, int i) throws */ @Test public void testSplitRegionWithNoStoreFiles() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); // Create table then get the single region for our new table. createTableAndWait(tableName, HConstants.CATALOG_FAMILY); List regions = cluster.getRegions(tableName); @@ -867,12 +862,12 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = TESTING_UTIL.getDFSCluster().getFileSystem(); Map storefiles = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); - assertEquals("Expected nothing but found " + storefiles.toString(), 0, storefiles.size()); + assertEquals(0, storefiles.size(), "Expected nothing but found " + storefiles.toString()); // find a splittable region. 
Refresh the regions list regions = cluster.getRegions(tableName); final HRegion region = findSplittableRegion(regions); - assertTrue("not able to find a splittable region", region != null); + assertTrue(region != null, "not able to find a splittable region"); // Now split. try { @@ -890,8 +885,8 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { HBaseFsck.debugLsr(conf, new Path("/")); Map storefilesAfter = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); - assertEquals("Expected nothing but found " + storefilesAfter.toString(), 0, - storefilesAfter.size()); + assertEquals(0, storefilesAfter.size(), + "Expected nothing but found " + storefilesAfter.toString()); hri = region.getRegionInfo(); // split parent AssignmentManager am = cluster.getMaster().getAssignmentManager(); @@ -899,8 +894,8 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { long start = EnvironmentEdgeManager.currentTime(); while (!regionStates.isRegionInState(hri, State.SPLIT)) { LOG.debug("Waiting for SPLIT state on: " + hri); - assertFalse("Timed out in waiting split parent to be in state SPLIT", - EnvironmentEdgeManager.currentTime() - start > 60000); + assertFalse(EnvironmentEdgeManager.currentTime() - start > 60000, + "Timed out in waiting split parent to be in state SPLIT"); Thread.sleep(500); } assertTrue(regionStates.isRegionInState(daughters.get(0).getRegionInfo(), State.OPEN)); @@ -912,7 +907,7 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { } catch (DoNotRetryIOException e) { // Expected } - assertFalse("Split region can't be assigned", am.isRegionInTransition(hri)); + assertFalse(am.isRegionInTransition(hri), "Split region can't be assigned"); assertTrue(regionStates.isRegionInState(hri, State.SPLIT)); // We should not be able to unassign it either @@ -922,7 +917,7 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { } catch (DoNotRetryIOException e) { // Expected } - assertFalse("Split region can't be 
unassigned", am.isRegionInTransition(hri)); + assertFalse(am.isRegionInTransition(hri), "Split region can't be unassigned"); assertTrue(regionStates.isRegionInState(hri, State.SPLIT)); } finally { admin.balancerSwitch(true, false); @@ -932,7 +927,7 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { @Test public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = TableName.valueOf(methodName); try { byte[] cf = Bytes.toBytes("f"); byte[] cf1 = Bytes.toBytes("i_f"); @@ -1040,8 +1035,8 @@ private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, final Regi + tableRegionIndex + " and metaServerIndex=" + metaServerIndex); Thread.sleep(100); } - assertTrue("Region not moved off hbase:meta server, tableRegionIndex=" + tableRegionIndex, - tableRegionIndex != -1 && tableRegionIndex != metaServerIndex); + assertTrue(tableRegionIndex != -1 && tableRegionIndex != metaServerIndex, + "Region not moved off hbase:meta server, tableRegionIndex=" + tableRegionIndex); // Verify for sure table region is not on same server as hbase:meta tableRegionIndex = cluster.getServerWith(hri.getRegionName()); assertTrue(tableRegionIndex != -1); @@ -1080,9 +1075,10 @@ private void waitUntilRegionServerDead() throws InterruptedException, IOExceptio LOG.info("Waiting on server to go down"); Thread.sleep(100); } - assertFalse("Waited too long for RS to die", + assertFalse( cluster.getMaster().getClusterMetrics().getLiveServerMetrics().size() > NB_SERVERS - || cluster.getLiveRegionServerThreads().size() > NB_SERVERS); + || cluster.getLiveRegionServerThreads().size() > NB_SERVERS, + "Waited too long for RS to die"); } private void awaitDaughters(TableName tableName, int numDaughters) throws InterruptedException { @@ -1110,7 +1106,7 @@ private Table createTableAndWait(TableName tableName, byte[] cf) throws IOException, 
InterruptedException { Table t = TESTING_UTIL.createTable(tableName, cf); awaitTableRegions(tableName); - assertTrue("Table not online: " + tableName, cluster.getRegions(tableName).size() != 0); + assertTrue(cluster.getRegions(tableName).size() != 0, "Table not online: " + tableName); return t; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java index 17754d498e05..57a84db785f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; @@ -27,7 +27,6 @@ import java.util.Map; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.hbase.DroppedSnapshotException; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -44,11 +43,10 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import 
org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -57,13 +55,9 @@ /** * Testcase for https://issues.apache.org/jira/browse/HBASE-13811 */ -@Category({ LargeTests.class }) +@Tag(LargeTests.TAG) public class TestSplitWalDataLoss { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitWalDataLoss.class); - private static final Logger LOG = LoggerFactory.getLogger(TestSplitWalDataLoss.class); private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); @@ -77,7 +71,7 @@ public class TestSplitWalDataLoss { private byte[] qualifier = Bytes.toBytes("q"); - @Before + @BeforeEach public void setUp() throws Exception { testUtil.getConfiguration().setInt("hbase.regionserver.msginterval", 30000); testUtil.startMiniCluster(2); @@ -88,7 +82,7 @@ public void setUp() throws Exception { testUtil.waitTableAvailable(tableName); } - @After + @AfterEach public void tearDown() throws Exception { testUtil.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java index fd3d7b39c7dc..9498231d8be0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java @@ -19,13 +19,12 @@ import static org.apache.hadoop.hbase.regionserver.HRegion.SPLIT_IGNORE_BLOCKING_ENABLED_KEY; import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -43,33 +42,23 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class }) +@Tag(MediumTests.TAG) public class TestSplitWithBlockingFiles { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitWithBlockingFiles.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestSplitWithBlockingFiles.class); - protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("test"); private static Admin ADMIN; private static byte[] CF = Bytes.toBytes("cf"); private static Table TABLE; - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 8 * 2 * 10240L); UTIL.getConfiguration().setInt(HStore.BLOCKING_STOREFILES_KEY, 1); @@ -85,7 +74,7 @@ public static void setupCluster() throws Exception { UTIL.waitTableAvailable(TABLE_NAME); } - @AfterClass + @AfterAll public static void cleanupTest() throws Exception { Closeables.close(TABLE, true); 
UTIL.shutdownMiniCluster(); @@ -111,7 +100,7 @@ public void testSplitIgnoreBlockingFiles() throws Exception { while (results.next() != null) { count++; } - Assert.assertEquals("There should be 100 rows!", 100, count); + assertEquals(100, count, "There should be 100 rows!"); List regions = UTIL.getMiniHBaseCluster().getRegionServer(0).getRegions(); regions.removeIf(r -> !r.getRegionInfo().getTable().equals(TABLE_NAME)); assertEquals(1, regions.size()); @@ -140,7 +129,7 @@ public void testSplitIgnoreBlockingFiles() throws Exception { while (results.next() != null) { count++; } - Assert.assertEquals("There should be 100 rows!", 100, count); + assertEquals(100, count, "There should be 100 rows!"); for (HRegion region : regions) { assertTrue(region.getCompactPriority() < PRIORITY_USER); assertFalse( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java index 530a39f73069..a93a398a49f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.regionserver; -import static junit.framework.TestCase.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.FileNotFoundException; import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -39,20 +38,16 @@ import 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; /** * Test HStoreFile */ -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestStoreFileInfo { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileInfo.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); /** @@ -64,21 +59,21 @@ public void testStoreFileNames() { "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345", "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" }; for (String name : legalHFileLink) { - assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name)); - assertTrue("should be a valid StoreFile" + name, StoreFileInfo.validateStoreFileName(name)); - assertFalse("should not be a valid reference: " + name, StoreFileInfo.isReference(name)); + assertTrue(HFileLink.isHFileLink(name), "should be a valid link: " + name); + assertTrue(StoreFileInfo.validateStoreFileName(name), "should be a valid StoreFile" + name); + assertFalse(StoreFileInfo.isReference(name), "should not be a valid reference: " + name); String refName = name + ".6789"; - assertTrue("should be a valid link reference: " + refName, - StoreFileInfo.isReference(refName)); - assertTrue("should be a valid StoreFile" + refName, - StoreFileInfo.validateStoreFileName(refName)); + assertTrue(StoreFileInfo.isReference(refName), + "should be a valid link reference: " + refName); + assertTrue(StoreFileInfo.validateStoreFileName(refName), + "should be a valid StoreFile" + refName); } String[] illegalHFileLink = { 
".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345", "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." }; for (String name : illegalHFileLink) { - assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name)); + assertFalse(HFileLink.isHFileLink(name), "should not be a valid link: " + name); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index ace0df4e4ce0..06a0da9f9dd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; @@ -54,28 +53,23 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.StoppableImplementation; import org.apache.hadoop.hbase.wal.WALFactory; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ RegionServerTests.class, MediumTests.class }) -public class TestStoreFileRefresherChore { +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileRefresherChore.class); +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) +public class TestStoreFileRefresherChore { private HBaseTestingUtil TEST_UTIL; private Path testDir; - @Rule - public TestName name = new TestName(); + private String methodName; - @Before - public void setUp() throws IOException { + @BeforeEach + public void setUp(TestInfo testInfo) throws IOException { + this.methodName = testInfo.getTestMethod().get().getName(); TEST_UTIL = new HBaseTestingUtil(); testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore"); CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir); @@ -197,8 +191,7 @@ public void testIsStale() throws IOException { when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); String trackerName = FailingStoreFileTrackerForTest.class.getName(); - TableDescriptor htd = - getTableDesc(TableName.valueOf(name.getMethodName()), 2, trackerName, families); + TableDescriptor htd = getTableDesc(TableName.valueOf(methodName), 2, trackerName, families); HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); @@ -250,7 +243,7 @@ public void testRefreshReadOnlyTable() throws IOException { when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); - TableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, 
null, families); + TableDescriptor htd = getTableDesc(TableName.valueOf(methodName), 2, null, families); HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); @@ -274,7 +267,7 @@ public void testRefreshReadOnlyTable() throws IOException { verifyData(primary, 0, 200, qf, families); // then the table is set to readonly - htd = getTableDesc(TableName.valueOf(name.getMethodName()), 2, true, null, families); + htd = getTableDesc(TableName.valueOf(methodName), 2, true, null, families); primary.setTableDescriptor(htd); replica1.setTableDescriptor(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScanner.java index 84566a9651cc..da4950c9a436 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScanner.java @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; @@ -42,38 +41,32 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; /** * Test StoreFileScanner */ -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestStoreFileScanner { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileScanner.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final String TEST_FAMILY = "cf"; - @Rule - public TestName name = new TestName(); + private String methodName; private Configuration conf; private Path testDir; private FileSystem fs; private CacheConfig cacheConf; - @Before - public void setUp() throws IOException { + @BeforeEach + public void setUp(TestInfo testInfo) throws IOException { + this.methodName = testInfo.getTestMethod().get().getName(); conf = TEST_UTIL.getConfiguration(); - testDir = TEST_UTIL.getDataTestDir(name.getMethodName()); + testDir = TEST_UTIL.getDataTestDir(methodName); fs = testDir.getFileSystem(conf); cacheConf = new CacheConfig(conf); } @@ -93,8 +86,7 @@ private void writeStoreFile(final StoreFileWriter writer) throws IOException { @Test public void testGetFilesRead() throws Exception { // Setup: region info, region fs, and HFile context; create store file and write data. 
- final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + final RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(methodName)).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri); HFileContext hFileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build(); @@ -120,13 +112,13 @@ public void testGetFilesRead() throws Exception { // Before close: getFilesRead must be empty. Set filesRead = scanner.getFilesRead(); - assertTrue("Should return empty set before closing scanner", filesRead.isEmpty()); + assertTrue(filesRead.isEmpty(), "Should return empty set before closing scanner"); scanner.close(); // After close: set must contain the single qualified store file path. filesRead = scanner.getFilesRead(); - assertEquals("Should return set with one file path after closing", 1, filesRead.size()); - assertTrue("Should contain the qualified file path", filesRead.contains(qualifiedPath)); + assertEquals(1, filesRead.size(), "Should return set with one file path after closing"); + assertTrue(filesRead.contains(qualifiedPath), "Should contain the qualified file path"); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index f1f5b1e49a66..80b4203396ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -43,25 +42,20 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; -@Category({ RegionServerTests.class, SmallTests.class }) +@org.junit.jupiter.api.Tag(RegionServerTests.TAG) +@org.junit.jupiter.api.Tag(SmallTests.TAG) public class TestStoreFileScannerWithTagCompression { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileScannerWithTagCompression.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf = TEST_UTIL.getConfiguration(); private static CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); private static Path ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFileScannerWithTagCompression"); private static FileSystem fs = null; - @BeforeClass + @BeforeAll public static void setUp() throws IOException { conf.setInt("hfile.format.version", 3); fs = FileSystem.get(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java index de84d4daa5e9..1345bbb2d47e 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileWriter.java @@ -19,19 +19,18 @@ import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.NEW_VERSION_BEHAVIOR; import static org.apache.hadoop.hbase.regionserver.StoreFileWriter.ENABLE_HISTORICAL_COMPACTION_FILES; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.List; import java.util.Random; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.MemoryCompactionPolicy; @@ -46,13 +45,11 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; /** * Store file writer does not do any compaction. 
Each cell written to either the live or historical @@ -68,12 +65,12 @@ * historical files are generated only when historical file generation is enabled (by the config * hbase.enable.historical.compaction.files). */ -@Category({ RegionServerTests.class, LargeTests.class }) -@RunWith(Parameterized.class) +@Tag(RegionServerTests.TAG) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate( + name = "{index}: keepDeletedCells={0}, maxVersions={1}, newVersionBehavior={2}") public class TestStoreFileWriter { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileWriter.class); + private final int ROW_NUM = 100; private final Random RANDOM = new Random(11); private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); @@ -87,20 +84,23 @@ public class TestStoreFileWriter { private final Configuration conf = testUtil.getConfiguration(); private int flushCount = 0; - @Parameterized.Parameter(0) public KeepDeletedCells keepDeletedCells; - @Parameterized.Parameter(1) public int maxVersions; - @Parameterized.Parameter(2) public boolean newVersionBehavior; - @Parameterized.Parameters(name = "keepDeletedCells={0}, maxVersions={1}, newVersionBehavior={2}") - public static synchronized Collection data() { - return Arrays.asList( - new Object[][] { { KeepDeletedCells.FALSE, 1, true }, { KeepDeletedCells.FALSE, 2, false }, - { KeepDeletedCells.FALSE, 3, true }, { KeepDeletedCells.TRUE, 1, false }, - // { KeepDeletedCells.TRUE, 2, true }, see HBASE-28442 - { KeepDeletedCells.TRUE, 3, false } }); + public TestStoreFileWriter(KeepDeletedCells keepDeletedCells, int maxVersions, + boolean newVersionBehavior) { + this.keepDeletedCells = keepDeletedCells; + this.maxVersions = maxVersions; + this.newVersionBehavior = newVersionBehavior; + } + + public static synchronized Stream parameters() { + return Stream.of(Arguments.of(KeepDeletedCells.FALSE, 1, true), + Arguments.of(KeepDeletedCells.FALSE, 2, false), 
Arguments.of(KeepDeletedCells.FALSE, 3, true), + Arguments.of(KeepDeletedCells.TRUE, 1, false), + // { KeepDeletedCells.TRUE, 2, true }, see HBASE-28442 + Arguments.of(KeepDeletedCells.TRUE, 3, false)); } // In memory representation of a cell. We only need to know timestamp and type field for our @@ -128,7 +128,7 @@ private void createTable(int index, boolean enableDualFileWriter) throws IOExcep regions[index] = testUtil.getMiniHBaseCluster().getRegions(tableName[index]).get(0); } - @Before + @BeforeEach public void setUp() throws Exception { conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 6); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, @@ -145,13 +145,13 @@ public void setUp() throws Exception { } } - @After + @AfterEach public void tearDown() throws Exception { this.testUtil.shutdownMiniCluster(); testUtil.cleanupTestDir(); } - @Test + @TestTemplate public void testCompactedFiles() throws Exception { for (int i = 0; i < 10; i++) { insertRows(ROW_NUM * maxVersions); @@ -182,10 +182,9 @@ public void testCompactedFiles() throws Exception { int minorCompactedCount = stores[1].getStorefilesCount(); int expectedMin = flushCount - stores[1].getCompactedFiles().size() + 1; int expectedMax = flushCount - stores[1].getCompactedFiles().size() + 2; - assertTrue( + assertTrue(minorCompactedCount >= expectedMin && minorCompactedCount <= expectedMax, "Expected store file count between " + expectedMin + " and " + expectedMax + " but was " - + minorCompactedCount, - minorCompactedCount >= expectedMin && minorCompactedCount <= expectedMax); + + minorCompactedCount); verifyCells(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 2d190aa0ea46..f661e17e6ac7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -19,12 +19,12 @@ import static org.apache.hadoop.hbase.KeyValueTestUtil.create; import static org.apache.hadoop.hbase.regionserver.KeyValueScanFixture.scanFixture; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -74,27 +73,28 @@ import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; // Can't be small 
as it plays with EnvironmentEdgeManager -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestStoreScanner { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreScanner.class); - private static final Logger LOG = LoggerFactory.getLogger(TestStoreScanner.class); - @Rule - public TestName name = new TestName(); + private String methodName; + + @BeforeEach + public void setTestName(TestInfo testInfo) { + this.methodName = testInfo.getTestMethod().get().getName(); + } + private static final String CF_STR = "cf"; private static final byte[] CF = Bytes.toBytes(CF_STR); static Configuration CONF = HBaseConfiguration.create(); @@ -374,8 +374,8 @@ public void testWithColumnCountGetFilter() throws Exception { assertEquals(2, results.size()); assertTrue(CellUtil.matchingColumn(results.get(0), CELL_WITH_VERSIONS[0])); assertTrue(CellUtil.matchingColumn(results.get(1), CELL_WITH_VERSIONS[2])); - assertTrue("Optimize should do some optimizations", - scannerNoOptimize.optimization.get() == 0); + assertTrue(scannerNoOptimize.optimization.get() == 0, + "Optimize should not do any optimizations"); } get.setFilter(new ColumnCountGetFilter(2)); @@ -388,7 +388,7 @@ public void testWithColumnCountGetFilter() throws Exception { assertEquals(2, results.size()); assertTrue(CellUtil.matchingColumn(results.get(0), CELL_WITH_VERSIONS[0])); assertTrue(CellUtil.matchingColumn(results.get(1), CELL_WITH_VERSIONS[2])); - assertTrue("Optimize should do some optimizations", scanner.optimization.get() > 0); + assertTrue(scanner.optimization.get() > 0, "Optimize should do some optimizations"); } } @@ -469,7 +469,7 @@ public void testOptimize() throws IOException { assertTrue(Bytes.equals(ONE, 0, ONE.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); } - assertTrue("Optimize should do some optimizations", scanner.optimization.get() > 0); + 
assertTrue(scanner.optimization.get() > 0, "Optimize should do some optimizations"); } } @@ -494,9 +494,9 @@ public void testOptimizeAndGet() throws IOException { // Should be one result only. assertEquals(2, results.size()); // And we should have gone through optimize twice only. - assertEquals("First qcode is SEEK_NEXT_COL and second INCLUDE_AND_SEEK_NEXT_ROW", 3, - scanner.count.get()); - assertEquals("Memstore Read count should be", 1, scanner.memstoreOnlyReads); + assertEquals(3, scanner.count.get(), + "First qcode is SEEK_NEXT_COL and second INCLUDE_AND_SEEK_NEXT_ROW"); + assertEquals(1, scanner.memstoreOnlyReads, "Memstore Read count should be 1"); } } @@ -522,8 +522,8 @@ public void testOptimizeAndGetWithFakedNextBlockIndexStart() throws IOException // Should be one result only. assertEquals(1, results.size()); // And we should have gone through optimize twice only. - assertEquals("First qcode is SEEK_NEXT_COL and second INCLUDE_AND_SEEK_NEXT_ROW", 2, - scanner.count.get()); + assertEquals(2, scanner.count.get(), + "First qcode is SEEK_NEXT_COL and second INCLUDE_AND_SEEK_NEXT_ROW"); } } @@ -922,7 +922,7 @@ public void testScannerReseekDoesntNPE() throws Exception { } @Test - @Ignore("this fails, since we don't handle deletions, etc, in peek") + @Disabled("this fails, since we don't handle deletions, etc, in peek") public void testPeek() throws Exception { KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), }; @@ -1113,13 +1113,12 @@ public void testGetFilesRead() throws Exception { // Setup: test util, conf, fs, cache, region fs, and HFile context. 
HBaseTestingUtil testUtil = new HBaseTestingUtil(); Configuration conf = testUtil.getConfiguration(); - Path testDir = testUtil.getDataTestDir(name.getMethodName() + "_directory"); + Path testDir = testUtil.getDataTestDir(methodName + "_directory"); FileSystem fs = testDir.getFileSystem(conf); CacheConfig cacheConf = new CacheConfig(conf); final String TEST_FAMILY = "cf"; - final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + final RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(methodName)).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri); HFileContext hFileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build(); @@ -1228,13 +1227,13 @@ public void testGetFilesRead() throws Exception { // After close: all 5 files must be tracked (in-range, out-of-range, and TTL-expired). Set filesRead = storeScanner.getFilesRead(); - assertTrue("File 1 (in range) should be tracked", filesRead.contains(filePaths.get(0))); - assertTrue("File 2 (in range) should be tracked", filesRead.contains(filePaths.get(1))); - assertTrue("File 3 (out of key range) should be tracked", filesRead.contains(filePaths.get(2))); - assertTrue("File 4 (before start row) should be tracked", filesRead.contains(filePaths.get(3))); - assertTrue("File 5 (expired TTL, filtered after read) should be tracked", - filesRead.contains(filePaths.get(4))); - assertEquals("Should have all 5 files read", 5, filesRead.size()); + assertTrue(filesRead.contains(filePaths.get(0)), "File 1 (in range) should be tracked"); + assertTrue(filesRead.contains(filePaths.get(1)), "File 2 (in range) should be tracked"); + assertTrue(filesRead.contains(filePaths.get(2)), "File 3 (out of key range) should be tracked"); + assertTrue(filesRead.contains(filePaths.get(3)), "File 4 (before start row) should be tracked"); + 
assertTrue(filesRead.contains(filePaths.get(4)), + "File 5 (expired TTL, filtered after read) should be tracked"); + assertEquals(5, filesRead.size(), "Should have all 5 files read"); } /** @@ -1288,7 +1287,7 @@ public void testGetFilesReadOnInitializationFailure() throws Exception { } // Verify that exception was thrown - assertNotNull("Should have thrown IOException during initialization", caughtException); + assertNotNull(caughtException, "Should have thrown IOException during initialization"); // Verify that store methods were called (cleanup happened in catch block) Mockito.verify(mockStore, Mockito.times(1)).addChangedReaderObserver(Mockito.any()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java index 44ef696f1fd1..c2f36c4990bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java @@ -19,7 +19,8 @@ import static org.apache.hadoop.hbase.KeyValueTestUtil.create; import static org.apache.hadoop.hbase.regionserver.KeyValueScanFixture.scanFixture; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -34,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -55,12 +55,9 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import 
org.apache.hadoop.hbase.util.Bytes; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,17 +66,13 @@ * {@link StoreScanner#updateReaders(List, List)} works perfectly ensuring that there are no * references on the existing Storescanner readers. */ -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestStoreScannerClosure { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreScannerClosure.class); - private static final Logger LOG = LoggerFactory.getLogger(TestStoreScannerClosure.class); private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; - @Rule - public TestName name = new TestName(); + private static final String CF_STR = "cf"; private static HRegion region; private static final byte[] CF = Bytes.toBytes(CF_STR); @@ -102,7 +95,7 @@ public class TestStoreScannerClosure { create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), }; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { CONF = TEST_UTIL.getConfiguration(); cacheConf = new CacheConfig(CONF); @@ -164,7 +157,7 @@ public void run() { if (memStoreLAB != null) { // There should be no unpooled chunks int refCount = ((MemStoreLABImpl) memStoreLAB).getRefCntValue(); - assertTrue("The memstore should not have unpooled chunks", refCount == 0); + assertEquals(0, refCount, "The memstore should not have unpooled chunks"); } } } @@ -265,7 +258,7 @@ public void run() { // in the other case the fileReader will be null. 
int refCount = file.getReader().getRefCount(); LOG.info("the store scanner count is " + refCount); - assertTrue("The store scanner count should be 0", refCount == 0); + assertEquals(0, refCount, "The store scanner count should be 0"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index 6ca392a9bfde..2bd821affdd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; @@ -34,7 +34,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparatorImpl; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; @@ -44,17 +43,13 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; 
-@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestStripeStoreEngine { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStripeStoreEngine.class); - @Test public void testCreateBasedOnConfig() throws Exception { Configuration conf = HBaseConfiguration.create(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index bdf50298403f..5e414aadc8e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparatorImpl; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -43,20 +42,16 @@ import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestStripeStoreFileManager { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStripeStoreFileManager.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Path BASEDIR = TEST_UTIL.getDataTestDir(TestStripeStoreFileManager.class.getSimpleName()); @@ -73,7 +68,7 @@ public class TestStripeStoreFileManager { private static final KeyValue KV_C = new KeyValue(KEY_C, 0L); private static final KeyValue KV_D = new KeyValue(KEY_D, 0L); - @Before + @BeforeEach public void setUp() throws Exception { FileSystem fs = TEST_UTIL.getTestFileSystem(); if (!fs.mkdirs(CFDIR)) { @@ -81,7 +76,7 @@ public void setUp() throws Exception { } } - @After + @AfterEach public void tearDown() throws Exception { FileSystem fs = TEST_UTIL.getTestFileSystem(); if (fs.exists(CFDIR) && !fs.delete(CFDIR, true)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java index 47c2a95b5bbf..f9a9432da8a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java @@ -17,10 +17,10 @@ */ package 
org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -47,20 +46,16 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ RegionServerTests.class, SmallTests.class }) +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestSwitchToStreamRead { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("stream"); @@ -73,7 +68,7 @@ public class TestSwitchToStreamRead { private static HRegion REGION; - @Before + @BeforeEach public void setUp() throws IOException { 
UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); StringBuilder sb = new StringBuilder(256); @@ -98,7 +93,7 @@ public void setUp() throws IOException { } } - @After + @AfterEach public void tearDown() throws IOException { REGION.close(true); UTIL.cleanupTestDir(); @@ -193,7 +188,7 @@ private void testFilter(Filter filter) throws IOException { // until the row key is changed. And there we can only use NoLimitScannerContext so we can not // make the upper layer return immediately. Simply do not use NoLimitScannerContext will lead to // an infinite loop. Need to dig more, the code are way too complicated... - @Ignore + @Disabled @Test public void testFilterRowKey() throws IOException { testFilter(new MatchLastRowKeyFilter()); @@ -295,14 +290,14 @@ public void testGetFilesReadOnTrySwitchToStreamRead() throws Exception { for (HStoreFile sf : store.getStorefiles()) { expectedFilePaths.add(fs.makeQualified(sf.getPath())); } - assertFalse("Should have at least one store file", expectedFilePaths.isEmpty()); + assertFalse(expectedFilePaths.isEmpty(), "Should have at least one store file"); // Verify scanners start in PREAD mode before the switch. 
for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; - assertSame("Scanner should start in PREAD mode", ReaderType.PREAD, - sfScanner.getReader().getReaderContext().getReaderType()); + assertSame(ReaderType.PREAD, sfScanner.getReader().getReaderContext().getReaderType(), + "Scanner should start in PREAD mode"); } } @@ -331,17 +326,17 @@ public void testGetFilesReadOnTrySwitchToStreamRead() throws Exception { } } } - assertTrue("trySwitchToStreamRead should have been invoked and scanners switched to stream", - switchVerified); + assertTrue(switchVerified, + "trySwitchToStreamRead should have been invoked and scanners switched to stream"); // Not closing the scanners explicitly, because those must be closed during // trySwitchToStreamRead // After close: files that were read (including those closed during switch) must be tracked. Set filesRead = storeScanner.getFilesRead(); - assertEquals("Should have exact file count after close", expectedFilePaths.size(), - filesRead.size()); - assertEquals("Should contain all expected store file paths", expectedFilePaths, filesRead); + assertEquals(expectedFilePaths.size(), filesRead.size(), + "Should have exact file count after close"); + assertEquals(expectedFilePaths, filesRead, "Should contain all expected store file paths"); } finally { UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, originalPreadMaxBytes); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java index 99b29f8aeb71..c7078fb6db68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java @@ -17,23 +17,18 @@ */ package 
org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ RegionServerTests.class, MediumTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(MediumTests.TAG) public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncTimeRangeTracker.class); - private static final int NUM_KEYS = 8000000; private static final int NUM_OF_THREADS = 20; @@ -83,8 +78,8 @@ public void run() { threads[i].join(); } - assertTrue(trr.getMax() == calls * threadCount); - assertTrue(trr.getMin() == 0); + assertEquals(calls * threadCount, trr.getMax()); + assertEquals(0, trr.getMin()); } static class RandomTestData { @@ -178,7 +173,7 @@ public void testConcurrentIncludeTimestampCorrectness() { } } - assertTrue(min == trt.getMin()); - assertTrue(max == trt.getMax()); + assertEquals(min, trt.getMin()); + assertEquals(max, trt.getMax()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTableDescriptorHashComputation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTableDescriptorHashComputation.java index 9c793ef7fb4b..84d4fdf4f9f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTableDescriptorHashComputation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTableDescriptorHashComputation.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; 
-import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -29,17 +28,13 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestTableDescriptorHashComputation { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableDescriptorHashComputation.class); - @Test public void testHashLength() { TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("testTable")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 31304c0815f3..aa5d978f853e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTagsReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTagsReverseScan.java index eedba5db7d7a..018e1aaba19e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTagsReverseScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTagsReverseScan.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java index 7b92c9abc79a..af3462ad75b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -31,20 +30,16 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({ RegionServerTests.class, LargeTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(LargeTests.TAG) public class TestTimestampFilterSeekHint { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTimestampFilterSeekHint.class); - private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private final static String RK = "myRK"; private final static byte[] RK_BYTES = Bytes.toBytes(RK); @@ -92,7 +87,7 @@ public void testGetDoesntSeekWithNoHint() throws IOException { assertTrue(finalSeekCount < initialSeekCount + 3); } - @Before + @BeforeEach public void prepareRegion() throws IOException { ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder .newBuilder(Bytes.toBytes(FAMILY)).setBlocksize(1024).setMaxVersions(MAX_VERSIONS).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java index a08459e1f1a7..3501667affcb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java @@ -17,17 +17,18 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import 
java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -48,18 +49,13 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALStreamReader; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,14 +70,11 @@ * This case use two thread to put and increment at the same time in a single region. Then check the * seqid in WAL. 
If seqid is wal is not monotonically increasing, this case will fail */ -@RunWith(Parameterized.class) -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: wal={0}") public class TestWALMonotonicallyIncreasingSeqId { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALMonotonicallyIncreasingSeqId.class); - private final Logger LOG = LoggerFactory.getLogger(getClass()); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Path testDir = TEST_UTIL.getDataTestDir("TestWALMonotonicallyIncreasingSeqId"); @@ -90,15 +83,14 @@ public class TestWALMonotonicallyIncreasingSeqId { private Configuration walConf; private HRegion region; - @Parameter public String walProvider; - @Rule - public TestName name = new TestName(); + public TestWALMonotonicallyIncreasingSeqId(String walProvider) { + this.walProvider = walProvider; + } - @Parameters(name = "{index}: wal={0}") - public static List data() { - return Arrays.asList(new Object[] { "asyncfs" }, new Object[] { "filesystem" }); + public static Stream parameters() { + return Stream.of(Arguments.of("asyncfs"), Arguments.of("filesystem")); } private TableDescriptor getTableDesc(TableName tableName, byte[]... 
families) { @@ -184,22 +176,27 @@ public void run() { } } - @Before - public void setUp() throws IOException { + @BeforeEach + public void setUp(TestInfo testInfo) throws IOException { + String name = testInfo.getTestMethod().get().getName() + "_" + walProvider; byte[][] families = new byte[][] { Bytes.toBytes("cf") }; - TableDescriptor htd = getTableDesc( - TableName.valueOf(name.getMethodName().replaceAll("[^0-9A-Za-z_]", "_")), families); + TableDescriptor htd = + getTableDesc(TableName.valueOf(name.replaceAll("[^0-9A-Za-z_]", "_")), families); region = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); } - @After + @AfterEach public void tearDown() throws IOException { if (region != null) { region.close(); } + if (wals != null) { + wals.close(); + wals = null; + } } - @AfterClass + @AfterAll public static void tearDownAfterClass() throws IOException { TEST_UTIL.cleanupTestDir(); } @@ -212,7 +209,7 @@ private WALStreamReader createReader(Path logPath, Path oldWalsDir) throws IOExc } } - @Test + @TestTemplate public void testWALMonotonicallyIncreasingSeqId() throws Exception { List putThreads = new ArrayList<>(); for (int i = 0; i < 1; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 32944a4147fd..7ea79e98ce3c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import 
java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -42,23 +41,19 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; /** * This test verifies the correctness of the Per Column Family flushing strategy when part of the * memstores are compacted memstores */ -@Category({ RegionServerTests.class, LargeTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(LargeTests.TAG) public class TestWalAndCompactingMemStoreFlush { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWalAndCompactingMemStoreFlush.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion"); public static final TableName TABLENAME = @@ -127,7 +122,7 @@ private void verifyInMemoryFlushSize(Region region) { ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).getInmemoryFlushSize()); } - @Before + @BeforeEach public void setup() { conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, @@ -204,8 +199,8 @@ public void testSelectiveFlushWithEager() throws IOException { String msg = "totalMemstoreSize=" + totalMemstoreSize + " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI + " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI + " 
cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI; - assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() - + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); + assertEquals(totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() + + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize(), msg); // Flush!!!!!!!!!!!!!!!!!!!!!! // We have big compacting memstore CF1 and two small memstores: @@ -308,7 +303,7 @@ public void testSelectiveFlushWithEager() throws IOException { assertEquals(smallestSeqCF3PhaseII, smallestSeqCF3PhaseIV); // CF3 should be bottleneck for WAL - assertEquals(s, smallestSeqInRegionCurrentMemstorePhaseIV, smallestSeqCF3PhaseIV); + assertEquals(smallestSeqInRegionCurrentMemstorePhaseIV, smallestSeqCF3PhaseIV, s); // Flush!!!!!!!!!!!!!!!!!!!!!! // Trying to clean the existing memstores, CF2 all flushed to disk. The single @@ -672,8 +667,8 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { + DefaultMemStore.DEEP_OVERHEAD + " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI + " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI + " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI; - assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() - + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); + assertEquals(totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() + + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize(), msg); // Flush! 
CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore; @@ -744,8 +739,8 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { + smallestSeqCF3PhaseIV + "\n"; // now check that the LSN of the entire WAL, of CF1 and of CF3 has progressed due to compaction - assertTrue(s, - smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII); + assertTrue( + smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII, s); assertTrue(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII); assertTrue(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII); @@ -884,16 +879,18 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { assertEquals(3, // active, one in pipeline, snapshot ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).getSegments().size()); // CF2 should have been cleared - assertEquals("\n<<< DEBUG: The data--heap sizes of stores before/after first flushes," - + " CF1: " + cf1MemstoreSizePhaseI.getDataSize() + "/" + cf1MemstoreSizePhaseII.getDataSize() - + "--" + cf1MemstoreSizePhaseI.getHeapSize() + "/" + cf1MemstoreSizePhaseII.getHeapSize() - + ", CF2: " + cf2MemstoreSizePhaseI.getDataSize() + "/" + cf2MemstoreSizePhaseII.getDataSize() - + "--" + cf2MemstoreSizePhaseI.getHeapSize() + "/" + cf2MemstoreSizePhaseII.getHeapSize() - + ", CF3: " + cf3MemstoreSizePhaseI.getDataSize() + "/" + cf3MemstoreSizePhaseII.getDataSize() - + "--" + cf3MemstoreSizePhaseI.getHeapSize() + "/" + cf3MemstoreSizePhaseII.getHeapSize() - + "\n<<< AND before/after second flushes " + " CF1: " + cf1MemstoreSizePhaseIII.getDataSize() - + "/" + cf1MemstoreSizePhaseIV.getDataSize() + "--" + cf1MemstoreSizePhaseIII.getHeapSize() - + "/" + cf1MemstoreSizePhaseIV.getHeapSize() + "\n", 0, cf2MemstoreSizePhaseIV.getDataSize()); + assertEquals(0, cf2MemstoreSizePhaseIV.getDataSize(), + "\n<<< DEBUG: The data--heap sizes of stores before/after first flushes," + " 
CF1: " + + cf1MemstoreSizePhaseI.getDataSize() + "/" + cf1MemstoreSizePhaseII.getDataSize() + "--" + + cf1MemstoreSizePhaseI.getHeapSize() + "/" + cf1MemstoreSizePhaseII.getHeapSize() + + ", CF2: " + cf2MemstoreSizePhaseI.getDataSize() + "/" + + cf2MemstoreSizePhaseII.getDataSize() + "--" + cf2MemstoreSizePhaseI.getHeapSize() + "/" + + cf2MemstoreSizePhaseII.getHeapSize() + ", CF3: " + cf3MemstoreSizePhaseI.getDataSize() + + "/" + cf3MemstoreSizePhaseII.getDataSize() + "--" + cf3MemstoreSizePhaseI.getHeapSize() + + "/" + cf3MemstoreSizePhaseII.getHeapSize() + "\n<<< AND before/after second flushes " + + " CF1: " + cf1MemstoreSizePhaseIII.getDataSize() + "/" + + cf1MemstoreSizePhaseIV.getDataSize() + "--" + cf1MemstoreSizePhaseIII.getHeapSize() + "/" + + cf1MemstoreSizePhaseIV.getHeapSize() + "\n"); HBaseTestingUtil.closeRegionAndWAL(region); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 796e49a68a2d..b323f312a915 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -44,21 +43,16 @@ import 
org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RegionServerTests.class, SmallTests.class }) +@Tag(RegionServerTests.TAG) +@Tag(SmallTests.TAG) public class TestWideScanner { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWideScanner.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestWideScanner.class); @@ -83,7 +77,7 @@ public class TestWideScanner { /** HRegionInfo for root region */ private static HRegion REGION; - @BeforeClass + @BeforeAll public static void setUp() throws IOException { Path testDir = UTIL.getDataTestDir(); RegionInfo hri = RegionInfoBuilder.newBuilder(TESTTABLEDESC.getTableName()).build(); @@ -91,7 +85,7 @@ public static void setUp() throws IOException { HBaseTestingUtil.createRegionAndWAL(hri, testDir, UTIL.getConfiguration(), TESTTABLEDESC); } - @AfterClass + @AfterAll public static void tearDown() throws IOException { if (REGION != null) { HBaseTestingUtil.closeRegionAndWAL(REGION);