diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java new file mode 100644 index 00000000000..98f94736bfe --- /dev/null +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; + +import java.lang.reflect.Field; +import java.net.URI; + +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +/** + * Compatibility tests for direct-download and upload cache configuration in + * {@link AzureBlobStoreBackend}. 
The assertions are intentionally behavior- + * based and do not depend on a specific cache library. + */ +public class AzureBlobStoreBackendCompatibilityTest { + + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the direct-download expiry value used by presigned URIs. + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the direct-upload expiry used during upload initiation. + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Verify the cache-size toggle actually creates and then removes the backing cache. + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() { + // With no download expiry configured, direct download access should stay disabled. + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + URI downloadURI = backend.createHttpDownloadURI( + new DataIdentifier("test"), + DataRecordDownloadOptions.DEFAULT); + + assertNull(downloadURI); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() { + // Upload initiation follows the same disabled-by-default contract until configured. 
+ AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + assertNull(backend.initiateHttpUpload(1024, 1, DataRecordUploadOptions.DEFAULT)); + } + + private static int getIntField(AzureBlobStoreBackend backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(AzureBlobStoreBackend backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return field.get(backend); + } +} diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java index 49cfff277db..10764b03e17 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java @@ -36,7 +36,10 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.net.URISyntaxException; +import java.net.URI; import java.time.Duration; import java.time.Instant; import java.util.Date; @@ -163,6 +166,77 @@ public void initSecret() throws Exception { assertReferenceSecret(azureBlobStoreBackend); } + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the v8 direct-download expiry value used by presigned URIs. 
+ AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the v8 direct-upload expiry used during upload initiation. + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Verify the cache-size toggle actually creates and then removes the backing cache. + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() throws DataStoreException { + // With no download expiry configured, direct download access should stay disabled. + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + assertNull(backend.createHttpDownloadURI(new org.apache.jackrabbit.core.data.DataIdentifier("test"), + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions.DEFAULT)); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() throws DataStoreException { + // Upload initiation follows the same disabled-by-default contract until configured. 
+ AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + assertNull(backend.initiateHttpUpload(1024, 1, + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions.DEFAULT)); + } + + @Test + public void createHttpDownloadURIReturnsPreExistingCachedURI() throws Exception { + // Seed a cache entry directly, then verify the externally observable + // behavior that the same URI is returned for the same download request. + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + org.apache.jackrabbit.core.data.DataIdentifier identifier = + new org.apache.jackrabbit.core.data.DataIdentifier("cached"); + URI cachedUri = URI.create("https://cached.example/download"); + + backend.setHttpDownloadURIExpirySeconds(300); + setField(backend, "downloadDomainOverride", "cached.example"); + setField(backend, "presignedDownloadURIVerifyExists", false); + backend.setHttpDownloadURICacheSize(10); + putIntoCache(getField(backend, "httpDownloadURICache"), + identifier + "cached.example", cachedUri); + + assertEquals(cachedUri, backend.createHttpDownloadURI(identifier, + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions.DEFAULT)); + } + /* make sure that blob1.txt and blob2.txt are uploaded to AZURE_ACCOUNT_NAME/blobstore container before * executing this test * */ @@ -312,6 +386,30 @@ private static String getConnectionString() { return UtilsV8.getConnectionString(AzuriteDockerRule.ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_KEY, azurite.getBlobEndpoint()); } + private static int getIntField(AzureBlobStoreBackendV8 backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(AzureBlobStoreBackendV8 backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + 
field.setAccessible(true); + return field.get(backend); + } + + private static void setField(AzureBlobStoreBackendV8 backend, String fieldName, Object value) throws Exception { + Field field = AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(backend, value); + } + + private static void putIntoCache(Object cache, Object key, Object value) throws Exception { + Method put = cache.getClass().getMethod("put", Object.class, Object.class); + put.setAccessible(true); + put.invoke(cache, key, value); + } + private static void assertReferenceSecret(AzureBlobStoreBackendV8 azureBlobStoreBackend) throws DataStoreException { // assert secret already created on init diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java new file mode 100644 index 00000000000..b8bbfcb0f4f --- /dev/null +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.jackrabbit.oak.blob.cloud.s3; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.net.URI; + +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +/** + * Compatibility tests for direct-download and upload cache configuration in + * {@link S3Backend}. The assertions are intentionally behavior-based and do + * not reference third-party cache types. + */ +public class S3BackendCompatibilityTest { + + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage: direct-download expiry must be stored verbatim because + // later cache/window calculations depend on this value. + S3Backend backend = new S3Backend(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the upload-side expiry used by presigned upload URIs. + S3Backend backend = new S3Backend(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Toggle the cache on and back off again to verify the configuration method + // controls the backing cache lifecycle directly. 
+ S3Backend backend = new S3Backend(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() { + // With download expiry left at the default disabled state, direct download + // access must stay off and return null immediately. + S3Backend backend = new S3Backend(); + + URI downloadURI = backend.createHttpDownloadURI( + new DataIdentifier("test"), + DataRecordDownloadOptions.DEFAULT); + + assertNull(downloadURI); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() { + // Upload URIs use the same disabled-by-default contract when no expiry is configured. + S3Backend backend = new S3Backend(); + + assertNull(backend.initiateHttpUpload(1024, 1)); + } + + @Test + public void createHttpDownloadURIReturnsPreExistingCachedURI() throws Exception { + // Seed a cache entry directly, then verify the externally observable + // behavior that the same URI is returned for the same download request. 
+ S3Backend backend = new S3Backend(); + DataIdentifier identifier = new DataIdentifier("cached"); + URI cachedUri = URI.create("https://cached.example/download"); + + backend.setHttpDownloadURIExpirySeconds(300); + backend.setHttpDownloadURICacheSize(10); + setField(backend, "presignedDownloadURIVerifyExists", false); + putIntoCache(getField(backend, "httpDownloadURICache"), identifier, cachedUri); + + assertEquals(cachedUri, backend.createHttpDownloadURI(identifier, DataRecordDownloadOptions.DEFAULT)); + } + + private static int getIntField(S3Backend backend, String fieldName) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(S3Backend backend, String fieldName) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return field.get(backend); + } + + private static void setField(S3Backend backend, String fieldName, Object value) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(backend, value); + } + + private static void putIntoCache(Object cache, Object key, Object value) throws Exception { + Method put = cache.getClass().getMethod("put", Object.class, Object.class); + put.setAccessible(true); + put.invoke(cache, key, value); + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java new file mode 100644 index 00000000000..d3ba6f9b792 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.split; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Compatibility tests for {@link BlobIdSet}. + * These assertions verify observable lookup semantics without depending on the + * underlying cache implementation. + */ +public class BlobIdSetCompatibilityTest { + + private static final String TEST_FILENAME = "compat-blob-ids.txt"; + + private File tempDir; + private File storeFile; + private BlobIdSet blobIdSet; + + @Before + public void setUp() throws IOException { + tempDir = Files.createTempDirectory("blob-id-set-compat").toFile(); + storeFile = new File(tempDir, TEST_FILENAME); + blobIdSet = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + } + + @After + public void tearDown() { + if (storeFile.exists()) { + storeFile.delete(); + } + tempDir.delete(); + } + + @Test + public void containsReturnsTrueForEntryAddedAfterRestart() throws IOException { + // Seed the on-disk store first, then rebuild BlobIdSet to show startup + // rehydrates lookup state from the persisted file. 
+ try (FileWriter writer = new FileWriter(storeFile)) { + writer.write("blob-from-store\n"); + } + + BlobIdSet restarted = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + + assertTrue(restarted.contains("blob-from-store")); + } + + @Test + public void addMakesEntryVisibleBeforeAndAfterRestart() throws IOException { + // Add through the public API and verify both the current instance and a + // restarted one observe the same persisted membership result. + blobIdSet.add("added-through-api"); + + assertTrue(blobIdSet.contains("added-through-api")); + + BlobIdSet restarted = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + assertTrue(restarted.contains("added-through-api")); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java new file mode 100644 index 00000000000..4d3fb779d4e --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.cache; + +import java.util.concurrent.ExecutionException; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for {@link AbstractCacheStats} using {@link CacheLIRS} as the backing cache. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class AbstractCacheStatsTest { + + private static final String CACHE_NAME = "testCache"; + private static final long MAX_WEIGHT = 1000; + + private CacheLIRS cache; + private CacheStats stats; + + @Before + public void setUp() { + cache = new CacheLIRS<>(null, MAX_WEIGHT, 1, 1, 0, null, null, null); + stats = new CacheStats(cache, CACHE_NAME, null, MAX_WEIGHT); + } + + @Test + public void getNameReturnsConstructorValue() { + Assert.assertEquals(CACHE_NAME, stats.getName()); + } + + @Test + public void hitCountIncreasesOnCacheHit() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // cache hit — callable not invoked + Assert.assertEquals(1, stats.getHitCount()); + } + + @Test + public void missCountIncreasesOnCacheMiss() { + cache.getIfPresent("absent"); + Assert.assertEquals(1, stats.getMissCount()); + } + + @Test + public void requestCountIsSumOfHitsAndMisses() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // hit + cache.getIfPresent("absent"); // miss + Assert.assertEquals(2, stats.getRequestCount()); + Assert.assertEquals(1, stats.getHitCount()); + Assert.assertEquals(1, stats.getMissCount()); + } + + @Test + public void loadSuccessCountIncreasesOnCallableLoad() throws ExecutionException { + cache.get("k1", () -> "v1"); // miss + load + Assert.assertEquals(1, stats.getLoadSuccessCount()); + Assert.assertEquals(1, stats.getLoadCount()); + } + + @Test + public void loadExceptionCountIncreasesOnFailedLoad() { + try { + cache.get("k1", () -> { + throw new RuntimeException("load failed"); 
+ }); + } catch (ExecutionException ignored) { + } + Assert.assertEquals(1, stats.getLoadExceptionCount()); + Assert.assertEquals(1, stats.getLoadCount()); + Assert.assertEquals(0, stats.getLoadSuccessCount()); + } + + @Test + public void evictionCountIncreasesAfterCapacityEviction() { + // LIRS needs warm-up: create cache of size 5 and add 30 entries to ensure evictions + CacheLIRS smallCache = CacheLIRS.newBuilder() + .maximumSize(5) + .build(); + CacheStats smallStats = new CacheStats(smallCache, "small", null, 5); + for (int i = 0; i < 30; i++) { + smallCache.put("k" + i, "v" + i); + } + Assert.assertTrue("evictionCount should be positive after capacity eviction", + smallStats.getEvictionCount() > 0); + } + + @Test + public void maxTotalWeightReturnsConfiguredValue() { + Assert.assertEquals(MAX_WEIGHT, stats.getMaxTotalWeight()); + } + + @Test + public void elementCountReflectsCachedEntries() throws ExecutionException { + cache.get("k1", () -> "v1"); + cache.get("k2", () -> "v2"); + Assert.assertEquals(2, stats.getElementCount()); + } + + @Test + public void estimateCurrentWeightReturnsNegativeOneWhenNoWeigher() { + Assert.assertEquals(-1, stats.estimateCurrentWeight()); + } + + @Test + public void resetStatsClearsCountersButNotCacheContents() throws ExecutionException { + cache.get("k1", () -> "v1"); // miss + load + cache.get("k1", () -> "v1"); // hit + cache.getIfPresent("absent"); // miss + + stats.resetStats(); + + Assert.assertEquals(0, stats.getRequestCount()); + Assert.assertEquals(0, stats.getHitCount()); + Assert.assertEquals(0, stats.getMissCount()); + Assert.assertEquals(0, stats.getLoadCount()); + Assert.assertEquals(0, stats.getLoadSuccessCount()); + Assert.assertEquals(0, stats.getLoadExceptionCount()); + Assert.assertEquals(0, stats.getEvictionCount()); + Assert.assertEquals(0.0, stats.getLoadExceptionRate(), Double.MIN_VALUE); + Assert.assertEquals(0, stats.getTotalLoadTime()); + // cache contents unchanged after reset + Assert.assertEquals(1, 
stats.getElementCount()); + } + + @Test + public void hitRateIsOneWhenAllAccessesAreHits() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // hit + Assert.assertEquals(1.0, stats.getHitRate(), Double.MIN_VALUE); + } + + @Test + public void hitRateIsOneWhenNoRequestsYet() { + // by convention, hit rate is 1.0 when there are no requests + Assert.assertEquals(1.0, stats.getHitRate(), Double.MIN_VALUE); + } + + @Test + public void cacheInfoAsStringContainsRequiredFields() throws ExecutionException { + cache.get("k1", () -> "v1"); + String info = stats.cacheInfoAsString(); + Assert.assertTrue("cacheInfoAsString should contain hitCount", info.contains("hitCount=")); + Assert.assertTrue("cacheInfoAsString should contain missCount", info.contains("missCount=")); + Assert.assertTrue("cacheInfoAsString should contain loadCount", info.contains("loadCount=")); + Assert.assertTrue("cacheInfoAsString should contain elementCount", info.contains("elementCount=1")); + Assert.assertTrue("cacheInfoAsString should contain maxWeight", info.contains("maxWeight=")); + } + + @Test + public void timeInWordsIncludesMinAndSec() { + String result = AbstractCacheStats.timeInWords(0); + Assert.assertNotNull(result); + Assert.assertTrue("timeInWords should contain 'min'", result.contains("min")); + Assert.assertTrue("timeInWords should contain 'sec'", result.contains("sec")); + } + + @Test + public void timeInWordsFormatsOneMinute() { + long oneMinuteNanos = 60L * 1_000_000_000L; + String result = AbstractCacheStats.timeInWords(oneMinuteNanos); + Assert.assertTrue("1-minute duration should contain '1 min'", result.contains("1 min")); + } + + @Test + public void loadExceptionRateAfterMixedLoads() throws ExecutionException { + cache.get("success", () -> "v1"); // success + try { + cache.get("failure", () -> { + throw new RuntimeException("boom"); + }); + } catch (ExecutionException ignored) { + } + // 1 success + 1 exception = 2 loads; rate = 0.5 + 
Assert.assertEquals(0.5, stats.getLoadExceptionRate(), 0.001); + } + + @Test + public void loadExceptionRateIsZeroWhenNoLoads() { + Assert.assertEquals(0.0, stats.getLoadExceptionRate(), Double.MIN_VALUE); + } + + @Test + public void totalLoadTimeIsPositiveAfterMeasuredLoad() throws ExecutionException { + cache.get("k1", () -> { + Thread.sleep(1); + return "v1"; + }); + Assert.assertTrue("totalLoadTime should be > 0 after a measured load", stats.getTotalLoadTime() > 0); + } + + @Test + public void averageLoadPenaltyIsPositiveAfterMeasuredLoad() throws ExecutionException { + cache.get("k1", () -> { + Thread.sleep(1); + return "v1"; + }); + Assert.assertTrue("averageLoadPenalty should be > 0 after a measured load", + stats.getAverageLoadPenalty() > 0.0); + } + + @Test + public void missRateIsOneWhenAllAccessesAreMisses() { + cache.getIfPresent("a"); + cache.getIfPresent("b"); + Assert.assertEquals(1.0, stats.getMissRate(), Double.MIN_VALUE); + } + + @Test + public void statsAreAccumulatedAcrossMultipleLoads() throws ExecutionException { + cache.get("k1", () -> "v1"); + cache.get("k2", () -> "v2"); + cache.get("k3", () -> "v3"); + Assert.assertEquals(3, stats.getLoadSuccessCount()); + Assert.assertEquals(3, stats.getLoadCount()); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java new file mode 100644 index 00000000000..0351a80ee3d --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.cache; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Compatibility tests for the Oak-visible {@link CacheLIRS} API surface. + * These assertions intentionally avoid third-party cache APIs so the same + * tests can run before and after the cache implementation migration. + */ +public class CacheLIRSCompatibilityTest { + + @Test + public void getWithCallableCachesLoadedValue() throws ExecutionException { + // Load through the Oak-visible callable API, then repeat the same lookup + // with a different loader to prove the cached value wins on the second call. 
+ CacheLIRS cache = CacheLIRS.newBuilder() + .maximumSize(10) + .build(); + AtomicInteger loadCount = new AtomicInteger(); + + assertEquals("loaded", cache.get("k", () -> { + loadCount.incrementAndGet(); + return "loaded"; + })); + assertEquals("loaded", cache.get("k", () -> { + loadCount.incrementAndGet(); + return "other"; + })); + + assertEquals(1, loadCount.get()); + assertEquals("loaded", cache.getIfPresent("k")); + } + + @Test + public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { + // Use a checked exception from the loader and verify the legacy + // ExecutionException shape is preserved for callers. + CacheLIRS cache = CacheLIRS.newBuilder() + .maximumSize(10) + .build(); + Exception failure = new Exception("checked failure"); + + try { + cache.get("k", () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("checked failure", e.getCause().getMessage()); + assertNull(cache.getIfPresent("k")); + } + } + + @Test + public void invalidateAllClearsPreviouslyCachedEntries() throws ExecutionException { + // Populate two keys first, then clear the cache and verify both the + // size counters and direct lookups observe an empty cache. + CacheLIRS cache = CacheLIRS.newBuilder() + .maximumSize(10) + .build(); + + cache.get("k1", () -> "v1"); + cache.get("k2", () -> "v2"); + assertEquals(2, cache.size()); + + cache.invalidateAll(); + + assertEquals(0, cache.size()); + assertNull(cache.getIfPresent("k1")); + assertNull(cache.getIfPresent("k2")); + assertTrue(cache.asMap().isEmpty()); + } + + @Test + public void evictionCallbackIsInvokedWhenEntryIsEvictedBySize() { + // Push the cache past capacity and capture the first callback so the + // test checks real size-based eviction instead of explicit invalidation. 
+ AtomicInteger evictions = new AtomicInteger(); + AtomicReference firstEvictedKey = new AtomicReference<>(); + AtomicReference firstEvictedValue = new AtomicReference<>(); + CacheLIRS cache = CacheLIRS.newBuilder() + .maximumSize(10) + .evictionCallback((key, value, cause) -> { + if (evictions.getAndIncrement() == 0) { + firstEvictedKey.set(key); + firstEvictedValue.set(value); + } + }) + .build(); + + // LIRS requires cold-queue warm-up before evicting; 100× the cache capacity + // ensures at least one eviction on any conforming implementation. + for (int i = 0; i < 1000 && evictions.get() == 0; i++) { + cache.put("k" + i, "v" + i); + } + + assertTrue("expected at least one eviction callback", evictions.get() > 0); + assertTrue(firstEvictedKey.get().startsWith("k")); + assertTrue(firstEvictedValue.get().startsWith("v")); + assertNull(cache.getIfPresent(firstEvictedKey.get())); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java new file mode 100644 index 00000000000..39209ad5d70 --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.cache; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for {@link EmpiricalWeigher}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class EmpiricalWeigherTest { + + private static final int ENTRY_OVERHEAD = 168; + + private final EmpiricalWeigher weigher = new EmpiricalWeigher(); + + @Test + public void weighIncludesBaseOverheadForZeroMemoryValues() { + CacheValue key = () -> 0; + CacheValue value = () -> 0; + Assert.assertEquals(ENTRY_OVERHEAD, weigher.weigh(key, value)); + } + + @Test + public void weighAddsKeyAndValueMemoryToOverhead() { + CacheValue key = () -> 100; + CacheValue value = () -> 200; + Assert.assertEquals(ENTRY_OVERHEAD + 100 + 200, weigher.weigh(key, value)); + } + + @Test + public void weighWithOnlyKeyMemory() { + CacheValue key = () -> 50; + CacheValue value = () -> 0; + Assert.assertEquals(ENTRY_OVERHEAD + 50, weigher.weigh(key, value)); + } + + @Test + public void weighWithOnlyValueMemory() { + CacheValue key = () -> 0; + CacheValue value = () -> 300; + Assert.assertEquals(ENTRY_OVERHEAD + 300, weigher.weigh(key, value)); + } + + @Test + public void weighCapsAtIntegerMaxValue() { + // key + value + overhead overflows int + CacheValue key = () -> Integer.MAX_VALUE; + CacheValue value = () -> Integer.MAX_VALUE; + Assert.assertEquals(Integer.MAX_VALUE, weigher.weigh(key, value)); + } + + @Test + public void weighIsAlwaysPositive() { + CacheValue key = () -> 1; + CacheValue value = () -> 1; + Assert.assertTrue(weigher.weigh(key, value) > 0); + } + + @Test + public void weighReturnsConsistentResultsForSameInput() { + CacheValue key = () -> 42; + CacheValue value = () -> 99; + int first = weigher.weigh(key, value); + int second = weigher.weigh(key, value); + Assert.assertEquals(first, 
second); + } + + @Test + public void weighJustBelowOverflow() { + // total = 168 + (Integer.MAX_VALUE - 168 - 1) + 1 = Integer.MAX_VALUE + int keyMem = Integer.MAX_VALUE - ENTRY_OVERHEAD - 1; + int valueMem = 1; + CacheValue key = () -> keyMem; + CacheValue value = () -> valueMem; + Assert.assertEquals(Integer.MAX_VALUE, weigher.weigh(key, value)); + } +} diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java new file mode 100644 index 00000000000..f188b7b0a77 --- /dev/null +++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for {@link DocumentNodeStoreHelper}. 
+ * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class DocumentNodeStoreHelperTest { + + private DocumentNodeStore store; + + @Before + public void setUp() { + store = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .setAsyncDelay(0) + .build(); + } + + @After + public void tearDown() { + if (store != null) { + store.dispose(); + } + } + + @Test + public void getNodesCacheReturnsNonNull() { + // This is a smoke test for the helper entry point: callers should always + // get back a usable cache handle for a live DocumentNodeStore. + Assert.assertNotNull(DocumentNodeStoreHelper.getNodesCache(store)); + } + + @Test + public void getNodesCacheExposesNonNullMapView() { + // This only asserts that the helper exposes a map view; contents and + // mutability semantics are intentionally out of scope here. + Assert.assertNotNull(DocumentNodeStoreHelper.getNodesCache(store).asMap()); + } + + @Test + public void getNodesCacheMapViewReflectsCachedNodeReads() throws Exception { + // Create content through the normal node-store API so the subsequent read + // exercises the production cache population path instead of a test-only insert. + NodeBuilder builder = store.getRoot().builder(); + builder.child("a").child("b"); + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + // Clear any startup entries first, then verify a real tree read repopulates + // the helper-visible map view. 
+ DocumentNodeStoreHelper.getNodesCache(store).asMap().clear(); + Assert.assertTrue(DocumentNodeStoreHelper.getNodesCache(store).asMap().isEmpty()); + store.getRoot().getChildNode("a").getChildNode("b"); + + Assert.assertFalse(DocumentNodeStoreHelper.getNodesCache(store).asMap().isEmpty()); + } +} diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java new file mode 100644 index 00000000000..a421c9aaa75 --- /dev/null +++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.plugins.index.elastic; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.core.CountRequest; +import co.elastic.clients.elasticsearch.core.CountResponse; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +import java.io.IOException; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.function.IntSupplier; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Compatibility tests for {@link ElasticIndexStatistics}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class ElasticIndexStatisticsCompatibilityTest { + + @Mock + private ElasticConnection elasticConnectionMock; + + @Mock + private ElasticIndexDefinition indexDefinitionMock; + + @Mock + private ElasticsearchClient elasticClientMock; + + private AutoCloseable closeable; + + @Before + public void setUp() { + this.closeable = MockitoAnnotations.openMocks(this); + Mockito.when(indexDefinitionMock.getIndexAlias()).thenReturn("test-index"); + Mockito.when(elasticConnectionMock.getClient()).thenReturn(elasticClientMock); + } + + @After + public void releaseMocks() throws Exception { + closeable.close(); + } + + @Test + public void numDocsReturnsMockedCountFromElasticsearch() throws Exception { + // Baseline behavior: a cache miss should load the document count from Elasticsearch. 
+ CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(42L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + Assert.assertEquals(42, indexStatistics.numDocs()); + } + + @Test + public void numDocsCachesResultOnSubsequentCalls() throws Exception { + // Call numDocs() twice with the same descriptor and verify only the first + // call reaches Elasticsearch. + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(99L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + // first call loads from ES + Assert.assertEquals(99, indexStatistics.numDocs()); + // second call should be served from cache (same value) + Assert.assertEquals(99, indexStatistics.numDocs()); + // ES should only have been called once + Mockito.verify(elasticClientMock, Mockito.times(1)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsPropagatesIOExceptionAsRuntimeFailure() throws Exception { + // Use a checked IOException from the client and assert callers still see a + // runtime failure that preserves the original cause chain. 
+ Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenThrow(new IOException("ES down")); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + try { + indexStatistics.numDocs(); + Assert.fail("expected RuntimeException when Elasticsearch is unavailable"); + } catch (RuntimeException e) { + // The exact wrapper type is intentionally not asserted so this test + // can remain valid across cache implementations. + Assert.assertNotNull(findCause(e, IOException.class)); + Assert.assertEquals("ES down", findCause(e, IOException.class).getMessage()); + } + } + + @Test + public void getDocCountForFieldReturnsMockedCount() throws Exception { + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(10L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + Assert.assertEquals(10, indexStatistics.getDocCountFor("someField")); + } + + @Test + public void numDocsAndGetDocCountForUseIndependentCacheKeys() throws Exception { + // numDocs() and getDocCountFor(field) should not alias each other in the cache, + // so both lookups must hit Elasticsearch once. 
+ CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(5L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + indexStatistics.numDocs(); + indexStatistics.getDocCountFor("someField"); + // numDocs and getDocCountFor use different cache keys (different StatsRequestDescriptors) + Mockito.verify(elasticClientMock, Mockito.times(2)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsRefreshesValueAfterRefreshWindow() throws Exception { + // Advance a controllable clock past the refresh boundary, then release + // the blocked refresh and verify callers eventually observe the new value. + MutableClock clock = new MutableClock(); + CountResponse initialResponse = Mockito.mock(CountResponse.class); + CountResponse refreshedResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + Mockito.when(refreshedResponse.count()).thenReturn(1000L); + CountDownLatch refreshStarted = new CountDownLatch(1); + CountDownLatch releaseRefresh = new CountDownLatch(1); + CountDownLatch refreshCompleted = new CountDownLatch(1); + AtomicInteger invocations = new AtomicInteger(); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + refreshStarted.countDown(); + if (!releaseRefresh.await(5, TimeUnit.SECONDS)) { + throw new AssertionError("timed out waiting to release refresh"); + } + refreshCompleted.countDown(); + return refreshedResponse; + }); + + ElasticIndexStatistics indexStatistics = + newIndexStatistics(clock); + + Assert.assertEquals(100, indexStatistics.numDocs()); + Mockito.verify(elasticClientMock, 
Mockito.times(1)).count(ArgumentMatchers.any(CountRequest.class)); + + clock.advanceSeconds(2); + Assert.assertEquals(100, indexStatistics.numDocs()); + + Assert.assertTrue("expected refresh to start", refreshStarted.await(5, TimeUnit.SECONDS)); + releaseRefresh.countDown(); + Assert.assertTrue("expected refresh completion", refreshCompleted.await(5, TimeUnit.SECONDS)); + assertEventuallyEquals(1000, indexStatistics::numDocs); + Mockito.verify(elasticClientMock, Mockito.atLeast(2)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { + // Advance a controllable clock into the refresh window, then block the + // reload so the read path can prove it returns the stale cached value. + MutableClock clock = new MutableClock(); + CountResponse initialResponse = Mockito.mock(CountResponse.class); + CountResponse refreshedResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + Mockito.when(refreshedResponse.count()).thenReturn(1000L); + + CountDownLatch refreshStarted = new CountDownLatch(1); + CountDownLatch releaseRefresh = new CountDownLatch(1); + CountDownLatch refreshCompleted = new CountDownLatch(1); + AtomicInteger invocations = new AtomicInteger(); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + refreshStarted.countDown(); + if (!releaseRefresh.await(5, TimeUnit.SECONDS)) { + throw new AssertionError("timed out waiting to release refresh"); + } + refreshCompleted.countDown(); + return refreshedResponse; + }); + + ElasticIndexStatistics indexStatistics = + newIndexStatistics(clock); + + Assert.assertEquals(100, indexStatistics.numDocs()); + + clock.advanceSeconds(2); + Assert.assertEquals(100, indexStatistics.numDocs()); + Assert.assertTrue("expected refresh to start", 
refreshStarted.await(5, TimeUnit.SECONDS)); + + releaseRefresh.countDown(); + Assert.assertTrue("expected refresh completion", refreshCompleted.await(5, TimeUnit.SECONDS)); + assertEventuallyEquals(1000, indexStatistics::numDocs); + } + + @Test + public void numDocsKeepsCachedValueWhenRefreshFails() throws Exception { + // Advance a controllable clock into the refresh window, then make the + // asynchronous refresh fail and verify the cached value is preserved. + MutableClock clock = new MutableClock(); + CountResponse initialResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + + CountDownLatch refreshAttempted = new CountDownLatch(1); + AtomicInteger invocations = new AtomicInteger(); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + refreshAttempted.countDown(); + throw new IOException("refresh failed"); + }); + + ElasticIndexStatistics indexStatistics = + newIndexStatistics(clock); + + Assert.assertEquals(100, indexStatistics.numDocs()); + + clock.advanceSeconds(2); + Assert.assertEquals(100, indexStatistics.numDocs()); + Assert.assertTrue("expected refresh attempt", refreshAttempted.await(5, TimeUnit.SECONDS)); + Assert.assertEquals(100, indexStatistics.numDocs()); + } + + private ElasticIndexStatistics newIndexStatistics(Clock clock) { + return new ElasticIndexStatistics( + elasticConnectionMock, + indexDefinitionMock, + ElasticIndexStatistics.setupCountCache(100, 30, 1, clock), + null); + } + + private static void assertEventuallyEquals(int expected, IntSupplier supplier) throws InterruptedException { + long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); + int actual = supplier.getAsInt(); + while (System.nanoTime() < deadline && actual != expected) { + TimeUnit.MILLISECONDS.sleep(25); + actual = supplier.getAsInt(); + } + Assert.assertEquals(expected, 
actual); + } + + private static Throwable findCause(Throwable throwable, Class type) { + Throwable current = throwable; + while (current != null) { + if (type.isInstance(current)) { + return current; + } + current = current.getCause(); + } + return null; + } + + private static final class MutableClock extends Clock { + private final AtomicLong currentMillis = new AtomicLong(); + + @Override + public ZoneId getZone() { + return ZoneId.of("UTC"); + } + + @Override + public Clock withZone(ZoneId zone) { + return this; + } + + @Override + public Instant instant() { + return Instant.ofEpochMilli(currentMillis.get()); + } + + @Override + public long millis() { + return currentMillis.get(); + } + + private void advanceSeconds(long seconds) { + currentMillis.addAndGet(TimeUnit.SECONDS.toMillis(seconds)); + } + } +} diff --git a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java index c4ab7301ba8..a2f8996dbb4 100644 --- a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java +++ b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java @@ -66,6 +66,17 @@ public void cacheEnabled() throws Exception { assertEquals("test hello", text); } + @Test + public void cacheStatsTrackRequestsImplementationIndependently() throws Exception { + ExtractedTextCache cache = new ExtractedTextCache(10 * FileUtils.ONE_MB, 100); + + Blob blob = new IdBlob("hello", "stats-id"); + assertNull(cache.get("/a", "foo", blob, false)); + + assertEquals(1, cache.getCacheStats().getRequestCount()); + assertEquals(1, cache.getCacheStats().getMissCount()); + } + @Test public void cacheEnabledNonIdBlob() throws Exception { ExtractedTextCache cache = new ExtractedTextCache(10 * FileUtils.ONE_MB, 100); diff --git 
a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java index 4b965ce07d0..eb63dcd4633 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java @@ -70,6 +70,21 @@ public void putTest() throws ExecutionException { assertEquals(segment1, cache.getSegment(id1, () -> failToLoad(id1))); } + @Test + public void getSegmentWrapsCheckedLoaderFailureInExecutionException() { + Exception failure = new Exception("load failed"); + + try { + cache.getSegment(id1, () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("load failed", e.getCause().getMessage()); + } + } + @Test public void invalidateTests() throws ExecutionException { cache.putSegment(segment1); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java index 77a26568e2c..de4f5255453 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java @@ -207,6 +207,8 @@ public void branchCommit() throws Exception { @Test public void cacheEmptyCommitValue() throws Exception { + // Remove a commit value after the change is written, then compare a resolver + // without negative caching to one that remembers old empty results. 
addNode("/foo"); // add changes and remove commit value NodeBuilder builder = ns.getRoot().builder(); @@ -234,6 +236,25 @@ public void cacheEmptyCommitValue() throws Exception { assertThat(countDocumentLookUps(() -> cvr.resolve(commitRev, foo)), equalTo(0)); } + @Test + public void committedValueFromPreviousDocumentIsCached() throws Exception { + // Move a committed revision out of the main document into previous documents, + // resolve it once, then verify a caching resolver can answer again without + // additional document-store lookups. + CommitValueResolver cachingResolver = newCachingCommitValueResolver(100); + Revision revision = addNode("/foo"); + assertTrue(getDocument("/").getLocalRevisions().containsKey(revision)); + while (getDocument("/").getLocalRevisions().containsKey(revision)) { + someChange("/"); + ns.runBackgroundUpdateOperations(); + } + + NodeDocument root = getDocument("/"); + assertEquals("c", cachingResolver.resolve(revision, root)); + NodeDocument cachedRoot = getDocument("/"); + assertThat(countDocumentLookUps(() -> cachingResolver.resolve(revision, cachedRoot)), equalTo(0)); + } + private int countDocumentLookUps(Callable c) throws Exception { int numCalls = store.getNumFindCalls(NODES); c.call(); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java new file mode 100644 index 00000000000..ae10d8fd09c --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.lang.reflect.Method; + +import org.apache.jackrabbit.oak.cache.CacheStats; +import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocumentCache; +import org.apache.jackrabbit.oak.plugins.document.locks.StripedNodeDocumentLocks; +import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore; +import org.apache.jackrabbit.oak.plugins.document.util.StringValue; +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for {@link DocumentNodeStoreBuilder} cache configuration. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class DocumentNodeStoreBuilderTest { + + @Test + public void buildNodeDocumentCacheReturnsNonNull() { + // Verify the builder can construct the node-document cache with the default + // in-memory configuration and a plain in-memory document store. + DocumentStore store = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); + Assert.assertNotNull(cache); + } + + @Test + public void buildNodeDocumentCacheStatsAreNonEmpty() { + // The builder wires cache stats as part of construction, so the returned + // cache should already expose at least one stats entry. 
+ DocumentStore store = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); + Iterable stats = cache.getCacheStats(); + Assert.assertNotNull(stats); + Assert.assertTrue(stats.iterator().hasNext()); + } + + @Test + public void buildNodeDocumentCacheIsUsable() throws Exception { + // Round-trip a document through the built cache so this test checks + // observable put/get behavior instead of just construction. + DocumentStore docStore = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(docStore, new StripedNodeDocumentLocks()); + // put a document and verify it can be retrieved + NodeDocument doc = new NodeDocument(docStore, 1L); + doc.put(Document.ID, "test-id"); + doc.put(Document.MOD_COUNT, 1L); + cache.put(doc); + NodeDocument result = cache.getIfPresent("test-id"); + Assert.assertNotNull(result); + Assert.assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void buildNodeDocumentCacheWithZeroMemoryDistributionStillReturnsUsableCache() throws Exception { + DocumentStore docStore = new MemoryDocumentStore(); + // This verifies builder behavior when all memory cache buckets are disabled. + // It does not assert cache-capacity semantics. 
+ NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .memoryCacheDistribution(0, 0, 0, 0, 0) + .buildNodeDocumentCache(docStore, new StripedNodeDocumentLocks()); + NodeDocument doc = new NodeDocument(docStore, 2L); + doc.put(Document.ID, "zero-distribution-id"); + doc.put(Document.MOD_COUNT, 2L); + cache.put(doc); + + NodeDocument result = cache.getIfPresent("zero-distribution-id"); + Assert.assertNotNull(result); + Assert.assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void buildDocumentCacheStoresAndRetrievesDocuments() throws Exception { + // buildDocumentCache() currently returns an implementation-specific cache type, + // so this test uses reflection and checks only the observable put/get contract. + DocumentStore store = new MemoryDocumentStore(); + Object cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder().buildDocumentCache(store); + NodeDocument document = new NodeDocument(store, 1L); + StringValue key = StringValue.fromString("document-cache-id"); + document.put(Document.ID, key.toString()); + document.put(Document.MOD_COUNT, 7L); + + invoke(cache, "put", Object.class, Object.class, key, document); + Object cached = invoke(cache, "getIfPresent", Object.class, key); + + Assert.assertNotNull(cached); + Assert.assertTrue(cached instanceof NodeDocument); + Assert.assertEquals(document.getModCount(), ((NodeDocument) cached).getModCount()); + } + + private static Object invoke(Object target, String methodName, Class parameterType, Object argument) + throws Exception { + Method method = target.getClass().getMethod(methodName, parameterType); + return method.invoke(target, argument); + } + + private static Object invoke(Object target, + String methodName, + Class firstType, + Class secondType, + Object firstArgument, + Object secondArgument) throws Exception { + Method method = target.getClass().getMethod(methodName, firstType, secondType); + return method.invoke(target, firstArgument, 
secondArgument); + } +} diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java index ee7c09f2ef3..ae89307b6a7 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java @@ -3499,6 +3499,85 @@ public void missingLastRevInApplyChanges() throws CommitFailedException { assertNull(ns.getNodeCache().getIfPresent(new PathRev(path, before))); } + @Test + public void getNodeConvertsNodeCacheLoaderFailures() throws Exception { + // Build a store that starts healthy, then make only the node read path fail + // to verify getNode() converts loader failures into DocumentStoreException. + AtomicBoolean failFind = new AtomicBoolean(); + String fooId = Utils.getIdFromPath("/foo"); + DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { + @Override + public T find(Collection collection, String key) { + if (collection == NODES && fooId.equals(key) && failFind.get()) { + throw new IllegalStateException("node lookup failed"); + } + return super.find(collection, key); + } + }; + + DocumentNodeStore writer = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + NodeBuilder builder = writer.getRoot().builder(); + builder.child("foo"); + merge(writer, builder); + writer.dispose(); + + DocumentNodeStore reader = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + try { + failFind.set(true); + reader.getNode(Path.fromString("/foo"), reader.getHeadRevision()); + fail("must fail with DocumentStoreException"); + } catch (DocumentStoreException e) { + assertThat(e.getMessage(), containsString("node lookup failed")); + assertTrue(e.getCause() instanceof 
IllegalStateException); + } finally { + reader.dispose(); + } + } + + @Test + public void getChildrenConvertsNodeChildrenCacheLoaderFailures() throws Exception { + // Populate a parent/child structure first, then fail only the child query path + // so the test exercises getChildren() exception conversion through the cache loader. + AtomicBoolean failQuery = new AtomicBoolean(); + DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { + @NotNull + @Override + public List query(Collection collection, + String fromKey, + String toKey, + int limit) { + if (collection == NODES && failQuery.get()) { + throw new IllegalStateException("child query failed"); + } + return super.query(collection, fromKey, toKey, limit); + } + }; + + DocumentNodeStore writer = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + NodeBuilder builder = writer.getRoot().builder(); + builder.child("parent").child("child"); + merge(writer, builder); + writer.dispose(); + + DocumentNodeStore reader = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + try { + DocumentNodeState parent = reader.getNode(Path.fromString("/parent"), reader.getHeadRevision()); + assertNotNull(parent); + failQuery.set(true); + reader.getChildren(parent, "", 10); + fail("must fail with DocumentStoreException"); + } catch (DocumentStoreException e) { + assertThat(e.getMessage(), containsString("Error occurred while fetching children for path /parent")); + assertTrue(e.getCause() instanceof IllegalStateException); + } finally { + reader.dispose(); + } + } + // OAK-6351 @Test public void inconsistentNodeChildrenCache() throws Exception { diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java new file mode 100644 index 00000000000..1fde5716c5b --- 
/dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.util.concurrent.atomic.AtomicBoolean; + +import org.junit.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Compatibility tests for {@link LocalDiffCache}. + * These assertions stay at the {@link DiffCache} surface and do not reference + * the underlying cache implementation. 
+ */ +public class LocalDiffCacheCompatibilityTest { + + private static final int CLUSTER_ID = 1; + + @Rule + public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider(); + + private LocalDiffCache buildCache() { + return new LocalDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + } + + @Test + public void getChangesReturnsEmptyStringForMissingPathInsideCachedDiff() { + // Cache only the root diff and then ask for an uncached child path from the + // same revision pair; LocalDiffCache should report that as an empty diff. + LocalDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + + DiffCache.Entry entry = cache.newEntry(from, to, true); + entry.append(Path.ROOT, "^\"root\":{}"); + entry.done(); + + assertEquals("", cache.getChanges(from, to, Path.fromString("/missing"), null)); + } + + @Test + public void getChangesDelegatesToLoaderWhenRevisionPairIsNotCached() { + // Start from a completely uncached revision pair and verify getChanges() + // falls back to the supplied loader instead of inventing a cached value. 
+ LocalDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + AtomicBoolean loaderCalled = new AtomicBoolean(); + + String result = cache.getChanges(from, to, Path.ROOT, () -> { + loaderCalled.set(true); + return "^\"loaded\":{}"; + }); + + assertTrue(loaderCalled.get()); + assertEquals("^\"loaded\":{}", result); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } +} diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java index a62adbed710..a96980e08d7 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java @@ -18,11 +18,14 @@ import java.util.UUID; +import org.apache.jackrabbit.oak.cache.CacheStats; import org.junit.Rule; import org.junit.Test; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; public class MemoryDiffCacheTest { @@ -44,6 +47,60 @@ public void limit() throws Exception { assertNull(cache.getChanges(from, to, Path.fromString("/foo"), null)); } + @Test + public void invalidateAllClearsAllCachedEntries() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, "^\"foo\":{}"); + entry.done(); + + assertNotNull(cache.getChanges(from, to, Path.ROOT, null)); + 
cache.invalidateAll(); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void getStatsReturnsNonEmptyIterable() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + Iterable<CacheStats> statsIterable = cache.getStats(); + assertNotNull(statsIterable); + assertTrue(statsIterable.iterator().hasNext()); + } + + @Test + public void getChangesReturnsNullForUncachedRevisions() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void doneMakesRootPathChangesReadableFromCache() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + String rootPathChanges = "^\"foo\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + String actualChanges = cache.getChanges(from, to, Path.ROOT, null); + assertNotNull(actualChanges); + assertEquals(rootPathChanges, actualChanges); + } + private static String changes(int minLength) { StringBuilder sb = new StringBuilder(); while (sb.length() < minLength) { diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java new file mode 100644 index 00000000000..0923ca8750a --- /dev/null +++ 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.lang.reflect.Field; + +import org.apache.jackrabbit.oak.cache.CacheStats; +import org.junit.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Tests for {@link TieredDiffCache}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. 
+ */ +public class TieredDiffCacheTest { + + private static final int CLUSTER_ID = 1; + + @Rule + public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider(); + + private TieredDiffCache buildCache() { + return new TieredDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0), CLUSTER_ID); + } + + @Test + public void getChangesReturnsNullForUncachedRevision() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void newEntryLocalPopulatesLocalCache() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + String rootPathChanges = "^\"foo\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, true); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + assertEquals(rootPathChanges, getTier(cache, "localCache").getChanges(from, to, Path.ROOT, null)); + assertNull(getTier(cache, "memoryCache").getChanges(from, to, Path.ROOT, null)); + assertEquals(rootPathChanges, cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void newEntryExternalPopulatesMemoryCache() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(2)); + RevisionVector to = new RevisionVector(Revision.newRevision(2)); + String rootPathChanges = "^\"bar\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + assertNull(getTier(cache, "localCache").getChanges(from, to, Path.ROOT, null)); + assertEquals(rootPathChanges, getTier(cache, "memoryCache").getChanges(from, to, Path.ROOT, null)); + 
assertEquals(rootPathChanges, cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void getStatsReturnsNonEmptyIterable() { + TieredDiffCache cache = buildCache(); + Iterable<CacheStats> stats = cache.getStats(); + assertNotNull(stats); + assertTrue(stats.iterator().hasNext()); + } + + @Test + public void invalidateAllClearsCache() { + TieredDiffCache cache = buildCache(); + RevisionVector localFrom = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector localTo = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector externalFrom = new RevisionVector(Revision.newRevision(2)); + RevisionVector externalTo = new RevisionVector(Revision.newRevision(2)); + + DiffCache.Entry localEntry = cache.newEntry(localFrom, localTo, true); + localEntry.append(Path.ROOT, "^\"local\":{}"); + localEntry.done(); + + DiffCache.Entry externalEntry = cache.newEntry(externalFrom, externalTo, false); + externalEntry.append(Path.ROOT, "^\"external\":{}"); + externalEntry.done(); + + assertNotNull(cache.getChanges(localFrom, localTo, Path.ROOT, null)); + assertNotNull(cache.getChanges(externalFrom, externalTo, Path.ROOT, null)); + cache.invalidateAll(); + assertNull(getTier(cache, "localCache").getChanges(localFrom, localTo, Path.ROOT, null)); + assertNull(getTier(cache, "memoryCache").getChanges(externalFrom, externalTo, Path.ROOT, null)); + assertNull(cache.getChanges(localFrom, localTo, Path.ROOT, null)); + assertNull(cache.getChanges(externalFrom, externalTo, Path.ROOT, null)); + } + + private static DiffCache getTier(TieredDiffCache cache, String fieldName) { + try { + Field field = TieredDiffCache.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (DiffCache) field.get(cache); + } catch (ReflectiveOperationException e) { + throw new AssertionError("Unable to access diff cache tier " + fieldName, e); + } + } +} diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java index 6cb22db5c33..8ab1f82df8b 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java @@ -16,6 +16,9 @@ */ package org.apache.jackrabbit.oak.plugins.document.cache; +import java.util.concurrent.ExecutionException; + +import org.apache.jackrabbit.oak.cache.CacheStats; import org.apache.jackrabbit.oak.plugins.document.Document; import org.apache.jackrabbit.oak.plugins.document.DocumentStore; import org.apache.jackrabbit.oak.plugins.document.NodeDocument; @@ -28,6 +31,10 @@ import static java.util.Collections.singleton; import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; public class NodeDocumentCacheTest { @@ -68,6 +75,69 @@ public void cacheConsistency() throws Exception { assertEquals(updated.getModCount(), cache.get(ID, () -> updated).getModCount()); } + @Test + public void getWithCallableLoadsDocumentOnMiss() throws Exception { + NodeDocument doc = createDocument(1L); + NodeDocument loaded = cache.get(ID, () -> doc); + assertEquals(doc.getModCount(), loaded.getModCount()); + } + + @Test + public void getWithCallableReturnsCachedDocumentOnHit() throws Exception { + NodeDocument doc = createDocument(1L); + cache.put(doc); + // loader should not be called since doc is already cached + NodeDocument loaded = cache.get(ID, () -> { + throw new RuntimeException("loader must not be called on cache hit"); 
+ }); + assertEquals(doc.getModCount(), loaded.getModCount()); + } + + @Test + public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { + Exception failure = new Exception("simulated load failure"); + try { + cache.get(ID, () -> { + // This verifies the existing Oak-visible checked-exception + // contract, not raw runtime propagation. + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("simulated load failure", e.getCause().getMessage()); + } + } + + @Test + public void getIfPresentReturnsNullForUncachedKey() { + assertNull(cache.getIfPresent("not-cached")); + } + + @Test + public void putAndGetIfPresentReturnsDocument() { + NodeDocument doc = createDocument(5L); + cache.put(doc); + NodeDocument result = cache.getIfPresent(ID); + assertNotNull(result); + assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void invalidateRemovesDocumentFromCache() { + NodeDocument doc = createDocument(1L); + cache.put(doc); + cache.invalidate(ID); + assertNull(cache.getIfPresent(ID)); + } + + @Test + public void getCacheStatsReturnsNonEmptyIterable() { + Iterable<CacheStats> statsIterable = cache.getCacheStats(); + assertNotNull(statsIterable); + assertTrue(statsIterable.iterator().hasNext()); + } + private NodeDocument createDocument(long modCount) { NodeDocument doc = new NodeDocument(store, modCount); doc.put(Document.ID, ID); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java new file mode 100644 index 00000000000..f9a52f63047 --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document.persistentCache; + +import java.io.File; +import java.util.concurrent.ExecutionException; + +import org.apache.jackrabbit.oak.cache.CacheLIRS; +import org.apache.jackrabbit.oak.plugins.document.MemoryDiffCache; +import org.apache.jackrabbit.oak.plugins.document.Path; +import org.apache.jackrabbit.oak.plugins.document.Revision; +import org.apache.jackrabbit.oak.plugins.document.RevisionVector; +import org.apache.jackrabbit.oak.plugins.document.util.StringValue; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +/** + * Compatibility tests for the persistent cache wrapping layer. + * These assertions stay on Oak-visible behavior and intentionally avoid direct + * use of third-party cache APIs in the test code. 
+ */ +public class PersistentCacheCompatibilityTest { + + @Rule + public final TemporaryFolder tempFolder = new TemporaryFolder(new File("target")); + + @Test + public void wrapReturnsUsablePersistentDiffCache() throws Exception { + // Wrapping a DIFF cache should return a usable cache handle whose + // observable put/get behavior matches the in-memory base contract. + CacheHandle handle = openDiffCache("wrap"); + try { + MemoryDiffCache.Key key = key(0); + StringValue value = new StringValue("value"); + assertNotNull(handle.cache); + handle.cache.put(key, value); + assertEquals(value, handle.cache.getIfPresent(key)); + } finally { + handle.close(); + } + } + + @Test + public void invalidateRemovesOnlyTheRequestedPersistedEntry() throws Exception { + // Persist two keys, invalidate only one after reopening, then reopen again + // to prove the removal was durable and did not affect the sibling entry. + MemoryDiffCache.Key first = key(1); + MemoryDiffCache.Key second = key(2); + + CacheHandle initial = openDiffCache("invalidateOne"); + try { + initial.cache.put(first, new StringValue("first")); + initial.cache.put(second, new StringValue("second")); + } finally { + initial.close(); + } + + CacheHandle reopened = openDiffCache("invalidateOne"); + try { + assertEquals(new StringValue("first"), reopened.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), reopened.cache.getIfPresent(second)); + reopened.cache.invalidate(first); + } finally { + reopened.close(); + } + + CacheHandle afterInvalidate = openDiffCache("invalidateOne"); + try { + assertNull(afterInvalidate.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), afterInvalidate.cache.getIfPresent(second)); + } finally { + afterInvalidate.close(); + } + } + + @Test + public void invalidateAllClearsPersistedEntriesAcrossReopen() throws Exception { + // Persist entries, clear the wrapped cache, and reopen the persistent layer + // to verify invalidateAll() removes the durable state 
as well. + MemoryDiffCache.Key first = key(1); + MemoryDiffCache.Key second = key(2); + + CacheHandle initial = openDiffCache("invalidateAll"); + try { + initial.cache.put(first, new StringValue("first")); + initial.cache.put(second, new StringValue("second")); + } finally { + initial.close(); + } + + CacheHandle reopened = openDiffCache("invalidateAll"); + try { + assertEquals(new StringValue("first"), reopened.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), reopened.cache.getIfPresent(second)); + reopened.cache.invalidateAll(); + } finally { + reopened.close(); + } + + CacheHandle afterInvalidate = openDiffCache("invalidateAll"); + try { + assertNull(afterInvalidate.cache.getIfPresent(first)); + assertNull(afterInvalidate.cache.getIfPresent(second)); + } finally { + afterInvalidate.close(); + } + } + + @Test + public void getWrapsCheckedLoaderFailureInExecutionException() throws Exception { + // Use a checked loader failure here because NodeCache exposes the same + // checked get(key, loader) contract as the in-memory cache underneath it. 
+ CacheHandle handle = openDiffCache("loaderFailure"); + Exception failure = new Exception("simulated persistent-cache load failure"); + + try { + handle.cache.get(key(7), () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertSame(failure, e.getCause()); + assertEquals("simulated persistent-cache load failure", e.getCause().getMessage()); + } finally { + handle.close(); + } + } + + private CacheHandle openDiffCache(String name) throws Exception { + File directory = new File(tempFolder.getRoot(), name); + directory.mkdirs(); + PersistentCache persistentCache = new PersistentCache(directory.getAbsolutePath() + ",-async"); + CacheLIRS<MemoryDiffCache.Key, StringValue> base = CacheLIRS.<MemoryDiffCache.Key, StringValue>newBuilder() + .maximumSize(16) + .build(); + @SuppressWarnings("unchecked") + NodeCache<MemoryDiffCache.Key, StringValue> wrapped = (NodeCache<MemoryDiffCache.Key, StringValue>) persistentCache.wrap( + null, null, base, CacheType.DIFF); + return new CacheHandle(persistentCache, wrapped); + } + + private static MemoryDiffCache.Key key(int id) { + RevisionVector from = new RevisionVector(new Revision(0, 0, id)); + RevisionVector to = new RevisionVector(new Revision(1, 0, id)); + return new MemoryDiffCache.Key(Path.fromString("/node-" + id), from, to); + } + + private static final class CacheHandle { + private final PersistentCache persistentCache; + private final NodeCache<MemoryDiffCache.Key, StringValue> cache; + + private CacheHandle(PersistentCache persistentCache, + NodeCache<MemoryDiffCache.Key, StringValue> cache) { + this.persistentCache = persistentCache; + this.cache = cache; + } + + private void close() { + persistentCache.close(); + } + } +}