From 3e8fb232034266bcbce7f0ba92f0cba961903a29 Mon Sep 17 00:00:00 2001 From: rishabhdaim Date: Mon, 23 Mar 2026 10:58:47 +0530 Subject: [PATCH 1/5] OAK-12145 : add compatibility tests for Caffeine migration (PR 2807) Add implementation-independent test coverage for all classes affected by the Guava-to-Caffeine cache migration in OAK-11946. Tests reference only Oak-level types (CacheLIRS, CacheStats, DiffCache, etc.) so the same suite can be cherry-picked to OAK-11946 and run unchanged; any failure there is a migration compatibility gap. Modules covered: - oak-core-spi: AbstractCacheStats, CacheLIRS, EmpiricalWeigher - oak-store-document: DocumentNodeStoreBuilder, NodeDocumentCache, MemoryDiffCache, LocalDiffCache, TieredDiffCache, CachingCommitValueResolver, DocumentNodeStore, PersistentCache - oak-run-commons: DocumentNodeStoreHelper - oak-search: ExtractedTextCache (stats tracking) - oak-search-elastic: ElasticIndexStatistics (cache, refresh, failure) - oak-segment-tar: SegmentCache (loader failure contract) - oak-blob: BlobIdSet (cache miss / persistence semantics) - oak-blob-cloud: S3Backend (expiry, cache enable/disable) - oak-blob-cloud-azure: AzureBlobStoreBackend, AzureBlobStoreBackendV8 (expiry, cache enable/disable) --- ...zureBlobStoreBackendCompatibilityTest.java | 97 +++++++ .../v8/AzureBlobStoreBackendV8Test.java | 97 +++++++ .../oak/blob/cloud/s3/S3BackendTest.java | 125 ++++++++ .../split/BlobIdSetCompatibilityTest.java | 83 ++++++ .../oak/cache/AbstractCacheStatsTest.java | 235 ++++++++++++++++ .../oak/cache/CacheLIRSCompatibilityTest.java | 121 ++++++++ .../oak/cache/EmpiricalWeigherTest.java | 94 +++++++ ...asticIndexStatisticsCompatibilityTest.java | 266 ++++++++++++++++++ .../index/search/ExtractedTextCacheTest.java | 11 + .../oak/segment/SegmentCacheTest.java | 15 + .../document/CommitValueResolverTest.java | 16 ++ .../DocumentNodeStoreBuilderTest.java | 119 ++++++++ .../document/DocumentNodeStoreTest.java | 75 +++++ 
.../LocalDiffCacheCompatibilityTest.java | 75 +++++ .../plugins/document/MemoryDiffCacheTest.java | 57 ++++ .../plugins/document/TieredDiffCacheTest.java | 130 +++++++++ .../document/cache/NodeDocumentCacheTest.java | 70 +++++ .../PersistentCacheCompatibilityTest.java | 171 +++++++++++ 18 files changed, 1857 insertions(+) create mode 100644 oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java create mode 100644 oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java create mode 100644 oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java create mode 100644 oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java create mode 100644 oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java create mode 100644 oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java create mode 100644 oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java create mode 100644 oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java create mode 100644 oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java create mode 100644 oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java create mode 100644 oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java new file mode 100644 
index 00000000000..d6588e1d491 --- /dev/null +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; + +import java.lang.reflect.Field; +import java.net.URI; + +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +/** + * Compatibility tests for direct-download and upload cache configuration in + * {@link AzureBlobStoreBackend}. The assertions are intentionally behavior- + * based and do not depend on a specific cache library. 
+ */ +public class AzureBlobStoreBackendCompatibilityTest { + + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() { + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + URI downloadURI = backend.createHttpDownloadURI( + new DataIdentifier("test"), + DataRecordDownloadOptions.DEFAULT); + + assertNull(downloadURI); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() { + AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); + + assertNull(backend.initiateHttpUpload(1024, 1, DataRecordUploadOptions.DEFAULT)); + } + + private static int getIntField(AzureBlobStoreBackend backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(AzureBlobStoreBackend backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackend.class.getDeclaredField(fieldName); + 
field.setAccessible(true); + return field.get(backend); + } +} diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java index 49cfff277db..7028065a379 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java @@ -36,7 +36,10 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.net.URISyntaxException; +import java.net.URI; import java.time.Duration; import java.time.Instant; import java.util.Date; @@ -163,6 +166,69 @@ public void initSecret() throws Exception { assertReferenceSecret(azureBlobStoreBackend); } + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + 
assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() throws DataStoreException { + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + assertNull(backend.createHttpDownloadURI(new org.apache.jackrabbit.core.data.DataIdentifier("test"), + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions.DEFAULT)); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() throws DataStoreException { + AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); + + assertNull(backend.initiateHttpUpload(1024, 1, + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadOptions.DEFAULT)); + } + + @Test + public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { + CacheHitBackend backend = new CacheHitBackend(); + org.apache.jackrabbit.core.data.DataIdentifier identifier = + new org.apache.jackrabbit.core.data.DataIdentifier("cached"); + URI cachedUri = URI.create("https://cached.example/download"); + + backend.setHttpDownloadURIExpirySeconds(300); + setField(backend, "downloadDomainOverride", "cached.example"); + backend.setHttpDownloadURICacheSize(10); + putIntoCache(getField(backend, "httpDownloadURICache"), + identifier + "cached.example", cachedUri); + + assertEquals(cachedUri, backend.createHttpDownloadURI(identifier, + org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions.DEFAULT)); + } + /* make sure that blob1.txt and blob2.txt are uploaded to AZURE_ACCOUNT_NAME/blobstore container before * executing this test * */ @@ -312,6 +378,37 @@ private static String getConnectionString() { return UtilsV8.getConnectionString(AzuriteDockerRule.ACCOUNT_NAME, AzuriteDockerRule.ACCOUNT_KEY, azurite.getBlobEndpoint()); } + private static int getIntField(AzureBlobStoreBackendV8 backend, String fieldName) throws Exception { + Field field = 
AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(AzureBlobStoreBackendV8 backend, String fieldName) throws Exception { + Field field = AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + field.setAccessible(true); + return field.get(backend); + } + + private static void setField(AzureBlobStoreBackendV8 backend, String fieldName, Object value) throws Exception { + Field field = AzureBlobStoreBackendV8.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(backend, value); + } + + private static void putIntoCache(Object cache, Object key, Object value) throws Exception { + Method put = cache.getClass().getMethod("put", Object.class, Object.class); + put.setAccessible(true); + put.invoke(cache, key, value); + } + + private static final class CacheHitBackend extends AzureBlobStoreBackendV8 { + @Override + public boolean exists(org.apache.jackrabbit.core.data.DataIdentifier identifier) throws DataStoreException { + throw new AssertionError("cached download URI should be returned before checking blob existence"); + } + } + private static void assertReferenceSecret(AzureBlobStoreBackendV8 azureBlobStoreBackend) throws DataStoreException { // assert secret already created on init diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java new file mode 100644 index 00000000000..170d34eed16 --- /dev/null +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.blob.cloud.s3; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.net.URI; + +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +/** + * Compatibility tests for direct-download and upload cache configuration in + * {@link S3Backend}. The assertions are intentionally behavior-based and do + * not reference third-party cache types. 
+ */ +public class S3BackendTest { + + @Test + public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + S3Backend backend = new S3Backend(); + + backend.setHttpDownloadURIExpirySeconds(3600); + + assertEquals(3600, getIntField(backend, "httpDownloadURIExpirySeconds")); + } + + @Test + public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + S3Backend backend = new S3Backend(); + + backend.setHttpUploadURIExpirySeconds(1800); + + assertEquals(1800, getIntField(backend, "httpUploadURIExpirySeconds")); + } + + @Test + public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + S3Backend backend = new S3Backend(); + backend.setHttpDownloadURIExpirySeconds(3600); + + backend.setHttpDownloadURICacheSize(100); + assertNotNull(getField(backend, "httpDownloadURICache")); + + backend.setHttpDownloadURICacheSize(0); + assertNull(getField(backend, "httpDownloadURICache")); + } + + @Test + public void createHttpDownloadURIReturnsNullWhenDisabled() { + S3Backend backend = new S3Backend(); + + URI downloadURI = backend.createHttpDownloadURI( + new DataIdentifier("test"), + DataRecordDownloadOptions.DEFAULT); + + assertNull(downloadURI); + } + + @Test + public void initiateHttpUploadReturnsNullWhenDisabled() { + S3Backend backend = new S3Backend(); + + assertNull(backend.initiateHttpUpload(1024, 1)); + } + + @Test + public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { + CacheHitBackend backend = new CacheHitBackend(); + DataIdentifier identifier = new DataIdentifier("cached"); + URI cachedUri = URI.create("https://cached.example/download"); + + backend.setHttpDownloadURIExpirySeconds(300); + backend.setHttpDownloadURICacheSize(10); + putIntoCache(getField(backend, "httpDownloadURICache"), identifier, cachedUri); + + assertEquals(cachedUri, backend.createHttpDownloadURI(identifier, DataRecordDownloadOptions.DEFAULT)); + } + + private static int getIntField(S3Backend backend, 
String fieldName) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (int) field.get(backend); + } + + private static Object getField(S3Backend backend, String fieldName) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + return field.get(backend); + } + + private static void putIntoCache(Object cache, Object key, Object value) throws Exception { + Method put = cache.getClass().getMethod("put", Object.class, Object.class); + put.setAccessible(true); + put.invoke(cache, key, value); + } + + private static final class CacheHitBackend extends S3Backend { + @Override + public boolean exists(DataIdentifier identifier) throws DataStoreException { + throw new AssertionError("cached download URI should be returned before checking blob existence"); + } + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java new file mode 100644 index 00000000000..399bf8d9064 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.split; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Compatibility tests for {@link BlobIdSet}. + * These assertions verify observable lookup semantics without depending on the + * underlying cache implementation. + */ +public class BlobIdSetCompatibilityTest { + + private static final String TEST_FILENAME = "compat-blob-ids.txt"; + + private File tempDir; + private File storeFile; + private BlobIdSet blobIdSet; + + @Before + public void setUp() throws IOException { + tempDir = Files.createTempDirectory("blob-id-set-compat").toFile(); + storeFile = new File(tempDir, TEST_FILENAME); + blobIdSet = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + } + + @After + public void tearDown() { + if (storeFile.exists()) { + storeFile.delete(); + } + tempDir.delete(); + } + + @Test + public void containsReturnsTrueForEntryAddedAfterRestart() throws IOException { + try (FileWriter writer = new FileWriter(storeFile)) { + writer.write("blob-from-store\n"); + } + + BlobIdSet restarted = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + + assertTrue(restarted.contains("blob-from-store")); + } + + @Test + public void containsIgnoresNewStoreEntryUntilBloomFilterIsUpdated() throws IOException { + assertFalse(blobIdSet.contains("missing")); + + try (FileWriter writer = new FileWriter(storeFile)) { + writer.write("missing\n"); + } + + assertFalse(blobIdSet.contains("missing")); + + blobIdSet.add("missing"); + assertTrue(blobIdSet.contains("missing")); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java 
b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java new file mode 100644 index 00000000000..4d3fb779d4e --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/AbstractCacheStatsTest.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.cache; + +import java.util.concurrent.ExecutionException; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for {@link AbstractCacheStats} using {@link CacheLIRS} as the backing cache. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. 
+ */ +public class AbstractCacheStatsTest { + + private static final String CACHE_NAME = "testCache"; + private static final long MAX_WEIGHT = 1000; + + private CacheLIRS cache; + private CacheStats stats; + + @Before + public void setUp() { + cache = new CacheLIRS<>(null, MAX_WEIGHT, 1, 1, 0, null, null, null); + stats = new CacheStats(cache, CACHE_NAME, null, MAX_WEIGHT); + } + + @Test + public void getNameReturnsConstructorValue() { + Assert.assertEquals(CACHE_NAME, stats.getName()); + } + + @Test + public void hitCountIncreasesOnCacheHit() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // cache hit — callable not invoked + Assert.assertEquals(1, stats.getHitCount()); + } + + @Test + public void missCountIncreasesOnCacheMiss() { + cache.getIfPresent("absent"); + Assert.assertEquals(1, stats.getMissCount()); + } + + @Test + public void requestCountIsSumOfHitsAndMisses() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // hit + cache.getIfPresent("absent"); // miss + Assert.assertEquals(2, stats.getRequestCount()); + Assert.assertEquals(1, stats.getHitCount()); + Assert.assertEquals(1, stats.getMissCount()); + } + + @Test + public void loadSuccessCountIncreasesOnCallableLoad() throws ExecutionException { + cache.get("k1", () -> "v1"); // miss + load + Assert.assertEquals(1, stats.getLoadSuccessCount()); + Assert.assertEquals(1, stats.getLoadCount()); + } + + @Test + public void loadExceptionCountIncreasesOnFailedLoad() { + try { + cache.get("k1", () -> { + throw new RuntimeException("load failed"); + }); + } catch (ExecutionException ignored) { + } + Assert.assertEquals(1, stats.getLoadExceptionCount()); + Assert.assertEquals(1, stats.getLoadCount()); + Assert.assertEquals(0, stats.getLoadSuccessCount()); + } + + @Test + public void evictionCountIncreasesAfterCapacityEviction() { + // LIRS needs warm-up: create cache of size 5 and add 30 entries to ensure evictions + CacheLIRS smallCache = 
CacheLIRS.newBuilder() + .maximumSize(5) + .build(); + CacheStats smallStats = new CacheStats(smallCache, "small", null, 5); + for (int i = 0; i < 30; i++) { + smallCache.put("k" + i, "v" + i); + } + Assert.assertTrue("evictionCount should be positive after capacity eviction", + smallStats.getEvictionCount() > 0); + } + + @Test + public void maxTotalWeightReturnsConfiguredValue() { + Assert.assertEquals(MAX_WEIGHT, stats.getMaxTotalWeight()); + } + + @Test + public void elementCountReflectsCachedEntries() throws ExecutionException { + cache.get("k1", () -> "v1"); + cache.get("k2", () -> "v2"); + Assert.assertEquals(2, stats.getElementCount()); + } + + @Test + public void estimateCurrentWeightReturnsNegativeOneWhenNoWeigher() { + Assert.assertEquals(-1, stats.estimateCurrentWeight()); + } + + @Test + public void resetStatsClearsCountersButNotCacheContents() throws ExecutionException { + cache.get("k1", () -> "v1"); // miss + load + cache.get("k1", () -> "v1"); // hit + cache.getIfPresent("absent"); // miss + + stats.resetStats(); + + Assert.assertEquals(0, stats.getRequestCount()); + Assert.assertEquals(0, stats.getHitCount()); + Assert.assertEquals(0, stats.getMissCount()); + Assert.assertEquals(0, stats.getLoadCount()); + Assert.assertEquals(0, stats.getLoadSuccessCount()); + Assert.assertEquals(0, stats.getLoadExceptionCount()); + Assert.assertEquals(0, stats.getEvictionCount()); + Assert.assertEquals(0.0, stats.getLoadExceptionRate(), Double.MIN_VALUE); + Assert.assertEquals(0, stats.getTotalLoadTime()); + // cache contents unchanged after reset + Assert.assertEquals(1, stats.getElementCount()); + } + + @Test + public void hitRateIsOneWhenAllAccessesAreHits() throws ExecutionException { + cache.put("k1", "v1"); + cache.get("k1", () -> "v1"); // hit + Assert.assertEquals(1.0, stats.getHitRate(), Double.MIN_VALUE); + } + + @Test + public void hitRateIsOneWhenNoRequestsYet() { + // by convention, hit rate is 1.0 when there are no requests + Assert.assertEquals(1.0, 
stats.getHitRate(), Double.MIN_VALUE); + } + + @Test + public void cacheInfoAsStringContainsRequiredFields() throws ExecutionException { + cache.get("k1", () -> "v1"); + String info = stats.cacheInfoAsString(); + Assert.assertTrue("cacheInfoAsString should contain hitCount", info.contains("hitCount=")); + Assert.assertTrue("cacheInfoAsString should contain missCount", info.contains("missCount=")); + Assert.assertTrue("cacheInfoAsString should contain loadCount", info.contains("loadCount=")); + Assert.assertTrue("cacheInfoAsString should contain elementCount", info.contains("elementCount=1")); + Assert.assertTrue("cacheInfoAsString should contain maxWeight", info.contains("maxWeight=")); + } + + @Test + public void timeInWordsIncludesMinAndSec() { + String result = AbstractCacheStats.timeInWords(0); + Assert.assertNotNull(result); + Assert.assertTrue("timeInWords should contain 'min'", result.contains("min")); + Assert.assertTrue("timeInWords should contain 'sec'", result.contains("sec")); + } + + @Test + public void timeInWordsFormatsOneMinute() { + long oneMinuteNanos = 60L * 1_000_000_000L; + String result = AbstractCacheStats.timeInWords(oneMinuteNanos); + Assert.assertTrue("1-minute duration should contain '1 min'", result.contains("1 min")); + } + + @Test + public void loadExceptionRateAfterMixedLoads() throws ExecutionException { + cache.get("success", () -> "v1"); // success + try { + cache.get("failure", () -> { + throw new RuntimeException("boom"); + }); + } catch (ExecutionException ignored) { + } + // 1 success + 1 exception = 2 loads; rate = 0.5 + Assert.assertEquals(0.5, stats.getLoadExceptionRate(), 0.001); + } + + @Test + public void loadExceptionRateIsZeroWhenNoLoads() { + Assert.assertEquals(0.0, stats.getLoadExceptionRate(), Double.MIN_VALUE); + } + + @Test + public void totalLoadTimeIsPositiveAfterMeasuredLoad() throws ExecutionException { + cache.get("k1", () -> { + Thread.sleep(1); + return "v1"; + }); + Assert.assertTrue("totalLoadTime should 
be > 0 after a measured load", stats.getTotalLoadTime() > 0); + } + + @Test + public void averageLoadPenaltyIsPositiveAfterMeasuredLoad() throws ExecutionException { + cache.get("k1", () -> { + Thread.sleep(1); + return "v1"; + }); + Assert.assertTrue("averageLoadPenalty should be > 0 after a measured load", + stats.getAverageLoadPenalty() > 0.0); + } + + @Test + public void missRateIsOneWhenAllAccessesAreMisses() { + cache.getIfPresent("a"); + cache.getIfPresent("b"); + Assert.assertEquals(1.0, stats.getMissRate(), Double.MIN_VALUE); + } + + @Test + public void statsAreAccumulatedAcrossMultipleLoads() throws ExecutionException { + cache.get("k1", () -> "v1"); + cache.get("k2", () -> "v2"); + cache.get("k3", () -> "v3"); + Assert.assertEquals(3, stats.getLoadSuccessCount()); + Assert.assertEquals(3, stats.getLoadCount()); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java new file mode 100644 index 00000000000..842ea7076e8 --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.cache; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Compatibility tests for the Oak-visible {@link CacheLIRS} API surface. + * These assertions intentionally avoid third-party cache APIs so the same + * tests can run before and after the cache implementation migration. + */ +public class CacheLIRSCompatibilityTest { + + @Test + public void getWithCallableCachesLoadedValue() throws ExecutionException { + CacheLIRS<String, String> cache = CacheLIRS.<String, String>newBuilder() + .maximumSize(10) + .build(); + AtomicInteger loadCount = new AtomicInteger(); + + assertEquals("loaded", cache.get("k", () -> { + loadCount.incrementAndGet(); + return "loaded"; + })); + assertEquals("loaded", cache.get("k", () -> { + loadCount.incrementAndGet(); + return "other"; + })); + + assertEquals(1, loadCount.get()); + assertEquals("loaded", cache.getIfPresent("k")); + } + + @Test + public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { + CacheLIRS<String, String> cache = CacheLIRS.<String, String>newBuilder() + .maximumSize(10) + .build(); + Exception failure = new Exception("checked failure"); + + try { + cache.get("k", () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("checked failure", e.getCause().getMessage()); + assertNull(cache.getIfPresent("k")); + } + } + + @Test + public void invalidateAllClearsPreviouslyCachedEntries() throws ExecutionException { + CacheLIRS<String, String> cache = CacheLIRS.<String, String>newBuilder() + .maximumSize(10) + .build(); + + cache.get("k1", () -> 
"v1"); + cache.get("k2", () -> "v2"); + assertEquals(2, cache.size()); + + cache.invalidateAll(); + + assertEquals(0, cache.size()); + assertNull(cache.getIfPresent("k1")); + assertNull(cache.getIfPresent("k2")); + assertTrue(cache.asMap().isEmpty()); + } + + @Test + public void evictionCallbackIsInvokedWhenEntryIsEvictedBySize() { + AtomicInteger evictions = new AtomicInteger(); + AtomicReference firstEvictedKey = new AtomicReference<>(); + AtomicReference firstEvictedValue = new AtomicReference<>(); + CacheLIRS cache = CacheLIRS.newBuilder() + .maximumSize(10) + .evictionCallback((key, value, cause) -> { + if (evictions.getAndIncrement() == 0) { + firstEvictedKey.set(key); + firstEvictedValue.set(value); + } + }) + .build(); + + // LIRS requires cold-queue warm-up before evicting; 100× the cache capacity + // ensures at least one eviction on any conforming implementation. + for (int i = 0; i < 1000 && evictions.get() == 0; i++) { + cache.put("k" + i, "v" + i); + } + + assertTrue("expected at least one eviction callback", evictions.get() > 0); + assertTrue(firstEvictedKey.get().startsWith("k")); + assertTrue(firstEvictedValue.get().startsWith("v")); + assertNull(cache.getIfPresent(firstEvictedKey.get())); + } +} diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java new file mode 100644 index 00000000000..39209ad5d70 --- /dev/null +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigherTest.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.cache; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for {@link EmpiricalWeigher}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class EmpiricalWeigherTest { + + private static final int ENTRY_OVERHEAD = 168; + + private final EmpiricalWeigher weigher = new EmpiricalWeigher(); + + @Test + public void weighIncludesBaseOverheadForZeroMemoryValues() { + CacheValue key = () -> 0; + CacheValue value = () -> 0; + Assert.assertEquals(ENTRY_OVERHEAD, weigher.weigh(key, value)); + } + + @Test + public void weighAddsKeyAndValueMemoryToOverhead() { + CacheValue key = () -> 100; + CacheValue value = () -> 200; + Assert.assertEquals(ENTRY_OVERHEAD + 100 + 200, weigher.weigh(key, value)); + } + + @Test + public void weighWithOnlyKeyMemory() { + CacheValue key = () -> 50; + CacheValue value = () -> 0; + Assert.assertEquals(ENTRY_OVERHEAD + 50, weigher.weigh(key, value)); + } + + @Test + public void weighWithOnlyValueMemory() { + CacheValue key = () -> 0; + CacheValue value = () -> 300; + Assert.assertEquals(ENTRY_OVERHEAD + 300, weigher.weigh(key, value)); + } + + @Test + public void weighCapsAtIntegerMaxValue() { + // key + value + overhead overflows int + CacheValue key = () -> Integer.MAX_VALUE; + CacheValue value = () -> Integer.MAX_VALUE; + Assert.assertEquals(Integer.MAX_VALUE, weigher.weigh(key, value)); + } + + @Test + public void weighIsAlwaysPositive() { + CacheValue key = () -> 1; + CacheValue 
value = () -> 1; + Assert.assertTrue(weigher.weigh(key, value) > 0); + } + + @Test + public void weighReturnsConsistentResultsForSameInput() { + CacheValue key = () -> 42; + CacheValue value = () -> 99; + int first = weigher.weigh(key, value); + int second = weigher.weigh(key, value); + Assert.assertEquals(first, second); + } + + @Test + public void weighJustBelowOverflow() { + // total = 168 + (Integer.MAX_VALUE - 168 - 1) + 1 = Integer.MAX_VALUE + int keyMem = Integer.MAX_VALUE - ENTRY_OVERHEAD - 1; + int valueMem = 1; + CacheValue key = () -> keyMem; + CacheValue value = () -> valueMem; + Assert.assertEquals(Integer.MAX_VALUE, weigher.weigh(key, value)); + } +} diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java new file mode 100644 index 00000000000..a1d932e0c24 --- /dev/null +++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.plugins.index.elastic; + +import co.elastic.clients.elasticsearch.ElasticsearchClient; +import co.elastic.clients.elasticsearch.core.CountRequest; +import co.elastic.clients.elasticsearch.core.CountResponse; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Compatibility tests for {@link ElasticIndexStatistics}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class ElasticIndexStatisticsCompatibilityTest { + + @Mock + private ElasticConnection elasticConnectionMock; + + @Mock + private ElasticIndexDefinition indexDefinitionMock; + + @Mock + private ElasticsearchClient elasticClientMock; + + private AutoCloseable closeable; + + @Before + public void setUp() { + this.closeable = MockitoAnnotations.openMocks(this); + Mockito.when(indexDefinitionMock.getIndexAlias()).thenReturn("test-index"); + Mockito.when(elasticConnectionMock.getClient()).thenReturn(elasticClientMock); + } + + @After + public void releaseMocks() throws Exception { + System.clearProperty("oak.elastic.statsExpireSeconds"); + System.clearProperty("oak.elastic.statsRefreshSeconds"); + closeable.close(); + } + + @Test + public void numDocsReturnsMockedCountFromElasticsearch() throws Exception { + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(42L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new 
ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + Assert.assertEquals(42, indexStatistics.numDocs()); + } + + @Test + public void numDocsCachesResultOnSubsequentCalls() throws Exception { + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(99L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + // first call loads from ES + Assert.assertEquals(99, indexStatistics.numDocs()); + // second call should be served from cache (same value) + Assert.assertEquals(99, indexStatistics.numDocs()); + // ES should only have been called once + Mockito.verify(elasticClientMock, Mockito.times(1)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsPropagatesIOExceptionAsRuntimeFailure() throws Exception { + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenThrow(new IOException("ES down")); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + try { + indexStatistics.numDocs(); + Assert.fail("expected RuntimeException when Elasticsearch is unavailable"); + } catch (RuntimeException e) { + // The exact wrapper type is intentionally not asserted so this test + // can remain valid across cache implementations. 
+ Assert.assertNotNull(findCause(e, IOException.class)); + Assert.assertEquals("ES down", findCause(e, IOException.class).getMessage()); + } + } + + @Test + public void getDocCountForFieldReturnsMockedCount() throws Exception { + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(10L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + Assert.assertEquals(10, indexStatistics.getDocCountFor("someField")); + } + + @Test + public void numDocsAndGetDocCountForUseIndependentCacheKeys() throws Exception { + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(5L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + indexStatistics.numDocs(); + indexStatistics.getDocCountFor("someField"); + // numDocs and getDocCountFor use different cache keys (different StatsRequestDescriptors) + Mockito.verify(elasticClientMock, Mockito.times(2)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsRefreshesValueAfterRefreshWindow() throws Exception { + System.setProperty("oak.elastic.statsExpireSeconds", "30"); + System.setProperty("oak.elastic.statsRefreshSeconds", "1"); + + CountResponse countResponse = Mockito.mock(CountResponse.class); + Mockito.when(countResponse.count()).thenReturn(100L); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenReturn(countResponse); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + + Assert.assertEquals(100, indexStatistics.numDocs()); 
+ Mockito.verify(elasticClientMock, Mockito.times(1)).count(ArgumentMatchers.any(CountRequest.class)); + + Mockito.when(countResponse.count()).thenReturn(1000L); + + TimeUnit.MILLISECONDS.sleep(1200); + + long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); + int refreshedValue = indexStatistics.numDocs(); + while (System.nanoTime() < deadline) { + refreshedValue = indexStatistics.numDocs(); + if (refreshedValue == 1000) { + break; + } + TimeUnit.MILLISECONDS.sleep(50); + } + + Assert.assertEquals(1000, refreshedValue); + Mockito.verify(elasticClientMock, Mockito.atLeast(2)).count(ArgumentMatchers.any(CountRequest.class)); + } + + @Test + public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { + System.setProperty("oak.elastic.statsExpireSeconds", "30"); + System.setProperty("oak.elastic.statsRefreshSeconds", "1"); + + CountResponse initialResponse = Mockito.mock(CountResponse.class); + CountResponse refreshedResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + Mockito.when(refreshedResponse.count()).thenReturn(1000L); + + CountDownLatch refreshStarted = new CountDownLatch(1); + CountDownLatch releaseRefresh = new CountDownLatch(1); + AtomicInteger invocations = new AtomicInteger(); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + refreshStarted.countDown(); + if (!releaseRefresh.await(5, TimeUnit.SECONDS)) { + throw new AssertionError("timed out waiting to release refresh"); + } + return refreshedResponse; + }); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + + Assert.assertEquals(100, indexStatistics.numDocs()); + + TimeUnit.MILLISECONDS.sleep(1200); + Assert.assertEquals(100, indexStatistics.numDocs()); + Assert.assertTrue("expected refresh to start", 
refreshStarted.await(5, TimeUnit.SECONDS)); + + releaseRefresh.countDown(); + + long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); + int refreshedValue = indexStatistics.numDocs(); + while (System.nanoTime() < deadline && refreshedValue != 1000) { + TimeUnit.MILLISECONDS.sleep(50); + refreshedValue = indexStatistics.numDocs(); + } + + Assert.assertEquals(1000, refreshedValue); + } + + @Test + public void numDocsKeepsCachedValueWhenRefreshFails() throws Exception { + System.setProperty("oak.elastic.statsExpireSeconds", "30"); + System.setProperty("oak.elastic.statsRefreshSeconds", "1"); + + CountResponse initialResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + + AtomicInteger invocations = new AtomicInteger(); + Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + throw new IOException("refresh failed"); + }); + + ElasticIndexStatistics indexStatistics = + new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + + Assert.assertEquals(100, indexStatistics.numDocs()); + + TimeUnit.MILLISECONDS.sleep(1200); + Assert.assertEquals(100, indexStatistics.numDocs()); + + long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); + while (System.nanoTime() < deadline && invocations.get() < 2) { + TimeUnit.MILLISECONDS.sleep(50); + } + + Assert.assertTrue("expected refresh attempt", invocations.get() >= 2); + Assert.assertEquals(100, indexStatistics.numDocs()); + } + + private static Throwable findCause(Throwable throwable, Class type) { + Throwable current = throwable; + while (current != null) { + if (type.isInstance(current)) { + return current; + } + current = current.getCause(); + } + return null; + } +} diff --git a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java 
b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java index c4ab7301ba8..a2f8996dbb4 100644 --- a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java +++ b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/ExtractedTextCacheTest.java @@ -66,6 +66,17 @@ public void cacheEnabled() throws Exception { assertEquals("test hello", text); } + @Test + public void cacheStatsTrackRequestsImplementationIndependently() throws Exception { + ExtractedTextCache cache = new ExtractedTextCache(10 * FileUtils.ONE_MB, 100); + + Blob blob = new IdBlob("hello", "stats-id"); + assertNull(cache.get("/a", "foo", blob, false)); + + assertEquals(1, cache.getCacheStats().getRequestCount()); + assertEquals(1, cache.getCacheStats().getMissCount()); + } + @Test public void cacheEnabledNonIdBlob() throws Exception { ExtractedTextCache cache = new ExtractedTextCache(10 * FileUtils.ONE_MB, 100); diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java index 4b965ce07d0..dcd13264214 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java @@ -70,6 +70,21 @@ public void putTest() throws ExecutionException { assertEquals(segment1, cache.getSegment(id1, () -> failToLoad(id1))); } + @Test + public void getSegmentWrapsLoaderFailureInExecutionException() { + RuntimeException failure = new RuntimeException("load failed"); + + try { + cache.getSegment(id1, () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("load failed", e.getCause().getMessage()); + } + } + @Test public void invalidateTests() throws ExecutionException { 
cache.putSegment(segment1); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java index 77a26568e2c..951638ccd55 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java @@ -234,6 +234,22 @@ public void cacheEmptyCommitValue() throws Exception { assertThat(countDocumentLookUps(() -> cvr.resolve(commitRev, foo)), equalTo(0)); } + @Test + public void committedValueFromPreviousDocumentIsCached() throws Exception { + CommitValueResolver cachingResolver = newCachingCommitValueResolver(100); + Revision revision = addNode("/foo"); + assertTrue(getDocument("/").getLocalRevisions().containsKey(revision)); + while (getDocument("/").getLocalRevisions().containsKey(revision)) { + someChange("/"); + ns.runBackgroundUpdateOperations(); + } + + NodeDocument root = getDocument("/"); + assertEquals("c", cachingResolver.resolve(revision, root)); + NodeDocument cachedRoot = getDocument("/"); + assertThat(countDocumentLookUps(() -> cachingResolver.resolve(revision, cachedRoot)), equalTo(0)); + } + private int countDocumentLookUps(Callable c) throws Exception { int numCalls = store.getNumFindCalls(NODES); c.call(); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java new file mode 100644 index 00000000000..103efc6feb9 --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.lang.reflect.Method; + +import org.apache.jackrabbit.oak.cache.CacheStats; +import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocumentCache; +import org.apache.jackrabbit.oak.plugins.document.locks.StripedNodeDocumentLocks; +import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore; +import org.apache.jackrabbit.oak.plugins.document.util.StringValue; +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for {@link DocumentNodeStoreBuilder} cache configuration. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. 
+ */ +public class DocumentNodeStoreBuilderTest { + + @Test + public void buildNodeDocumentCacheReturnsNonNull() { + DocumentStore store = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); + Assert.assertNotNull(cache); + } + + @Test + public void buildNodeDocumentCacheStatsAreNonEmpty() { + DocumentStore store = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); + Iterable stats = cache.getCacheStats(); + Assert.assertNotNull(stats); + Assert.assertTrue(stats.iterator().hasNext()); + } + + @Test + public void buildNodeDocumentCacheIsUsable() throws Exception { + DocumentStore docStore = new MemoryDocumentStore(); + NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .buildNodeDocumentCache(docStore, new StripedNodeDocumentLocks()); + // put a document and verify it can be retrieved + NodeDocument doc = new NodeDocument(docStore, 1L); + doc.put(Document.ID, "test-id"); + doc.put(Document.MOD_COUNT, 1L); + cache.put(doc); + NodeDocument result = cache.getIfPresent("test-id"); + Assert.assertNotNull(result); + Assert.assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void buildNodeDocumentCacheWithZeroMemoryDistributionStillReturnsUsableCache() throws Exception { + DocumentStore docStore = new MemoryDocumentStore(); + // This verifies builder behavior when all memory cache buckets are disabled. + // It does not assert cache-capacity semantics. 
+ NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .memoryCacheDistribution(0, 0, 0, 0, 0) + .buildNodeDocumentCache(docStore, new StripedNodeDocumentLocks()); + NodeDocument doc = new NodeDocument(docStore, 2L); + doc.put(Document.ID, "zero-distribution-id"); + doc.put(Document.MOD_COUNT, 2L); + cache.put(doc); + + NodeDocument result = cache.getIfPresent("zero-distribution-id"); + Assert.assertNotNull(result); + Assert.assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void buildDocumentCacheStoresAndRetrievesDocuments() throws Exception { + DocumentStore store = new MemoryDocumentStore(); + Object cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder().buildDocumentCache(store); + NodeDocument document = new NodeDocument(store, 1L); + StringValue key = StringValue.fromString("document-cache-id"); + document.put(Document.ID, key.toString()); + document.put(Document.MOD_COUNT, 7L); + + invoke(cache, "put", Object.class, Object.class, key, document); + Object cached = invoke(cache, "getIfPresent", Object.class, key); + + Assert.assertNotNull(cached); + Assert.assertTrue(cached instanceof NodeDocument); + Assert.assertEquals(document.getModCount(), ((NodeDocument) cached).getModCount()); + } + + private static Object invoke(Object target, String methodName, Class parameterType, Object argument) + throws Exception { + Method method = target.getClass().getMethod(methodName, parameterType); + return method.invoke(target, argument); + } + + private static Object invoke(Object target, + String methodName, + Class firstType, + Class secondType, + Object firstArgument, + Object secondArgument) throws Exception { + Method method = target.getClass().getMethod(methodName, firstType, secondType); + return method.invoke(target, firstArgument, secondArgument); + } +} diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java 
b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java index ee7c09f2ef3..18331c5a44b 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java @@ -3499,6 +3499,81 @@ public void missingLastRevInApplyChanges() throws CommitFailedException { assertNull(ns.getNodeCache().getIfPresent(new PathRev(path, before))); } + @Test + public void getNodeConvertsNodeCacheLoaderFailures() throws Exception { + AtomicBoolean failFind = new AtomicBoolean(); + String fooId = Utils.getIdFromPath("/foo"); + DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { + @Override + public T find(Collection collection, String key) { + if (collection == NODES && fooId.equals(key) && failFind.get()) { + throw new IllegalStateException("node lookup failed"); + } + return super.find(collection, key); + } + }; + + DocumentNodeStore writer = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + NodeBuilder builder = writer.getRoot().builder(); + builder.child("foo"); + merge(writer, builder); + writer.dispose(); + + DocumentNodeStore reader = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + try { + failFind.set(true); + reader.getNode(Path.fromString("/foo"), reader.getHeadRevision()); + fail("must fail with DocumentStoreException"); + } catch (DocumentStoreException e) { + assertThat(e.getMessage(), containsString("node lookup failed")); + assertTrue(e.getCause() instanceof IllegalStateException); + } finally { + reader.dispose(); + } + } + + @Test + public void getChildrenConvertsNodeChildrenCacheLoaderFailures() throws Exception { + AtomicBoolean failQuery = new AtomicBoolean(); + DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { + @NotNull + @Override + 
public List query(Collection collection, + String fromKey, + String toKey, + int limit) { + if (collection == NODES && failQuery.get()) { + throw new IllegalStateException("child query failed"); + } + return super.query(collection, fromKey, toKey, limit); + } + }; + + DocumentNodeStore writer = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + NodeBuilder builder = writer.getRoot().builder(); + builder.child("parent").child("child"); + merge(writer, builder); + writer.dispose(); + + DocumentNodeStore reader = builderProvider.newBuilder().setAsyncDelay(0) + .setDocumentStore(store).getNodeStore(); + try { + DocumentNodeState parent = reader.getNode(Path.fromString("/parent"), reader.getHeadRevision()); + assertNotNull(parent); + failQuery.set(true); + reader.getChildren(parent, "", 10); + fail("must fail with DocumentStoreException"); + } catch (DocumentStoreException e) { + assertThat(e.getMessage(), containsString("Error occurred while fetching children for path /parent")); + assertTrue(e.getCause() instanceof IllegalStateException); + } finally { + reader.dispose(); + } + } + // OAK-6351 @Test public void inconsistentNodeChildrenCache() throws Exception { diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java new file mode 100644 index 00000000000..7d39624de1e --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.util.concurrent.atomic.AtomicBoolean; + +import org.junit.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Compatibility tests for {@link LocalDiffCache}. + * These assertions stay at the {@link DiffCache} surface and do not reference + * the underlying cache implementation. 
+ */ +public class LocalDiffCacheCompatibilityTest { + + private static final int CLUSTER_ID = 1; + + @Rule + public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider(); + + private LocalDiffCache buildCache() { + return new LocalDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + } + + @Test + public void getChangesReturnsEmptyStringForMissingPathInsideCachedDiff() { + LocalDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + + DiffCache.Entry entry = cache.newEntry(from, to, true); + entry.append(Path.ROOT, "^\"root\":{}"); + entry.done(); + + assertEquals("", cache.getChanges(from, to, Path.fromString("/missing"), null)); + } + + @Test + public void getChangesDelegatesToLoaderWhenRevisionPairIsNotCached() { + LocalDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + AtomicBoolean loaderCalled = new AtomicBoolean(); + + String result = cache.getChanges(from, to, Path.ROOT, () -> { + loaderCalled.set(true); + return "^\"loaded\":{}"; + }); + + assertTrue(loaderCalled.get()); + assertEquals("^\"loaded\":{}", result); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } +} diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java index a62adbed710..a96980e08d7 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCacheTest.java @@ -18,11 +18,14 @@ import java.util.UUID; +import 
org.apache.jackrabbit.oak.cache.CacheStats; import org.junit.Rule; import org.junit.Test; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; public class MemoryDiffCacheTest { @@ -44,6 +47,60 @@ public void limit() throws Exception { assertNull(cache.getChanges(from, to, Path.fromString("/foo"), null)); } + @Test + public void invalidateAllClearsAllCachedEntries() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, "^\"foo\":{}"); + entry.done(); + + assertNotNull(cache.getChanges(from, to, Path.ROOT, null)); + cache.invalidateAll(); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void getStatsReturnsNonEmptyIterable() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + Iterable statsIterable = cache.getStats(); + assertNotNull(statsIterable); + assertTrue(statsIterable.iterator().hasNext()); + } + + @Test + public void getChangesReturnsNullForUncachedRevisions() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void doneMakesRootPathChangesReadableFromCache() { + DiffCache cache = new MemoryDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0)); + 
RevisionVector from = new RevisionVector(Revision.newRevision(1)); + RevisionVector to = new RevisionVector(Revision.newRevision(1)); + String rootPathChanges = "^\"foo\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + String actualChanges = cache.getChanges(from, to, Path.ROOT, null); + assertNotNull(actualChanges); + assertEquals(rootPathChanges, actualChanges); + } + private static String changes(int minLength) { StringBuilder sb = new StringBuilder(); while (sb.length() < minLength) { diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java new file mode 100644 index 00000000000..0923ca8750a --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCacheTest.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.lang.reflect.Field; + +import org.apache.jackrabbit.oak.cache.CacheStats; +import org.junit.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Tests for {@link TieredDiffCache}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class TieredDiffCacheTest { + + private static final int CLUSTER_ID = 1; + + @Rule + public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider(); + + private TieredDiffCache buildCache() { + return new TieredDiffCache(builderProvider.newBuilder() + .setCacheSegmentCount(1) + .memoryCacheDistribution(0, 0, 0, 99, 0), CLUSTER_ID); + } + + @Test + public void getChangesReturnsNullForUncachedRevision() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + assertNull(cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void newEntryLocalPopulatesLocalCache() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + String rootPathChanges = "^\"foo\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, true); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + assertEquals(rootPathChanges, getTier(cache, "localCache").getChanges(from, to, Path.ROOT, null)); + assertNull(getTier(cache, "memoryCache").getChanges(from, to, Path.ROOT, null)); + assertEquals(rootPathChanges, cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void 
newEntryExternalPopulatesMemoryCache() { + TieredDiffCache cache = buildCache(); + RevisionVector from = new RevisionVector(Revision.newRevision(2)); + RevisionVector to = new RevisionVector(Revision.newRevision(2)); + String rootPathChanges = "^\"bar\":{}"; + + DiffCache.Entry entry = cache.newEntry(from, to, false); + entry.append(Path.ROOT, rootPathChanges); + entry.done(); + + assertNull(getTier(cache, "localCache").getChanges(from, to, Path.ROOT, null)); + assertEquals(rootPathChanges, getTier(cache, "memoryCache").getChanges(from, to, Path.ROOT, null)); + assertEquals(rootPathChanges, cache.getChanges(from, to, Path.ROOT, null)); + } + + @Test + public void getStatsReturnsNonEmptyIterable() { + TieredDiffCache cache = buildCache(); + Iterable stats = cache.getStats(); + assertNotNull(stats); + assertTrue(stats.iterator().hasNext()); + } + + @Test + public void invalidateAllClearsCache() { + TieredDiffCache cache = buildCache(); + RevisionVector localFrom = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector localTo = new RevisionVector(Revision.newRevision(CLUSTER_ID)); + RevisionVector externalFrom = new RevisionVector(Revision.newRevision(2)); + RevisionVector externalTo = new RevisionVector(Revision.newRevision(2)); + + DiffCache.Entry localEntry = cache.newEntry(localFrom, localTo, true); + localEntry.append(Path.ROOT, "^\"local\":{}"); + localEntry.done(); + + DiffCache.Entry externalEntry = cache.newEntry(externalFrom, externalTo, false); + externalEntry.append(Path.ROOT, "^\"external\":{}"); + externalEntry.done(); + + assertNotNull(cache.getChanges(localFrom, localTo, Path.ROOT, null)); + assertNotNull(cache.getChanges(externalFrom, externalTo, Path.ROOT, null)); + cache.invalidateAll(); + assertNull(getTier(cache, "localCache").getChanges(localFrom, localTo, Path.ROOT, null)); + assertNull(getTier(cache, "memoryCache").getChanges(externalFrom, externalTo, Path.ROOT, null)); + assertNull(cache.getChanges(localFrom, localTo, 
Path.ROOT, null)); + assertNull(cache.getChanges(externalFrom, externalTo, Path.ROOT, null)); + } + + private static DiffCache getTier(TieredDiffCache cache, String fieldName) { + try { + Field field = TieredDiffCache.class.getDeclaredField(fieldName); + field.setAccessible(true); + return (DiffCache) field.get(cache); + } catch (ReflectiveOperationException e) { + throw new AssertionError("Unable to access diff cache tier " + fieldName, e); + } + } +} diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java index 6cb22db5c33..8ab1f82df8b 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/cache/NodeDocumentCacheTest.java @@ -16,6 +16,9 @@ */ package org.apache.jackrabbit.oak.plugins.document.cache; +import java.util.concurrent.ExecutionException; + +import org.apache.jackrabbit.oak.cache.CacheStats; import org.apache.jackrabbit.oak.plugins.document.Document; import org.apache.jackrabbit.oak.plugins.document.DocumentStore; import org.apache.jackrabbit.oak.plugins.document.NodeDocument; @@ -28,6 +31,10 @@ import static java.util.Collections.singleton; import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; public class NodeDocumentCacheTest { @@ -68,6 +75,69 @@ public void cacheConsistency() throws Exception { assertEquals(updated.getModCount(), cache.get(ID, () -> updated).getModCount()); } + @Test + public void getWithCallableLoadsDocumentOnMiss() throws Exception { + 
NodeDocument doc = createDocument(1L); + NodeDocument loaded = cache.get(ID, () -> doc); + assertEquals(doc.getModCount(), loaded.getModCount()); + } + + @Test + public void getWithCallableReturnsCachedDocumentOnHit() throws Exception { + NodeDocument doc = createDocument(1L); + cache.put(doc); + // loader should not be called since doc is already cached + NodeDocument loaded = cache.get(ID, () -> { + throw new RuntimeException("loader must not be called on cache hit"); + }); + assertEquals(doc.getModCount(), loaded.getModCount()); + } + + @Test + public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { + Exception failure = new Exception("simulated load failure"); + try { + cache.get(ID, () -> { + // This verifies the existing Oak-visible checked-exception + // contract, not raw runtime propagation. + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertEquals(failure, e.getCause()); + assertEquals("simulated load failure", e.getCause().getMessage()); + } + } + + @Test + public void getIfPresentReturnsNullForUncachedKey() { + assertNull(cache.getIfPresent("not-cached")); + } + + @Test + public void putAndGetIfPresentReturnsDocument() { + NodeDocument doc = createDocument(5L); + cache.put(doc); + NodeDocument result = cache.getIfPresent(ID); + assertNotNull(result); + assertEquals(doc.getModCount(), result.getModCount()); + } + + @Test + public void invalidateRemovesDocumentFromCache() { + NodeDocument doc = createDocument(1L); + cache.put(doc); + cache.invalidate(ID); + assertNull(cache.getIfPresent(ID)); + } + + @Test + public void getCacheStatsReturnsNonEmptyIterable() { + Iterable statsIterable = cache.getCacheStats(); + assertNotNull(statsIterable); + assertTrue(statsIterable.iterator().hasNext()); + } + private NodeDocument createDocument(long modCount) { NodeDocument doc = new NodeDocument(store, modCount); doc.put(Document.ID, ID); diff --git 
a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java new file mode 100644 index 00000000000..63145288fdf --- /dev/null +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.plugins.document.persistentCache; + +import java.io.File; +import java.util.concurrent.ExecutionException; + +import org.apache.jackrabbit.oak.cache.CacheLIRS; +import org.apache.jackrabbit.oak.plugins.document.MemoryDiffCache; +import org.apache.jackrabbit.oak.plugins.document.Path; +import org.apache.jackrabbit.oak.plugins.document.Revision; +import org.apache.jackrabbit.oak.plugins.document.RevisionVector; +import org.apache.jackrabbit.oak.plugins.document.util.StringValue; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Compatibility tests for the persistent cache wrapping layer. + * These assertions stay on Oak-visible behavior and intentionally avoid direct + * use of third-party cache APIs in the test code. 
+ */ +public class PersistentCacheCompatibilityTest { + + @Rule + public final TemporaryFolder tempFolder = new TemporaryFolder(new File("target")); + + @Test + public void wrapReturnsNodeCacheForEnabledCacheType() throws Exception { + CacheHandle handle = openDiffCache("wrap"); + try { + assertTrue(handle.cache instanceof NodeCache); + } finally { + handle.close(); + } + } + + @Test + public void invalidateRemovesOnlyTheRequestedPersistedEntry() throws Exception { + MemoryDiffCache.Key first = key(1); + MemoryDiffCache.Key second = key(2); + + CacheHandle initial = openDiffCache("invalidateOne"); + try { + initial.cache.put(first, new StringValue("first")); + initial.cache.put(second, new StringValue("second")); + } finally { + initial.close(); + } + + CacheHandle reopened = openDiffCache("invalidateOne"); + try { + assertEquals(new StringValue("first"), reopened.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), reopened.cache.getIfPresent(second)); + reopened.cache.invalidate(first); + } finally { + reopened.close(); + } + + CacheHandle afterInvalidate = openDiffCache("invalidateOne"); + try { + assertNull(afterInvalidate.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), afterInvalidate.cache.getIfPresent(second)); + } finally { + afterInvalidate.close(); + } + } + + @Test + public void invalidateAllClearsPersistedEntriesAcrossReopen() throws Exception { + MemoryDiffCache.Key first = key(1); + MemoryDiffCache.Key second = key(2); + + CacheHandle initial = openDiffCache("invalidateAll"); + try { + initial.cache.put(first, new StringValue("first")); + initial.cache.put(second, new StringValue("second")); + } finally { + initial.close(); + } + + CacheHandle reopened = openDiffCache("invalidateAll"); + try { + assertEquals(new StringValue("first"), reopened.cache.getIfPresent(first)); + assertEquals(new StringValue("second"), reopened.cache.getIfPresent(second)); + reopened.cache.invalidateAll(); + } finally { + 
reopened.close(); + } + + CacheHandle afterInvalidate = openDiffCache("invalidateAll"); + try { + assertNull(afterInvalidate.cache.getIfPresent(first)); + assertNull(afterInvalidate.cache.getIfPresent(second)); + } finally { + afterInvalidate.close(); + } + } + + @Test + public void getWrapsCheckedLoaderFailureInExecutionException() throws Exception { + CacheHandle handle = openDiffCache("loaderFailure"); + Exception failure = new Exception("simulated persistent-cache load failure"); + + try { + handle.cache.get(key(7), () -> { + throw failure; + }); + fail("expected ExecutionException"); + } catch (ExecutionException e) { + assertSame(failure, e.getCause()); + assertEquals("simulated persistent-cache load failure", e.getCause().getMessage()); + } finally { + handle.close(); + } + } + + private CacheHandle openDiffCache(String name) throws Exception { + File directory = new File(tempFolder.getRoot(), name); + directory.mkdirs(); + PersistentCache persistentCache = new PersistentCache(directory.getAbsolutePath() + ",-async"); + CacheLIRS base = CacheLIRS.newBuilder() + .maximumSize(16) + .build(); + @SuppressWarnings("unchecked") + NodeCache wrapped = (NodeCache) persistentCache.wrap( + null, null, base, CacheType.DIFF); + return new CacheHandle(persistentCache, wrapped); + } + + private static MemoryDiffCache.Key key(int id) { + RevisionVector from = new RevisionVector(new Revision(0, 0, id)); + RevisionVector to = new RevisionVector(new Revision(1, 0, id)); + return new MemoryDiffCache.Key(Path.fromString("/node-" + id), from, to); + } + + private static final class CacheHandle { + private final PersistentCache persistentCache; + private final NodeCache cache; + + private CacheHandle(PersistentCache persistentCache, + NodeCache cache) { + this.persistentCache = persistentCache; + this.cache = cache; + } + + private void close() { + persistentCache.close(); + } + } +} From c30406eefaca83c1b675a312e4cb20f7e6762d43 Mon Sep 17 00:00:00 2001 From: rishabhdaim Date: 
Mon, 23 Mar 2026 11:05:49 +0530 Subject: [PATCH 2/5] OAK-12145: Clarify cache compatibility test intent --- ...zureBlobStoreBackendCompatibilityTest.java | 5 ++ .../v8/AzureBlobStoreBackendV8Test.java | 7 ++ .../oak/blob/cloud/s3/S3BackendTest.java | 10 +++ .../split/BlobIdSetCompatibilityTest.java | 5 ++ .../oak/cache/CacheLIRSCompatibilityTest.java | 8 ++ .../document/DocumentNodeStoreHelperTest.java | 80 +++++++++++++++++++ ...asticIndexStatisticsCompatibilityTest.java | 13 +++ .../document/CommitValueResolverTest.java | 5 ++ .../DocumentNodeStoreBuilderTest.java | 8 ++ .../document/DocumentNodeStoreTest.java | 4 + .../LocalDiffCacheCompatibilityTest.java | 4 + .../PersistentCacheCompatibilityTest.java | 8 ++ 12 files changed, 157 insertions(+) create mode 100644 oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java index d6588e1d491..98f94736bfe 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendCompatibilityTest.java @@ -37,6 +37,7 @@ public class AzureBlobStoreBackendCompatibilityTest { @Test public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the direct-download expiry value used by presigned URIs. 
AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -46,6 +47,7 @@ public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the direct-upload expiry used during upload initiation. AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); backend.setHttpUploadURIExpirySeconds(1800); @@ -55,6 +57,7 @@ public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Verify the cache-size toggle actually creates and then removes the backing cache. AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -67,6 +70,7 @@ public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exceptio @Test public void createHttpDownloadURIReturnsNullWhenDisabled() { + // With no download expiry configured, direct download access should stay disabled. AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); URI downloadURI = backend.createHttpDownloadURI( @@ -78,6 +82,7 @@ public void createHttpDownloadURIReturnsNullWhenDisabled() { @Test public void initiateHttpUploadReturnsNullWhenDisabled() { + // Upload initiation follows the same disabled-by-default contract until configured. 
AzureBlobStoreBackend backend = new AzureBlobStoreBackend(); assertNull(backend.initiateHttpUpload(1024, 1, DataRecordUploadOptions.DEFAULT)); diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java index 7028065a379..1937a0e44f7 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java @@ -168,6 +168,7 @@ public void initSecret() throws Exception { @Test public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the v8 direct-download expiry value used by presigned URIs. AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -177,6 +178,7 @@ public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the v8 direct-upload expiry used during upload initiation. AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); backend.setHttpUploadURIExpirySeconds(1800); @@ -186,6 +188,7 @@ public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Verify the cache-size toggle actually creates and then removes the backing cache. 
AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -198,6 +201,7 @@ public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exceptio @Test public void createHttpDownloadURIReturnsNullWhenDisabled() throws DataStoreException { + // With no download expiry configured, direct download access should stay disabled. AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); assertNull(backend.createHttpDownloadURI(new org.apache.jackrabbit.core.data.DataIdentifier("test"), @@ -206,6 +210,7 @@ public void createHttpDownloadURIReturnsNullWhenDisabled() throws DataStoreExcep @Test public void initiateHttpUploadReturnsNullWhenDisabled() throws DataStoreException { + // Upload initiation follows the same disabled-by-default contract until configured. AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); assertNull(backend.initiateHttpUpload(1024, 1, @@ -214,6 +219,8 @@ public void initiateHttpUploadReturnsNullWhenDisabled() throws DataStoreExceptio @Test public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { + // Seed the internal cache first and make exists() fail if it is touched, + // so the test proves a cache hit short-circuits the expensive store check. 
CacheHitBackend backend = new CacheHitBackend(); org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("cached"); diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java index 170d34eed16..4dbb34c13dc 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java @@ -39,6 +39,8 @@ public class S3BackendTest { @Test public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage: direct-download expiry must be stored verbatim because + // later cache/window calculations depend on this value. S3Backend backend = new S3Backend(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -48,6 +50,7 @@ public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { + // Setter coverage for the upload-side expiry used by presigned upload URIs. S3Backend backend = new S3Backend(); backend.setHttpUploadURIExpirySeconds(1800); @@ -57,6 +60,8 @@ public void setHttpUploadURIExpirySecondsUpdatesField() throws Exception { @Test public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exception { + // Toggle the cache on and back off again to verify the configuration method + // controls the backing cache lifecycle directly. S3Backend backend = new S3Backend(); backend.setHttpDownloadURIExpirySeconds(3600); @@ -69,6 +74,8 @@ public void setHttpDownloadURICacheSizeCreatesAndDisablesCache() throws Exceptio @Test public void createHttpDownloadURIReturnsNullWhenDisabled() { + // With download expiry left at the default disabled state, direct download + // access must stay off and return null immediately. 
S3Backend backend = new S3Backend(); URI downloadURI = backend.createHttpDownloadURI( @@ -80,6 +87,7 @@ public void createHttpDownloadURIReturnsNullWhenDisabled() { @Test public void initiateHttpUploadReturnsNullWhenDisabled() { + // Upload URIs use the same disabled-by-default contract when no expiry is configured. S3Backend backend = new S3Backend(); assertNull(backend.initiateHttpUpload(1024, 1)); @@ -87,6 +95,8 @@ public void initiateHttpUploadReturnsNullWhenDisabled() { @Test public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { + // Seed the internal cache first and make exists() fail if it is touched, + // so the test proves a cache hit short-circuits the expensive store check. CacheHitBackend backend = new CacheHitBackend(); DataIdentifier identifier = new DataIdentifier("cached"); URI cachedUri = URI.create("https://cached.example/download"); diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java index 399bf8d9064..e70ac2d1623 100644 --- a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java @@ -58,6 +58,8 @@ public void tearDown() { @Test public void containsReturnsTrueForEntryAddedAfterRestart() throws IOException { + // Seed the on-disk store first, then rebuild BlobIdSet to show startup + // rehydrates lookup state from the persisted file. try (FileWriter writer = new FileWriter(storeFile)) { writer.write("blob-from-store\n"); } @@ -69,6 +71,9 @@ public void containsReturnsTrueForEntryAddedAfterRestart() throws IOException { @Test public void containsIgnoresNewStoreEntryUntilBloomFilterIsUpdated() throws IOException { + // The first miss teaches the in-memory structures about the blob id state. 
+ // Writing directly to disk afterwards must stay invisible until add() updates + // the bloom filter and the in-memory cache consistently. assertFalse(blobIdSet.contains("missing")); try (FileWriter writer = new FileWriter(storeFile)) { diff --git a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java index 842ea7076e8..0351a80ee3d 100644 --- a/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java +++ b/oak-core-spi/src/test/java/org/apache/jackrabbit/oak/cache/CacheLIRSCompatibilityTest.java @@ -37,6 +37,8 @@ public class CacheLIRSCompatibilityTest { @Test public void getWithCallableCachesLoadedValue() throws ExecutionException { + // Load through the Oak-visible callable API, then repeat the same lookup + // with a different loader to prove the cached value wins on the second call. CacheLIRS cache = CacheLIRS.newBuilder() .maximumSize(10) .build(); @@ -57,6 +59,8 @@ public void getWithCallableCachesLoadedValue() throws ExecutionException { @Test public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { + // Use a checked exception from the loader and verify the legacy + // ExecutionException shape is preserved for callers. CacheLIRS cache = CacheLIRS.newBuilder() .maximumSize(10) .build(); @@ -76,6 +80,8 @@ public void getWithCallableWrapsCheckedLoaderFailureInExecutionException() { @Test public void invalidateAllClearsPreviouslyCachedEntries() throws ExecutionException { + // Populate two keys first, then clear the cache and verify both the + // size counters and direct lookups observe an empty cache. 
CacheLIRS cache = CacheLIRS.newBuilder() .maximumSize(10) .build(); @@ -94,6 +100,8 @@ public void invalidateAllClearsPreviouslyCachedEntries() throws ExecutionExcepti @Test public void evictionCallbackIsInvokedWhenEntryIsEvictedBySize() { + // Push the cache past capacity and capture the first callback so the + // test checks real size-based eviction instead of explicit invalidation. AtomicInteger evictions = new AtomicInteger(); AtomicReference firstEvictedKey = new AtomicReference<>(); AtomicReference firstEvictedValue = new AtomicReference<>(); diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java new file mode 100644 index 00000000000..f188b7b0a77 --- /dev/null +++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreHelperTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.plugins.document; + +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for {@link DocumentNodeStoreHelper}. + * These assertions intentionally avoid third-party cache types so the same + * tests can run across cache implementation changes. + */ +public class DocumentNodeStoreHelperTest { + + private DocumentNodeStore store; + + @Before + public void setUp() { + store = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() + .setAsyncDelay(0) + .build(); + } + + @After + public void tearDown() { + if (store != null) { + store.dispose(); + } + } + + @Test + public void getNodesCacheReturnsNonNull() { + // This is a smoke test for the helper entry point: callers should always + // get back a usable cache handle for a live DocumentNodeStore. + Assert.assertNotNull(DocumentNodeStoreHelper.getNodesCache(store)); + } + + @Test + public void getNodesCacheExposesNonNullMapView() { + // This only asserts that the helper exposes a map view; contents and + // mutability semantics are intentionally out of scope here. + Assert.assertNotNull(DocumentNodeStoreHelper.getNodesCache(store).asMap()); + } + + @Test + public void getNodesCacheMapViewReflectsCachedNodeReads() throws Exception { + // Create content through the normal node-store API so the subsequent read + // exercises the production cache population path instead of a test-only insert. + NodeBuilder builder = store.getRoot().builder(); + builder.child("a").child("b"); + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + // Clear any startup entries first, then verify a real tree read repopulates + // the helper-visible map view. 
+ DocumentNodeStoreHelper.getNodesCache(store).asMap().clear(); + Assert.assertTrue(DocumentNodeStoreHelper.getNodesCache(store).asMap().isEmpty()); + store.getRoot().getChildNode("a").getChildNode("b"); + + Assert.assertFalse(DocumentNodeStoreHelper.getNodesCache(store).asMap().isEmpty()); + } +} diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java index a1d932e0c24..50a9e745b10 100644 --- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java +++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java @@ -67,6 +67,7 @@ public void releaseMocks() throws Exception { @Test public void numDocsReturnsMockedCountFromElasticsearch() throws Exception { + // Baseline behavior: a cache miss should load the document count from Elasticsearch. CountResponse countResponse = Mockito.mock(CountResponse.class); Mockito.when(countResponse.count()).thenReturn(42L); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) @@ -79,6 +80,8 @@ public void numDocsReturnsMockedCountFromElasticsearch() throws Exception { @Test public void numDocsCachesResultOnSubsequentCalls() throws Exception { + // Call numDocs() twice with the same descriptor and verify only the first + // call reaches Elasticsearch. 
CountResponse countResponse = Mockito.mock(CountResponse.class); Mockito.when(countResponse.count()).thenReturn(99L); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) @@ -96,6 +99,8 @@ public void numDocsCachesResultOnSubsequentCalls() throws Exception { @Test public void numDocsPropagatesIOExceptionAsRuntimeFailure() throws Exception { + // Use a checked IOException from the client and assert callers still see a + // runtime failure that preserves the original cause chain. Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) .thenThrow(new IOException("ES down")); @@ -126,6 +131,8 @@ public void getDocCountForFieldReturnsMockedCount() throws Exception { @Test public void numDocsAndGetDocCountForUseIndependentCacheKeys() throws Exception { + // numDocs() and getDocCountFor(field) should not alias each other in the cache, + // so both lookups must hit Elasticsearch once. CountResponse countResponse = Mockito.mock(CountResponse.class); Mockito.when(countResponse.count()).thenReturn(5L); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) @@ -141,6 +148,8 @@ public void numDocsAndGetDocCountForUseIndependentCacheKeys() throws Exception { @Test public void numDocsRefreshesValueAfterRefreshWindow() throws Exception { + // Keep the same statistics object alive past the refresh window and check + // that repeated reads eventually observe the refreshed value. System.setProperty("oak.elastic.statsExpireSeconds", "30"); System.setProperty("oak.elastic.statsRefreshSeconds", "1"); @@ -175,6 +184,8 @@ public void numDocsRefreshesValueAfterRefreshWindow() throws Exception { @Test public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { + // Block the refresh call on a latch so the test can prove a stale value is + // served immediately while the background refresh is still running. 
System.setProperty("oak.elastic.statsExpireSeconds", "30"); System.setProperty("oak.elastic.statsRefreshSeconds", "1"); @@ -221,6 +232,8 @@ public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { @Test public void numDocsKeepsCachedValueWhenRefreshFails() throws Exception { + // Make the reload attempt fail after a successful first load and verify the + // old cached value remains available to callers. System.setProperty("oak.elastic.statsExpireSeconds", "30"); System.setProperty("oak.elastic.statsRefreshSeconds", "1"); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java index 951638ccd55..de4f5255453 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/CommitValueResolverTest.java @@ -207,6 +207,8 @@ public void branchCommit() throws Exception { @Test public void cacheEmptyCommitValue() throws Exception { + // Remove a commit value after the change is written, then compare a resolver + // without negative caching to one that remembers old empty results. addNode("/foo"); // add changes and remove commit value NodeBuilder builder = ns.getRoot().builder(); @@ -236,6 +238,9 @@ public void cacheEmptyCommitValue() throws Exception { @Test public void committedValueFromPreviousDocumentIsCached() throws Exception { + // Move a committed revision out of the main document into previous documents, + // resolve it once, then verify a caching resolver can answer again without + // additional document-store lookups. 
CommitValueResolver cachingResolver = newCachingCommitValueResolver(100); Revision revision = addNode("/foo"); assertTrue(getDocument("/").getLocalRevisions().containsKey(revision)); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java index 103efc6feb9..ae10d8fd09c 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilderTest.java @@ -35,6 +35,8 @@ public class DocumentNodeStoreBuilderTest { @Test public void buildNodeDocumentCacheReturnsNonNull() { + // Verify the builder can construct the node-document cache with the default + // in-memory configuration and a plain in-memory document store. DocumentStore store = new MemoryDocumentStore(); NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); @@ -43,6 +45,8 @@ public void buildNodeDocumentCacheReturnsNonNull() { @Test public void buildNodeDocumentCacheStatsAreNonEmpty() { + // The builder wires cache stats as part of construction, so the returned + // cache should already expose at least one stats entry. DocumentStore store = new MemoryDocumentStore(); NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() .buildNodeDocumentCache(store, new StripedNodeDocumentLocks()); @@ -53,6 +57,8 @@ public void buildNodeDocumentCacheStatsAreNonEmpty() { @Test public void buildNodeDocumentCacheIsUsable() throws Exception { + // Round-trip a document through the built cache so this test checks + // observable put/get behavior instead of just construction. 
DocumentStore docStore = new MemoryDocumentStore(); NodeDocumentCache cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder() .buildNodeDocumentCache(docStore, new StripedNodeDocumentLocks()); @@ -86,6 +92,8 @@ public void buildNodeDocumentCacheWithZeroMemoryDistributionStillReturnsUsableCa @Test public void buildDocumentCacheStoresAndRetrievesDocuments() throws Exception { + // buildDocumentCache() currently returns an implementation-specific cache type, + // so this test uses reflection and checks only the observable put/get contract. DocumentStore store = new MemoryDocumentStore(); Object cache = DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder().buildDocumentCache(store); NodeDocument document = new NodeDocument(store, 1L); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java index 18331c5a44b..ae89307b6a7 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreTest.java @@ -3501,6 +3501,8 @@ public void missingLastRevInApplyChanges() throws CommitFailedException { @Test public void getNodeConvertsNodeCacheLoaderFailures() throws Exception { + // Build a store that starts healthy, then make only the node read path fail + // to verify getNode() converts loader failures into DocumentStoreException. 
AtomicBoolean failFind = new AtomicBoolean(); String fooId = Utils.getIdFromPath("/foo"); DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { @@ -3536,6 +3538,8 @@ public T find(Collection collection, String key) { @Test public void getChildrenConvertsNodeChildrenCacheLoaderFailures() throws Exception { + // Populate a parent/child structure first, then fail only the child query path + // so the test exercises getChildren() exception conversion through the cache loader. AtomicBoolean failQuery = new AtomicBoolean(); DocumentStore store = new DocumentStoreWrapper(new MemoryDocumentStore()) { @NotNull diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java index 7d39624de1e..1fde5716c5b 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCacheCompatibilityTest.java @@ -45,6 +45,8 @@ private LocalDiffCache buildCache() { @Test public void getChangesReturnsEmptyStringForMissingPathInsideCachedDiff() { + // Cache only the root diff and then ask for an uncached child path from the + // same revision pair; LocalDiffCache should report that as an empty diff. LocalDiffCache cache = buildCache(); RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); @@ -58,6 +60,8 @@ public void getChangesReturnsEmptyStringForMissingPathInsideCachedDiff() { @Test public void getChangesDelegatesToLoaderWhenRevisionPairIsNotCached() { + // Start from a completely uncached revision pair and verify getChanges() + // falls back to the supplied loader instead of inventing a cached value. 
LocalDiffCache cache = buildCache(); RevisionVector from = new RevisionVector(Revision.newRevision(CLUSTER_ID)); RevisionVector to = new RevisionVector(Revision.newRevision(CLUSTER_ID)); diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java index 63145288fdf..11b47b3406f 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/persistentCache/PersistentCacheCompatibilityTest.java @@ -47,6 +47,8 @@ public class PersistentCacheCompatibilityTest { @Test public void wrapReturnsNodeCacheForEnabledCacheType() throws Exception { + // Wrapping a DIFF cache through PersistentCache should produce the persistent + // NodeCache adapter that fronts the in-memory base cache. CacheHandle handle = openDiffCache("wrap"); try { assertTrue(handle.cache instanceof NodeCache); @@ -57,6 +59,8 @@ public void wrapReturnsNodeCacheForEnabledCacheType() throws Exception { @Test public void invalidateRemovesOnlyTheRequestedPersistedEntry() throws Exception { + // Persist two keys, invalidate only one after reopening, then reopen again + // to prove the removal was durable and did not affect the sibling entry. MemoryDiffCache.Key first = key(1); MemoryDiffCache.Key second = key(2); @@ -88,6 +92,8 @@ public void invalidateRemovesOnlyTheRequestedPersistedEntry() throws Exception { @Test public void invalidateAllClearsPersistedEntriesAcrossReopen() throws Exception { + // Persist entries, clear the wrapped cache, and reopen the persistent layer + // to verify invalidateAll() removes the durable state as well. 
MemoryDiffCache.Key first = key(1); MemoryDiffCache.Key second = key(2); @@ -119,6 +125,8 @@ public void invalidateAllClearsPersistedEntriesAcrossReopen() throws Exception { @Test public void getWrapsCheckedLoaderFailureInExecutionException() throws Exception { + // Use a checked loader failure here because NodeCache exposes the same + // checked get(key, loader) contract as the in-memory cache underneath it. CacheHandle handle = openDiffCache("loaderFailure"); Exception failure = new Exception("simulated persistent-cache load failure"); From 831be768442523bcdd00690d6a90acb181c41d6c Mon Sep 17 00:00:00 2001 From: rishabhdaim Date: Mon, 23 Mar 2026 11:09:17 +0530 Subject: [PATCH 3/5] OAK-12145: Rename S3 compatibility test --- .../s3/{S3BackendTest.java => S3BackendCompatibilityTest.java} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/{S3BackendTest.java => S3BackendCompatibilityTest.java} (99%) diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java similarity index 99% rename from oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java rename to oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java index 4dbb34c13dc..1ccbdf93618 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendTest.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java @@ -35,7 +35,7 @@ * {@link S3Backend}. The assertions are intentionally behavior-based and do * not reference third-party cache types. 
*/ -public class S3BackendTest { +public class S3BackendCompatibilityTest { @Test public void setHttpDownloadURIExpirySecondsUpdatesField() throws Exception { From 7c04d7c4b8417500002eab7784724a83236db4dd Mon Sep 17 00:00:00 2001 From: rishabhdaim Date: Mon, 23 Mar 2026 11:39:54 +0530 Subject: [PATCH 4/5] OAK-12145: Make cache compatibility tests behavior-based --- .../v8/AzureBlobStoreBackendV8Test.java | 16 +- .../cloud/s3/S3BackendCompatibilityTest.java | 23 ++- .../split/BlobIdSetCompatibilityTest.java | 19 +-- ...asticIndexStatisticsCompatibilityTest.java | 150 +++++++++++------- .../PersistentCacheCompatibilityTest.java | 14 +- 5 files changed, 128 insertions(+), 94 deletions(-) diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java index 1937a0e44f7..10764b03e17 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java @@ -218,16 +218,17 @@ public void initiateHttpUploadReturnsNullWhenDisabled() throws DataStoreExceptio } @Test - public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { - // Seed the internal cache first and make exists() fail if it is touched, - // so the test proves a cache hit short-circuits the expensive store check. - CacheHitBackend backend = new CacheHitBackend(); + public void createHttpDownloadURIReturnsPreExistingCachedURI() throws Exception { + // Seed a cache entry directly, then verify the externally observable + // behavior that the same URI is returned for the same download request. 
+ AzureBlobStoreBackendV8 backend = new AzureBlobStoreBackendV8(); org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("cached"); URI cachedUri = URI.create("https://cached.example/download"); backend.setHttpDownloadURIExpirySeconds(300); setField(backend, "downloadDomainOverride", "cached.example"); + setField(backend, "presignedDownloadURIVerifyExists", false); backend.setHttpDownloadURICacheSize(10); putIntoCache(getField(backend, "httpDownloadURICache"), identifier + "cached.example", cachedUri); @@ -409,13 +410,6 @@ private static void putIntoCache(Object cache, Object key, Object value) throws put.invoke(cache, key, value); } - private static final class CacheHitBackend extends AzureBlobStoreBackendV8 { - @Override - public boolean exists(org.apache.jackrabbit.core.data.DataIdentifier identifier) throws DataStoreException { - throw new AssertionError("cached download URI should be returned before checking blob existence"); - } - } - private static void assertReferenceSecret(AzureBlobStoreBackendV8 azureBlobStoreBackend) throws DataStoreException { // assert secret already created on init diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java index 1ccbdf93618..b8bbfcb0f4f 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3BackendCompatibilityTest.java @@ -22,7 +22,6 @@ import java.net.URI; import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; import org.junit.Test; @@ -94,15 +93,16 @@ public void initiateHttpUploadReturnsNullWhenDisabled() { } @Test - 
public void createHttpDownloadURIReturnsCachedURIWithoutRecheckingStore() throws Exception { - // Seed the internal cache first and make exists() fail if it is touched, - // so the test proves a cache hit short-circuits the expensive store check. - CacheHitBackend backend = new CacheHitBackend(); + public void createHttpDownloadURIReturnsPreExistingCachedURI() throws Exception { + // Seed a cache entry directly, then verify the externally observable + // behavior that the same URI is returned for the same download request. + S3Backend backend = new S3Backend(); DataIdentifier identifier = new DataIdentifier("cached"); URI cachedUri = URI.create("https://cached.example/download"); backend.setHttpDownloadURIExpirySeconds(300); backend.setHttpDownloadURICacheSize(10); + setField(backend, "presignedDownloadURIVerifyExists", false); putIntoCache(getField(backend, "httpDownloadURICache"), identifier, cachedUri); assertEquals(cachedUri, backend.createHttpDownloadURI(identifier, DataRecordDownloadOptions.DEFAULT)); @@ -120,16 +120,15 @@ private static Object getField(S3Backend backend, String fieldName) throws Excep return field.get(backend); } + private static void setField(S3Backend backend, String fieldName, Object value) throws Exception { + Field field = S3Backend.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(backend, value); + } + private static void putIntoCache(Object cache, Object key, Object value) throws Exception { Method put = cache.getClass().getMethod("put", Object.class, Object.class); put.setAccessible(true); put.invoke(cache, key, value); } - - private static final class CacheHitBackend extends S3Backend { - @Override - public boolean exists(DataIdentifier identifier) throws DataStoreException { - throw new AssertionError("cached download URI should be returned before checking blob existence"); - } - } } diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java 
b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java index e70ac2d1623..d3ba6f9b792 100644 --- a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/split/BlobIdSetCompatibilityTest.java @@ -70,19 +70,14 @@ public void containsReturnsTrueForEntryAddedAfterRestart() throws IOException { } @Test - public void containsIgnoresNewStoreEntryUntilBloomFilterIsUpdated() throws IOException { - // The first miss teaches the in-memory structures about the blob id state. - // Writing directly to disk afterwards must stay invisible until add() updates - // the bloom filter and the in-memory cache consistently. - assertFalse(blobIdSet.contains("missing")); + public void addMakesEntryVisibleBeforeAndAfterRestart() throws IOException { + // Add through the public API and verify both the current instance and a + // restarted one observe the same persisted membership result. 
+ blobIdSet.add("added-through-api"); - try (FileWriter writer = new FileWriter(storeFile)) { - writer.write("missing\n"); - } + assertTrue(blobIdSet.contains("added-through-api")); - assertFalse(blobIdSet.contains("missing")); - - blobIdSet.add("missing"); - assertTrue(blobIdSet.contains("missing")); + BlobIdSet restarted = new BlobIdSet(tempDir.getAbsolutePath(), TEST_FILENAME); + assertTrue(restarted.contains("added-through-api")); } } diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java index 50a9e745b10..a421c9aaa75 100644 --- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java +++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexStatisticsCompatibilityTest.java @@ -29,9 +29,14 @@ import org.mockito.MockitoAnnotations; import java.io.IOException; -import java.util.concurrent.TimeUnit; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.function.IntSupplier; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; /** * Compatibility tests for {@link ElasticIndexStatistics}. 
@@ -60,8 +65,6 @@ public void setUp() { @After public void releaseMocks() throws Exception { - System.clearProperty("oak.elastic.statsExpireSeconds"); - System.clearProperty("oak.elastic.statsRefreshSeconds"); closeable.close(); } @@ -148,47 +151,51 @@ public void numDocsAndGetDocCountForUseIndependentCacheKeys() throws Exception { @Test public void numDocsRefreshesValueAfterRefreshWindow() throws Exception { - // Keep the same statistics object alive past the refresh window and check - // that repeated reads eventually observe the refreshed value. - System.setProperty("oak.elastic.statsExpireSeconds", "30"); - System.setProperty("oak.elastic.statsRefreshSeconds", "1"); - - CountResponse countResponse = Mockito.mock(CountResponse.class); - Mockito.when(countResponse.count()).thenReturn(100L); + // Advance a controllable clock past the refresh boundary, then release + // the blocked refresh and verify callers eventually observe the new value. + MutableClock clock = new MutableClock(); + CountResponse initialResponse = Mockito.mock(CountResponse.class); + CountResponse refreshedResponse = Mockito.mock(CountResponse.class); + Mockito.when(initialResponse.count()).thenReturn(100L); + Mockito.when(refreshedResponse.count()).thenReturn(1000L); + CountDownLatch refreshStarted = new CountDownLatch(1); + CountDownLatch releaseRefresh = new CountDownLatch(1); + CountDownLatch refreshCompleted = new CountDownLatch(1); + AtomicInteger invocations = new AtomicInteger(); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) - .thenReturn(countResponse); + .thenAnswer(invocation -> { + if (invocations.getAndIncrement() == 0) { + return initialResponse; + } + refreshStarted.countDown(); + if (!releaseRefresh.await(5, TimeUnit.SECONDS)) { + throw new AssertionError("timed out waiting to release refresh"); + } + refreshCompleted.countDown(); + return refreshedResponse; + }); ElasticIndexStatistics indexStatistics = - new 
ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + newIndexStatistics(clock); Assert.assertEquals(100, indexStatistics.numDocs()); Mockito.verify(elasticClientMock, Mockito.times(1)).count(ArgumentMatchers.any(CountRequest.class)); - Mockito.when(countResponse.count()).thenReturn(1000L); - - TimeUnit.MILLISECONDS.sleep(1200); - - long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); - int refreshedValue = indexStatistics.numDocs(); - while (System.nanoTime() < deadline) { - refreshedValue = indexStatistics.numDocs(); - if (refreshedValue == 1000) { - break; - } - TimeUnit.MILLISECONDS.sleep(50); - } + clock.advanceSeconds(2); + Assert.assertEquals(100, indexStatistics.numDocs()); - Assert.assertEquals(1000, refreshedValue); + Assert.assertTrue("expected refresh to start", refreshStarted.await(5, TimeUnit.SECONDS)); + releaseRefresh.countDown(); + Assert.assertTrue("expected refresh completion", refreshCompleted.await(5, TimeUnit.SECONDS)); + assertEventuallyEquals(1000, indexStatistics::numDocs); Mockito.verify(elasticClientMock, Mockito.atLeast(2)).count(ArgumentMatchers.any(CountRequest.class)); } @Test public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { - // Block the refresh call on a latch so the test can prove a stale value is - // served immediately while the background refresh is still running. - System.setProperty("oak.elastic.statsExpireSeconds", "30"); - System.setProperty("oak.elastic.statsRefreshSeconds", "1"); - + // Advance a controllable clock into the refresh window, then block the + // reload so the read path can prove it returns the stale cached value. 
+ MutableClock clock = new MutableClock(); CountResponse initialResponse = Mockito.mock(CountResponse.class); CountResponse refreshedResponse = Mockito.mock(CountResponse.class); Mockito.when(initialResponse.count()).thenReturn(100L); @@ -196,6 +203,7 @@ public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { CountDownLatch refreshStarted = new CountDownLatch(1); CountDownLatch releaseRefresh = new CountDownLatch(1); + CountDownLatch refreshCompleted = new CountDownLatch(1); AtomicInteger invocations = new AtomicInteger(); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) .thenAnswer(invocation -> { @@ -206,64 +214,70 @@ public void numDocsReturnsStaleValueWhileRefreshIsInFlight() throws Exception { if (!releaseRefresh.await(5, TimeUnit.SECONDS)) { throw new AssertionError("timed out waiting to release refresh"); } + refreshCompleted.countDown(); return refreshedResponse; }); ElasticIndexStatistics indexStatistics = - new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + newIndexStatistics(clock); Assert.assertEquals(100, indexStatistics.numDocs()); - TimeUnit.MILLISECONDS.sleep(1200); + clock.advanceSeconds(2); Assert.assertEquals(100, indexStatistics.numDocs()); Assert.assertTrue("expected refresh to start", refreshStarted.await(5, TimeUnit.SECONDS)); releaseRefresh.countDown(); - - long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); - int refreshedValue = indexStatistics.numDocs(); - while (System.nanoTime() < deadline && refreshedValue != 1000) { - TimeUnit.MILLISECONDS.sleep(50); - refreshedValue = indexStatistics.numDocs(); - } - - Assert.assertEquals(1000, refreshedValue); + Assert.assertTrue("expected refresh completion", refreshCompleted.await(5, TimeUnit.SECONDS)); + assertEventuallyEquals(1000, indexStatistics::numDocs); } @Test public void numDocsKeepsCachedValueWhenRefreshFails() throws Exception { - // Make the reload attempt fail after a successful first load and 
verify the - // old cached value remains available to callers. - System.setProperty("oak.elastic.statsExpireSeconds", "30"); - System.setProperty("oak.elastic.statsRefreshSeconds", "1"); - + // Advance a controllable clock into the refresh window, then make the + // asynchronous refresh fail and verify the cached value is preserved. + MutableClock clock = new MutableClock(); CountResponse initialResponse = Mockito.mock(CountResponse.class); Mockito.when(initialResponse.count()).thenReturn(100L); + CountDownLatch refreshAttempted = new CountDownLatch(1); AtomicInteger invocations = new AtomicInteger(); Mockito.when(elasticClientMock.count(ArgumentMatchers.any(CountRequest.class))) .thenAnswer(invocation -> { if (invocations.getAndIncrement() == 0) { return initialResponse; } + refreshAttempted.countDown(); throw new IOException("refresh failed"); }); ElasticIndexStatistics indexStatistics = - new ElasticIndexStatistics(elasticConnectionMock, indexDefinitionMock); + newIndexStatistics(clock); Assert.assertEquals(100, indexStatistics.numDocs()); - TimeUnit.MILLISECONDS.sleep(1200); + clock.advanceSeconds(2); + Assert.assertEquals(100, indexStatistics.numDocs()); + Assert.assertTrue("expected refresh attempt", refreshAttempted.await(5, TimeUnit.SECONDS)); Assert.assertEquals(100, indexStatistics.numDocs()); + } + + private ElasticIndexStatistics newIndexStatistics(Clock clock) { + return new ElasticIndexStatistics( + elasticConnectionMock, + indexDefinitionMock, + ElasticIndexStatistics.setupCountCache(100, 30, 1, clock), + null); + } + private static void assertEventuallyEquals(int expected, IntSupplier supplier) throws InterruptedException { long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); - while (System.nanoTime() < deadline && invocations.get() < 2) { - TimeUnit.MILLISECONDS.sleep(50); + int actual = supplier.getAsInt(); + while (System.nanoTime() < deadline && actual != expected) { + TimeUnit.MILLISECONDS.sleep(25); + actual = supplier.getAsInt(); 
} - - Assert.assertTrue("expected refresh attempt", invocations.get() >= 2); - Assert.assertEquals(100, indexStatistics.numDocs()); + Assert.assertEquals(expected, actual); } private static Throwable findCause(Throwable throwable, Class type) { @@ -276,4 +290,32 @@ private static Throwable findCause(Throwable throwable, Class Date: Mon, 23 Mar 2026 13:26:27 +0530 Subject: [PATCH 5/5] OAK-12145: Fix SegmentCache compatibility assertion --- .../org/apache/jackrabbit/oak/segment/SegmentCacheTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java index dcd13264214..eb63dcd4633 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCacheTest.java @@ -71,8 +71,8 @@ public void putTest() throws ExecutionException { } @Test - public void getSegmentWrapsLoaderFailureInExecutionException() { - RuntimeException failure = new RuntimeException("load failed"); + public void getSegmentWrapsCheckedLoaderFailureInExecutionException() { + Exception failure = new Exception("load failed"); try { cache.getSegment(id1, () -> {