Introduce ChronicleMapStore
Wrap the bare ChronicleMap store object in a ChronicleMapStore.
This is a preparatory step towards centralizing the emission of
store-related metrics, which will be addressed in a follow-up change.
Change-Id: Ia4987b709a124fba3e786d3cc559c12ad23dfe41
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
index af13d57..6d1f7b8 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
@@ -44,7 +44,6 @@
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import net.openhft.chronicle.map.ChronicleMap;
import org.eclipse.jgit.lib.Config;
@Singleton
@@ -121,8 +120,7 @@
ChronicleMapCacheImpl<K, V> cache;
try {
- ChronicleMap<KeyWrapper<K>, TimedValue<V>> store =
- ChronicleMapCacheImpl.createOrRecoverStore(in, config);
+ ChronicleMapStore<K, V> store = ChronicleMapCacheImpl.createOrRecoverStore(in, config);
ChronicleMapCacheLoader<K, V> memLoader =
new ChronicleMapCacheLoader<>(
@@ -186,8 +184,7 @@
ChronicleMapCacheDefProxy<K, V> def = new ChronicleMapCacheDefProxy<>(in);
try {
- ChronicleMap<KeyWrapper<K>, TimedValue<V>> store =
- ChronicleMapCacheImpl.createOrRecoverStore(in, config);
+ ChronicleMapStore<K, V> store = ChronicleMapCacheImpl.createOrRecoverStore(in, config);
ChronicleMapCacheLoader<K, V> memLoader =
new ChronicleMapCacheLoader<>(
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
index 53a57ec..bdb5348 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
@@ -32,7 +32,6 @@
import java.util.concurrent.atomic.LongAdder;
import net.openhft.chronicle.map.ChronicleMap;
import net.openhft.chronicle.map.ChronicleMapBuilder;
-import net.openhft.chronicle.map.VanillaChronicleMap;
public class ChronicleMapCacheImpl<K, V> extends AbstractLoadingCache<K, V>
implements PersistentCache {
@@ -40,7 +39,7 @@
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final ChronicleMapCacheConfig config;
- private final ChronicleMap<KeyWrapper<K>, TimedValue<V>> store;
+ private final ChronicleMapStore<K, V> store;
private final LongAdder hitCount = new LongAdder();
private final LongAdder missCount = new LongAdder();
private final LongAdder loadSuccessCount = new LongAdder();
@@ -90,7 +89,7 @@
}
@SuppressWarnings({"unchecked", "cast", "rawtypes"})
- static <K, V> ChronicleMap<KeyWrapper<K>, TimedValue<V>> createOrRecoverStore(
+ static <K, V> ChronicleMapStore<K, V> createOrRecoverStore(
PersistentCacheDef<K, V> def, ChronicleMapCacheConfig config) throws IOException {
CacheSerializers.registerCacheDef(def);
@@ -137,7 +136,7 @@
store.remainingAutoResizes(),
store.percentageFreeSpace());
- return store;
+ return new ChronicleMapStore<>(store, config);
}
protected PersistentCacheDef<K, V> getCacheDefinition() {
@@ -194,7 +193,7 @@
metricMaker.newConstantMetric(
MAX_AUTORESIZES_METRIC,
- cache.maxAutoResizes(),
+ cache.store.maxAutoResizes(),
new Description(
String.format(
"The maximum number of times the %s cache can automatically expand its capacity",
@@ -433,34 +432,8 @@
store.close();
}
- @SuppressWarnings("rawtypes")
public double percentageUsedAutoResizes() {
- /*
- * Chronicle-map already exposes the number of _remaining_ auto-resizes, but
- * this is an absolute value, and it is not enough to understand the
- * percentage of auto-resizes that have been utilized.
- *
- * For that, we fist need to understand the _maximum_ number of possible
- * resizes (inclusive of the resizes allowed by the max-bloat factor).
- * This information is exposed at low level, by the VanillaChronicleMap,
- * which has access to the number of allocated segments.
- *
- * So we proceed as follows:
- *
- * Calculate the maximum number of segments by multiplying the allocated
- * segments (`actualSegments`) by the configured max-bloat-factor.
- *
- * The ratio between this value and the _current_ segment utilization
- * (`getExtraTiersInUse`) shows the overall percentage.
- */
- VanillaChronicleMap vanillaStore = (VanillaChronicleMap) store;
- long usedResizes = vanillaStore.globalMutableState().getExtraTiersInUse();
- return usedResizes * 100 / maxAutoResizes();
- }
-
- @SuppressWarnings("rawtypes")
- public double maxAutoResizes() {
- return config.getMaxBloatFactor() * ((VanillaChronicleMap) store).actualSegments;
+ return store.percentageUsedAutoResizes();
}
public String name() {
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheLoader.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheLoader.java
index 8aa6349..33d970f 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheLoader.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheLoader.java
@@ -33,14 +33,13 @@
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.LongAdder;
-import net.openhft.chronicle.map.ChronicleMap;
class ChronicleMapCacheLoader<K, V> extends CacheLoader<K, TimedValue<V>> {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Executor storePersistenceExecutor;
private final Optional<CacheLoader<K, V>> loader;
- private final ChronicleMap<KeyWrapper<K>, TimedValue<V>> store;
+ private final ChronicleMapStore<K, V> store;
private final LongAdder loadSuccessCount = new LongAdder();
private final LongAdder loadExceptionCount = new LongAdder();
private final LongAdder totalLoadTime = new LongAdder();
@@ -58,7 +57,7 @@
*/
ChronicleMapCacheLoader(
Executor storePersistenceExecutor,
- ChronicleMap<KeyWrapper<K>, TimedValue<V>> store,
+ ChronicleMapStore<K, V> store,
CacheLoader<K, V> loader,
Duration expireAfterWrite) {
this.storePersistenceExecutor = storePersistenceExecutor;
@@ -75,9 +74,7 @@
* @param expireAfterWrite maximum lifetime of the data loaded into ChronicleMap
*/
ChronicleMapCacheLoader(
- Executor storePersistenceExecutor,
- ChronicleMap<KeyWrapper<K>, TimedValue<V>> store,
- Duration expireAfterWrite) {
+ Executor storePersistenceExecutor, ChronicleMapStore<K, V> store, Duration expireAfterWrite) {
this.storePersistenceExecutor = storePersistenceExecutor;
this.store = store;
this.loader = Optional.empty();
@@ -119,7 +116,7 @@
}
}
- public ChronicleMap<KeyWrapper<K>, TimedValue<V>> getStore() {
+ public ChronicleMapStore<K, V> getStore() {
return store;
}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapStore.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapStore.java
new file mode 100644
index 0000000..bc90451
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapStore.java
@@ -0,0 +1,301 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.common.flogger.FluentLogger;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import net.openhft.chronicle.bytes.BytesStore;
+import net.openhft.chronicle.core.io.Closeable;
+import net.openhft.chronicle.core.util.SerializableFunction;
+import net.openhft.chronicle.hash.Data;
+import net.openhft.chronicle.map.ChronicleMap;
+import net.openhft.chronicle.map.ExternalMapQueryContext;
+import net.openhft.chronicle.map.MapEntry;
+import net.openhft.chronicle.map.MapSegmentContext;
+import net.openhft.chronicle.map.VanillaChronicleMap;
+
+class ChronicleMapStore<K, V> implements ChronicleMap<KeyWrapper<K>, TimedValue<V>> {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ private final ChronicleMap<KeyWrapper<K>, TimedValue<V>> store;
+ private final ChronicleMapCacheConfig config;
+
+ ChronicleMapStore(
+ ChronicleMap<KeyWrapper<K>, TimedValue<V>> store, ChronicleMapCacheConfig config) {
+ this.store = store;
+ this.config = config;
+ }
+
+ @SuppressWarnings("rawtypes")
+ public double percentageUsedAutoResizes() {
+ /*
+ * Chronicle-map already exposes the number of _remaining_ auto-resizes, but
+ * this is an absolute value, and it is not enough to understand the
+ * percentage of auto-resizes that have been utilized.
+ *
+ * For that, we first need to understand the _maximum_ number of possible
+ * resizes (inclusive of the resizes allowed by the max-bloat factor).
+ * This information is exposed at low level, by the VanillaChronicleMap,
+ * which has access to the number of allocated segments.
+ *
+ * So we proceed as follows:
+ *
+ * Calculate the maximum number of segments by multiplying the allocated
+ * segments (`actualSegments`) by the configured max-bloat-factor.
+ *
+ * The ratio between this value and the _current_ segment utilization
+ * (`getExtraTiersInUse`) shows the overall percentage.
+ */
+ VanillaChronicleMap vanillaStore = (VanillaChronicleMap) store;
+ long usedResizes = vanillaStore.globalMutableState().getExtraTiersInUse();
+ return usedResizes * 100 / maxAutoResizes();
+ }
+
+ @SuppressWarnings("rawtypes")
+ public double maxAutoResizes() {
+ return config.getMaxBloatFactor() * ((VanillaChronicleMap) store).actualSegments;
+ }
+
+ @Override
+ public int size() {
+ return store.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return store.isEmpty();
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return store.containsKey(key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ return store.containsValue(value);
+ }
+
+ @Override
+ public TimedValue<V> get(Object key) {
+ return store.get(key);
+ }
+
+ @Override
+ public TimedValue<V> put(KeyWrapper<K> key, TimedValue<V> value) {
+ return store.put(key, value);
+ }
+
+ @Override
+ public TimedValue<V> remove(Object key) {
+ return store.remove(key);
+ }
+
+ @Override
+ public void putAll(Map<? extends KeyWrapper<K>, ? extends TimedValue<V>> m) {
+ store.putAll(m);
+ }
+
+ @Override
+ public void clear() {
+ store.clear();
+ }
+
+ @Override
+ public Set<KeyWrapper<K>> keySet() {
+ return store.keySet();
+ }
+
+ @Override
+ public Collection<TimedValue<V>> values() {
+ return store.values();
+ }
+
+ @Override
+ public Set<Entry<KeyWrapper<K>, TimedValue<V>>> entrySet() {
+ return store.entrySet();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return store.equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ return store.hashCode();
+ }
+
+ @Override
+ public TimedValue<V> getUsing(KeyWrapper<K> key, TimedValue<V> usingValue) {
+ return store.getUsing(key, usingValue);
+ }
+
+ @Override
+ public TimedValue<V> acquireUsing(KeyWrapper<K> key, TimedValue<V> usingValue) {
+ return store.acquireUsing(key, usingValue);
+ }
+
+ @Override
+ public Closeable acquireContext(KeyWrapper<K> key, TimedValue<V> usingValue) {
+ return store.acquireContext(key, usingValue);
+ }
+
+ @Override
+ public <R> R getMapped(
+ KeyWrapper<K> key, SerializableFunction<? super TimedValue<V>, R> function) {
+ return store.getMapped(key, function);
+ }
+
+ @Override
+ public void getAll(File toFile) throws IOException {
+ store.getAll(toFile);
+ }
+
+ @Override
+ public void putAll(File fromFile) throws IOException {
+ store.putAll(fromFile);
+ }
+
+ @Override
+ public Class<TimedValue<V>> valueClass() {
+ return store.valueClass();
+ }
+
+ @Override
+ public Type valueType() {
+ return store.valueType();
+ }
+
+ @Override
+ public short percentageFreeSpace() {
+ return store.percentageFreeSpace();
+ }
+
+ @Override
+ public int remainingAutoResizes() {
+ return store.remainingAutoResizes();
+ }
+
+ @Override
+ public TimedValue<V> putIfAbsent(KeyWrapper<K> key, TimedValue<V> value) {
+ return store.putIfAbsent(key, value);
+ }
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ return store.remove(key, value);
+ }
+
+ @Override
+ public boolean replace(KeyWrapper<K> key, TimedValue<V> oldValue, TimedValue<V> newValue) {
+ return store.replace(key, oldValue, newValue);
+ }
+
+ @Override
+ public TimedValue<V> replace(KeyWrapper<K> key, TimedValue<V> value) {
+ return store.replace(key, value);
+ }
+
+ @Override
+ public File file() {
+ return store.file();
+ }
+
+ @Override
+ public String name() {
+ return store.name();
+ }
+
+ @Override
+ public String toIdentityString() {
+ return store.toIdentityString();
+ }
+
+ @Override
+ public long longSize() {
+ return store.longSize();
+ }
+
+ @Override
+ public long offHeapMemoryUsed() {
+ return store.offHeapMemoryUsed();
+ }
+
+ @Override
+ public Class<KeyWrapper<K>> keyClass() {
+ return store.keyClass();
+ }
+
+ @Override
+ public Type keyType() {
+ return store.keyType();
+ }
+
+ @Override
+ public ExternalMapQueryContext<KeyWrapper<K>, TimedValue<V>, ?> queryContext(KeyWrapper<K> key) {
+ return store.queryContext(key);
+ }
+
+ @Override
+ public ExternalMapQueryContext<KeyWrapper<K>, TimedValue<V>, ?> queryContext(
+ Data<KeyWrapper<K>> key) {
+ return store.queryContext(key);
+ }
+
+ @Override
+ public ExternalMapQueryContext<KeyWrapper<K>, TimedValue<V>, ?> queryContext(
+ BytesStore keyBytes, long offset, long size) {
+ return store.queryContext(keyBytes, offset, size);
+ }
+
+ @Override
+ public MapSegmentContext<KeyWrapper<K>, TimedValue<V>, ?> segmentContext(int segmentIndex) {
+ return store.segmentContext(segmentIndex);
+ }
+
+ @Override
+ public int segments() {
+ return store.segments();
+ }
+
+ @Override
+ public boolean forEachEntryWhile(
+ Predicate<? super MapEntry<KeyWrapper<K>, TimedValue<V>>> predicate) {
+ return store.forEachEntryWhile(predicate);
+ }
+
+ @Override
+ public void forEachEntry(Consumer<? super MapEntry<KeyWrapper<K>, TimedValue<V>>> action) {
+ store.forEachEntry(action);
+ }
+
+ @Override
+ public void close() {
+ store.close();
+ }
+
+ @Override
+ public boolean isOpen() {
+ return store.isOpen();
+ }
+}