Merge branch 'stable-3.3'

* stable-3.3:
  Format BUILD file
  Normalize chronicle-map configuration parameters
  Delegate to memory cache factory for non persistent caches
  Expose hot keys metrics
  Expose remaining auto resizes free metric
  Expose percentage free space metric for each cache
  Bump up chronicle-map to 3.20.84
  Replace wrong "entries" parameter in docs and commands
  Evict cache entries when free space gets low
  Make admins aware that diskLimit cannot be honoured
  Provide SSH command to analyze H2 caches
  Honour cache versioning

Change-Id: I06b01b34b78cd8bc8943c2ef3e9135164ac1aebd
diff --git a/BUILD b/BUILD
index 2336593..52f23ec 100644
--- a/BUILD
+++ b/BUILD
@@ -1,6 +1,5 @@
 load("//tools/bzl:junit.bzl", "junit_tests")
 load("//javatests/com/google/gerrit/acceptance:tests.bzl", "acceptance_tests")
-
 load(
     "//tools/bzl:plugin.bzl",
     "PLUGIN_DEPS",
@@ -11,18 +10,23 @@
 gerrit_plugin(
     name = "cache-chroniclemap",
     srcs = glob(["src/main/java/**/*.java"]),
+    manifest_entries = [
+        "Gerrit-SshModule: com.googlesource.gerrit.modules.cache.chroniclemap.command.SSHCommandModule",
+    ],
     resources = glob(["src/main/resources/**/*"]),
     deps = [
-        "@chronicle-map//jar",
-        "@chronicle-core//jar",
-        "@chronicle-wire//jar",
-        "@chronicle-bytes//jar",
+        "//lib:h2",
+        "//lib/commons:io",
         "@chronicle-algo//jar",
-        "@chronicle-values//jar",
+        "@chronicle-bytes//jar",
+        "@chronicle-core//jar",
+        "@chronicle-map//jar",
         "@chronicle-threads//jar",
+        "@chronicle-values//jar",
+        "@chronicle-wire//jar",
+        "@dev-jna//jar",
         "@javapoet//jar",
         "@jna-platform//jar",
-        "@dev-jna//jar",
     ],
 )
 
@@ -35,6 +39,7 @@
     deps = PLUGIN_DEPS + PLUGIN_TEST_DEPS + [
         ":cache-chroniclemap__plugin",
         "@chronicle-bytes//jar",
+        ":chroniclemap-test-lib",
     ],
 )
 
@@ -44,5 +49,13 @@
     labels = ["server"],
     deps = [
         ":cache-chroniclemap__plugin",
+        ":chroniclemap-test-lib",
     ],
 )
+
+java_library(
+    name = "chroniclemap-test-lib",
+    testonly = True,
+    srcs = ["src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/TestPersistentCacheDef.java"],
+    deps = PLUGIN_DEPS,
+)
diff --git a/config.md b/config.md
index bffc481..60d4ae1 100644
--- a/config.md
+++ b/config.md
@@ -15,6 +15,16 @@
 * `refreshAfterWrite`: Duration after which we asynchronously refresh the cached value.
 [Gerrit docs](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#cache.name.refreshAfterWrite)
 
+* `diskLimit`: Total size in bytes of the keys and values stored on disk.
+Defaults are per-cache and can be found in the relevant documentation:
+[Gerrit docs](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#cache.name.diskLimit)
+
+  *NOTE*: as per gerrit documentation, a positive value is required to enable disk
+  storage for the cache. However, the provided value cannot be used to limit the
+  size of the file, since that is the result of chronicle-map pre-allocation and
+  it is a function of the number of entries, average sizes and bloat factor,
+  rather than the number of values stored in it.
+
 Chronicle-map implementation however might require some additional configuration
 
 ## Configuration parameters
@@ -39,7 +49,7 @@
 https://www.javadoc.io/doc/net.openhft/chronicle-map/3.8.0/net/openhft/chronicle/map/ChronicleMapBuilder.html#averageValueSize-double-
 )
 
-```cache.<name>.entries```
+```cache.<name>.maxEntries```
 : The number of entries that this cache is going to hold, _at most_.
 The actual number of entries needs to be less or equal to this value.
 
@@ -55,7 +65,7 @@
 configured entries.
 
 Chronicle Map will allocate memory until the actual number of entries inserted
-divided by the number configured through `entries` is not
+divided by the number configured through `maxEntries` is not
 higher than the configured `maxBloatFactor`.
 
 Chronicle Map works progressively slower when the actual size grows far beyond
@@ -66,6 +76,20 @@
 https://www.javadoc.io/doc/net.openhft/chronicle-map/3.8.0/net/openhft/chronicle/hash/ChronicleHashBuilder.html#maxBloatFactor-double-
 )
 
+* `cache.<name>.percentageFreeSpaceEvictionThreshold`
+: The percentage of free space in the last available expansion of chronicle-map
+beyond which cold cache entries will start being evicted.
+
+Since the eviction routine is scheduled as a background task every 30 seconds,
+this value should always be < 100. This is to allow for additional entries to be
+inserted into the cache between the execution of two eviction runs.
+
+How big that margin should be depends on how fast the cache can grow between two
+eviction runs: caches that populate more quickly might need a lower value, and
+vice-versa.
+
+Default: *90*
+
 ### Defaults
 
 Unless overridden by configuration, sensible default values are be provided for
@@ -96,18 +120,41 @@
 The limit to the number of times the map can expand is set via the `maxBloatFactor`.
 if `remainingAutoResizes` drops to zero,this cache is no longer able to expand
 and it will not be able to take more entries, failing with a `IllegalStateException`
+[official documentation](https://javadoc.io/static/net.openhft/chronicle-map/3.20.83/net/openhft/chronicle/map/ChronicleMap.html#remainingAutoResizes--)
 
 * `percentageFreeSpace`
 : the amount of free space in the cache as a percentage. When the free space gets
  low ( around 5% ) the cache will automatically expand (see `remainingAutoResizes`).
  If the cache expands you will see an increase in the available free space.
+[official documentation](https://javadoc.io/static/net.openhft/chronicle-map/3.20.83/net/openhft/chronicle/map/ChronicleMap.html#percentageFreeSpace--)
+
+* `percentageHotKeys`
+: The percentage of _hot_ keys that can be kept in-memory.
+When performing evictions, _hot_ keys will be preserved and only _cold_ keys
+will be evicted from chronicle-map, in random order.
+
+This value implies a trade-off between eviction speed and eviction accuracy.
+
+The smaller the number of hotKeys allocated, the quicker the eviction phase
+will be. However, this will increase the chance of evicting entries that were
+recently accessed.
+
+Conversely, the higher the number of hotKeys allocated, the higher will be the
+accuracy in evicting only recently accessed keys, at the price of a longer
+time spent doing evictions.
+
+In order to ensure there is always a cold entry to be evicted, the number of
+`percentageHotKeys` always needs to be less than `maxEntries`.
+
+*Constraints*: [1-99]
+*Default*: 50
 
 These are the provided default values:
 
 * `web_sessions`:
     * `avgKeySize`: 45 bytes
     * `avgValueSize`: 221 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 1
 
 Allows up to 1000 users to be logged in.
@@ -115,7 +162,7 @@
 * `change_notes`:
     * `avgKeySize`: 36 bytes
     * `avgValueSize`: 10240 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 2
 
 Allow for a dozen review activities (votes, comments of medium length) to up to
@@ -124,7 +171,7 @@
 * `accounts`:
     * `avgKeySize`: 30 bytes
     * `avgValueSize`: 256 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 1
 
 Allows to cache up to 1000 details of active users, including their display name,
@@ -133,7 +180,7 @@
 * `diff`:
     * `avgKeySize`: 98 bytes
     * `avgValueSize`: 10240 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 3
 
 Allow for up to 1000 medium sized diffs between two commits to be cached.
@@ -142,7 +189,7 @@
 * `diff_intraline`:
     * `avgKeySize`: 512 bytes
     * `avgValueSize`: 2048 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 2
 
 Allow for up to 1000 medium sized diffs between two files to be cached.
@@ -151,7 +198,7 @@
 * `external_ids_map`:
     * `avgKeySize`: 24 bytes
     * `avgValueSize`: 204800 bytes
-    * `entries`: 2
+    * `maxEntries`: 2
     * `maxBloatFactor`: 1
 
 This cache holds a map of the parsed representation of all current external IDs.
@@ -161,7 +208,7 @@
 * `oauth_tokens`:
     * `avgKeySize`: 8 bytes
     * `avgValueSize`: 2048 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 1
 
 caches information about the operation performed by a change relative to its
@@ -170,7 +217,7 @@
 * `mergeability`:
     * `avgKeySize`: 79 bytes
     * `avgValueSize`: 16 bytes
-    * `entries`: 65000
+    * `maxEntries`: 65000
     * `maxBloatFactor`: 2
 
 Caches information about the mergeability status of up to 1000 open changes.
@@ -178,7 +225,7 @@
 * `pure_revert`:
     * `avgKeySize`: 55 bytes
     * `avgValueSize`: 16 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 1
 
 Caches the result of checking if one change or commit is a pure/clean revert of
@@ -187,7 +234,7 @@
 * `persisted_projects`:
     * `avgKeySize`: 128 bytes
     * `avgValueSize`: 1024 bytes
-    * `entries`: 250
+    * `maxEntries`: 250
     * `maxBloatFactor`: 2
 
 Caches the project description records from the refs/meta/config branch of each
@@ -197,7 +244,7 @@
 * `conflicts`:
     * `avgKeySize`: 70 bytes
     * `avgValueSize`: 16 bytes
-    * `entries`: 1000
+    * `maxEntries`: 1000
     * `maxBloatFactor`: 1
 
 Caches whether two commits are in conflict with each other.
@@ -210,7 +257,7 @@
 
 * `avgKeySize`: 128 bytes
 * `avgValueSize`: 2048 bytes
-* `entries`: 1000
+* `maxEntries`: 1000
 * `maxBloatFactor`: 1
 
 ### Gotchas
@@ -226,4 +273,10 @@
 brand new persistent cache (i.e. delete the old one).
 
 More information on recovery can be found in the
-[Official documentation](https://github.com/OpenHFT/Chronicle-Map/blob/master/docs/CM_Tutorial.adoc#recovery)
\ No newline at end of file
+[Official documentation](https://github.com/OpenHFT/Chronicle-Map/blob/master/docs/CM_Tutorial.adoc#recovery)
+
+### Tuning
+
+This module provides tooling to help understand how configuration should be
+optimized for chronicle-map.
+More information in the [tuning](tuning.md) documentation.
diff --git a/external_plugin_deps.bzl b/external_plugin_deps.bzl
index 06f1fd9..90174bc 100644
--- a/external_plugin_deps.bzl
+++ b/external_plugin_deps.bzl
@@ -1,64 +1,64 @@
 load("//tools/bzl:maven_jar.bzl", "maven_jar")
 
 # Ensure artifacts compatibility by selecting them from the Bill Of Materials
-# https://search.maven.org/artifact/net.openhft/chronicle-bom/2.19.283/pom
+# https://search.maven.org/artifact/net.openhft/chronicle-bom/2.20.191/pom
 def external_plugin_deps():
     maven_jar(
         name = "chronicle-map",
-        artifact = "net.openhft:chronicle-map:3.19.40",
-        sha1 = "820edb9aad86adb2a836b4f66a878d6101bbee54",
+        artifact = "net.openhft:chronicle-map:3.20.84",
+        sha1 = "a4549f64d41e7f379d48cfee432f210c0ed563e1",
     )
 
     maven_jar(
         name = "chronicle-core",
-        artifact = "net.openhft:chronicle-core:2.19.50",
-        sha1 = "71234f0116eceda4034cddceb79cd924cea5c4be",
+        artifact = "net.openhft:chronicle-core:2.20.122",
+        sha1 = "aa9dcde008938f5c845b98a6b8f74b25a4689c7c",
     )
 
     maven_jar(
         name = "chronicle-wire",
-        artifact = "net.openhft:chronicle-wire:2.19.45",
-        sha1 = "1ec0da34391b57a3c1809b1e139caf0371784bc4",
+        artifact = "net.openhft:chronicle-wire:2.20.111",
+        sha1 = "4002820daefe5694ecd73b640afd26fa32534959",
     )
 
     maven_jar(
         name = "chronicle-bytes",
-        artifact = "net.openhft:chronicle-bytes:2.19.46",
-        sha1 = "790a1c374f008f97202dd94ec8435edfce798cd0",
+        artifact = "net.openhft:chronicle-bytes:2.20.106",
+        sha1 = "6e4c01ea06ec005ca79ee694efa0a90634b6169e",
     )
 
     maven_jar(
         name = "chronicle-algo",
-        artifact = "net.openhft:chronicle-algorithms:2.19.40",
-        sha1 = "9445d2c48468a32c54d631e3908c4362a2bbac2c",
+        artifact = "net.openhft:chronicle-algorithms:2.20.80",
+        sha1 = "60b86a584d272aae6b7a80f6c7859c689a7199be",
     )
 
     maven_jar(
         name = "chronicle-values",
-        artifact = "net.openhft:chronicle-values:2.19.41",
-        sha1 = "f8bb874bbd67ceabd5166510043b5473e66285f4",
+        artifact = "net.openhft:chronicle-values:2.20.80",
+        sha1 = "2cd2bceaa3f0bcdd4470311c05daafbc188b57e2",
     )
 
     maven_jar(
         name = "chronicle-threads",
-        artifact = "net.openhft:chronicle-threads:2.19.47",
-        sha1 = "ef25ab51b795551e4c7bf3f80acde9b5364a3641",
+        artifact = "net.openhft:chronicle-threads:2.20.104",
+        sha1 = "53295d10b1eb63c1f6bb1a8a58e6889567ae6355",
     )
 
     maven_jar(
         name = "javapoet",
-        artifact = "com.squareup:javapoet:1.12.1",
-        sha1 = "e0e49f502697522ef047470b117ff81edc9f9a07",
+        artifact = "com.squareup:javapoet:1.13.0",
+        sha1 = "d6562d385049f35eb50403fa86bb11cce76b866a",
     )
 
     maven_jar(
         name = "jna-platform",
-        artifact = "net.java.dev.jna:jna-platform:5.5.0",
-        sha1 = "af38e7c4d0fc73c23ecd785443705bfdee5b90bf",
+        artifact = "net.java.dev.jna:jna-platform:5.6.0",
+        sha1 = "d18424ffb8bbfd036d71bcaab9b546858f2ef986",
     )
 
     maven_jar(
         name = "dev-jna",
-        artifact = "net.java.dev.jna:jna:5.5.0",
-        sha1 = "0e0845217c4907822403912ad6828d8e0b256208",
+        artifact = "net.java.dev.jna:jna:5.6.0",
+        sha1 = "330f2244e9030119ab3030fc3fededc86713d9cc",
     )
diff --git a/metrics.md b/metrics.md
new file mode 100644
index 0000000..8e176ef
--- /dev/null
+++ b/metrics.md
@@ -0,0 +1,24 @@
+Metrics
+=============
+
+In addition to the [usual metrics](https://gerrit-review.googlesource.com/Documentation/metrics.html#_caches)
+exposed by caches, chronicle-map emits additional metrics that might be useful
+to monitor the state of the cache:
+
+* cache/chroniclemap/percentage_free_space_<cache-name>
+  : the amount of free space left in the cache as a percentage.
+
+  See the [official documentation](https://javadoc.io/static/net.openhft/chronicle-map/3.20.83/net/openhft/chronicle/map/ChronicleMap.html#percentageFreeSpace--)
+  for more information.
+
+* cache/chroniclemap/remaining_autoresizes_<cache-name>
+  : the number of times the cache can automatically expand its capacity.
+
+  See the [official documentation](https://javadoc.io/static/net.openhft/chronicle-map/3.20.83/net/openhft/chronicle/map/ChronicleMap.html#remainingAutoResizes--)
+  for more information.
+
+* cache/chroniclemap/hot_keys_capacity_<cache-name>
+  : Constant number of hot keys for the cache that can be kept in memory.
+
+* cache/chroniclemap/hot_keys_size_<cache-name>
+  : The number of hot keys for the cache that are currently in memory.
\ No newline at end of file
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
index f3f59cf..2fc54d3 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
@@ -16,38 +16,31 @@
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import com.google.common.collect.ImmutableMap;
-import com.google.common.flogger.FluentLogger;
 import com.google.gerrit.common.Nullable;
 import com.google.gerrit.server.config.ConfigUtil;
 import com.google.gerrit.server.config.GerritServerConfig;
-import com.google.gerrit.server.config.SitePaths;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.time.Duration;
 import java.util.Optional;
 import org.eclipse.jgit.lib.Config;
 
 public class ChronicleMapCacheConfig {
-  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
-
   private final File persistedFile;
-  private final long diskLimit;
   private final long maxEntries;
   private final long averageKeySize;
   private final long averageValueSize;
   private final Duration expireAfterWrite;
   private final Duration refreshAfterWrite;
   private final int maxBloatFactor;
+  private final int percentageFreeSpaceEvictionThreshold;
+  private final int percentageHotKeys;
 
   public interface Factory {
     ChronicleMapCacheConfig create(
-        @Assisted("Name") String name,
         @Assisted("ConfigKey") String configKey,
-        @Assisted("DiskLimit") long diskLimit,
+        @Assisted File persistedFile,
         @Nullable @Assisted("ExpireAfterWrite") Duration expireAfterWrite,
         @Nullable @Assisted("RefreshAfterWrite") Duration refreshAfterWrite);
   }
@@ -55,17 +48,11 @@
   @AssistedInject
   ChronicleMapCacheConfig(
       @GerritServerConfig Config cfg,
-      SitePaths site,
-      @Assisted("Name") String name,
       @Assisted("ConfigKey") String configKey,
-      @Assisted("DiskLimit") long diskLimit,
+      @Assisted File persistedFile,
       @Nullable @Assisted("ExpireAfterWrite") Duration expireAfterWrite,
-      @Nullable @Assisted("RefreshAfterWrite") Duration refreshAfterWrite)
-      throws IOException {
-    final Path cacheDir = getCacheDir(site, cfg.getString("cache", null, "directory"));
-    this.persistedFile =
-        cacheDir != null ? cacheDir.resolve(String.format("%s.dat", name)).toFile() : null;
-    this.diskLimit = cfg.getLong("cache", configKey, "diskLimit", diskLimit);
+      @Nullable @Assisted("RefreshAfterWrite") Duration refreshAfterWrite) {
+    this.persistedFile = persistedFile;
 
     this.maxEntries =
         cfg.getLong("cache", configKey, "maxEntries", Defaults.maxEntriesFor(configKey));
@@ -89,6 +76,28 @@
 
     this.maxBloatFactor =
         cfg.getInt("cache", configKey, "maxBloatFactor", Defaults.maxBloatFactorFor(configKey));
+
+    this.percentageFreeSpaceEvictionThreshold =
+        cfg.getInt(
+            "cache",
+            configKey,
+            "percentageFreeSpaceEvictionThreshold",
+            Defaults.percentageFreeSpaceEvictionThreshold());
+
+    this.percentageHotKeys =
+        cfg.getInt("cache", configKey, "percentageHotKeys", Defaults.percentageHotKeys());
+
+    if (percentageHotKeys <= 0 || percentageHotKeys >= 100) {
+      throw new IllegalArgumentException("Invalid 'percentageHotKeys': should be in range [1-99]");
+    }
+  }
+
+  public int getPercentageFreeSpaceEvictionThreshold() {
+    return percentageFreeSpaceEvictionThreshold;
+  }
+
+  public int getpercentageHotKeys() {
+    return percentageHotKeys;
   }
 
   public Duration getExpireAfterWrite() {
@@ -115,29 +124,10 @@
     return averageValueSize;
   }
 
-  public long getDiskLimit() {
-    return diskLimit;
-  }
-
   public int getMaxBloatFactor() {
     return maxBloatFactor;
   }
 
-  private static Path getCacheDir(SitePaths site, String name) throws IOException {
-    if (name == null) {
-      return null;
-    }
-    Path loc = site.resolve(name);
-    if (!Files.exists(loc)) {
-      Files.createDirectories(loc);
-    }
-    if (!Files.isWritable(loc)) {
-      throw new IOException(String.format("Can't write to disk cache: %s", loc.toAbsolutePath()));
-    }
-    logger.atFine().log("Enabling disk cache %s", loc.toAbsolutePath());
-    return loc;
-  }
-
   private static long toSeconds(@Nullable Duration duration) {
     return duration != null ? duration.getSeconds() : 0;
   }
@@ -151,6 +141,9 @@
 
     public static final int DEFAULT_MAX_BLOAT_FACTOR = 1;
 
+    public static final int DEFAULT_PERCENTAGE_FREE_SPACE_EVICTION_THRESHOLD = 90;
+    public static final int DEFAULT_PERCENTAGE_HOT_KEYS = 50;
+
     private static final ImmutableMap<String, DefaultConfig> defaultMap =
         new ImmutableMap.Builder<String, DefaultConfig>()
             .put("web_sessions", DefaultConfig.create(45, 221, 1000, 1))
@@ -191,5 +184,13 @@
           .map(DefaultConfig::maxBloatFactor)
           .orElse(DEFAULT_MAX_BLOAT_FACTOR);
     }
+
+    public static int percentageFreeSpaceEvictionThreshold() {
+      return DEFAULT_PERCENTAGE_FREE_SPACE_EVICTION_THRESHOLD;
+    }
+
+    public static int percentageHotKeys() {
+      return DEFAULT_PERCENTAGE_HOT_KEYS;
+    }
   }
 }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
index 929c1c6..491b5dc 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
@@ -20,36 +20,57 @@
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.gerrit.extensions.events.LifecycleListener;
 import com.google.gerrit.extensions.registration.DynamicMap;
+import com.google.gerrit.metrics.MetricMaker;
 import com.google.gerrit.server.cache.CacheBackend;
+import com.google.gerrit.server.cache.MemoryCacheFactory;
 import com.google.gerrit.server.cache.PersistentCacheDef;
 import com.google.gerrit.server.cache.PersistentCacheFactory;
+import com.google.gerrit.server.config.GerritServerConfig;
+import com.google.gerrit.server.config.SitePaths;
 import com.google.gerrit.server.logging.LoggingContextAwareScheduledExecutorService;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
+import java.io.File;
 import java.io.IOException;
 import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import org.eclipse.jgit.lib.Config;
 
 @Singleton
 class ChronicleMapCacheFactory implements PersistentCacheFactory, LifecycleListener {
   private static final FluentLogger logger = FluentLogger.forEnclosingClass();
 
+  private final MemoryCacheFactory memCacheFactory;
+  private final Config config;
   private final ChronicleMapCacheConfig.Factory configFactory;
+  private final MetricMaker metricMaker;
   private final DynamicMap<Cache<?, ?>> cacheMap;
   private final List<ChronicleMapCacheImpl<?, ?>> caches;
   private final ScheduledExecutorService cleanup;
+  private final Path cacheDir;
 
   @Inject
   ChronicleMapCacheFactory(
-      ChronicleMapCacheConfig.Factory configFactory, DynamicMap<Cache<?, ?>> cacheMap) {
+      MemoryCacheFactory memCacheFactory,
+      @GerritServerConfig Config cfg,
+      SitePaths site,
+      ChronicleMapCacheConfig.Factory configFactory,
+      DynamicMap<Cache<?, ?>> cacheMap,
+      MetricMaker metricMaker) {
+    this.memCacheFactory = memCacheFactory;
+    this.config = cfg;
     this.configFactory = configFactory;
+    this.metricMaker = metricMaker;
     this.caches = new LinkedList<>();
+    this.cacheDir = getCacheDir(site, cfg.getString("cache", null, "directory"));
     this.cacheMap = cacheMap;
     this.cleanup =
         new LoggingContextAwareScheduledExecutorService(
@@ -61,19 +82,20 @@
                     .build()));
   }
 
-  @SuppressWarnings({"unchecked"})
   @Override
   public <K, V> Cache<K, V> build(PersistentCacheDef<K, V> in, CacheBackend backend) {
+    if (isInMemoryCache(in)) {
+      return memCacheFactory.build(in, backend);
+    }
     ChronicleMapCacheConfig config =
         configFactory.create(
-            in.name(),
             in.configKey(),
-            in.diskLimit(),
+            fileName(cacheDir, in.name(), in.version()),
             in.expireAfterWrite(),
             in.refreshAfterWrite());
-    ChronicleMapCacheImpl<K, V> cache = null;
+    ChronicleMapCacheImpl<K, V> cache;
     try {
-      cache = new ChronicleMapCacheImpl<>(in, config, null);
+      cache = new ChronicleMapCacheImpl<>(in, config, null, metricMaker);
     } catch (IOException e) {
       throw new UncheckedIOException(e);
     }
@@ -83,20 +105,21 @@
     return cache;
   }
 
-  @SuppressWarnings("unchecked")
   @Override
   public <K, V> LoadingCache<K, V> build(
       PersistentCacheDef<K, V> in, CacheLoader<K, V> loader, CacheBackend backend) {
+    if (isInMemoryCache(in)) {
+      return memCacheFactory.build(in, loader, backend);
+    }
     ChronicleMapCacheConfig config =
         configFactory.create(
-            in.name(),
             in.configKey(),
-            in.diskLimit(),
+            fileName(cacheDir, in.name(), in.version()),
             in.expireAfterWrite(),
             in.refreshAfterWrite());
-    ChronicleMapCacheImpl<K, V> cache = null;
+    ChronicleMapCacheImpl<K, V> cache;
     try {
-      cache = new ChronicleMapCacheImpl<>(in, config, loader);
+      cache = new ChronicleMapCacheImpl<>(in, config, loader, metricMaker);
     } catch (IOException e) {
       throw new UncheckedIOException(e);
     }
@@ -118,12 +141,15 @@
     }
   }
 
+  private <K, V> boolean isInMemoryCache(PersistentCacheDef<K, V> in) {
+    return cacheDir == null
+        || config.getLong("cache", in.configKey(), "diskLimit", in.diskLimit()) <= 0;
+  }
+
   @Override
   public void start() {
     for (ChronicleMapCacheImpl<?, ?> cache : caches) {
-      if (!cache.getConfig().getExpireAfterWrite().isZero()) {
-        cleanup.scheduleWithFixedDelay(cache::prune, 30, 30, TimeUnit.SECONDS);
-      }
+      cleanup.scheduleWithFixedDelay(cache::prune, 30, 30, TimeUnit.SECONDS);
     }
   }
 
@@ -131,4 +157,29 @@
   public void stop() {
     cleanup.shutdownNow();
   }
+
+  public static File fileName(Path cacheDir, String name, Integer version) {
+    return cacheDir.resolve(String.format("%s_%s.dat", name, version)).toFile();
+  }
+
+  private static Path getCacheDir(SitePaths site, String name) {
+    if (name == null) {
+      return null;
+    }
+    Path loc = site.resolve(name);
+    if (!Files.exists(loc)) {
+      try {
+        Files.createDirectories(loc);
+      } catch (IOException e) {
+        logger.atWarning().log("Can't create disk cache: %s", loc.toAbsolutePath());
+        return null;
+      }
+    }
+    if (!Files.isWritable(loc)) {
+      logger.atWarning().log("Can't write to disk cache: %s", loc.toAbsolutePath());
+      return null;
+    }
+    logger.atInfo().log("Enabling disk cache %s", loc.toAbsolutePath());
+    return loc;
+  }
 }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
index 59f9f5a..999775d 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
@@ -17,6 +17,8 @@
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.CacheStats;
 import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.metrics.Description;
+import com.google.gerrit.metrics.MetricMaker;
 import com.google.gerrit.server.cache.PersistentCache;
 import com.google.gerrit.server.cache.PersistentCacheDef;
 import com.google.gerrit.server.util.time.TimeUtil;
@@ -43,13 +45,22 @@
   private final LongAdder loadExceptionCount = new LongAdder();
   private final LongAdder totalLoadTime = new LongAdder();
   private final LongAdder evictionCount = new LongAdder();
+  private final InMemoryLRU<K> hotEntries;
 
   @SuppressWarnings("unchecked")
   ChronicleMapCacheImpl(
-      PersistentCacheDef<K, V> def, ChronicleMapCacheConfig config, CacheLoader<K, V> loader)
+      PersistentCacheDef<K, V> def,
+      ChronicleMapCacheConfig config,
+      CacheLoader<K, V> loader,
+      MetricMaker metricMaker)
       throws IOException {
     this.config = config;
     this.loader = loader;
+    this.hotEntries =
+        new InMemoryLRU<>(
+            (int) Math.max(config.getMaxEntries() * config.getpercentageHotKeys() / 100, 1));
+
+    ChronicleMapStorageMetrics metrics = new ChronicleMapStorageMetrics(metricMaker);
 
     final Class<K> keyClass = (Class<K>) def.keyType().getRawType();
     final Class<TimedValue<V>> valueWrapperClass = (Class<TimedValue<V>>) (Class) TimedValue.class;
@@ -69,32 +80,78 @@
     mapBuilder.averageValueSize(config.getAverageValueSize());
     mapBuilder.valueMarshaller(new TimedValueMarshaller<>(def.valueSerializer()));
 
-    // TODO: ChronicleMap must have "entries" configured, however cache definition
-    //  has already the concept of diskLimit. How to reconcile the two when both
-    //  are defined?
-    //  Should we honour diskLimit, by computing entries as a function of (avgKeySize +
-    // avgValueSize)
     mapBuilder.entries(config.getMaxEntries());
 
     mapBuilder.maxBloatFactor(config.getMaxBloatFactor());
 
-    if (config.getPersistedFile() == null || config.getDiskLimit() < 0) {
-      store = mapBuilder.create();
-    } else {
-      store = mapBuilder.createOrRecoverPersistedTo(config.getPersistedFile());
-    }
+    logger.atWarning().log(
+        "chronicle-map cannot honour the diskLimit of %s bytes for the %s "
+            + "cache, since the file size is pre-allocated rather than being "
+            + "a function of the number of entries in the cache",
+        def.diskLimit(), def.name());
+    store = mapBuilder.createOrRecoverPersistedTo(config.getPersistedFile());
 
     logger.atInfo().log(
-        "Initialized '%s'|avgKeySize: %s bytes|avgValueSize: %s bytes|"
-            + "entries: %s|maxBloatFactor: %s|remainingAutoResizes: %s|"
-            + "percentageFreeSpace: %s",
+        "Initialized '%s'|version: %s|avgKeySize: %s bytes|avgValueSize:"
+            + " %s bytes|entries: %s|maxBloatFactor: %s|remainingAutoResizes:"
+            + " %s|percentageFreeSpace: %s",
         def.name(),
+        def.version(),
         mapBuilder.constantlySizedKeys() ? "CONSTANT" : config.getAverageKeySize(),
         config.getAverageValueSize(),
         config.getMaxEntries(),
         config.getMaxBloatFactor(),
         store.remainingAutoResizes(),
         store.percentageFreeSpace());
+
+    metrics.registerCallBackMetrics(def.name(), store, hotEntries);
+  }
+
+  private static class ChronicleMapStorageMetrics {
+
+    private final MetricMaker metricMaker;
+
+    ChronicleMapStorageMetrics(MetricMaker metricMaker) {
+      this.metricMaker = metricMaker;
+    }
+
+    <K, V> void registerCallBackMetrics(
+        String name, ChronicleMap<K, TimedValue<V>> store, InMemoryLRU<K> hotEntries) {
+      String PERCENTAGE_FREE_SPACE_METRIC = "cache/chroniclemap/percentage_free_space_" + name;
+      String REMAINING_AUTORESIZES_METRIC = "cache/chroniclemap/remaining_autoresizes_" + name;
+      String HOT_KEYS_CAPACITY_METRIC = "cache/chroniclemap/hot_keys_capacity_" + name;
+      String HOT_KEYS_SIZE_METRIC = "cache/chroniclemap/hot_keys_size_" + name;
+
+      metricMaker.newCallbackMetric(
+          PERCENTAGE_FREE_SPACE_METRIC,
+          Long.class,
+          new Description(
+              String.format("The amount of free space in the %s cache as a percentage", name)),
+          () -> (long) store.percentageFreeSpace());
+
+      metricMaker.newCallbackMetric(
+          REMAINING_AUTORESIZES_METRIC,
+          Integer.class,
+          new Description(
+              String.format(
+                  "The number of times the %s cache can automatically expand its capacity", name)),
+          store::remainingAutoResizes);
+
+      metricMaker.newConstantMetric(
+          HOT_KEYS_CAPACITY_METRIC,
+          hotEntries.getCapacity(),
+          new Description(
+              String.format(
+                  "The number of hot cache keys for %s cache that can be kept in memory", name)));
+
+      metricMaker.newCallbackMetric(
+          HOT_KEYS_SIZE_METRIC,
+          Integer.class,
+          new Description(
+              String.format(
+                  "The number of hot cache keys for %s cache that are currently in memory", name)),
+          hotEntries::size);
+    }
   }
 
   public ChronicleMapCacheConfig getConfig() {
@@ -107,6 +164,7 @@
       TimedValue<V> vTimedValue = store.get(objKey);
       if (!expired(vTimedValue.getCreated())) {
         hitCount.increment();
+        hotEntries.add((K) objKey);
         return vTimedValue.getValue();
       } else {
         invalidate(objKey);
@@ -122,6 +180,7 @@
       TimedValue<V> vTimedValue = store.get(key);
       if (!needsRefresh(vTimedValue.getCreated())) {
         hitCount.increment();
+        hotEntries.add(key);
         return vTimedValue.getValue();
       }
     }
@@ -174,15 +233,23 @@
   public void put(K key, V val) {
     TimedValue<V> wrapped = new TimedValue<>(val);
     store.put(key, wrapped);
+    hotEntries.add(key);
   }
 
   public void prune() {
-    store.forEachEntry(
-        c -> {
-          if (expired(c.value().get().getCreated())) {
-            c.context().remove(c);
-          }
-        });
+    if (!config.getExpireAfterWrite().isZero()) {
+      store.forEachEntry(
+          c -> {
+            if (expired(c.value().get().getCreated())) {
+              hotEntries.remove(c.key().get());
+              c.context().remove(c);
+            }
+          });
+    }
+
+    if (runningOutOfFreeSpace()) {
+      evictColdEntries();
+    }
   }
 
   private boolean expired(long created) {
@@ -197,14 +264,31 @@
     return !refreshAfterWrite.isZero() && age.compareTo(refreshAfterWrite) > 0;
   }
 
+  protected boolean runningOutOfFreeSpace() {
+    return store.remainingAutoResizes() == 0
+        && store.percentageFreeSpace() <= config.getPercentageFreeSpaceEvictionThreshold();
+  }
+
+  private void evictColdEntries() {
+    store.forEachEntryWhile(
+        e -> {
+          if (!hotEntries.contains(e.key().get())) {
+            e.doRemove();
+          }
+          return runningOutOfFreeSpace();
+        });
+  }
+
   @Override
   public void invalidate(Object key) {
     store.remove(key);
+    hotEntries.remove((K) key);
   }
 
   @Override
   public void invalidateAll() {
     store.clear();
+    hotEntries.invalidateAll();
   }
 
   @Override
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRU.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRU.java
new file mode 100644
index 0000000..ac5183e
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRU.java
@@ -0,0 +1,69 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class InMemoryLRU<K> {
+
+  private final Map<K, Boolean> LRUMap;
+
+  private static final Boolean dummyValue = Boolean.TRUE;
+  private final int capacity;
+
+  public InMemoryLRU(int capacity) {
+    this.capacity = capacity;
+
+    LRUMap =
+        Collections.synchronizedMap(
+            new LinkedHashMap<K, Boolean>(capacity, 0.75f, true) {
+              @Override
+              protected boolean removeEldestEntry(Map.Entry<K, Boolean> eldest) {
+                return size() > capacity;
+              }
+            });
+  }
+
+  public void add(K objKey) {
+    LRUMap.putIfAbsent(objKey, dummyValue);
+  }
+
+  public boolean contains(K key) {
+    return LRUMap.containsKey(key);
+  }
+
+  public boolean remove(K key) {
+    return LRUMap.remove(key);
+  }
+
+  public void invalidateAll() {
+    LRUMap.clear();
+  }
+
+  public int size() {
+    return LRUMap.size();
+  }
+
+  @VisibleForTesting
+  protected Object[] toArray() {
+    return LRUMap.keySet().toArray();
+  }
+
+  public int getCapacity() {
+    return capacity;
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2Caches.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2Caches.java
new file mode 100644
index 0000000..89dfeef
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2Caches.java
@@ -0,0 +1,141 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap.command;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.server.config.GerritServerConfig;
+import com.google.gerrit.server.config.SitePaths;
+import com.google.gerrit.sshd.SshCommand;
+import com.google.inject.Inject;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FilenameUtils;
+import org.eclipse.jgit.lib.Config;
+import org.h2.Driver;
+
+public class AnalyzeH2Caches extends SshCommand {
+  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+  private String cacheDirectory;
+  private SitePaths site;
+
+  @Inject
+  AnalyzeH2Caches(@GerritServerConfig Config cfg, SitePaths site) {
+    this.cacheDirectory = cfg.getString("cache", null, "directory");
+    this.site = site;
+  }
+
+  @Override
+  protected void run() throws UnloggedFailure, Failure, Exception {
+    Set<Path> h2Files = getH2CacheFiles();
+    stdout.println("Extracting information from H2 caches...");
+
+    Config config = new Config();
+    for (Path h2 : h2Files) {
+      final String url = jdbcUrl(h2);
+      final String baseName =
+          FilenameUtils.removeExtension(FilenameUtils.getBaseName(h2.toString()));
+      try {
+
+        try (Connection conn = Driver.load().connect(url, null);
+            Statement s = conn.createStatement();
+            ResultSet r =
+                s.executeQuery(
+                    "SELECT COUNT(*), AVG(OCTET_LENGTH(k)), AVG(OCTET_LENGTH(v)) FROM data")) {
+          if (r.next()) {
+            long size = r.getLong(1);
+            long avgKeySize = r.getLong(2);
+            long avgValueSize = r.getLong(3);
+
+            if (size == 0) {
+              stdout.println(String.format("WARN: Cache %s is empty, skipping.", baseName));
+              continue;
+            }
+
+            config.setLong("cache", baseName, "maxEntries", size);
+            config.setLong("cache", baseName, "avgKeySize", avgKeySize);
+
+            // Account for extra serialization bytes of TimedValue entries.
+            short TIMED_VALUE_WRAPPER_OVERHEAD = Long.BYTES + Integer.BYTES;
+            config.setLong(
+                "cache", baseName, "avgValueSize", avgValueSize + TIMED_VALUE_WRAPPER_OVERHEAD);
+          }
+        }
+      } catch (SQLException e) {
+        stderr.println(String.format("Could not get information from %s", baseName));
+        throw die(e);
+      }
+    }
+    stdout.println();
+    stdout.println("****************************");
+    stdout.println("** Chronicle-map template **");
+    stdout.println("****************************");
+    stdout.println();
+    stdout.println(config.toText());
+  }
+
+  private Set<Path> getH2CacheFiles() throws UnloggedFailure {
+
+    try {
+      final Optional<Path> maybeCacheDir = getCacheDir(site, cacheDirectory);
+
+      return maybeCacheDir
+          .map(
+              cacheDir -> {
+                try {
+                  return Files.walk(cacheDir)
+                      .filter(path -> path.toString().endsWith("h2.db"))
+                      .collect(Collectors.toSet());
+                } catch (IOException e) {
+                  logger.atSevere().withCause(e).log("Could not read H2 files");
+                  return Collections.<Path>emptySet();
+                }
+              })
+          .orElse(Collections.emptySet());
+    } catch (IOException e) {
+      throw die(e);
+    }
+  }
+
+  private String jdbcUrl(Path h2FilePath) {
+    final String normalized =
+        FilenameUtils.removeExtension(FilenameUtils.removeExtension(h2FilePath.toString()));
+    return "jdbc:h2:" + normalized + ";AUTO_SERVER=TRUE";
+  }
+
+  private static Optional<Path> getCacheDir(SitePaths site, String name) throws IOException {
+    if (name == null) {
+      return Optional.empty();
+    }
+    Path loc = site.resolve(name);
+    if (!Files.exists(loc)) {
+      throw new IOException(
+          String.format("disk cache is configured but doesn't exist: %s", loc.toAbsolutePath()));
+    }
+    if (!Files.isReadable(loc)) {
+      throw new IOException(String.format("Can't read from disk cache: %s", loc.toAbsolutePath()));
+    }
+    logger.atFine().log("Enabling disk cache %s", loc.toAbsolutePath());
+    return Optional.of(loc);
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/SSHCommandModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/SSHCommandModule.java
new file mode 100644
index 0000000..038c177
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/SSHCommandModule.java
@@ -0,0 +1,23 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap.command;
+
+import com.google.gerrit.sshd.PluginCommandModule;
+
+public class SSHCommandModule extends PluginCommandModule {
+  @Override
+  protected void configureCommands() {
+    command("analyze-h2-caches").to(AnalyzeH2Caches.class);
+  }
+}
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfigTest.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfigTest.java
index 735d23b..8cb28c1 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfigTest.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfigTest.java
@@ -19,10 +19,11 @@
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.DEFAULT_AVG_VALUE_SIZE;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.DEFAULT_MAX_BLOAT_FACTOR;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.DEFAULT_MAX_ENTRIES;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.DEFAULT_PERCENTAGE_FREE_SPACE_EVICTION_THRESHOLD;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.DEFAULT_PERCENTAGE_HOT_KEYS;
 
 import com.google.gerrit.server.config.SitePaths;
-import java.io.IOException;
-import java.nio.file.FileSystemException;
+import java.io.File;
 import java.nio.file.Files;
 import java.time.Duration;
 import org.eclipse.jgit.lib.StoredConfig;
@@ -35,9 +36,10 @@
 
 public class ChronicleMapCacheConfigTest {
 
+  private final String cacheDirectory = ".";
   private final String cacheName = "foobar-cache";
   private final String cacheKey = "foobar-cache-key";
-  private final long definitionDiskLimit = 100;
+  private final int version = 1;
   private final Duration expireAfterWrite = Duration.ofSeconds(10_000);
   private final Duration refreshAfterWrite = Duration.ofSeconds(20_000);
 
@@ -54,14 +56,12 @@
         new FileBasedConfig(
             sitePaths.resolve("etc").resolve("gerrit.config").toFile(), FS.DETECTED);
     gerritConfig.load();
+    gerritConfig.setString("cache", null, "directory", cacheDirectory);
+    gerritConfig.save();
   }
 
   @Test
-  public void shouldProvidePersistedFileWhenCacheDirIsConfigured() throws Exception {
-    final String directory = "cache-dir";
-    gerritConfig.setString("cache", null, "directory", directory);
-    gerritConfig.save();
-
+  public void shouldProvidePersistedFile() throws Exception {
     assertThat(
             configUnderTest(gerritConfig)
                 .getPersistedFile()
@@ -69,26 +69,7 @@
                 .getParent()
                 .toRealPath()
                 .toString())
-        .isEqualTo(sitePaths.resolve(directory).toRealPath().toString());
-  }
-
-  @Test
-  public void shouldNotProvidePersistedFileWhenCacheDirIsNotConfigured() throws Exception {
-    assertThat(configUnderTest(gerritConfig).getPersistedFile()).isNull();
-  }
-
-  @Test
-  public void shouldProvideConfiguredDiskLimitWhenDefined() throws Exception {
-    long configuredDiskLimit = 50;
-    gerritConfig.setLong("cache", cacheKey, "diskLimit", configuredDiskLimit);
-    gerritConfig.save();
-
-    assertThat(configUnderTest(gerritConfig).getDiskLimit()).isEqualTo(configuredDiskLimit);
-  }
-
-  @Test
-  public void shouldProvideDefinitionDiskLimitWhenNotConfigured() throws Exception {
-    assertThat(configUnderTest(gerritConfig).getDiskLimit()).isEqualTo(definitionDiskLimit);
+        .isEqualTo(sitePaths.resolve(cacheDirectory).toRealPath().toString());
   }
 
   @Test
@@ -175,35 +156,68 @@
   }
 
   @Test
-  public void shouldThrowExceptionWhenDirectoryDoesntExist() throws Exception {
-    gerritConfig.setString("cache", null, "directory", "/var/bar/foobar");
-    gerritConfig.save();
-
-    assertThrows(FileSystemException.class, () -> configUnderTest(gerritConfig));
-  }
-
-  @Test
-  public void shouldThrowExceptionWhenDirectoryIsNotWriteable() throws Exception {
-    gerritConfig.setString("cache", null, "directory", "/var");
-    gerritConfig.save();
-
-    IOException thrown = assertThrows(IOException.class, () -> configUnderTest(gerritConfig));
-    assertThat(thrown).hasMessageThat().contains("Can't write to disk cache");
-  }
-
-  @Test
   public void shouldProvideDefinitionRefreshAfterWriteWhenNotConfigured() throws Exception {
     assertThat(configUnderTest(gerritConfig).getRefreshAfterWrite()).isEqualTo(refreshAfterWrite);
   }
 
-  private ChronicleMapCacheConfig configUnderTest(StoredConfig gerritConfig) throws IOException {
+  @Test
+  public void shouldProvidePercentageFreeSpaceEvictionThresholdWhenConfigured() throws Exception {
+    int percentageFreeThreshold = 70;
+    gerritConfig.setInt(
+        "cache", cacheKey, "percentageFreeSpaceEvictionThreshold", percentageFreeThreshold);
+    gerritConfig.save();
+
+    assertThat(configUnderTest(gerritConfig).getPercentageFreeSpaceEvictionThreshold())
+        .isEqualTo(percentageFreeThreshold);
+  }
+
+  @Test
+  public void shouldProvidePercentageFreeSpaceEvictionThresholdDefault() throws Exception {
+    assertThat(configUnderTest(gerritConfig).getPercentageFreeSpaceEvictionThreshold())
+        .isEqualTo(DEFAULT_PERCENTAGE_FREE_SPACE_EVICTION_THRESHOLD);
+  }
+
+  @Test
+  public void shouldProvidePercentageHotKeysDefault() throws Exception {
+    assertThat(configUnderTest(gerritConfig).getpercentageHotKeys())
+        .isEqualTo(DEFAULT_PERCENTAGE_HOT_KEYS);
+  }
+
+  @Test
+  public void shouldProvidePercentageHotKeysWhenConfigured() throws Exception {
+    int percentageHotKeys = 20;
+    gerritConfig.setInt("cache", cacheKey, "percentageHotKeys", percentageHotKeys);
+    gerritConfig.save();
+
+    assertThat(configUnderTest(gerritConfig).getpercentageHotKeys()).isEqualTo(percentageHotKeys);
+  }
+
+  @Test
+  public void shouldThrowWhenPercentageHotKeysIs100() throws Exception {
+    gerritConfig.setInt("cache", cacheKey, "percentageHotKeys", 100);
+    gerritConfig.save();
+
+    assertThrows(IllegalArgumentException.class, () -> configUnderTest(gerritConfig));
+  }
+
+  @Test
+  public void shouldThrowWhenPercentageHotKeysIs0() throws Exception {
+    gerritConfig.setInt("cache", cacheKey, "percentageHotKeys", 0);
+    gerritConfig.save();
+
+    assertThrows(IllegalArgumentException.class, () -> configUnderTest(gerritConfig));
+  }
+
+  private ChronicleMapCacheConfig configUnderTest(StoredConfig gerritConfig) {
+    File persistentFile =
+        ChronicleMapCacheFactory.fileName(
+            sitePaths.site_path.resolve(cacheDirectory), cacheName, version);
+    sitePaths
+        .resolve(cacheDirectory)
+        .resolve(String.format("%s_%s.dat", cacheName, version))
+        .toFile();
+
     return new ChronicleMapCacheConfig(
-        gerritConfig,
-        sitePaths,
-        cacheName,
-        cacheKey,
-        definitionDiskLimit,
-        expireAfterWrite,
-        refreshAfterWrite);
+        gerritConfig, cacheKey, persistentFile, expireAfterWrite, refreshAfterWrite);
   }
 }
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheIT.java
index 69f0845..df6fc38 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheIT.java
@@ -16,15 +16,19 @@
 import static com.google.common.truth.Truth.assertThat;
 import static com.google.common.truth.Truth8.assertThat;
 
+import com.google.common.cache.Cache;
 import com.google.common.truth.Truth8;
 import com.google.gerrit.acceptance.AbstractDaemonTest;
 import com.google.gerrit.acceptance.RestResponse;
+import com.google.gerrit.acceptance.UseLocalDisk;
 import com.google.gerrit.entities.Project;
 import com.google.gerrit.extensions.api.accounts.AccountInput;
+import com.google.gerrit.server.cache.CacheBackend;
 import com.google.gerrit.server.cache.PersistentCacheFactory;
 import com.google.inject.Inject;
 import org.junit.Test;
 
+@UseLocalDisk
 public class ChronicleMapCacheIT extends AbstractDaemonTest {
 
   @Inject PersistentCacheFactory persistentCacheFactory;
@@ -40,6 +44,25 @@
   }
 
   @Test
+  public void shouldBuildInMemoryCacheWhenDiskLimitIsNegative() {
+    final int negativeDiskLimit = -1;
+    final Cache<String, String> cache =
+        persistentCacheFactory.build(
+            new TestPersistentCacheDef("foo", negativeDiskLimit), CacheBackend.CAFFEINE);
+
+    assertThat(cache.getClass().getSimpleName()).isEqualTo("CaffeinatedGuavaCache");
+  }
+
+  @Test
+  public void shouldBuildInMemoryCacheWhenDiskLimitIsPositive() {
+    final int positiveDiskLimit = 1024;
+    assertThat(
+            persistentCacheFactory.build(
+                new TestPersistentCacheDef("foo", positiveDiskLimit), CacheBackend.CAFFEINE))
+        .isInstanceOf(ChronicleMapCacheImpl.class);
+  }
+
+  @Test
   public void shouldCacheNewProject() throws Exception {
     String newProjectName = name("newProject");
     RestResponse r = adminRestSession.put("/projects/" + newProjectName);
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheTest.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheTest.java
index 6abf49d..f8ef939 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheTest.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheTest.java
@@ -14,21 +14,30 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
 import static com.google.gerrit.testing.GerritJUnit.assertThrows;
 
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.Weigher;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricRegistry;
+import com.google.gerrit.acceptance.WaitUtil;
 import com.google.gerrit.common.Nullable;
-import com.google.gerrit.server.cache.PersistentCacheDef;
-import com.google.gerrit.server.cache.serialize.CacheSerializer;
+import com.google.gerrit.lifecycle.LifecycleManager;
+import com.google.gerrit.metrics.DisabledMetricMaker;
+import com.google.gerrit.metrics.MetricMaker;
+import com.google.gerrit.metrics.dropwizard.DropWizardMetricMaker;
 import com.google.gerrit.server.cache.serialize.StringCacheSerializer;
 import com.google.gerrit.server.config.SitePaths;
-import com.google.inject.TypeLiteral;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import java.io.File;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.nio.file.Files;
 import java.time.Duration;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
+import net.openhft.chronicle.bytes.Bytes;
 import org.eclipse.jgit.lib.StoredConfig;
 import org.eclipse.jgit.storage.file.FileBasedConfig;
 import org.eclipse.jgit.util.FS;
@@ -38,11 +47,15 @@
 import org.junit.rules.TemporaryFolder;
 
 public class ChronicleMapCacheTest {
+  @Inject MetricMaker metricMaker;
+  @Inject MetricRegistry metricRegistry;
 
   @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder();
   private SitePaths sitePaths;
   private StoredConfig gerritConfig;
 
+  private final String cacheDirectory = ".";
+
   @Before
   public void setUp() throws Exception {
     sitePaths = new SitePaths(temporaryFolder.newFolder().toPath());
@@ -52,6 +65,20 @@
         new FileBasedConfig(
             sitePaths.resolve("etc").resolve("gerrit.config").toFile(), FS.DETECTED);
     gerritConfig.load();
+    gerritConfig.setString("cache", null, "directory", cacheDirectory);
+    gerritConfig.save();
+
+    setupMetrics();
+  }
+
+  public void setupMetrics() {
+    Injector injector = Guice.createInjector(new DropWizardMetricMaker.ApiModule());
+
+    LifecycleManager mgr = new LifecycleManager();
+    mgr.add(injector);
+    mgr.start();
+
+    injector.injectMembers(this);
   }
 
   @Test
@@ -60,6 +87,19 @@
   }
 
   @Test
+  public void getIfPresentShouldReturnNullWhenThereCacheHasADifferentVersion() throws Exception {
+    gerritConfig.setString("cache", null, "directory", "cache");
+    gerritConfig.save();
+    final ChronicleMapCacheImpl<String, String> cacheV1 = newCacheVersion(1);
+
+    cacheV1.put("foo", "value version 1");
+    cacheV1.close();
+
+    final ChronicleMapCacheImpl<String, String> cacheV2 = newCacheVersion(2);
+    assertThat(cacheV2.getIfPresent("foo")).isNull();
+  }
+
+  @Test
   public void getWithLoaderShouldPopulateTheCache() throws Exception {
     String cachedValue = UUID.randomUUID().toString();
     final ChronicleMapCacheImpl<String, String> cache = newCacheWithLoader();
@@ -77,6 +117,38 @@
   }
 
   @Test
+  public void getShouldRetrieveANewValueWhenCacheHasADifferentVersion() throws Exception {
+    gerritConfig.setString("cache", null, "directory", "cache");
+    gerritConfig.save();
+    final ChronicleMapCacheImpl<String, String> cacheV1 = newCacheVersion(1);
+
+    cacheV1.put("foo", "value version 1");
+    cacheV1.close();
+
+    final ChronicleMapCacheImpl<String, String> cacheV2 = newCacheVersion(2);
+
+    final String v2Value = "value version 2";
+    assertThat(cacheV2.get("foo", () -> v2Value)).isEqualTo(v2Value);
+  }
+
+  @Test
+  public void getShouldRetrieveCachedValueWhenCacheHasSameVersion() throws Exception {
+    int cacheVersion = 2;
+    gerritConfig.setString("cache", null, "directory", "cache");
+    gerritConfig.save();
+    final ChronicleMapCacheImpl<String, String> cache = newCacheVersion(cacheVersion);
+
+    final String originalValue = "value 1";
+    cache.put("foo", originalValue);
+    cache.close();
+
+    final ChronicleMapCacheImpl<String, String> newCache = newCacheVersion(cacheVersion);
+
+    final String newValue = "value 2";
+    assertThat(newCache.get("foo", () -> newValue)).isEqualTo(originalValue);
+  }
+
+  @Test
   public void getShoudThrowWhenNoLoaderHasBeenProvided() throws Exception {
     final ChronicleMapCacheImpl<String, String> cache = newCacheWithoutLoader();
 
@@ -164,7 +236,8 @@
 
   @Test
   public void getIfPresentShouldReturnNullWhenValueIsExpired() throws Exception {
-    ChronicleMapCacheImpl<String, String> cache = newCache(true, null, Duration.ofSeconds(1), null);
+    ChronicleMapCacheImpl<String, String> cache =
+        newCache(true, null, Duration.ofSeconds(1), null, 1);
     cache.put("foo", "some-stale-value");
     Thread.sleep(1010); // Allow cache entry to expire
     assertThat(cache.getIfPresent("foo")).isNull();
@@ -174,7 +247,7 @@
   public void getShouldRefreshValueWhenExpired() throws Exception {
     String newCachedValue = UUID.randomUUID().toString();
     ChronicleMapCacheImpl<String, String> cache =
-        newCache(true, newCachedValue, null, Duration.ofSeconds(1));
+        newCache(true, newCachedValue, null, Duration.ofSeconds(1), 1);
     cache.put("foo", "some-stale-value");
     Thread.sleep(1010); // Allow cache to be flagged as needing refresh
     assertThat(cache.get("foo")).isEqualTo(newCachedValue);
@@ -182,7 +255,8 @@
 
   @Test
   public void shouldPruneExpiredValues() throws Exception {
-    ChronicleMapCacheImpl<String, String> cache = newCache(true, null, Duration.ofSeconds(1), null);
+    ChronicleMapCacheImpl<String, String> cache =
+        newCache(true, null, Duration.ofSeconds(1), null, 1);
     cache.put("foo1", "some-stale-value1");
     cache.put("foo2", "some-stale-value1");
     Thread.sleep(1010); // Allow cache entries to expire
@@ -215,122 +289,263 @@
     assertThat(cache.size()).isEqualTo(0);
   }
 
+  @Test
+  public void shouldEvictOldestElementInCacheWhenIsNeverAccessed() throws Exception {
+    final String fooValue = "foo";
+
+    gerritConfig.setInt("cache", "foo", "maxEntries", 2);
+    gerritConfig.setInt("cache", "foo", "percentageHotKeys", 10);
+    gerritConfig.setInt("cache", "foo", "avgKeySize", "foo1".getBytes().length);
+    gerritConfig.setInt("cache", "foo", "avgValueSize", valueSize(fooValue));
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithLoader(fooValue);
+    cache.put("foo1", fooValue);
+    cache.put("foo2", fooValue);
+
+    cache.prune();
+
+    assertThat(cache.size()).isEqualTo(1);
+    assertThat(cache.get("foo2")).isNotNull();
+  }
+
+  @Test
+  public void shouldEvictRecentlyInsertedElementInCacheWhenOldestElementIsAccessed()
+      throws Exception {
+    final String fooValue = "foo";
+    gerritConfig.setInt("cache", "foo", "maxEntries", 2);
+    gerritConfig.setInt("cache", "foo", "percentageHotKeys", 10);
+    gerritConfig.setInt("cache", "foo", "avgKeySize", "foo1".getBytes().length);
+    gerritConfig.setInt("cache", "foo", "avgValueSize", valueSize(fooValue));
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithLoader(fooValue);
+    cache.put("foo1", fooValue);
+    cache.put("foo2", fooValue);
+
+    cache.get("foo1");
+
+    cache.prune();
+
+    assertThat(cache.size()).isEqualTo(1);
+    assertThat(cache.get("foo1")).isEqualTo(fooValue);
+  }
+
+  @Test
+  public void shouldEvictEntriesUntilFreeSpaceIsRecovered() throws Exception {
+    final int uuidSize = valueSize(UUID.randomUUID().toString());
+    gerritConfig.setInt("cache", "foo", "maxEntries", 50);
+    gerritConfig.setInt("cache", "foo", "percentageHotKeys", 10);
+    gerritConfig.setInt("cache", "foo", "avgKeySize", uuidSize);
+    gerritConfig.setInt("cache", "foo", "avgValueSize", uuidSize);
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithLoader();
+    while (!cache.runningOutOfFreeSpace()) {
+      cache.put(UUID.randomUUID().toString(), UUID.randomUUID().toString());
+    }
+    assertThat(cache.runningOutOfFreeSpace()).isTrue();
+
+    cache.prune();
+
+    assertThat(cache.runningOutOfFreeSpace()).isFalse();
+  }
+
+  @Test
+  public void shouldTriggerPercentageFreeMetric() throws Exception {
+    String cachedValue = UUID.randomUUID().toString();
+    String freeSpaceMetricName = "cache/chroniclemap/percentage_free_space_" + cachedValue;
+    gerritConfig.setInt("cache", cachedValue, "maxEntries", 2);
+    gerritConfig.setInt("cache", cachedValue, "avgKeySize", cachedValue.getBytes().length);
+    gerritConfig.setInt("cache", cachedValue, "avgValueSize", valueSize(cachedValue));
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithMetrics(cachedValue);
+
+    assertThat(getMetric(freeSpaceMetricName).getValue()).isEqualTo(100);
+
+    cache.put(cachedValue, cachedValue);
+
+    WaitUtil.waitUntil(
+        () -> (long) getMetric(freeSpaceMetricName).getValue() < 100, Duration.ofSeconds(2));
+  }
+
+  @Test
+  public void shouldTriggerRemainingAutoResizeMetric() throws Exception {
+    String cachedValue = UUID.randomUUID().toString();
+    String autoResizeMetricName = "cache/chroniclemap/remaining_autoresizes_" + cachedValue;
+    gerritConfig.setInt("cache", cachedValue, "maxEntries", 2);
+    gerritConfig.setInt("cache", cachedValue, "avgKeySize", cachedValue.getBytes().length);
+    gerritConfig.setInt("cache", cachedValue, "avgValueSize", valueSize(cachedValue));
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithMetrics(cachedValue);
+
+    assertThat(getMetric(autoResizeMetricName).getValue()).isEqualTo(1);
+
+    cache.put(cachedValue + "1", cachedValue);
+    cache.put(cachedValue + "2", cachedValue);
+    cache.put(cachedValue + "3", cachedValue);
+
+    WaitUtil.waitUntil(
+        () -> (int) getMetric(autoResizeMetricName).getValue() == 0, Duration.ofSeconds(2));
+  }
+
+  @Test
+  public void shouldTriggerHotKeysCapacityCacheMetric() throws Exception {
+    String cachedValue = UUID.randomUUID().toString();
+    int percentageHotKeys = 60;
+    int maxEntries = 10;
+    int expectedCapacity = 6;
+    String hotKeysCapacityMetricName = "cache/chroniclemap/hot_keys_capacity_" + cachedValue;
+    gerritConfig.setInt("cache", cachedValue, "maxEntries", maxEntries);
+    gerritConfig.setInt("cache", cachedValue, "percentageHotKeys", percentageHotKeys);
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithMetrics(cachedValue);
+
+    assertThat(getMetric(hotKeysCapacityMetricName).getValue()).isEqualTo(expectedCapacity);
+  }
+
+  @Test
+  public void shouldTriggerHotKeysSizeCacheMetric() throws Exception {
+    String cachedValue = UUID.randomUUID().toString();
+    int percentageHotKeys = 30;
+    int maxEntries = 10;
+    int maxHotKeyCapacity = 3;
+    final Duration METRIC_TRIGGER_TIMEOUT = Duration.ofSeconds(2);
+    String hotKeysSizeMetricName = "cache/chroniclemap/hot_keys_size_" + cachedValue;
+    gerritConfig.setInt("cache", cachedValue, "maxEntries", maxEntries);
+    gerritConfig.setInt("cache", cachedValue, "percentageHotKeys", percentageHotKeys);
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithMetrics(cachedValue);
+
+    assertThat(getMetric(hotKeysSizeMetricName).getValue()).isEqualTo(0);
+
+    for (int i = 0; i < maxHotKeyCapacity; i++) {
+      cache.put(cachedValue + i, cachedValue);
+    }
+
+    WaitUtil.waitUntil(
+        () -> (int) getMetric(hotKeysSizeMetricName).getValue() == maxHotKeyCapacity,
+        METRIC_TRIGGER_TIMEOUT);
+
+    cache.put(cachedValue + maxHotKeyCapacity + 1, cachedValue);
+
+    assertThrows(
+        InterruptedException.class,
+        () ->
+            WaitUtil.waitUntil(
+                () -> (int) getMetric(hotKeysSizeMetricName).getValue() > maxHotKeyCapacity,
+                METRIC_TRIGGER_TIMEOUT));
+  }
+
+  @Test
+  public void shouldResetHotKeysWhenInvalidateAll() throws Exception {
+    String cachedValue = UUID.randomUUID().toString();
+    int percentageHotKeys = 30;
+    int maxEntries = 10;
+    int maxHotKeyCapacity = 3;
+    final Duration METRIC_TRIGGER_TIMEOUT = Duration.ofSeconds(2);
+    String hotKeysSizeMetricName = "cache/chroniclemap/hot_keys_size_" + cachedValue;
+    gerritConfig.setInt("cache", cachedValue, "maxEntries", maxEntries);
+    gerritConfig.setInt("cache", cachedValue, "percentageHotKeys", percentageHotKeys);
+    gerritConfig.save();
+
+    ChronicleMapCacheImpl<String, String> cache = newCacheWithMetrics(cachedValue);
+
+    for (int i = 0; i < maxHotKeyCapacity; i++) {
+      cache.put(cachedValue + i, cachedValue);
+    }
+
+    WaitUtil.waitUntil(
+        () -> (int) getMetric(hotKeysSizeMetricName).getValue() == maxHotKeyCapacity,
+        METRIC_TRIGGER_TIMEOUT);
+
+    cache.invalidateAll();
+
+    WaitUtil.waitUntil(
+        () -> (int) getMetric(hotKeysSizeMetricName).getValue() == 0, METRIC_TRIGGER_TIMEOUT);
+  }
+
+  private int valueSize(String value) {
+    final TimedValueMarshaller<String> marshaller =
+        new TimedValueMarshaller<>(StringCacheSerializer.INSTANCE);
+
+    Bytes<ByteBuffer> out = Bytes.elasticByteBuffer();
+    marshaller.write(out, new TimedValue<>(value));
+    return out.toByteArray().length;
+  }
+
+  private ChronicleMapCacheImpl<String, String> newCacheWithMetrics(String cachedValue)
+      throws IOException {
+    return newCache(true, cachedValue, null, null, 1, metricMaker);
+  }
+
   private ChronicleMapCacheImpl<String, String> newCache(
       Boolean withLoader,
       @Nullable String cachedValue,
       @Nullable Duration expireAfterWrite,
-      @Nullable Duration refreshAfterWrite)
+      @Nullable Duration refreshAfterWrite,
+      Integer version)
+      throws IOException {
+    return newCache(
+        withLoader,
+        cachedValue,
+        expireAfterWrite,
+        refreshAfterWrite,
+        version,
+        new DisabledMetricMaker());
+  }
+
+  private ChronicleMapCacheImpl<String, String> newCache(
+      Boolean withLoader,
+      @Nullable String cachedValue,
+      @Nullable Duration expireAfterWrite,
+      @Nullable Duration refreshAfterWrite,
+      Integer version,
+      MetricMaker metricMaker)
       throws IOException {
     TestPersistentCacheDef cacheDef = new TestPersistentCacheDef(cachedValue);
 
+    File persistentFile =
+        ChronicleMapCacheFactory.fileName(
+            sitePaths.site_path.resolve(cacheDirectory), cacheDef.name(), version);
+
     ChronicleMapCacheConfig config =
         new ChronicleMapCacheConfig(
             gerritConfig,
-            sitePaths,
-            cacheDef.name(),
             cacheDef.configKey(),
-            cacheDef.diskLimit(),
+            persistentFile,
             expireAfterWrite != null ? expireAfterWrite : Duration.ZERO,
             refreshAfterWrite != null ? refreshAfterWrite : Duration.ZERO);
 
-    return new ChronicleMapCacheImpl<>(cacheDef, config, withLoader ? cacheDef.loader() : null);
+    return new ChronicleMapCacheImpl<>(
+        cacheDef, config, withLoader ? cacheDef.loader() : null, metricMaker);
   }
 
   private ChronicleMapCacheImpl<String, String> newCacheWithLoader(@Nullable String cachedValue)
       throws IOException {
-    return newCache(true, cachedValue, null, null);
+    return newCache(true, cachedValue, null, null, 1);
   }
 
   private ChronicleMapCacheImpl<String, String> newCacheWithLoader() throws IOException {
-    return newCache(true, null, null, null);
+    return newCache(true, null, null, null, 1);
+  }
+
+  private ChronicleMapCacheImpl<String, String> newCacheVersion(int version) throws IOException {
+    return newCache(true, null, null, null, version);
   }
 
   private ChronicleMapCacheImpl<String, String> newCacheWithoutLoader() throws IOException {
-    return newCache(false, null, null, null);
+    return newCache(false, null, null, null, 1);
   }
 
-  public static class TestPersistentCacheDef implements PersistentCacheDef<String, String> {
-
-    private final String loadedValue;
-
-    TestPersistentCacheDef(@Nullable String loadedValue) {
-
-      this.loadedValue = loadedValue;
-    }
-
-    @Override
-    public long diskLimit() {
-      return 0;
-    }
-
-    @Override
-    public int version() {
-      return 0;
-    }
-
-    @Override
-    public CacheSerializer<String> keySerializer() {
-      return StringCacheSerializer.INSTANCE;
-    }
-
-    @Override
-    public CacheSerializer<String> valueSerializer() {
-      return StringCacheSerializer.INSTANCE;
-    }
-
-    @Override
-    public String name() {
-      return "chronicle-map-test-cache";
-    }
-
-    @Override
-    public String configKey() {
-      return name();
-    }
-
-    @Override
-    public TypeLiteral<String> keyType() {
-      return new TypeLiteral<String>() {};
-    }
-
-    @Override
-    public TypeLiteral<String> valueType() {
-      return new TypeLiteral<String>() {};
-    }
-
-    @Override
-    public long maximumWeight() {
-      return 0;
-    }
-
-    @Override
-    public Duration expireAfterWrite() {
-      return Duration.ZERO;
-    }
-
-    @Override
-    public Duration expireFromMemoryAfterAccess() {
-      return Duration.ZERO;
-    }
-
-    @Override
-    public Duration refreshAfterWrite() {
-      return Duration.ZERO;
-    }
-
-    @Override
-    public Weigher<String, String> weigher() {
-      return (s, s2) -> 0;
-    }
-
-    @Override
-    public CacheLoader<String, String> loader() {
-      return new CacheLoader<String, String>() {
-        @Override
-        public String load(String s) {
-          return loadedValue != null ? loadedValue : UUID.randomUUID().toString();
-        }
-      };
-    }
+  private <V> Gauge<V> getMetric(String name) {
+    @SuppressWarnings("unchecked")
+    Gauge<V> gauge = (Gauge<V>) metricRegistry.getMetrics().get(name);
+    assertWithMessage(name).that(gauge).isNotNull();
+    return gauge;
   }
 }
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRUTest.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRUTest.java
new file mode 100644
index 0000000..af27216
--- /dev/null
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/InMemoryLRUTest.java
@@ -0,0 +1,45 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import org.junit.Test;
+
+public class InMemoryLRUTest {
+
+  @Test
+  public void add_shouldUpdateElementPositionWhenAlreadyInSet() {
+    final InMemoryLRU<Object> map = new InMemoryLRU<>(2);
+
+    map.add("A");
+    map.add("B");
+
+    assertThat(map.toArray()).asList().containsExactly("A", "B");
+
+    map.add("A");
+    assertThat(map.toArray()).asList().containsExactly("B", "A");
+  }
+
+  @Test
+  public void add_shouldEvictLRUElement() {
+    final InMemoryLRU<Object> map = new InMemoryLRU<>(2);
+
+    map.add("A");
+    map.add("B");
+    map.add("C");
+
+    assertThat(map.toArray()).asList().containsExactly("B", "C");
+  }
+}
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/TestPersistentCacheDef.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/TestPersistentCacheDef.java
new file mode 100644
index 0000000..b81baec
--- /dev/null
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/TestPersistentCacheDef.java
@@ -0,0 +1,136 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.Weigher;
+import com.google.gerrit.common.Nullable;
+import com.google.gerrit.server.cache.PersistentCacheDef;
+import com.google.gerrit.server.cache.serialize.CacheSerializer;
+import com.google.gerrit.server.cache.serialize.StringCacheSerializer;
+import com.google.inject.TypeLiteral;
+import java.time.Duration;
+import java.util.UUID;
+
+public class TestPersistentCacheDef implements PersistentCacheDef<String, String> {
+
+  private static final Integer DEFAULT_DISK_LIMIT = 1024;
+
+  private final String loadedValue;
+  private final Duration expireAfterWrite;
+  private final Duration refreshAfterWrite;
+  private final Integer diskLimit;
+
+  public TestPersistentCacheDef(
+      String loadedValue,
+      @Nullable Duration expireAfterWrite,
+      @Nullable Duration refreshAfterWrite) {
+
+    this.loadedValue = loadedValue;
+    this.expireAfterWrite = expireAfterWrite;
+    this.refreshAfterWrite = refreshAfterWrite;
+    this.diskLimit = DEFAULT_DISK_LIMIT;
+  }
+
+  public TestPersistentCacheDef(String loadedValue, Integer diskLimit) {
+
+    this.loadedValue = loadedValue;
+    this.expireAfterWrite = null;
+    this.refreshAfterWrite = null;
+    this.diskLimit = diskLimit;
+  }
+
+  public TestPersistentCacheDef(String loadedValue) {
+
+    this.loadedValue = loadedValue;
+    this.expireAfterWrite = Duration.ZERO;
+    this.refreshAfterWrite = Duration.ZERO;
+    this.diskLimit = DEFAULT_DISK_LIMIT;
+  }
+
+  @Override
+  public long diskLimit() {
+    return diskLimit;
+  }
+
+  @Override
+  public int version() {
+    return 0;
+  }
+
+  @Override
+  public CacheSerializer<String> keySerializer() {
+    return StringCacheSerializer.INSTANCE;
+  }
+
+  @Override
+  public CacheSerializer<String> valueSerializer() {
+    return StringCacheSerializer.INSTANCE;
+  }
+
+  @Override
+  public String name() {
+    return loadedValue;
+  }
+
+  @Override
+  public String configKey() {
+    return name();
+  }
+
+  @Override
+  public TypeLiteral<String> keyType() {
+    return new TypeLiteral<String>() {};
+  }
+
+  @Override
+  public TypeLiteral<String> valueType() {
+    return new TypeLiteral<String>() {};
+  }
+
+  @Override
+  public long maximumWeight() {
+    return 0;
+  }
+
+  @Override
+  public Duration expireAfterWrite() {
+    return expireAfterWrite;
+  }
+
+  @Override
+  public Duration expireFromMemoryAfterAccess() {
+    return Duration.ZERO;
+  }
+
+  @Override
+  public Duration refreshAfterWrite() {
+    return refreshAfterWrite;
+  }
+
+  @Override
+  public Weigher<String, String> weigher() {
+    return (s, s2) -> 0;
+  }
+
+  @Override
+  public CacheLoader<String, String> loader() {
+    return new CacheLoader<String, String>() {
+      @Override
+      public String load(String s) {
+        return loadedValue != null ? loadedValue : UUID.randomUUID().toString();
+      }
+    };
+  }
+}
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2CachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2CachesIT.java
new file mode 100644
index 0000000..d6ae02d
--- /dev/null
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/command/AnalyzeH2CachesIT.java
@@ -0,0 +1,106 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap.command;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.TestPlugin;
+import com.google.gerrit.acceptance.UseLocalDisk;
+import com.google.gerrit.acceptance.UseSsh;
+import com.google.gerrit.server.config.SitePaths;
+import com.google.inject.Inject;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import org.junit.Test;
+
+@UseSsh
+@TestPlugin(
+    name = "cache-chroniclemap",
+    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.command.SSHCommandModule")
+public class AnalyzeH2CachesIT extends LightweightPluginDaemonTest {
+
+  @Inject private SitePaths sitePaths;
+
+  private String cmd = Joiner.on(" ").join("cache-chroniclemap", "analyze-h2-caches");
+
+  @Test
+  @UseLocalDisk
+  public void shouldAnalyzeH2Cache() throws Exception {
+    createChange();
+
+    String result = adminSshSession.exec(cmd);
+
+    adminSshSession.assertSuccess();
+    assertThat(result).contains("[cache \"mergeability\"]\n" + "\tmaxEntries = 1\n");
+    assertThat(result).contains("[cache \"diff\"]\n" + "\tmaxEntries = 1\n");
+    assertThat(result).contains("[cache \"accounts\"]\n" + "\tmaxEntries = 4\n");
+    assertThat(result).contains("[cache \"diff_summary\"]\n" + "\tmaxEntries = 1\n");
+    assertThat(result).contains("[cache \"persisted_projects\"]\n" + "\tmaxEntries = 3\n");
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldProduceWarningWhenCacheFileIsEmpty() throws Exception {
+    List<String> expected =
+        ImmutableList.of(
+            "WARN: Cache diff_intraline is empty, skipping.",
+            "WARN: Cache change_kind is empty, skipping.",
+            "WARN: Cache diff_summary is empty, skipping.",
+            "WARN: Cache diff is empty, skipping.",
+            "WARN: Cache mergeability is empty, skipping.",
+            "WARN: Cache pure_revert is empty, skipping.",
+            "WARN: Cache git_tags is empty, skipping.");
+    String result = adminSshSession.exec(cmd);
+
+    adminSshSession.assertSuccess();
+    assertThat(ImmutableList.copyOf(result.split("\n"))).containsAtLeastElementsIn(expected);
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldIgnoreNonH2Files() throws Exception {
+
+    Path cacheDirectory = sitePaths.resolve(cfg.getString("cache", null, "directory"));
+    Files.write(cacheDirectory.resolve("some.dat"), "some_content".getBytes());
+
+    List<String> expected =
+        ImmutableList.of(
+            "WARN: Cache diff_intraline is empty, skipping.",
+            "WARN: Cache change_kind is empty, skipping.",
+            "WARN: Cache diff_summary is empty, skipping.",
+            "WARN: Cache diff is empty, skipping.",
+            "WARN: Cache mergeability is empty, skipping.",
+            "WARN: Cache pure_revert is empty, skipping.",
+            "WARN: Cache git_tags is empty, skipping.");
+    String result = adminSshSession.exec(cmd);
+
+    adminSshSession.assertSuccess();
+    assertThat(ImmutableList.copyOf(result.split("\n"))).containsAtLeastElementsIn(expected);
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldFailWhenCacheDirectoryDoesNotExists() throws Exception {
+    cfg.setString("cache", null, "directory", "/tmp/non_existing_directory");
+
+    adminSshSession.exec(cmd);
+    adminSshSession.assertFailure(
+        "fatal: disk cache is configured but doesn't exist: /tmp/non_existing_directory");
+  }
+}
diff --git a/tuning.md b/tuning.md
new file mode 100644
index 0000000..86d8eb2
--- /dev/null
+++ b/tuning.md
@@ -0,0 +1,104 @@
+# Tuning
+
+Tuning chronicle-map correctly might be a daunting task:
+How many entries does a particular cache instance need?
+What are the average key and value sizes for it?
+
+Rather than leaving you only with the trial and error (or the guesswork)
+approach, this module provides a utility to help you get started in the right
+direction.
+
+Since chronicle-map is one of the first open-source alternatives to the H2
+implementation, it is very likely that your Gerrit instance has been running
+with the default H2 cache backend.
+
+The idea is to read from the _actual_ H2 persisted files and output the
+information that will be required to configure chronicle-map as an alternative.
+
+You can do this _before_ installing cache-chroniclemap as a lib module so that
+your Gerrit server will not need downtime. As follows:
+
+* Drop `cache-chroniclemap.jar` file in the `plugins/` directory.
+* Wait for the pluginLoader to acknowledge and load the new plugin. You will
+see an entry in the `error_log`:
+
+```
+INFO  com.google.gerrit.server.plugins.PluginLoader : Loaded plugin cache-chroniclemap
+```
+
+* You can now run an analysis on the current status of your H2 caches
+
+```bash
+ssh -p 29418 admin@<gerrit-server> cache-chroniclemap analyze-h2-caches
+```
+
+The result will be printed on standard output in a git config format.
+This is an example (the values are made up):
+
+```
+****************************
+** Chronicle-map template **
+****************************
+
+[cache "diff_summary"]
+	maxEntries = 101
+	avgKeySize = 192
+	avgValueSize = 1350
+[cache "web_sessions"]
+	maxEntries = 1
+	avgKeySize = 68
+	avgValueSize = 332
+[cache "pure_revert"]
+	maxEntries = 1
+	avgKeySize = 112
+	avgValueSize = 8
+[cache "mergeability"]
+	maxEntries = 101
+	avgKeySize = 150
+	avgValueSize = 8
+[cache "diff"]
+	maxEntries = 101
+	avgKeySize = 188
+	avgValueSize = 5035
+[cache "persisted_projects"]
+	maxEntries = 2
+	avgKeySize = 88
+	avgValueSize = 4489
+[cache "accounts"]
+	maxEntries = 5
+	avgKeySize = 52
+	avgValueSize = 505
+```
+
+Empty caches (if any) will not generate empty config stanzas, rather a warning
+will be displayed on standard output.
+
+For example:
+```
+WARN: Cache diff_intraline is empty, skipping
+```
+
+Please note that the generated configuration is not necessarily final and it
+might still need adjustments:
+* Since chronicle-map file size is pre-allocated, you might want to allow for
+more entries.
+* You might want to account for uncertainty by specifying a `maxBloatFactor` greater
+than 1.
+* any other reason.
+
+Once you have gathered the information you need, you might consider removing
+the plugin:
+
+* Remove the jar from the `plugins` directory
+
+```bash
+rm plugins/cache-chroniclemap.jar
+```
+
+* Wait for the pluginLoader to acknowledge and unload the plugin. You will
+see an entry in the `error_log`:
+
+```
+INFO  com.google.gerrit.server.plugins.PluginLoader : Unloading plugin cache-chroniclemap
+```
+