Merge branch 'stable-3.3'

* stable-3.3:
  Use class level @UseLocalDisk annotation
  Chronicle-Map cache tuning
  Clarify that the migrate command populates the cache with data

Change-Id: I300e7ddf84eebb25f3f1be99825c1893a2b38516
diff --git a/BUILD b/BUILD
index 619d3c7..7029515 100644
--- a/BUILD
+++ b/BUILD
@@ -29,6 +29,7 @@
         "@dev-jna//jar",
         "@javapoet//jar",
         "@jna-platform//jar",
+        "@commons-lang3//jar",
     ],
 )
 
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
new file mode 100644
index 0000000..d668884
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
@@ -0,0 +1,221 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheFactory.getCacheDir;
+
+import com.google.common.cache.Cache;
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.registration.DynamicMap;
+import com.google.gerrit.metrics.DisabledMetricMaker;
+import com.google.gerrit.server.config.GerritServerConfig;
+import com.google.gerrit.server.config.SitePaths;
+import com.google.gerrit.sshd.SshCommand;
+import com.google.inject.Inject;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FilenameUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.TextProgressMonitor;
+import org.kohsuke.args4j.Option;
+
+public class AutoAdjustCaches extends SshCommand {
+  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+  protected static final String CONFIG_HEADER = "__CONFIG__";
+  protected static final String TUNED_INFIX = "tuned";
+
+  private final DynamicMap<Cache<?, ?>> cacheMap;
+  private final ChronicleMapCacheConfig.Factory configFactory;
+  private final Path cacheDir;
+
+  @Option(
+      name = "--dry-run",
+      aliases = {"-d"},
+      usage = "Calculate the average key and value size, but do not migrate the data.")
+  private boolean dryRun;
+
+  @Inject
+  AutoAdjustCaches(
+      @GerritServerConfig Config cfg,
+      SitePaths site,
+      DynamicMap<Cache<?, ?>> cacheMap,
+      ChronicleMapCacheConfig.Factory configFactory) {
+    this.cacheMap = cacheMap;
+    this.configFactory = configFactory;
+    this.cacheDir = getCacheDir(site, cfg.getString("cache", null, "directory"));
+  }
+
+  @Override
+  protected void run() throws Exception {
+    Config outputChronicleMapConfig = new Config();
+
+    Map<String, ChronicleMapCacheImpl<Object, Object>> chronicleMapCaches = getChronicleMapCaches();
+
+    chronicleMapCaches.forEach(
+        (cacheName, currCache) -> {
+          ImmutablePair<Long, Long> avgSizes = averageSizes(cacheName, currCache.getStore());
+          if (!(avgSizes.getKey() > 0) || !(avgSizes.getValue() > 0)) {
+            logger.atWarning().log(
+                "Cache [%s] has %s entries, but average of (key: %d, value: %d). Skipping.",
+                cacheName, currCache.size(), avgSizes.getKey(), avgSizes.getValue());
+            return;
+          }
+
+          long averageKeySize = avgSizes.getKey();
+          long averageValueSize = avgSizes.getValue();
+          ChronicleMapCacheConfig newChronicleMapCacheConfig =
+              makeChronicleMapConfig(currCache.getConfig(), averageKeySize, averageValueSize);
+
+          updateOutputConfig(
+              outputChronicleMapConfig,
+              cacheName,
+              averageKeySize,
+              averageValueSize,
+              currCache.getConfig().getMaxEntries(),
+              currCache.getConfig().getMaxBloatFactor());
+
+          if (!dryRun) {
+            try {
+              ChronicleMapCacheImpl<Object, Object> newCache =
+                  new ChronicleMapCacheImpl<>(
+                      currCache.getCacheDefinition(),
+                      newChronicleMapCacheConfig,
+                      null,
+                      new DisabledMetricMaker());
+
+              TextProgressMonitor cacheMigrationProgress = new TextProgressMonitor(stdout);
+              cacheMigrationProgress.beginTask(
+                  String.format("[%s] migrate content", cacheName), (int) currCache.size());
+
+              currCache
+                  .getStore()
+                  .forEach(
+                      (k, v) -> {
+                        try {
+                          newCache.putUnchecked(k, v);
+                          cacheMigrationProgress.update(1);
+                        } catch (Exception e) {
+                          logger.atWarning().withCause(e).log(
+                              "[%s] Could not migrate entry %s -> %s",
+                              cacheName, k.getValue(), v.getValue());
+                        }
+                      });
+
+            } catch (IOException e) {
+              stderr.println(String.format("Could not create new cache %s", cacheName));
+            }
+          }
+        });
+
+    stdout.println();
+    stdout.println("****************************");
+    stdout.println("** Chronicle-map template **");
+    stdout.println("****************************");
+    stdout.println();
+    stdout.println(CONFIG_HEADER);
+    stdout.println(outputChronicleMapConfig.toText());
+  }
+
+  private ImmutablePair<Long, Long> averageSizes(
+      String cacheName, ConcurrentMap<KeyWrapper<Object>, TimedValue<Object>> store) {
+    long kAvg = 0;
+    long vAvg = 0;
+
+    if (store.isEmpty()) return ImmutablePair.of(kAvg, vAvg);
+
+    TextProgressMonitor progress = new TextProgressMonitor(stdout);
+
+    progress.beginTask(
+        String.format("[%s] calculate average key/value size", cacheName), store.size());
+
+    int i = 1;
+    for (Map.Entry<KeyWrapper<Object>, TimedValue<Object>> entry : store.entrySet()) {
+      kAvg = kAvg + (serializedKeyLength(cacheName, entry.getKey()) - kAvg) / i;
+      vAvg = vAvg + (serializedValueLength(cacheName, entry.getValue()) - vAvg) / i;
+      progress.update(1);
+    }
+    progress.endTask();
+    return ImmutablePair.of(kAvg, vAvg);
+  }
+
+  private static int serializedKeyLength(String cacheName, KeyWrapper<Object> keyWrapper) {
+    return CacheSerializers.getKeySerializer(cacheName).serialize(keyWrapper.getValue()).length;
+  }
+
+  private static int serializedValueLength(String cacheName, TimedValue<Object> timedValue) {
+    return CacheSerializers.getValueSerializer(cacheName).serialize(timedValue.getValue()).length;
+  }
+
+  private ChronicleMapCacheConfig makeChronicleMapConfig(
+      ChronicleMapCacheConfig currentChronicleMapConfig,
+      long averageKeySize,
+      long averageValueSize) {
+
+    return configFactory.createWithValues(
+        currentChronicleMapConfig.getConfigKey(),
+        resolveNewFile(currentChronicleMapConfig.getPersistedFile().getName()),
+        currentChronicleMapConfig.getExpireAfterWrite(),
+        currentChronicleMapConfig.getRefreshAfterWrite(),
+        currentChronicleMapConfig.getMaxEntries(),
+        averageKeySize,
+        averageValueSize,
+        currentChronicleMapConfig.getMaxBloatFactor());
+  }
+
+  private File resolveNewFile(String currentFileName) {
+    String newFileName =
+        String.format(
+            "%s_%s_%s.%s",
+            FilenameUtils.getBaseName(currentFileName),
+            TUNED_INFIX,
+            System.currentTimeMillis(),
+            FilenameUtils.getExtension(currentFileName));
+
+    return cacheDir.resolve(newFileName).toFile();
+  }
+
+  private static void updateOutputConfig(
+      Config config,
+      String cacheName,
+      long averageKeySize,
+      long averageValueSize,
+      long maxEntries,
+      int maxBloatFactor) {
+
+    config.setLong("cache", cacheName, "avgKeySize", averageKeySize);
+    config.setLong("cache", cacheName, "avgValueSize", averageValueSize);
+    config.setLong("cache", cacheName, "maxEntries", maxEntries);
+    config.setLong("cache", cacheName, "maxBloatFactor", maxBloatFactor);
+  }
+
+  @SuppressWarnings("unchecked")
+  private Map<String, ChronicleMapCacheImpl<Object, Object>> getChronicleMapCaches() {
+    return cacheMap.plugins().stream()
+        .map(cacheMap::byPlugin)
+        .flatMap(
+            pluginCaches ->
+                pluginCaches.entrySet().stream()
+                    .map(entry -> ImmutablePair.of(entry.getKey(), entry.getValue().get())))
+        .filter(
+            pair -> pair.getValue() instanceof ChronicleMapCacheImpl && pair.getValue().size() > 0)
+        .collect(
+            Collectors.toMap(
+                ImmutablePair::getKey, p -> (ChronicleMapCacheImpl<Object, Object>) p.getValue()));
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
index 6cdc8a7..1235e53 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheConfig.java
@@ -36,6 +36,7 @@
   private final int maxBloatFactor;
   private final int percentageFreeSpaceEvictionThreshold;
   private final int percentageHotKeys;
+  private final String configKey;
 
   public interface Factory {
     ChronicleMapCacheConfig create(
@@ -87,6 +88,7 @@
       @Assisted("avgValueSize") long avgValueSize,
       @Assisted("maxBloatFactor") int maxBloatFactor) {
     this.persistedFile = persistedFile;
+    this.configKey = configKey;
 
     this.maxEntries = maxEntries;
     this.averageKeySize = avgKeySize;
@@ -158,6 +160,10 @@
     return maxBloatFactor;
   }
 
+  public String getConfigKey() {
+    return configKey;
+  }
+
   private static long toSeconds(@Nullable Duration duration) {
     return duration != null ? duration.getSeconds() : 0;
   }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
index b4cec3b..088798b 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheFactory.java
@@ -142,4 +142,8 @@
   public static File fileName(Path cacheDir, String name, Integer version) {
     return cacheDir.resolve(String.format("%s_%s.dat", name, version)).toFile();
   }
+
+  protected static Path getCacheDir(SitePaths site, String name) {
+    return site.resolve(name);
+  }
 }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
index bde4fd1..97ecb1c 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
@@ -27,6 +27,7 @@
 import java.time.Duration;
 import java.time.Instant;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.LongAdder;
 import net.openhft.chronicle.map.ChronicleMap;
@@ -47,6 +48,7 @@
   private final LongAdder totalLoadTime = new LongAdder();
   private final LongAdder evictionCount = new LongAdder();
   private final InMemoryLRU<K> hotEntries;
+  private final PersistentCacheDef<K, V> cacheDefinition;
 
   @SuppressWarnings("unchecked")
   ChronicleMapCacheImpl(
@@ -57,6 +59,7 @@
       throws IOException {
     CacheSerializers.registerCacheDef(def);
 
+    this.cacheDefinition = def;
     this.config = config;
     this.loader = loader;
     this.hotEntries =
@@ -110,6 +113,10 @@
     metrics.registerCallBackMetrics(def.name(), store, hotEntries);
   }
 
+  protected PersistentCacheDef<K, V> getCacheDefinition() {
+    return cacheDefinition;
+  }
+
   private static class ChronicleMapStorageMetrics {
 
     private final MetricMaker metricMaker;
@@ -239,6 +246,16 @@
     return v;
   }
 
+  /**
+   * Associates the specified value with the specified key. This method should be used when the
+   * creation time of the value needs to be preserved, rather than computed at insertion time
+   * ({@link #put(K,V)}). This is typically the case when migrating from an existing cache where the
+   * creation timestamp needs to be preserved. See {@link H2MigrationServlet} for an example.
+   *
+   * @param key
+   * @param value
+   * @param created
+   */
   @SuppressWarnings("unchecked")
   public void putUnchecked(Object key, Object value, Timestamp created) {
     TimedValue<?> wrappedValue = new TimedValue<>(value, created.toInstant().toEpochMilli());
@@ -246,6 +263,21 @@
     store.put((KeyWrapper<K>) wrappedKey, (TimedValue<V>) wrappedValue);
   }
 
+  /**
+   * Associates the specified value with the specified key. This method should be used when the
+   * {@link TimedValue} and the {@link KeyWrapper} have already been constructed elsewhere rather
+   * than delegate their construction to this cache ({@link #put(K, V)}). This is typically the case
+   * when the key/value are extracted from another chronicle-map cache; see {@link AutoAdjustCaches}
+   * for an example.
+   *
+   * @param wrappedKey The wrapper for the key object
+   * @param wrappedValue the wrapper for the value object
+   */
+  @SuppressWarnings("unchecked")
+  public void putUnchecked(KeyWrapper<Object> wrappedKey, TimedValue<Object> wrappedValue) {
+    store.put((KeyWrapper<K>) wrappedKey, (TimedValue<V>) wrappedValue);
+  }
+
   @Override
   public void put(K key, V val) {
     KeyWrapper<K> wrappedKey = new KeyWrapper<>(key);
@@ -311,6 +343,10 @@
     hotEntries.invalidateAll();
   }
 
+  ConcurrentMap<KeyWrapper<K>, TimedValue<V>> getStore() {
+    return store;
+  }
+
   @Override
   public long size() {
     return store.size();
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
index 28956ff..afc50ca 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
@@ -15,19 +15,35 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import com.google.gerrit.extensions.config.FactoryModule;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Key;
 import com.google.inject.servlet.ServletModule;
 
 public class HttpModule extends ServletModule {
+  private final Injector injector;
+
+  @Inject
+  HttpModule(Injector injector) {
+    this.injector = injector;
+  }
 
   @Override
   protected void configureServlets() {
-    install(
-        new FactoryModule() {
-          @Override
-          protected void configure() {
-            factory(ChronicleMapCacheConfig.Factory.class);
-          }
-        });
+    /*
+     This module can be installed as a plugin, as a lib or both, depending on the wanted usage
+     (refer to the docs for more details on why this is needed). For this reason, some binding
+     might or might not have already been configured.
+    */
+    if (injector.getExistingBinding(Key.get(ChronicleMapCacheConfig.Factory.class)) == null) {
+      install(
+          new FactoryModule() {
+            @Override
+            protected void configure() {
+              factory(ChronicleMapCacheConfig.Factory.class);
+            }
+          });
+    }
 
     serve("/migrate").with(H2MigrationServlet.class);
   }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
index e7e0074..34e4954 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
@@ -14,11 +14,29 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import com.google.gerrit.sshd.PluginCommandModule;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Key;
 
 public class SSHCommandModule extends PluginCommandModule {
+  private final Injector injector;
+
+  @Inject
+  SSHCommandModule(Injector injector) {
+    this.injector = injector;
+  }
+
   @Override
   protected void configureCommands() {
-    factory(ChronicleMapCacheConfig.Factory.class);
+    /*
+     This module can be installed as a plugin, as a lib or both, depending on the wanted usage
+     (refer to the docs for more details on why this is needed). For this reason, some binding
+     might or might not have already been configured.
+    */
+    if (injector.getExistingBinding(Key.get(ChronicleMapCacheConfig.Factory.class)) == null) {
+      factory(ChronicleMapCacheConfig.Factory.class);
+    }
     command("analyze-h2-caches").to(AnalyzeH2Caches.class);
+    command("auto-adjust-caches").to(AutoAdjustCaches.class);
   }
 }
diff --git a/src/main/resources/Documentation/migration.md b/src/main/resources/Documentation/migration.md
index 22ebb2c..0370232 100644
--- a/src/main/resources/Documentation/migration.md
+++ b/src/main/resources/Documentation/migration.md
@@ -12,6 +12,12 @@
 database, it will participate to the overall database load, so you should test
 accordingly.
 
+The migration does the following:
+1. scan all existing cache key-value pairs
+2. calculate the parameters for the new cache
+3. create the new cache
+4. read all existing key-value pairs and insert them into the new cache-chroniclemap files
+
 The following caches will be migrated (if they exist and contain any data):
 
 * accounts
diff --git a/src/main/resources/Documentation/tuning.md b/src/main/resources/Documentation/tuning.md
index 86d8eb2..1571723 100644
--- a/src/main/resources/Documentation/tuning.md
+++ b/src/main/resources/Documentation/tuning.md
@@ -5,9 +5,17 @@
 what is the average key and value for it?
 
 Rather than leaving you only with the trial and error (or the guesswork)
-approach, this module provides a utility to help you get started in the right
+approach, this module provides utilities to help you get started in the right
 direction.
 
+If you have not migrated to chronicle-map yet, then follow instructions on how
+to analyze your existing H2 caches [here](#analyze-h2-caches).
+
+In case you have already migrated to chronicle-map please follow instructions on
+how to further tune existing .dat caches [here](#tune-chronicle-map-caches).
+
+## Analyze H2 caches
+
 Since chronicle-map is one of the first open-source alternatives to the H2
 implementation, it is very likely that your Gerrit instance has been running
 with the default H2 cache backend.
@@ -19,7 +27,7 @@
 your Gerrit server will not need downtime. As follows:
 
 * Drop `cache-chroniclemap.jar` file in the `plugins/` directory.
-* Wait for the pluginLoader to acknowledge and load the new plugin. You will
+* Wait for the pluginLoader to acknowledge and load the new plugin. You will 
 see an entry in the `error_log`:
 
 ```
@@ -102,3 +110,176 @@
 INFO  com.google.gerrit.server.plugins.PluginLoader : Unloading plugin cache-chroniclemap
 ```
 
+## Auto-adjust Chronicle-map caches
+
+If you have already migrated to chronicle-map then you already have `.dat` caches
+available under the `cache` directory, and you have provided suitable
+configuration for the existing caches as explained in the [configuration](./config.md)
+documentation.
+
+However, situations might arise for which new caches will be created for which
+no configuration has yet been provided: new persistent caches might be
+introduced on new versions of Gerrit, or you might end-up using a plugin that
+makes use of an additional cache, for example.
+
+When this happens, you might have little or no idea of what values should be
+provided for those caches, such as average key size and average value size, and
+you have to rely on default values.
+
+This plugin provides an SSH command that will help you analyze the current,
+suboptimal, chronicle-map caches and migrate into new ones for which a more
+realistic configuration is generated based on data.
+
+* Symlink the `cache-chroniclemap.jar` file in the `plugins/` directory (from
+  the `lib/` directory).
+* Wait for the pluginLoader to acknowledge and load the new plugin. You will see
+  an entry in the `error_log`:
+
+```
+INFO  com.google.gerrit.server.plugins.PluginLoader : Loaded plugin cache-chroniclemap
+```
+
+* You can now run the tuning command:
+
+```bash
+ssh -p 29418 admin@<gerrit-server> cache-chroniclemap auto-adjust-caches [--dry-run]
+```
+
+* --dry-run (Optional)
+
+Calculate the average key and value size, but do not migrate current cache
+data into new files
+
+For each chronicle-map cache (i.e. `foo_1.dat` file) in the `cache` directory, a
+new one will be created (i.e. `foo_1_tuned_<timestamp>.dat`).
+The new cache will have these characteristics:
+- Will have the same entries as the original cache.
+- Will be configured with the *actual* average key size and values calculated by
+  looking at the content of the original cache.
+
+An output will also be generated with the new configuration that should be put
+into `gerrit.config`, should you decide to use the new caches.
+
+An example of the output is the following:
+
+```bash
+ssh -p 29418 admin@localhost cache-chroniclemap auto-adjust-caches
+[mergeability] calculate average key/value size: 100% (849601/849601)
+[diff_summary] calculate average key/value size: 100% (410894/410894)
+[diff_intraline] calculate average key/value size: 100% (101868/101868)
+[web_sessions] calculate average key/value size: 100% (1/1)
+[conflicts] calculate average key/value size: 100% (364722/364722)
+[diff] calculate average key/value size: 100% (72613/72613)
+[accounts] calculate average key/value size: 100% (22614/22614)
+[change_kind] calculate average key/value size: 100% (838009/838009)
+[persisted_projects] calculate average key/value size: 100% (47385/47385)
+[persisted_projects] migrate content: 100% (47385/47385)
+****************************
+** Chronicle-map template **
+****************************
+
+__CONFIG__
+[cache "mergeability"]
+        avgKeySize = 76
+        avgValueSize = 5
+        maxEntries = 3398404
+        maxBloatFactor = 4
+[cache "diff_summary"]
+        avgKeySize = 96
+        avgValueSize = 241
+        maxEntries = 1643576
+        maxBloatFactor = 4
+[cache "diff_intraline"]
+        avgKeySize = 503
+        avgValueSize = 370
+        maxEntries = 407472
+        maxBloatFactor = 4
+[cache "web_sessions"]
+        avgKeySize = 41
+        avgValueSize = 166
+        maxEntries = 94852
+        maxBloatFactor = 4
+[cache "conflicts"]
+        avgKeySize = 61
+        avgValueSize = 5
+        maxEntries = 1458888
+        maxBloatFactor = 4
+[cache "diff"]
+        avgKeySize = 94
+        avgValueSize = 571
+        maxEntries = 290452
+        maxBloatFactor = 4
+[cache "accounts"]
+        avgKeySize = 26
+        avgValueSize = 90
+        maxEntries = 90456
+        maxBloatFactor = 4
+[cache "change_kind"]
+        avgKeySize = 55
+        avgValueSize = 6
+        maxEntries = 3352036
+        maxBloatFactor = 4
+[cache "persisted_projects"]
+        avgKeySize = 49
+        avgValueSize = 1770
+        maxEntries = 189536
+        maxBloatFactor = 4
+```
+
+The operation might take from seconds to minutes, depending on the size of the
+caches and it could be performed periodically to assess how the cache data
+evolves in respect to their current configuration.
+
+Running the command against gerrithub data for an overall number of entries
+of circa 3M, took ~2 mins (on a 2.6 GHz 6-Core Intel Core i7 with 16Gb of RAM).
+
+Depending on the results you might find that the newly generated caches have
+average key/value configurations that are substantially different from the
+current ones. This might be just a by-product of how the Gerrit instance is
+used, and of the different data that it generates (think about how the average
+size of your diffs might change over time, for example).
+
+You should consider replacing only those caches that have drifted away
+considerably from the actual profile of the data they store (i.e. the values
+currently in `gerrit.config` are substantially different from the output of
+the `auto-adjust-caches` command).
+
+Using the new caches requires the following steps:
+* Update the `gerrit.config` with the output produced by the command
+* replace the existing caches with the new caches.
+* restart gerrit
+
+*Note*:
+The `auto-adjust-caches` can be run online without any disruption of the Gerrit
+server. However, note that since the migration performs many sequential reads
+from the cache, it will participate in the overall load of the system, so
+you should test accordingly.
+
+In an HA environment the tuning of the cache can be done on a single node and
+then the caches can be copied over to other nodes.
+For example, in a two nodes installation (gerrit-1 and gerrit-2):
+
+- Run the `auto-adjust-caches` command on gerrit-2
+- copy the `tuned` cache files to gerrit-1
+
+For each cache `foo` you want to install/replace do:
+1. Stop `gerrit-2`
+2. replace the existing caches with the `tuned` ones.
+
+```bash
+  mv foo_1_tuned_<timestamp>.dat foo_1.dat
+```
+
+3. replace/add the `[cache "foo"]` stanza in the `gerrit.config`
+
+```
+  [cache "persisted_projects"]
+  avgKeySize = 49
+  avgValueSize = 1770
+  maxEntries = 189536
+  maxBloatFactor = 4
+```
+4. restart gerrit-2
+
+Once you have tested gerrit-2 and you are happy with the results you can perform
+steps *1.* to *4.* for `gerrit-1`.
\ No newline at end of file
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AnalyzeH2CachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AnalyzeH2CachesIT.java
index 2a3a6ac..5fce93a 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AnalyzeH2CachesIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AnalyzeH2CachesIT.java
@@ -19,6 +19,7 @@
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
 import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.Sandboxed;
 import com.google.gerrit.acceptance.TestPlugin;
 import com.google.gerrit.acceptance.UseLocalDisk;
 import com.google.gerrit.acceptance.UseSsh;
@@ -33,6 +34,8 @@
 @TestPlugin(
     name = "cache-chroniclemap",
     sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule")
+@UseLocalDisk
+@Sandboxed
 public class AnalyzeH2CachesIT extends LightweightPluginDaemonTest {
 
   @Inject private SitePaths sitePaths;
@@ -40,7 +43,6 @@
   private String cmd = Joiner.on(" ").join("cache-chroniclemap", "analyze-h2-caches");
 
   @Test
-  @UseLocalDisk
   public void shouldAnalyzeH2Cache() throws Exception {
     createChange();
 
@@ -54,7 +56,6 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldProduceWarningWhenCacheFileIsEmpty() throws Exception {
     List<String> expected =
         ImmutableList.of(
@@ -71,9 +72,7 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldIgnoreNonH2Files() throws Exception {
-
     Path cacheDirectory = sitePaths.resolve(cfg.getString("cache", null, "directory"));
     Files.write(cacheDirectory.resolve("some.dat"), "some_content".getBytes());
 
@@ -92,7 +91,6 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldFailWhenCacheDirectoryDoesNotExists() throws Exception {
     cfg.setString("cache", null, "directory", "/tmp/non_existing_directory");
 
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
new file mode 100644
index 0000000..543f4c3
--- /dev/null
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
@@ -0,0 +1,107 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.google.common.truth.Truth.assertThat;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.CONFIG_HEADER;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.TUNED_INFIX;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxBloatFactorFor;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxEntriesFor;
+
+import com.google.common.collect.ImmutableList;
+import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.Sandboxed;
+import com.google.gerrit.acceptance.TestPlugin;
+import com.google.gerrit.acceptance.UseLocalDisk;
+import com.google.gerrit.acceptance.UseSsh;
+import com.google.gerrit.server.config.SitePaths;
+import com.google.inject.Inject;
+import java.io.File;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.eclipse.jgit.errors.ConfigInvalidException;
+import org.eclipse.jgit.lib.Config;
+import org.junit.Test;
+
+@Sandboxed
+@UseLocalDisk
+@UseSsh
+@TestPlugin(
+    name = "cache-chroniclemap",
+    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule")
+public class AutoAdjustCachesIT extends LightweightPluginDaemonTest {
+  private static final String cmd = "cache-chroniclemap auto-adjust-caches";
+  private static final String GROUPS_BYUUID_PERSISTED = "groups_byuuid_persisted";
+  private static final String DIFF = "diff";
+  private static final String DIFF_SUMMARY = "diff_summary";
+  private static final String ACCOUNTS = "accounts";
+  private static final String PERSISTED_PROJECTS = "persisted_projects";
+
+  private static final ImmutableList<String> EXPECTED_CACHES =
+      ImmutableList.of(GROUPS_BYUUID_PERSISTED, DIFF, DIFF_SUMMARY, ACCOUNTS, PERSISTED_PROJECTS);
+
+  @Inject private SitePaths sitePaths;
+
+  @Override
+  public com.google.inject.Module createModule() {
+    return new ChronicleMapCacheModule();
+  }
+
+  @Test
+  public void shouldUseDefaultsWhenCachesAreNotConfigured() throws Exception {
+    createChange();
+
+    String result = adminSshSession.exec(cmd);
+
+    adminSshSession.assertSuccess();
+    Config configResult = configResult(result);
+
+    for (String cache : EXPECTED_CACHES) {
+      assertThat(configResult.getLong("cache", cache, "maxEntries", 0))
+          .isEqualTo(maxEntriesFor(cache));
+      assertThat(configResult.getLong("cache", cache, "maxBloatFactor", 0))
+          .isEqualTo(maxBloatFactorFor(cache));
+    }
+  }
+
+  @Test
+  public void shouldCreateNewCacheFiles() throws Exception {
+    createChange();
+
+    adminSshSession.exec(cmd);
+
+    adminSshSession.assertSuccess();
+    File cacheDir = sitePaths.resolve(cfg.getString("cache", null, "directory")).toFile();
+    Set<String> tunedCaches =
+        Stream.of(Objects.requireNonNull(cacheDir.listFiles()))
+            .filter(file -> !file.isDirectory())
+            .map(File::getName)
+            .filter(
+                n ->
+                    n.contains(TUNED_INFIX)
+                        && n.matches(".*(" + String.join("|", EXPECTED_CACHES) + ").*"))
+            .collect(Collectors.toSet());
+
+    assertThat(tunedCaches.size()).isEqualTo(EXPECTED_CACHES.size());
+  }
+
+  private Config configResult(String result) throws ConfigInvalidException {
+    Config configResult = new Config();
+    configResult.fromText((result.split(CONFIG_HEADER))[1]);
+    return configResult;
+  }
+}
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesInMemoryIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesInMemoryIT.java
new file mode 100644
index 0000000..5480001
--- /dev/null
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesInMemoryIT.java
@@ -0,0 +1,75 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.google.common.net.HttpHeaders.CONTENT_TYPE;
+import static com.google.common.truth.Truth.assertThat;
+import static org.apache.http.HttpHeaders.ACCEPT;
+import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
+
+import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.RestResponse;
+import com.google.gerrit.acceptance.RestSession;
+import com.google.gerrit.acceptance.TestPlugin;
+import com.google.gerrit.server.git.GitRepositoryManager;
+import com.google.inject.Inject;
+import java.io.IOException;
+import org.apache.http.message.BasicHeader;
+import org.junit.Test;
+
+@TestPlugin(
+    name = "cache-chroniclemap",
+    httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
+public class MigrateH2CachesInMemoryIT extends LightweightPluginDaemonTest {
+  private static final String MIGRATION_ENDPOINT = "/plugins/cache-chroniclemap/migrate";
+
+  @Inject protected GitRepositoryManager repoManager;
+
+  @Test
+  public void shouldReturnTextPlain() throws Exception {
+    RestResponse result = runMigration(adminRestSession);
+    assertThat(result.getHeader(CONTENT_TYPE)).contains(TEXT_PLAIN);
+  }
+
+  @Test
+  public void shouldReturnBadRequestWhenTextPlainIsNotAnAcceptedHeader() throws Exception {
+    runMigrationWithAcceptHeader(adminRestSession, "application/json").assertBadRequest();
+  }
+
+  @Test
+  public void shouldFailWhenUserHasNoAdminServerCapability() throws Exception {
+    RestResponse result = runMigration(userRestSession);
+    result.assertForbidden();
+    assertThat(result.getEntityContent())
+        .contains("administrateServer for plugin cache-chroniclemap not permitted");
+  }
+
+  @Test
+  public void shouldFailWhenCacheDirectoryIsNotDefined() throws Exception {
+    RestResponse result = runMigration(adminRestSession);
+    result.assertBadRequest();
+    assertThat(result.getEntityContent())
+        .contains("Cannot run migration, cache directory is not configured");
+  }
+
+  private RestResponse runMigration(RestSession restSession) throws IOException {
+    return runMigrationWithAcceptHeader(restSession, TEXT_PLAIN);
+  }
+
+  private RestResponse runMigrationWithAcceptHeader(RestSession restSession, String acceptHeader)
+      throws IOException {
+    return restSession.putWithHeader(MIGRATION_ENDPOINT, new BasicHeader(ACCEPT, acceptHeader));
+  }
+}
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
similarity index 89%
rename from src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java
rename to src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
index cc600b5..d2523f0 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
@@ -14,7 +14,6 @@
 
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
-import static com.google.common.net.HttpHeaders.CONTENT_TYPE;
 import static com.google.common.truth.Truth.assertThat;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2MigrationServlet.DEFAULT_MAX_BLOAT_FACTOR;
@@ -61,7 +60,8 @@
 @TestPlugin(
     name = "cache-chroniclemap",
     httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
-public class MigrateH2CachesIT extends LightweightPluginDaemonTest {
+@UseLocalDisk
+public class MigrateH2CachesLocalDiskIT extends LightweightPluginDaemonTest {
   private final Duration LOAD_CACHE_WAIT_TIMEOUT = Duration.ofSeconds(4);
   private String ACCOUNTS_CACHE_NAME = "accounts";
   private String PERSISTED_PROJECTS_CACHE_NAME = "persisted_projects";
@@ -80,38 +80,21 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldRunAndCompleteSuccessfullyWhenCacheDirectoryIsDefined() throws Exception {
     runMigration(adminRestSession).assertOK();
   }
 
   @Test
-  @UseLocalDisk
-  public void shouldReturnTexPlain() throws Exception {
-    RestResponse result = runMigration(adminRestSession);
-    assertThat(result.getHeader(CONTENT_TYPE)).contains(TEXT_PLAIN);
-  }
-
-  @Test
-  @UseLocalDisk
-  public void shouldReturnBadRequestWhenTextPlainIsNotAnAcceptedHeader() throws Exception {
-    runMigrationWithAcceptHeader(adminRestSession, "application/json").assertBadRequest();
-  }
-
-  @Test
-  @UseLocalDisk
   public void shouldReturnSuccessWhenAllTextContentsAreAccepted() throws Exception {
     runMigrationWithAcceptHeader(adminRestSession, "text/*").assertOK();
   }
 
   @Test
-  @UseLocalDisk
   public void shouldReturnSuccessWhenAllContentsAreAccepted() throws Exception {
     runMigrationWithAcceptHeader(adminRestSession, "*/*").assertOK();
   }
 
   @Test
-  @UseLocalDisk
   public void shouldOutputChronicleMapBloatedDefaultConfiguration() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
     waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);
@@ -137,7 +120,6 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldOutputChronicleMapBloatedProvidedConfiguration() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
     waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);
@@ -164,23 +146,6 @@
   }
 
   @Test
-  public void shouldFailWhenCacheDirectoryIsNotDefined() throws Exception {
-    RestResponse result = runMigration(adminRestSession);
-    result.assertBadRequest();
-    assertThat(result.getEntityContent())
-        .contains("Cannot run migration, cache directory is not configured");
-  }
-
-  @Test
-  public void shouldFailWhenUserHasNoAdminServerCapability() throws Exception {
-    RestResponse result = runMigration(userRestSession);
-    result.assertForbidden();
-    assertThat(result.getEntityContent())
-        .contains("administrateServer for plugin cache-chroniclemap not permitted");
-  }
-
-  @Test
-  @UseLocalDisk
   public void shouldMigrateAccountsCache() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
 
@@ -195,7 +160,6 @@
   }
 
   @Test
-  @UseLocalDisk
   public void shouldMigratePersistentProjects() throws Exception {
     waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);