Use existing pre-configured cache config for H2 migration

When migrating from H2 to ChronicleMap, it may be useful
to define the target cache configuration beforehand.
That allows the Gerrit admin to correctly tune a desired
set of parameters and use them during the cutover from H2
to ChronicleMap.

Keep the default behaviour (auto-tune from H2) when the
average key and value sizes are not set, so as to allow a
smooth migration for those Gerrit admins who do not
need to get the cutover fully automated.

Change-Id: I900cc0ed4a83fde6f77c13eb69c4212251834525
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
index 39d90cb..63cf659 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
@@ -15,7 +15,6 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.appendToConfig;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.getStats;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.jdbcUrl;
 import static org.apache.http.HttpHeaders.ACCEPT;
@@ -50,6 +49,7 @@
 import com.google.inject.Singleton;
 import com.google.inject.TypeLiteral;
 import com.google.inject.name.Named;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.nio.file.Files;
@@ -180,21 +180,55 @@
     try {
       for (PersistentCacheDef<?, ?> in : persistentCacheDefs) {
         Optional<Path> h2CacheFile = getH2CacheFile(cacheDir.get(), in.name());
+        Optional<ChronicleMapCacheConfig> chronicleMapConfig;
 
         if (h2CacheFile.isPresent()) {
-          H2AggregateData stats = getStats(h2CacheFile.get());
+          if (hasFullPersistentCacheConfiguration(in)) {
+            if (sizeMultiplier != DEFAULT_SIZE_MULTIPLIER) {
+              logger.atWarning().log(
+                  "Size multiplier = %d ignored because of existing configuration found",
+                  sizeMultiplier);
+            }
+            if (maxBloatFactor != DEFAULT_MAX_BLOAT_FACTOR) {
+              logger.atWarning().log(
+                  "Max Bloat Factor = %d ignored because of existing configuration found",
+                  maxBloatFactor);
+            }
 
-          if (!stats.isEmpty()) {
+            File cacheFile =
+                ChronicleMapCacheFactory.fileName(cacheDir.get(), in.name(), in.version());
+            chronicleMapConfig =
+                Optional.of(
+                    configFactory.create(
+                        in.name(), cacheFile, in.expireAfterWrite(), in.refreshAfterWrite()));
+
+          } else {
+            if (hasPartialPersistentCacheConfiguration(in)) {
+              logger.atWarning().log(
+                  "Existing configuration for cache %s found in gerrit.config and will be ignored because it is incomplete",
+                  in.name());
+            }
+            chronicleMapConfig =
+                optionalOf(getStats(h2CacheFile.get()))
+                    .map(
+                        (stats) ->
+                            makeChronicleMapConfig(
+                                configFactory,
+                                cacheDir.get(),
+                                in,
+                                stats,
+                                sizeMultiplier,
+                                maxBloatFactor));
+          }
+
+          if (chronicleMapConfig.isPresent()) {
+            ChronicleMapCacheConfig cacheConfig = chronicleMapConfig.get();
             ChronicleMapCacheImpl<?, ?> chronicleMapCache =
-                new ChronicleMapCacheImpl<>(
-                    in,
-                    makeChronicleMapConfig(
-                        configFactory, cacheDir.get(), in, stats, sizeMultiplier, maxBloatFactor),
-                    null,
-                    new DisabledMetricMaker());
+                new ChronicleMapCacheImpl<>(in, cacheConfig, null, new DisabledMetricMaker());
+
             doMigrate(h2CacheFile.get(), in, chronicleMapCache);
             chronicleMapCache.close();
-            appendBloatedConfig(outputChronicleMapConfig, stats, maxBloatFactor, sizeMultiplier);
+            copyExistingCacheSettingsToConfig(outputChronicleMapConfig, cacheConfig);
           }
         }
       }
@@ -207,6 +241,27 @@
     setResponse(rsp, HttpServletResponse.SC_OK, outputChronicleMapConfig.toText());
   }
 
+  private Optional<H2AggregateData> optionalOf(H2AggregateData stats) {
+    if (stats.isEmpty()) {
+      return Optional.empty();
+    }
+    return Optional.of(stats);
+  }
+
+  private boolean hasFullPersistentCacheConfiguration(PersistentCacheDef<?, ?> in) {
+    return gerritConfig.getLong("cache", in.name(), "avgKeySize", 0L) > 0
+        && gerritConfig.getLong("cache", in.name(), "avgValueSize", 0L) > 0
+        && gerritConfig.getLong("cache", in.name(), "maxEntries", 0L) > 0
+        && gerritConfig.getInt("cache", in.name(), "maxBloatFactor", 0) > 0;
+  }
+
+  private boolean hasPartialPersistentCacheConfiguration(PersistentCacheDef<?, ?> in) {
+    return gerritConfig.getLong("cache", in.name(), "avgKeySize", 0L) > 0
+        || gerritConfig.getLong("cache", in.name(), "avgValueSize", 0L) > 0
+        || gerritConfig.getLong("cache", in.name(), "maxEntries", 0L) > 0
+        || gerritConfig.getInt("cache", in.name(), "maxBloatFactor", 0) > 0;
+  }
+
   protected Optional<Path> getCacheDir() throws IOException {
     String name = gerritConfig.getString("cache", null, "directory");
     if (name == null) {
@@ -232,18 +287,6 @@
     return Optional.empty();
   }
 
-  private void appendBloatedConfig(
-      Config config, H2AggregateData stats, int maxBloatFactor, int sizeMultiplier) {
-    appendToConfig(
-        config,
-        H2AggregateData.create(
-            stats.cacheName(),
-            stats.size() * sizeMultiplier,
-            stats.avgKeySize(),
-            stats.avgValueSize()));
-    config.setLong("cache", stats.cacheName(), "maxBloatFactor", maxBloatFactor);
-  }
-
   protected static ChronicleMapCacheConfig makeChronicleMapConfig(
       ChronicleMapCacheConfig.Factory configFactory,
       Path cacheDir,
@@ -310,4 +353,13 @@
     return req.getHeader(ACCEPT) != null
         && !Arrays.asList("text/plain", "text/*", "*/*").contains(req.getHeader(ACCEPT));
   }
+
+  private static void copyExistingCacheSettingsToConfig(
+      Config outputConfig, ChronicleMapCacheConfig cacheConfig) {
+    String cacheName = cacheConfig.getConfigKey();
+    outputConfig.setLong("cache", cacheName, "avgKeySize", cacheConfig.getAverageKeySize());
+    outputConfig.setLong("cache", cacheName, "avgValueSize", cacheConfig.getAverageValueSize());
+    outputConfig.setLong("cache", cacheName, "maxEntries", cacheConfig.getMaxEntries());
+    outputConfig.setLong("cache", cacheName, "maxBloatFactor", cacheConfig.getMaxBloatFactor());
+  }
 }
diff --git a/src/main/resources/Documentation/migration.md b/src/main/resources/Documentation/migration.md
index 11ed5b5..261f08f 100644
--- a/src/main/resources/Documentation/migration.md
+++ b/src/main/resources/Documentation/migration.md
@@ -14,10 +14,15 @@
 
 The migration would do the following:
 1. scan all existing cache key-value pairs
-2. calculate the parameters for the new cache
+2. calculate the parameters for the new cache, if not already defined in `gerrit.config`
 3. create the new cache
 4. read all existing key-value pairs and insert them into the new cache-chroniclemap files
 
+> **NOTE**: The existing cache configuration in `gerrit.config` is used only when all of
+> its parameters (avgKeySize, avgValueSize, maxEntries and maxBloatFactor) are defined;
+> otherwise the migration process will recalculate them and create the new cache based
+> on the recalculated values.
+
 The following caches will be migrated (if they exist and contain any data):
 
 * accounts
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
index b19c622..93b0297 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
@@ -32,6 +32,7 @@
 import com.google.gerrit.acceptance.TestPlugin;
 import com.google.gerrit.acceptance.UseLocalDisk;
 import com.google.gerrit.acceptance.WaitUtil;
+import com.google.gerrit.acceptance.config.GerritConfig;
 import com.google.gerrit.acceptance.testsuite.project.ProjectOperations;
 import com.google.gerrit.entities.CachedProjectConfig;
 import com.google.gerrit.entities.Project;
@@ -172,6 +173,53 @@
   }
 
   @Test
+  @GerritConfig(name = "cache.accounts.maxBloatFactor", value = "1")
+  @GerritConfig(name = "cache.accounts.maxEntries", value = "10")
+  @GerritConfig(name = "cache.accounts.avgKeySize", value = "100")
+  @GerritConfig(name = "cache.accounts.avgValueSize", value = "1000")
+  public void shouldKeepExistingChronicleMapConfiguration() throws Exception {
+    waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
+
+    int sizeMultiplier = 2;
+    int maxBloatFactor = 3;
+    RestResponse result = runMigration(sizeMultiplier, maxBloatFactor);
+    result.assertOK();
+
+    Config configResult = new Config();
+    String entityContent = result.getEntityContent();
+    configResult.fromText(entityContent);
+
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxBloatFactor", 0)).isEqualTo(1);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxEntries", 0)).isEqualTo(10);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "avgKeySize", 0)).isEqualTo(100);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "avgValueSize", 0))
+        .isEqualTo(1000);
+  }
+
+  @Test
+  @GerritConfig(name = "cache.accounts.maxBloatFactor", value = "1")
+  @GerritConfig(name = "cache.accounts.maxEntries", value = "10")
+  @GerritConfig(name = "cache.accounts.avgValueSize", value = "1000")
+  public void shouldIgnoreIncompleteChronicleMapConfiguration() throws Exception {
+    waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
+
+    int sizeMultiplier = 2;
+    int maxBloatFactor = 3;
+    RestResponse result = runMigration(sizeMultiplier, maxBloatFactor);
+    result.assertOK();
+
+    Config configResult = new Config();
+    String entityContent = result.getEntityContent();
+    configResult.fromText(entityContent);
+
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxBloatFactor", 0))
+        .isEqualTo(maxBloatFactor);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxEntries", 0)).isNotEqualTo(10);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "avgValueSize", 0))
+        .isNotEqualTo(1000);
+  }
+
+  @Test
   public void shouldMigrateAccountsCache() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);