Merge branch 'stable-3.4'

* stable-3.4:
  DRY out common functions between servlets
  DRY out listing of tuned files in AutoAdjustCachesIT
  Expose auto-adjust-cache command as Servlet
  Skip unnecessary auto-adjustments of caches
  Give more patience for cache to load in tests
  Remove empty char at EOL in docs
  Fix auto-adjust-caches example CLI in docs
  Use all-caps for static final strings in test

Change-Id: I6011d7e3c3d0e16f47be036d08e48c8d9e253585
diff --git a/BUILD b/BUILD
index 612cb3d..85222ca 100644
--- a/BUILD
+++ b/BUILD
@@ -51,6 +51,7 @@
     srcs = glob(["src/test/java/**/*IT.java"]),
     group = "server_cache",
     labels = ["server"],
+    vm_args = ["-Xmx2G"],
     deps = [
         ":cache-chroniclemap__plugin",
         ":chroniclemap-test-lib",
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
index a9f8bb7..d10ffc8 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
@@ -17,11 +17,13 @@
 
 import com.google.common.cache.Cache;
 import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.common.Nullable;
 import com.google.gerrit.extensions.registration.DynamicMap;
+import com.google.gerrit.extensions.restapi.AuthException;
 import com.google.gerrit.metrics.DisabledMetricMaker;
 import com.google.gerrit.server.config.GerritServerConfig;
 import com.google.gerrit.server.config.SitePaths;
-import com.google.gerrit.sshd.SshCommand;
+import com.google.gerrit.server.permissions.PermissionBackendException;
 import com.google.inject.Inject;
 import java.io.File;
 import java.io.IOException;
@@ -32,23 +34,19 @@
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.eclipse.jgit.lib.Config;
-import org.eclipse.jgit.lib.TextProgressMonitor;
-import org.kohsuke.args4j.Option;
+import org.eclipse.jgit.lib.NullProgressMonitor;
+import org.eclipse.jgit.lib.ProgressMonitor;
 
-public class AutoAdjustCaches extends SshCommand {
+public class AutoAdjustCaches {
   private static final FluentLogger logger = FluentLogger.forEnclosingClass();
   protected static final String CONFIG_HEADER = "__CONFIG__";
-  protected static final String TUNED_INFIX = "tuned";
+  protected static final String TUNED_INFIX = "_tuned_";
 
   private final DynamicMap<Cache<?, ?>> cacheMap;
   private final ChronicleMapCacheConfig.Factory configFactory;
   private final Path cacheDir;
   private final AdministerCachePermission adminCachePermission;
 
-  @Option(
-      name = "--dry-run",
-      aliases = {"-d"},
-      usage = "Calculate the average key and value size, but do not migrate the data.")
   private boolean dryRun;
 
   @Inject
@@ -64,98 +62,111 @@
     this.adminCachePermission = adminCachePermission;
   }
 
-  @Override
-  protected void run() throws Exception {
-    adminCachePermission.checkCurrentUserAllowed(e -> stderr.println(e.getLocalizedMessage()));
+  public boolean isDryRun() {
+    return dryRun;
+  }
+
+  public void setDryRun(boolean dryRun) {
+    this.dryRun = dryRun;
+  }
+
+  protected Config run(@Nullable ProgressMonitor optionalProgressMonitor)
+      throws AuthException, PermissionBackendException, IOException {
+    ProgressMonitor progressMonitor =
+        optionalProgressMonitor == null ? NullProgressMonitor.INSTANCE : optionalProgressMonitor;
+    adminCachePermission.checkCurrentUserAllowed(null);
 
     Config outputChronicleMapConfig = new Config();
 
     Map<String, ChronicleMapCacheImpl<Object, Object>> chronicleMapCaches = getChronicleMapCaches();
 
-    chronicleMapCaches.forEach(
-        (cacheName, currCache) -> {
-          ImmutablePair<Long, Long> avgSizes = averageSizes(cacheName, currCache.getStore());
-          if (!(avgSizes.getKey() > 0) || !(avgSizes.getValue() > 0)) {
-            logger.atWarning().log(
-                "Cache [%s] has %s entries, but average of (key: %d, value: %d). Skipping.",
-                cacheName, currCache.size(), avgSizes.getKey(), avgSizes.getValue());
-            return;
-          }
+    for (Map.Entry<String, ChronicleMapCacheImpl<Object, Object>> cache :
+        chronicleMapCaches.entrySet()) {
+      String cacheName = cache.getKey();
+      ChronicleMapCacheImpl<Object, Object> currCache = cache.getValue();
 
-          long averageKeySize = avgSizes.getKey();
-          long averageValueSize = avgSizes.getValue();
-          ChronicleMapCacheConfig newChronicleMapCacheConfig =
-              makeChronicleMapConfig(currCache.getConfig(), averageKeySize, averageValueSize);
+      {
+        ImmutablePair<Long, Long> avgSizes =
+            averageSizes(cacheName, currCache.getStore(), progressMonitor);
+        if (!(avgSizes.getKey() > 0) || !(avgSizes.getValue() > 0)) {
+          logger.atWarning().log(
+              "Cache [%s] has %s entries, but average of (key: %d, value: %d). Skipping.",
+              cacheName, currCache.size(), avgSizes.getKey(), avgSizes.getValue());
+          continue;
+        }
 
-          updateOutputConfig(
-              outputChronicleMapConfig,
-              cacheName,
-              averageKeySize,
-              averageValueSize,
-              currCache.getConfig().getMaxEntries(),
-              currCache.getConfig().getMaxBloatFactor());
+        long averageKeySize = avgSizes.getKey();
+        long averageValueSize = avgSizes.getValue();
 
-          if (!dryRun) {
-            try {
-              ChronicleMapCacheImpl<Object, Object> newCache =
-                  new ChronicleMapCacheImpl<>(
-                      currCache.getCacheDefinition(),
-                      newChronicleMapCacheConfig,
-                      null,
-                      new DisabledMetricMaker());
+        ChronicleMapCacheConfig currCacheConfig = currCache.getConfig();
 
-              TextProgressMonitor cacheMigrationProgress = new TextProgressMonitor(stdout);
-              cacheMigrationProgress.beginTask(
-                  String.format("[%s] migrate content", cacheName), (int) currCache.size());
+        if (currCacheConfig.getAverageKeySize() == averageKeySize
+            && currCacheConfig.getAverageValueSize() == averageValueSize) {
+          continue;
+        }
 
-              currCache
-                  .getStore()
-                  .forEach(
-                      (k, v) -> {
-                        try {
-                          newCache.putUnchecked(k, v);
-                          cacheMigrationProgress.update(1);
-                        } catch (Exception e) {
-                          logger.atWarning().withCause(e).log(
-                              "[%s] Could not migrate entry %s -> %s",
-                              cacheName, k.getValue(), v.getValue());
-                        }
-                      });
+        ChronicleMapCacheConfig newChronicleMapCacheConfig =
+            makeChronicleMapConfig(currCache.getConfig(), averageKeySize, averageValueSize);
 
-            } catch (IOException e) {
-              stderr.println(String.format("Could not create new cache %s", cacheName));
-            }
-          }
-        });
+        updateOutputConfig(
+            outputChronicleMapConfig,
+            cacheName,
+            averageKeySize,
+            averageValueSize,
+            currCache.getConfig().getMaxEntries(),
+            currCache.getConfig().getMaxBloatFactor());
 
-    stdout.println();
-    stdout.println("****************************");
-    stdout.println("** Chronicle-map template **");
-    stdout.println("****************************");
-    stdout.println();
-    stdout.println(CONFIG_HEADER);
-    stdout.println(outputChronicleMapConfig.toText());
+        if (!dryRun) {
+          ChronicleMapCacheImpl<Object, Object> newCache =
+              new ChronicleMapCacheImpl<>(
+                  currCache.getCacheDefinition(),
+                  newChronicleMapCacheConfig,
+                  null,
+                  new DisabledMetricMaker());
+
+          progressMonitor.beginTask(
+              String.format("[%s] migrate content", cacheName), (int) currCache.size());
+
+          currCache
+              .getStore()
+              .forEach(
+                  (k, v) -> {
+                    try {
+                      newCache.putUnchecked(k, v);
+
+                      progressMonitor.update(1);
+                    } catch (Exception e) {
+                      logger.atWarning().withCause(e).log(
+                          "[%s] Could not migrate entry %s -> %s",
+                          cacheName, k.getValue(), v.getValue());
+                    }
+                  });
+        }
+      }
+    }
+
+    return outputChronicleMapConfig;
   }
 
   private ImmutablePair<Long, Long> averageSizes(
-      String cacheName, ConcurrentMap<KeyWrapper<Object>, TimedValue<Object>> store) {
+      String cacheName,
+      ConcurrentMap<KeyWrapper<Object>, TimedValue<Object>> store,
+      ProgressMonitor progressMonitor) {
     long kAvg = 0;
     long vAvg = 0;
 
     if (store.isEmpty()) return ImmutablePair.of(kAvg, vAvg);
 
-    TextProgressMonitor progress = new TextProgressMonitor(stdout);
-
-    progress.beginTask(
+    progressMonitor.beginTask(
         String.format("[%s] calculate average key/value size", cacheName), store.size());
 
     int i = 1;
     for (Map.Entry<KeyWrapper<Object>, TimedValue<Object>> entry : store.entrySet()) {
       kAvg = kAvg + (serializedKeyLength(cacheName, entry.getKey()) - kAvg) / i;
       vAvg = vAvg + (serializedValueLength(cacheName, entry.getValue()) - vAvg) / i;
-      progress.update(1);
+      progressMonitor.update(1);
     }
-    progress.endTask();
+    progressMonitor.endTask();
     return ImmutablePair.of(kAvg, vAvg);
   }
 
@@ -186,7 +197,7 @@
   private File resolveNewFile(String currentFileName) {
     String newFileName =
         String.format(
-            "%s_%s_%s.%s",
+            "%s%s%s.%s",
             FilenameUtils.getBaseName(currentFileName),
             TUNED_INFIX,
             System.currentTimeMillis(),
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java
new file mode 100644
index 0000000..7aa240e
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java
@@ -0,0 +1,72 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.restapi.AuthException;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.gerrit.sshd.SshCommand;
+import com.google.inject.Inject;
+import java.io.IOException;
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.TextProgressMonitor;
+import org.kohsuke.args4j.Option;
+
+public class AutoAdjustCachesCommand extends SshCommand {
+  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+  protected static final String CONFIG_HEADER = "__CONFIG__";
+  protected static final String TUNED_INFIX = "_tuned_";
+
+  private final AutoAdjustCaches autoAdjustCachesEngine;
+
+  @Option(
+      name = "--dry-run",
+      aliases = {"-d"},
+      usage = "Calculate the average key and value size, but do not migrate the data.")
+  public void setDryRun(boolean dryRun) {
+    autoAdjustCachesEngine.setDryRun(dryRun);
+  }
+
+  @Inject
+  AutoAdjustCachesCommand(AutoAdjustCaches autoAdjustCachesEngine) {
+    this.autoAdjustCachesEngine = autoAdjustCachesEngine;
+  }
+
+  @Override
+  protected void run() throws Exception {
+    try {
+      Config outputChronicleMapConfig = autoAdjustCachesEngine.run(new TextProgressMonitor(stdout));
+
+      stdout.println();
+      stdout.println("**********************************");
+
+      if (outputChronicleMapConfig.getSections().isEmpty()) {
+        stdout.println("All exsting caches are already tuned: no changes needed.");
+        return;
+      }
+
+      stdout.println("** Chronicle-map config changes **");
+      stdout.println("**********************************");
+      stdout.println();
+      stdout.println(CONFIG_HEADER);
+      stdout.println(outputChronicleMapConfig.toText());
+    } catch (AuthException | PermissionBackendException e) {
+      stderr.println(e.getLocalizedMessage());
+      throw e;
+    } catch (IOException e) {
+      logger.atSevere().log("Could not create new cache", e);
+      stderr.println(String.format("Could not create new cache : %s", e.getLocalizedMessage()));
+    }
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java
new file mode 100644
index 0000000..b217b0e
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java
@@ -0,0 +1,79 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.googlesource.gerrit.modules.cache.chroniclemap.HttpServletOps.checkAcceptHeader;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.HttpServletOps.setResponse;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.restapi.AuthException;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import java.io.IOException;
+import java.util.Optional;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.eclipse.jgit.lib.Config;
+
+@Singleton
+public class AutoAdjustCachesServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+  protected static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+  // This needs to be a provider so that every doPut() call is reentrant, because
+  // each call uses a different auto-adjuster for the caches.
+  private final Provider<AutoAdjustCaches> autoAdjustCachesProvider;
+
+  @Inject
+  AutoAdjustCachesServlet(Provider<AutoAdjustCaches> autoAdjustCachesProvider) {
+    this.autoAdjustCachesProvider = autoAdjustCachesProvider;
+  }
+
+  @Override
+  protected void doPut(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
+    AutoAdjustCaches autoAdjustCachesEngine = autoAdjustCachesProvider.get();
+    if (!checkAcceptHeader(req, rsp)) {
+      return;
+    }
+
+    autoAdjustCachesEngine.setDryRun(
+        Optional.ofNullable(req.getParameter("dry-run"))
+            .or(() -> Optional.ofNullable(req.getParameter("d")))
+            .isPresent());
+
+    try {
+      Config outputChronicleMapConfig = autoAdjustCachesEngine.run(null);
+
+      if (outputChronicleMapConfig.getSections().isEmpty()) {
+        setResponse(
+            rsp,
+            HttpServletResponse.SC_NO_CONTENT,
+            "All existing caches are already tuned: no changes needed.");
+        return;
+      }
+
+      setResponse(rsp, HttpServletResponse.SC_CREATED, outputChronicleMapConfig.toText());
+    } catch (AuthException | PermissionBackendException e) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_FORBIDDEN,
+          "not permitted to administer caches : " + e.getLocalizedMessage());
+      return;
+    }
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
index ad18aa6..77d3887 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
@@ -266,8 +266,8 @@
    * Associates the specified value with the specified key. This method should be used when the
    * {@link TimedValue} and the {@link KeyWrapper} have already been constructed elsewhere rather
    * than delegate their construction to this cache ({@link #put}. This is typically the case when
-   * the key/value are extracted from another chronicle-map cache see ({@link AutoAdjustCaches} for
-   * an example.
+   * the key/value are extracted from another chronicle-map cache; see {@link
+   * AutoAdjustCachesCommand} for an example.
    *
    * @param wrappedKey The wrapper for the key object
    * @param wrappedValue the wrapper for the value object
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
index 63cf659..f35f0ee 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
@@ -17,8 +17,8 @@
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.getStats;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.jdbcUrl;
-import static org.apache.http.HttpHeaders.ACCEPT;
-import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.HttpServletOps.checkAcceptHeader;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.HttpServletOps.setResponse;
 
 import com.google.common.flogger.FluentLogger;
 import com.google.gerrit.entities.Account;
@@ -51,14 +51,12 @@
 import com.google.inject.name.Named;
 import java.io.File;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.Timestamp;
-import java.util.Arrays;
 import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -138,11 +136,7 @@
 
   @Override
   protected void doPut(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
-    if (hasInvalidAcceptHeader(req)) {
-      setResponse(
-          rsp,
-          HttpServletResponse.SC_BAD_REQUEST,
-          "No advertised 'Accept' headers can be honoured. 'text/plain' should be provided in the request 'Accept' header.");
+    if (!checkAcceptHeader(req, rsp)) {
       return;
     }
 
@@ -341,19 +335,6 @@
     return typeLiteral.getRawType().getSimpleName().equals("String");
   }
 
-  private void setResponse(HttpServletResponse httpResponse, int statusCode, String value)
-      throws IOException {
-    httpResponse.setContentType(TEXT_PLAIN);
-    httpResponse.setStatus(statusCode);
-    PrintWriter writer = httpResponse.getWriter();
-    writer.print(value);
-  }
-
-  private boolean hasInvalidAcceptHeader(HttpServletRequest req) {
-    return req.getHeader(ACCEPT) != null
-        && !Arrays.asList("text/plain", "text/*", "*/*").contains(req.getHeader(ACCEPT));
-  }
-
   private static void copyExistingCacheSettingsToConfig(
       Config outputConfig, ChronicleMapCacheConfig cacheConfig) {
     String cacheName = cacheConfig.getConfigKey();
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
index afc50ca..869b623 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
@@ -46,5 +46,6 @@
     }
 
     serve("/migrate").with(H2MigrationServlet.class);
+    serve("/auto-adjust-caches").with(AutoAdjustCachesServlet.class);
   }
 }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpServletOps.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpServletOps.java
new file mode 100644
index 0000000..dec758c
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpServletOps.java
@@ -0,0 +1,49 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static org.apache.http.HttpHeaders.ACCEPT;
+import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Arrays;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+class HttpServletOps {
+
+  static boolean checkAcceptHeader(HttpServletRequest req, HttpServletResponse rsp)
+      throws IOException {
+    if (req.getHeader(ACCEPT) != null
+        && !Arrays.asList("text/plain", "text/*", "*/*").contains(req.getHeader(ACCEPT))) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_BAD_REQUEST,
+          "No advertised 'Accept' headers can be honoured. 'text/plain' should be provided in the request 'Accept' header.");
+      return false;
+    }
+
+    return true;
+  }
+
+  static void setResponse(HttpServletResponse httpResponse, int statusCode, String value)
+      throws IOException {
+    httpResponse.setContentType(TEXT_PLAIN);
+    httpResponse.setStatus(statusCode);
+    PrintWriter writer = httpResponse.getWriter();
+    writer.print(value);
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
index 34e4954..abe1089 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
@@ -37,6 +37,6 @@
       factory(ChronicleMapCacheConfig.Factory.class);
     }
     command("analyze-h2-caches").to(AnalyzeH2Caches.class);
-    command("auto-adjust-caches").to(AutoAdjustCaches.class);
+    command("auto-adjust-caches").to(AutoAdjustCachesCommand.class);
   }
 }
diff --git a/src/main/resources/Documentation/tuning.md b/src/main/resources/Documentation/tuning.md
index 7ed7868..feb98fc 100644
--- a/src/main/resources/Documentation/tuning.md
+++ b/src/main/resources/Documentation/tuning.md
@@ -30,7 +30,7 @@
 your Gerrit server will not need downtime. As follows:
 
 * Drop `cache-chroniclemap.jar` file in the `plugins/` directory.
-* Wait for the pluginLoader to acknowledge and load the new plugin. You will 
+* Wait for the pluginLoader to acknowledge and load the new plugin. You will
 see an entry in the `error_log`:
 
 ```
@@ -129,11 +129,11 @@
 provided for those caches, such as average key size and average value size, and
 you have to rely on default values.
 
-This plugin provides an SSH command that will help you analyze the current,
-suboptimal, chronicle-map caches and migrate into new ones for which a more
-realistic configuration is generated based on data.
+This plugin provides an SSH command and a REST-API that help you analyze the
+current, suboptimal chronicle-map caches and migrate them into new ones for
+which a more realistic configuration is generated based on data.
 
-The Gerrit/SSH command to tuning the caches requires the user to have
+The tuning of the caches requires the user to have
  `Administrate Caches` or `Administrate Server` capabilities.
 
 * Symlink the `cache-chroniclemap.jar` file in the `plugins/` directory (from
@@ -148,23 +148,29 @@
 * You can now run the tuning command:
 
 ```bash
-ssh -p 29418 admin@<gerrit-server> cache-chroniclemap tune-chroniclemap-caches [--dry-run]
+ssh -p 29418 admin@<gerrit-server> cache-chroniclemap auto-adjust-caches [--dry-run]
 ```
 
-* --dry-run (Optional)
+* You can also use the REST-API:
+
+```
+PUT /plugins/cache-chroniclemap/auto-adjust-caches
+```
+
+* `--dry-run` or `-d` (SSH), `?dry-run` or `?d` (REST-API) optional parameter
 
 Calculate the average key and value size, but do not migrate current cache
 data into new files
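+
+For example, assuming a Gerrit server reachable at `gerrit.example.com:8080`
+(host, port and credentials here are placeholders) and the standard `/a/`
+authenticated REST prefix, a dry run over the REST-API could look like:
+
+```bash
+# Dry run: report the computed average key/value sizes without migrating data
+curl -X PUT --user admin:secret \
+  "http://gerrit.example.com:8080/a/plugins/cache-chroniclemap/auto-adjust-caches?dry-run"
+```
+
+Without the `?dry-run` parameter the endpoint also migrates the cache content
+and, on success, returns the suggested configuration with a `201 Created`
+status.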
 
-For each chronicle-map cache (i.e. `foo_1.dat` file) in the `cache` directory, a
-new one will be created (i.e. `foo_1_tuned_<timestamp>.dat`).
+For each chronicle-map cache that needs tuning (i.e. `foo_1.dat` file) in
+the `cache` directory, a new one will be created (i.e. `foo_1_tuned_<timestamp>.dat`).
 The new cache will have these characteristics:
 - Will have the same entries as the original cache.
 - Will be configured with the *actual* average key size and values calculated by
   looking at the content of the original cache.
 
-An output will also be generated with the new configuration that should be put
-into `gerrit.config`, should you decide to use the new caches.
+An output will also be generated with the configuration changes that should
+be included in `gerrit.config`, should you decide to use the new caches.
 
 An example of the output is the following:
 
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
index a55c02a..57b0e46 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
@@ -15,22 +15,33 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import static com.google.common.truth.Truth.assertThat;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.CONFIG_HEADER;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.TUNED_INFIX;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCachesCommand.CONFIG_HEADER;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCachesCommand.TUNED_INFIX;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxBloatFactorFor;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxEntriesFor;
 
+import com.google.common.base.Joiner;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 import com.google.common.collect.ImmutableList;
 import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.RestResponse;
 import com.google.gerrit.acceptance.Sandboxed;
 import com.google.gerrit.acceptance.TestPlugin;
 import com.google.gerrit.acceptance.UseLocalDisk;
 import com.google.gerrit.acceptance.UseSsh;
+import com.google.gerrit.acceptance.config.GerritConfig;
+import com.google.gerrit.common.Nullable;
+import com.google.gerrit.server.ModuleImpl;
+import com.google.gerrit.server.cache.CacheModule;
 import com.google.gerrit.server.config.SitePaths;
 import com.google.inject.Inject;
+import com.google.inject.name.Named;
 import java.io.File;
+import java.nio.file.Path;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.eclipse.jgit.errors.ConfigInvalidException;
@@ -42,33 +53,65 @@
 @UseSsh
 @TestPlugin(
     name = "cache-chroniclemap",
-    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule")
+    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule",
+    httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
 public class AutoAdjustCachesIT extends LightweightPluginDaemonTest {
-  private static final String cmd = "cache-chroniclemap auto-adjust-caches";
+  private static final String SSH_CMD = "cache-chroniclemap auto-adjust-caches";
+  private static final String REST_CMD = "/plugins/cache-chroniclemap/auto-adjust-caches";
   private static final String GROUPS_BYUUID_PERSISTED = "groups_byuuid_persisted";
   private static final String DIFF = "diff";
   private static final String DIFF_SUMMARY = "diff_summary";
   private static final String ACCOUNTS = "accounts";
   private static final String PERSISTED_PROJECTS = "persisted_projects";
+  private static final String TEST_CACHE_NAME = "test_cache";
+  private static final int TEST_CACHE_VERSION = 1;
+  private static final String TEST_CACHE_FILENAME_TUNED =
+      TEST_CACHE_NAME + "_" + TEST_CACHE_VERSION + AutoAdjustCaches.TUNED_INFIX;
+  private static final String TEST_CACHE_KEY_100_CHARS = new String(new char[100]);
+  private static final Function<String, Boolean> MATCH_ALL = (n) -> true;
 
   private static final ImmutableList<String> EXPECTED_CACHES =
       ImmutableList.of(GROUPS_BYUUID_PERSISTED, DIFF, DIFF_SUMMARY, ACCOUNTS, PERSISTED_PROJECTS);
 
   @Inject private SitePaths sitePaths;
 
+  @Inject
+  @Named(TEST_CACHE_NAME)
+  LoadingCache<String, String> testCache;
+
+  @ModuleImpl(name = CacheModule.PERSISTENT_MODULE)
+  public static class TestPersistentCacheModule extends CacheModule {
+
+    @Override
+    protected void configure() {
+      persist(TEST_CACHE_NAME, String.class, String.class)
+          .loader(TestCacheLoader.class)
+          .version(TEST_CACHE_VERSION);
+      install(new ChronicleMapCacheModule());
+    }
+  }
+
+  public static class TestCacheLoader extends CacheLoader<String, String> {
+
+    @Override
+    public String load(String key) throws Exception {
+      return key;
+    }
+  }
+
   @Override
   public com.google.inject.Module createModule() {
-    return new ChronicleMapCacheModule();
+    return new TestPersistentCacheModule();
   }
 
   @Test
   public void shouldUseDefaultsWhenCachesAreNotConfigured() throws Exception {
     createChange();
 
-    String result = adminSshSession.exec(cmd);
+    String result = adminSshSession.exec(SSH_CMD);
 
     adminSshSession.assertSuccess();
-    Config configResult = configResult(result);
+    Config configResult = configResult(result, CONFIG_HEADER);
 
     for (String cache : EXPECTED_CACHES) {
       assertThat(configResult.getLong("cache", cache, "maxEntries", 0))
@@ -82,32 +125,77 @@
   public void shouldCreateNewCacheFiles() throws Exception {
     createChange();
 
-    adminSshSession.exec(cmd);
+    adminSshSession.exec(SSH_CMD);
 
     adminSshSession.assertSuccess();
-    File cacheDir = sitePaths.resolve(cfg.getString("cache", null, "directory")).toFile();
     Set<String> tunedCaches =
-        Stream.of(Objects.requireNonNull(cacheDir.listFiles()))
-            .filter(file -> !file.isDirectory())
-            .map(File::getName)
-            .filter(
-                n ->
-                    n.contains(TUNED_INFIX)
-                        && n.matches(".*(" + String.join("|", EXPECTED_CACHES) + ").*"))
-            .collect(Collectors.toSet());
+        tunedFileNamesSet(n -> n.matches(".*(" + String.join("|", EXPECTED_CACHES) + ").*"));
 
     assertThat(tunedCaches.size()).isEqualTo(EXPECTED_CACHES.size());
   }
 
   @Test
-  public void shouldDenyAccessToCreateNewCacheFiles() throws Exception {
-    userSshSession.exec(cmd);
+  @GerritConfig(name = "cache.test_cache.avgKeySize", value = "207")
+  @GerritConfig(name = "cache.test_cache.avgValueSize", value = "207")
+  public void shouldNotRecreateTestCacheFileWhenAlreadyTuned() throws Exception {
+    testCache.get(TEST_CACHE_KEY_100_CHARS);
+
+    String tuneResult = adminSshSession.exec(SSH_CMD);
+    adminSshSession.assertSuccess();
+
+    assertThat(configResult(tuneResult, CONFIG_HEADER).getSubsections("cache"))
+        .doesNotContain(TEST_CACHE_NAME);
+    assertThat(Joiner.on('\n').join(tunedFileNamesSet(MATCH_ALL)))
+        .doesNotContain(TEST_CACHE_FILENAME_TUNED);
+  }
+
+  @Test
+  public void shouldCreateTestCacheTuned() throws Exception {
+    testCache.get(TEST_CACHE_KEY_100_CHARS);
+    String tuneResult = adminSshSession.exec(SSH_CMD);
+    adminSshSession.assertSuccess();
+
+    assertThat(configResult(tuneResult, CONFIG_HEADER).getSubsections("cache"))
+        .contains(TEST_CACHE_NAME);
+    assertThat(
+            Joiner.on('\n').join(tunedFileNamesSet((n) -> n.contains(TEST_CACHE_FILENAME_TUNED))))
+        .isNotEmpty();
+  }
+
+  @Test
+  public void shouldDenyAccessOverSshToCreateNewCacheFiles() throws Exception {
+    userSshSession.exec(SSH_CMD);
     userSshSession.assertFailure("not permitted");
   }
 
-  private Config configResult(String result) throws ConfigInvalidException {
+  @Test
+  public void shouldDenyAccessOverRestToCreateNewCacheFiles() throws Exception {
+    userRestSession.put(REST_CMD).assertForbidden();
+  }
+
+  @Test
+  public void shouldAllowTuningOverRestForAdmin() throws Exception {
+    RestResponse resp = adminRestSession.put(REST_CMD);
+
+    resp.assertCreated();
+
+    assertThat(configResult(resp.getEntityContent(), null).getSubsections("cache")).isNotEmpty();
+    assertThat(tunedFileNamesSet(MATCH_ALL)).isNotEmpty();
+  }
+
+  private Config configResult(String result, @Nullable String configHeader)
+      throws ConfigInvalidException {
     Config configResult = new Config();
-    configResult.fromText((result.split(CONFIG_HEADER))[1]);
+    configResult.fromText(configHeader == null ? result : result.split(configHeader)[1]);
     return configResult;
   }
+
+  private Set<String> tunedFileNamesSet(Function<String, Boolean> fileNameFilter) {
+    Path cachePath = sitePaths.resolve(cfg.getString("cache", null, "directory"));
+    return Stream.of(Objects.requireNonNull(cachePath.toFile().listFiles()))
+        .filter(file -> !file.isDirectory())
+        .map(File::getName)
+        .filter(n -> n.contains(TUNED_INFIX) && fileNameFilter.apply(n))
+        .collect(Collectors.toSet());
+  }
 }
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
index ec025ed..628f6a0 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesLocalDiskIT.java
@@ -65,7 +65,7 @@
     httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
 @UseLocalDisk
 public class MigrateH2CachesLocalDiskIT extends LightweightPluginDaemonTest {
-  private final Duration LOAD_CACHE_WAIT_TIMEOUT = Duration.ofSeconds(4);
+  private final Duration LOAD_CACHE_WAIT_TIMEOUT = Duration.ofSeconds(60);
   private String ACCOUNTS_CACHE_NAME = "accounts";
   private String PERSISTED_PROJECTS_CACHE_NAME = "persisted_projects";
   private String MIGRATION_ENDPOINT = "/plugins/cache-chroniclemap/migrate";