Expose auto-adjust-caches command as a Servlet

Allow Gerrit admins to automate the auto-adjust-caches command
through a REST-API, which can easily be driven from configuration
tools such as Ansible.

The REST-API returns 201 when one or more tuned caches have been
created on the filesystem, and 204 when no changes are needed.
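
A possible invocation over HTTP (host and credentials are
placeholders):

  curl -X PUT --user admin:secret \
    http://<gerrit-server>/a/plugins/cache-chroniclemap/auto-adjust-caches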

Change-Id: I9756c82baac2ad367e9259ffb169fe15eba12e19
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
index c85dc43..d10ffc8 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCaches.java
@@ -17,11 +17,13 @@
 
 import com.google.common.cache.Cache;
 import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.common.Nullable;
 import com.google.gerrit.extensions.registration.DynamicMap;
+import com.google.gerrit.extensions.restapi.AuthException;
 import com.google.gerrit.metrics.DisabledMetricMaker;
 import com.google.gerrit.server.config.GerritServerConfig;
 import com.google.gerrit.server.config.SitePaths;
-import com.google.gerrit.sshd.SshCommand;
+import com.google.gerrit.server.permissions.PermissionBackendException;
 import com.google.inject.Inject;
 import java.io.File;
 import java.io.IOException;
@@ -32,10 +34,10 @@
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.eclipse.jgit.lib.Config;
-import org.eclipse.jgit.lib.TextProgressMonitor;
-import org.kohsuke.args4j.Option;
+import org.eclipse.jgit.lib.NullProgressMonitor;
+import org.eclipse.jgit.lib.ProgressMonitor;
 
-public class AutoAdjustCaches extends SshCommand {
+public class AutoAdjustCaches {
   private static final FluentLogger logger = FluentLogger.forEnclosingClass();
   protected static final String CONFIG_HEADER = "__CONFIG__";
   protected static final String TUNED_INFIX = "_tuned_";
@@ -45,10 +47,6 @@
   private final Path cacheDir;
   private final AdministerCachePermission adminCachePermission;
 
-  @Option(
-      name = "--dry-run",
-      aliases = {"-d"},
-      usage = "Calculate the average key and value size, but do not migrate the data.")
   private boolean dryRun;
 
   @Inject
@@ -64,112 +62,111 @@
     this.adminCachePermission = adminCachePermission;
   }
 
-  @Override
-  protected void run() throws Exception {
-    adminCachePermission.checkCurrentUserAllowed(e -> stderr.println(e.getLocalizedMessage()));
+  public boolean isDryRun() {
+    return dryRun;
+  }
+
+  public void setDryRun(boolean dryRun) {
+    this.dryRun = dryRun;
+  }
+
+  protected Config run(@Nullable ProgressMonitor optionalProgressMonitor)
+      throws AuthException, PermissionBackendException, IOException {
+    ProgressMonitor progressMonitor =
+        optionalProgressMonitor == null ? NullProgressMonitor.INSTANCE : optionalProgressMonitor;
+    adminCachePermission.checkCurrentUserAllowed(null);
 
     Config outputChronicleMapConfig = new Config();
 
     Map<String, ChronicleMapCacheImpl<Object, Object>> chronicleMapCaches = getChronicleMapCaches();
 
-    chronicleMapCaches.forEach(
-        (cacheName, currCache) -> {
-          ImmutablePair<Long, Long> avgSizes = averageSizes(cacheName, currCache.getStore());
-          if (!(avgSizes.getKey() > 0) || !(avgSizes.getValue() > 0)) {
-            logger.atWarning().log(
-                "Cache [%s] has %s entries, but average of (key: %d, value: %d). Skipping.",
-                cacheName, currCache.size(), avgSizes.getKey(), avgSizes.getValue());
-            return;
-          }
+    for (Map.Entry<String, ChronicleMapCacheImpl<Object, Object>> cache :
+        chronicleMapCaches.entrySet()) {
+      String cacheName = cache.getKey();
+      ChronicleMapCacheImpl<Object, Object> currCache = cache.getValue();
 
-          long averageKeySize = avgSizes.getKey();
-          long averageValueSize = avgSizes.getValue();
+      {
+        ImmutablePair<Long, Long> avgSizes =
+            averageSizes(cacheName, currCache.getStore(), progressMonitor);
+        if (!(avgSizes.getKey() > 0) || !(avgSizes.getValue() > 0)) {
+          logger.atWarning().log(
+              "Cache [%s] has %s entries, but average of (key: %d, value: %d). Skipping.",
+              cacheName, currCache.size(), avgSizes.getKey(), avgSizes.getValue());
+          continue;
+        }
 
-          ChronicleMapCacheConfig currCacheConfig = currCache.getConfig();
+        long averageKeySize = avgSizes.getKey();
+        long averageValueSize = avgSizes.getValue();
 
-          if (currCacheConfig.getAverageKeySize() == averageKeySize
-              && currCacheConfig.getAverageValueSize() == averageValueSize) {
-            return;
-          }
+        ChronicleMapCacheConfig currCacheConfig = currCache.getConfig();
 
-          ChronicleMapCacheConfig newChronicleMapCacheConfig =
-              makeChronicleMapConfig(currCache.getConfig(), averageKeySize, averageValueSize);
+        if (currCacheConfig.getAverageKeySize() == averageKeySize
+            && currCacheConfig.getAverageValueSize() == averageValueSize) {
+          continue;
+        }
 
-          updateOutputConfig(
-              outputChronicleMapConfig,
-              cacheName,
-              averageKeySize,
-              averageValueSize,
-              currCache.getConfig().getMaxEntries(),
-              currCache.getConfig().getMaxBloatFactor());
+        ChronicleMapCacheConfig newChronicleMapCacheConfig =
+            makeChronicleMapConfig(currCache.getConfig(), averageKeySize, averageValueSize);
 
-          if (!dryRun) {
-            try {
-              ChronicleMapCacheImpl<Object, Object> newCache =
-                  new ChronicleMapCacheImpl<>(
-                      currCache.getCacheDefinition(),
-                      newChronicleMapCacheConfig,
-                      null,
-                      new DisabledMetricMaker());
+        updateOutputConfig(
+            outputChronicleMapConfig,
+            cacheName,
+            averageKeySize,
+            averageValueSize,
+            currCache.getConfig().getMaxEntries(),
+            currCache.getConfig().getMaxBloatFactor());
 
-              TextProgressMonitor cacheMigrationProgress = new TextProgressMonitor(stdout);
-              cacheMigrationProgress.beginTask(
-                  String.format("[%s] migrate content", cacheName), (int) currCache.size());
+        if (!dryRun) {
+          ChronicleMapCacheImpl<Object, Object> newCache =
+              new ChronicleMapCacheImpl<>(
+                  currCache.getCacheDefinition(),
+                  newChronicleMapCacheConfig,
+                  null,
+                  new DisabledMetricMaker());
 
-              currCache
-                  .getStore()
-                  .forEach(
-                      (k, v) -> {
-                        try {
-                          newCache.putUnchecked(k, v);
-                          cacheMigrationProgress.update(1);
-                        } catch (Exception e) {
-                          logger.atWarning().withCause(e).log(
-                              "[%s] Could not migrate entry %s -> %s",
-                              cacheName, k.getValue(), v.getValue());
-                        }
-                      });
+          progressMonitor.beginTask(
+              String.format("[%s] migrate content", cacheName), (int) currCache.size());
 
-            } catch (IOException e) {
-              stderr.println(String.format("Could not create new cache %s", cacheName));
-            }
-          }
-        });
+          currCache
+              .getStore()
+              .forEach(
+                  (k, v) -> {
+                    try {
+                      newCache.putUnchecked(k, v);
 
-    stdout.println();
-    stdout.println("**********************************");
-
-    if (outputChronicleMapConfig.getSections().isEmpty()) {
-      stdout.println("All exsting caches are already tuned: no changes needed.");
-      return;
+                      progressMonitor.update(1);
+                    } catch (Exception e) {
+                      logger.atWarning().withCause(e).log(
+                          "[%s] Could not migrate entry %s -> %s",
+                          cacheName, k.getValue(), v.getValue());
+                    }
+                  });
+        }
+      }
     }
 
-    stdout.println("** Chronicle-map config changes **");
-    stdout.println("**********************************");
-    stdout.println();
-    stdout.println(CONFIG_HEADER);
-    stdout.println(outputChronicleMapConfig.toText());
+    return outputChronicleMapConfig;
   }
 
   private ImmutablePair<Long, Long> averageSizes(
-      String cacheName, ConcurrentMap<KeyWrapper<Object>, TimedValue<Object>> store) {
+      String cacheName,
+      ConcurrentMap<KeyWrapper<Object>, TimedValue<Object>> store,
+      ProgressMonitor progressMonitor) {
     long kAvg = 0;
     long vAvg = 0;
 
     if (store.isEmpty()) return ImmutablePair.of(kAvg, vAvg);
 
-    TextProgressMonitor progress = new TextProgressMonitor(stdout);
-
-    progress.beginTask(
+    progressMonitor.beginTask(
         String.format("[%s] calculate average key/value size", cacheName), store.size());
 
     int i = 1;
     for (Map.Entry<KeyWrapper<Object>, TimedValue<Object>> entry : store.entrySet()) {
       kAvg = kAvg + (serializedKeyLength(cacheName, entry.getKey()) - kAvg) / i;
       vAvg = vAvg + (serializedValueLength(cacheName, entry.getValue()) - vAvg) / i;
-      progress.update(1);
+      progressMonitor.update(1);
     }
-    progress.endTask();
+    progressMonitor.endTask();
     return ImmutablePair.of(kAvg, vAvg);
   }
 
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java
new file mode 100644
index 0000000..7aa240e
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesCommand.java
@@ -0,0 +1,72 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.restapi.AuthException;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.gerrit.sshd.SshCommand;
+import com.google.inject.Inject;
+import java.io.IOException;
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.TextProgressMonitor;
+import org.kohsuke.args4j.Option;
+
+public class AutoAdjustCachesCommand extends SshCommand {
+  private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+  protected static final String CONFIG_HEADER = "__CONFIG__";
+  protected static final String TUNED_INFIX = "_tuned_";
+
+  private final AutoAdjustCaches autoAdjustCachesEngine;
+
+  @Option(
+      name = "--dry-run",
+      aliases = {"-d"},
+      usage = "Calculate the average key and value size, but do not migrate the data.")
+  public void setDryRun(boolean dryRun) {
+    autoAdjustCachesEngine.setDryRun(dryRun);
+  }
+
+  @Inject
+  AutoAdjustCachesCommand(AutoAdjustCaches autoAdjustCachesEngine) {
+    this.autoAdjustCachesEngine = autoAdjustCachesEngine;
+  }
+
+  @Override
+  protected void run() throws Exception {
+    try {
+      Config outputChronicleMapConfig = autoAdjustCachesEngine.run(new TextProgressMonitor(stdout));
+
+      stdout.println();
+      stdout.println("**********************************");
+
+      if (outputChronicleMapConfig.getSections().isEmpty()) {
+        stdout.println("All exsting caches are already tuned: no changes needed.");
+        return;
+      }
+
+      stdout.println("** Chronicle-map config changes **");
+      stdout.println("**********************************");
+      stdout.println();
+      stdout.println(CONFIG_HEADER);
+      stdout.println(outputChronicleMapConfig.toText());
+    } catch (AuthException | PermissionBackendException e) {
+      stderr.println(e.getLocalizedMessage());
+      throw e;
+    } catch (IOException e) {
+      logger.atSevere().withCause(e).log("Could not create new cache");
+      stderr.println(String.format("Could not create new cache: %s", e.getLocalizedMessage()));
+    }
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java
new file mode 100644
index 0000000..47f0a4d
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesServlet.java
@@ -0,0 +1,98 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static org.apache.http.HttpHeaders.ACCEPT;
+import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.extensions.restapi.AuthException;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Arrays;
+import java.util.Optional;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.eclipse.jgit.lib.Config;
+
+@Singleton
+public class AutoAdjustCachesServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+  protected static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+  // This needs to be a provider so that every doPut() call is reentrant, because
+  // each invocation uses a different auto-adjuster for the caches.
+  private final Provider<AutoAdjustCaches> autoAdjustCachesProvider;
+
+  @Inject
+  AutoAdjustCachesServlet(Provider<AutoAdjustCaches> autoAdjustCachesProvider) {
+    this.autoAdjustCachesProvider = autoAdjustCachesProvider;
+  }
+
+  @Override
+  protected void doPut(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
+    AutoAdjustCaches autoAdjustCachesEngine = autoAdjustCachesProvider.get();
+    if (hasInvalidAcceptHeader(req)) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_BAD_REQUEST,
+          "No advertised 'Accept' headers can be honoured. 'text/plain' should be provided in the request 'Accept' header.");
+      return;
+    }
+
+    autoAdjustCachesEngine.setDryRun(
+        Optional.ofNullable(req.getParameter("dry-run"))
+            .or(() -> Optional.ofNullable(req.getParameter("d")))
+            .isPresent());
+
+    try {
+      Config outputChronicleMapConfig = autoAdjustCachesEngine.run(null);
+
+      if (outputChronicleMapConfig.getSections().isEmpty()) {
+        setResponse(
+            rsp,
+            HttpServletResponse.SC_NO_CONTENT,
+            "All existing caches are already tuned: no changes needed.");
+        return;
+      }
+
+      setResponse(rsp, HttpServletResponse.SC_CREATED, outputChronicleMapConfig.toText());
+    } catch (AuthException | PermissionBackendException e) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_FORBIDDEN,
+          "not permitted to administer caches : " + e.getLocalizedMessage());
+      return;
+    }
+  }
+
+  private static void setResponse(HttpServletResponse httpResponse, int statusCode, String value)
+      throws IOException {
+    httpResponse.setContentType(TEXT_PLAIN);
+    httpResponse.setStatus(statusCode);
+    PrintWriter writer = httpResponse.getWriter();
+    writer.print(value);
+  }
+
+  private static boolean hasInvalidAcceptHeader(HttpServletRequest req) {
+    return req.getHeader(ACCEPT) != null
+        && !Arrays.asList("text/plain", "text/*", "*/*").contains(req.getHeader(ACCEPT));
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
index ad18aa6..77d3887 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/ChronicleMapCacheImpl.java
@@ -266,8 +266,8 @@
    * Associates the specified value with the specified key. This method should be used when the
    * {@link TimedValue} and the {@link KeyWrapper} have already been constructed elsewhere rather
    * than delegate their construction to this cache ({@link #put}. This is typically the case when
-   * the key/value are extracted from another chronicle-map cache see ({@link AutoAdjustCaches} for
-   * an example.
+   * the key/value are extracted from another chronicle-map cache see ({@link
+   * AutoAdjustCachesCommand} for an example.
    *
    * @param wrappedKey The wrapper for the key object
    * @param wrappedValue the wrapper for the value object
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
index afc50ca..869b623 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
@@ -46,5 +46,6 @@
     }
 
     serve("/migrate").with(H2MigrationServlet.class);
+    serve("/auto-adjust-caches").with(AutoAdjustCachesServlet.class);
   }
 }
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
index 34e4954..abe1089 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
@@ -37,6 +37,6 @@
       factory(ChronicleMapCacheConfig.Factory.class);
     }
     command("analyze-h2-caches").to(AnalyzeH2Caches.class);
-    command("auto-adjust-caches").to(AutoAdjustCaches.class);
+    command("auto-adjust-caches").to(AutoAdjustCachesCommand.class);
   }
 }
diff --git a/src/main/resources/Documentation/tuning.md b/src/main/resources/Documentation/tuning.md
index 18f7eca..feb98fc 100644
--- a/src/main/resources/Documentation/tuning.md
+++ b/src/main/resources/Documentation/tuning.md
@@ -129,11 +129,11 @@
 provided for those caches, such as average key size and average value size, and
 you have to rely on default values.
 
-This plugin provides an SSH command that will help you analyze the current,
-suboptimal, chronicle-map caches and migrate into new ones for which a more
-realistic configuration is generated based on data.
+This plugin provides an SSH command and a REST-API that help you analyze the
+current, suboptimal chronicle-map caches and migrate them into new ones, for
+which a more realistic configuration is generated from the existing data.
 
-The Gerrit/SSH command to tuning the caches requires the user to have
+Tuning the caches requires the user to have
  `Administrate Caches` or `Administrate Server` capabilities.
 
 * Symlink the `cache-chroniclemap.jar` file in the `plugins/` directory (from
@@ -151,7 +151,13 @@
 ssh -p 29418 admin@<gerrit-server> cache-chroniclemap auto-adjust-caches [--dry-run]
 ```
 
-* --dry-run (Optional)
+* You can also use the REST-API:
+
+```
+PUT /plugins/cache-chroniclemap/auto-adjust-caches
+```
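+
+For example, with `curl` (credentials are placeholders; the `/a/` prefix is
+the standard authenticated REST entry point):
+
+```
+curl -X PUT --user admin:secret \
+  "http://<gerrit-server>/a/plugins/cache-chroniclemap/auto-adjust-caches"
+```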
+
+* `--dry-run` or `-d` (SSH), `?dry-run` or `?d` (REST-API): optional parameter
 
 Calculate the average key and value size, but do not migrate current cache
 data into new files
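+
+For example, a dry run over the REST-API (same placeholder credentials as
+above):
+
+```
+curl -X PUT --user admin:secret \
+  "http://<gerrit-server>/a/plugins/cache-chroniclemap/auto-adjust-caches?dry-run"
+```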
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
index 7d63239..bb63a4a 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/AutoAdjustCachesIT.java
@@ -15,8 +15,8 @@
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
 import static com.google.common.truth.Truth.assertThat;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.CONFIG_HEADER;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCaches.TUNED_INFIX;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCachesCommand.CONFIG_HEADER;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.AutoAdjustCachesCommand.TUNED_INFIX;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxBloatFactorFor;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.ChronicleMapCacheConfig.Defaults.maxEntriesFor;
 
@@ -25,11 +25,13 @@
 import com.google.common.cache.LoadingCache;
 import com.google.common.collect.ImmutableList;
 import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
+import com.google.gerrit.acceptance.RestResponse;
 import com.google.gerrit.acceptance.Sandboxed;
 import com.google.gerrit.acceptance.TestPlugin;
 import com.google.gerrit.acceptance.UseLocalDisk;
 import com.google.gerrit.acceptance.UseSsh;
 import com.google.gerrit.acceptance.config.GerritConfig;
+import com.google.gerrit.common.Nullable;
 import com.google.gerrit.server.ModuleImpl;
 import com.google.gerrit.server.cache.CacheModule;
 import com.google.gerrit.server.config.SitePaths;
@@ -51,9 +53,11 @@
 @UseSsh
 @TestPlugin(
     name = "cache-chroniclemap",
-    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule")
+    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule",
+    httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
 public class AutoAdjustCachesIT extends LightweightPluginDaemonTest {
-  private static final String CMD = "cache-chroniclemap auto-adjust-caches";
+  private static final String SSH_CMD = "cache-chroniclemap auto-adjust-caches";
+  private static final String REST_CMD = "/plugins/cache-chroniclemap/auto-adjust-caches";
   private static final String MERGEABILITY = "mergeability";
   private static final String DIFF = "diff";
   private static final String DIFF_SUMMARY = "diff_summary";
@@ -103,10 +107,10 @@
   public void shouldUseDefaultsWhenCachesAreNotConfigured() throws Exception {
     createChange();
 
-    String result = adminSshSession.exec(CMD);
+    String result = adminSshSession.exec(SSH_CMD);
 
     adminSshSession.assertSuccess();
-    Config configResult = configResult(result);
+    Config configResult = configResult(result, CONFIG_HEADER);
 
     for (String cache : EXPECTED_CACHES) {
       assertThat(configResult.getLong("cache", cache, "maxEntries", 0))
@@ -120,7 +124,7 @@
   public void shouldCreateNewCacheFiles() throws Exception {
     createChange();
 
-    adminSshSession.exec(CMD);
+    adminSshSession.exec(SSH_CMD);
 
     adminSshSession.assertSuccess();
     File cacheDir = sitePaths.resolve(cfg.getString("cache", null, "directory")).toFile();
@@ -143,10 +147,11 @@
   public void shouldNotRecreateTestCacheFileWhenAlreadyTuned() throws Exception {
     testCache.get(TEST_CACHE_KEY_100_CHARS);
 
-    String tuneResult = adminSshSession.exec(CMD);
+    String tuneResult = adminSshSession.exec(SSH_CMD);
     adminSshSession.assertSuccess();
 
-    assertThat(configResult(tuneResult).getSubsections("cache")).doesNotContain(TEST_CACHE_NAME);
+    assertThat(configResult(tuneResult, CONFIG_HEADER).getSubsections("cache"))
+        .doesNotContain(TEST_CACHE_NAME);
     assertThat(Joiner.on('\n').join(listTunedFileNames()))
         .doesNotContain(TEST_CACHE_FILENAME_TUNED);
   }
@@ -154,23 +159,39 @@
   @Test
   public void shouldCreateTestCacheTuned() throws Exception {
     testCache.get(TEST_CACHE_KEY_100_CHARS);
-
-    String tuneResult = adminSshSession.exec(CMD);
+    String tuneResult = adminSshSession.exec(SSH_CMD);
     adminSshSession.assertSuccess();
 
-    assertThat(configResult(tuneResult).getSubsections("cache")).contains(TEST_CACHE_NAME);
+    assertThat(configResult(tuneResult, CONFIG_HEADER).getSubsections("cache"))
+        .contains(TEST_CACHE_NAME);
     assertThat(Joiner.on('\n').join(listTunedFileNames())).contains(TEST_CACHE_FILENAME_TUNED);
   }
 
   @Test
-  public void shouldDenyAccessToCreateNewCacheFiles() throws Exception {
-    userSshSession.exec(CMD);
+  public void shouldDenyAccessOverSshToCreateNewCacheFiles() throws Exception {
+    userSshSession.exec(SSH_CMD);
     userSshSession.assertFailure("not permitted");
   }
 
-  private Config configResult(String result) throws ConfigInvalidException {
+  @Test
+  public void shouldDenyAccessOverRestToCreateNewCacheFiles() throws Exception {
+    userRestSession.put(REST_CMD).assertForbidden();
+  }
+
+  @Test
+  public void shouldAllowTuningOverRestForAdmin() throws Exception {
+    RestResponse resp = adminRestSession.put(REST_CMD);
+
+    resp.assertCreated();
+
+    assertThat(configResult(resp.getEntityContent(), null).getSubsections("cache")).isNotEmpty();
+    assertThat(listTunedFileNames()).isNotEmpty();
+  }
+
+  private Config configResult(String result, @Nullable String configHeader)
+      throws ConfigInvalidException {
     Config configResult = new Config();
-    configResult.fromText((result.split(CONFIG_HEADER))[1]);
+    configResult.fromText(configHeader == null ? result : result.split(configHeader)[1]);
     return configResult;
   }