Move migration command to HTTP

Some caches are bound only in the HTTP injector, so in order to migrate
all persistent caches, the migration command needs to run over HTTP,
where it has visibility of all of them.

Crucially, this also makes it possible to migrate web_sessions, which
is only available over HTTP.
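
Once the plugin is installed, the migration can be triggered with a
single PUT request, for example:

  curl -XPUT -u <admin> '<gerrit>/a/plugins/cache-chroniclemap/migrate'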

Bug: Issue 14086
Change-Id: I92942cc35b60a37921a3704176a65bace8d25018
diff --git a/BUILD b/BUILD
index 22dd6f0..619d3c7 100644
--- a/BUILD
+++ b/BUILD
@@ -12,11 +12,13 @@
     srcs = glob(["src/main/java/**/*.java"]),
     manifest_entries = [
         "Gerrit-SshModule: com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule",
+        "Gerrit-HttpModule: com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule",
     ],
     resources = glob(["src/main/resources/**/*"]),
     deps = [
         "//lib:h2",
         "//lib/commons:io",
+        "//proto:cache_java_proto",
         "@chronicle-algo//jar",
         "@chronicle-bytes//jar",
         "@chronicle-core//jar",
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
new file mode 100644
index 0000000..3dc7ef8
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/H2MigrationServlet.java
@@ -0,0 +1,321 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.appendToConfig;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.getStats;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.jdbcUrl;
+import static org.apache.http.HttpHeaders.ACCEPT;
+import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.entities.Account;
+import com.google.gerrit.entities.CachedProjectConfig;
+import com.google.gerrit.extensions.auth.oauth.OAuthToken;
+import com.google.gerrit.extensions.client.ChangeKind;
+import com.google.gerrit.extensions.restapi.AuthException;
+import com.google.gerrit.extensions.restapi.RestApiException;
+import com.google.gerrit.httpd.WebSessionManager;
+import com.google.gerrit.metrics.DisabledMetricMaker;
+import com.google.gerrit.server.account.CachedAccountDetails;
+import com.google.gerrit.server.cache.PersistentCacheDef;
+import com.google.gerrit.server.cache.proto.Cache;
+import com.google.gerrit.server.change.ChangeKindCacheImpl;
+import com.google.gerrit.server.change.MergeabilityCacheImpl;
+import com.google.gerrit.server.config.GerritServerConfig;
+import com.google.gerrit.server.config.SitePaths;
+import com.google.gerrit.server.git.TagSetHolder;
+import com.google.gerrit.server.notedb.ChangeNotesCache;
+import com.google.gerrit.server.notedb.ChangeNotesState;
+import com.google.gerrit.server.patch.DiffSummary;
+import com.google.gerrit.server.patch.DiffSummaryKey;
+import com.google.gerrit.server.patch.IntraLineDiff;
+import com.google.gerrit.server.patch.IntraLineDiffKey;
+import com.google.gerrit.server.patch.PatchList;
+import com.google.gerrit.server.patch.PatchListKey;
+import com.google.gerrit.server.permissions.GlobalPermission;
+import com.google.gerrit.server.permissions.PermissionBackend;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.gerrit.server.query.change.ConflictKey;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import com.google.inject.TypeLiteral;
+import com.google.inject.name.Named;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.eclipse.jgit.lib.Config;
+import org.h2.Driver;
+
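+/**
+ * HTTP endpoint migrating all known persistent H2 caches to chronicle-map.
+ *
+ * <p>Served as {@code PUT /plugins/cache-chroniclemap/migrate} and restricted
+ * to users with the Administrate Server capability. On success, the plain-text
+ * response body contains the configuration to add to {@code etc/gerrit.config}.
+ */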
+@Singleton
+public class H2MigrationServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+  protected static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+  private final ChronicleMapCacheConfig.Factory configFactory;
+  private final SitePaths site;
+  private final Config gerritConfig;
+  private final PermissionBackend permissionBackend;
+
+  public static final int DEFAULT_SIZE_MULTIPLIER = 3;
+  public static final int DEFAULT_MAX_BLOAT_FACTOR = 3;
+
+  public static final String MAX_BLOAT_FACTOR_PARAM = "max-bloat-factor";
+  public static final String SIZE_MULTIPLIER_PARAM = "size-multiplier";
+
+  private final Set<PersistentCacheDef<?, ?>> persistentCacheDefs;
+
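+  // Cache definitions are injected explicitly by name: only the HTTP
+  // injector has visibility of all persistent caches, notably web_sessions.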
+  @Inject
+  H2MigrationServlet(
+      @GerritServerConfig Config cfg,
+      SitePaths site,
+      ChronicleMapCacheConfig.Factory configFactory,
+      PermissionBackend permissionBackend,
+      @Named("web_sessions") PersistentCacheDef<String, WebSessionManager.Val> webSessionsCacheDef,
+      @Named("accounts")
+          PersistentCacheDef<CachedAccountDetails.Key, CachedAccountDetails> accountsCacheDef,
+      @Named("oauth_tokens") PersistentCacheDef<Account.Id, OAuthToken> oauthTokenDef,
+      @Named("change_kind")
+          PersistentCacheDef<ChangeKindCacheImpl.Key, ChangeKind> changeKindCacheDef,
+      @Named("mergeability")
+          PersistentCacheDef<MergeabilityCacheImpl.EntryKey, Boolean> mergeabilityCacheDef,
+      @Named("pure_revert")
+          PersistentCacheDef<Cache.PureRevertKeyProto, Boolean> pureRevertCacheDef,
+      @Named("git_tags") PersistentCacheDef<String, TagSetHolder> gitTagsCacheDef,
+      @Named("change_notes")
+          PersistentCacheDef<ChangeNotesCache.Key, ChangeNotesState> changeNotesCacheDef,
+      @Named("diff") PersistentCacheDef<PatchListKey, PatchList> diffCacheDef,
+      @Named("diff_intraline")
+          PersistentCacheDef<IntraLineDiffKey, IntraLineDiff> diffIntraLineCacheDef,
+      @Named("diff_summary") PersistentCacheDef<DiffSummaryKey, DiffSummary> diffSummaryCacheDef,
+      @Named("persisted_projects")
+          PersistentCacheDef<Cache.ProjectCacheKeyProto, CachedProjectConfig>
+              persistedProjectsCacheDef,
+      @Named("conflicts") PersistentCacheDef<ConflictKey, Boolean> conflictsCacheDef) {
+    this.configFactory = configFactory;
+    this.site = site;
+    this.gerritConfig = cfg;
+    this.permissionBackend = permissionBackend;
+    this.persistentCacheDefs =
+        Stream.of(
+                webSessionsCacheDef,
+                accountsCacheDef,
+                oauthTokenDef,
+                changeKindCacheDef,
+                mergeabilityCacheDef,
+                pureRevertCacheDef,
+                gitTagsCacheDef,
+                changeNotesCacheDef,
+                diffCacheDef,
+                diffIntraLineCacheDef,
+                diffSummaryCacheDef,
+                persistedProjectsCacheDef,
+                conflictsCacheDef)
+            .collect(Collectors.toSet());
+  }
+
+  @Override
+  protected void doPut(HttpServletRequest req, HttpServletResponse rsp) throws IOException {
+    if (hasInvalidAcceptHeader(req)) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_BAD_REQUEST,
+          "None of the advertised 'Accept' headers can be honoured. 'text/plain' should be provided in the request 'Accept' header.");
+      return;
+    }
+
+    try {
+      permissionBackend.currentUser().check(GlobalPermission.ADMINISTRATE_SERVER);
+    } catch (AuthException | PermissionBackendException e) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_FORBIDDEN,
+          "administrateServer for plugin cache-chroniclemap not permitted");
+      return;
+    }
+    Optional<Path> cacheDir = getCacheDir();
+
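+    // Optional query parameters tuning the chronicle-map sizing; both default
+    // to a factor of 3 when not provided.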
+    int maxBloatFactor =
+        Optional.ofNullable(req.getParameter(MAX_BLOAT_FACTOR_PARAM))
+            .map(Integer::parseInt)
+            .orElse(DEFAULT_MAX_BLOAT_FACTOR);
+
+    int sizeMultiplier =
+        Optional.ofNullable(req.getParameter(SIZE_MULTIPLIER_PARAM))
+            .map(Integer::parseInt)
+            .orElse(DEFAULT_SIZE_MULTIPLIER);
+
+    if (!cacheDir.isPresent()) {
+      setResponse(
+          rsp,
+          HttpServletResponse.SC_BAD_REQUEST,
+          "Cannot run migration, cache directory is not configured");
+      return;
+    }
+
+    logger.atInfo().log("Migrating H2 caches to Chronicle-Map...");
+    logger.atInfo().log("* Size multiplier: %d", sizeMultiplier);
+    logger.atInfo().log("* Max Bloat Factor: %d", maxBloatFactor);
+
+    Config outputChronicleMapConfig = new Config();
+
+    try {
+      for (PersistentCacheDef<?, ?> in : persistentCacheDefs) {
+        Optional<Path> h2CacheFile = getH2CacheFile(cacheDir.get(), in.name());
+
+        if (h2CacheFile.isPresent()) {
+          H2AggregateData stats = getStats(h2CacheFile.get());
+
+          if (!stats.isEmpty()) {
+            ChronicleMapCacheImpl<?, ?> chronicleMapCache =
+                new ChronicleMapCacheImpl<>(
+                    in,
+                    makeChronicleMapConfig(
+                        configFactory, cacheDir.get(), in, stats, sizeMultiplier, maxBloatFactor),
+                    null,
+                    new DisabledMetricMaker());
+            doMigrate(h2CacheFile.get(), in, chronicleMapCache);
+            chronicleMapCache.close();
+            appendBloatedConfig(outputChronicleMapConfig, stats, maxBloatFactor, sizeMultiplier);
+          }
+        }
+      }
+    } catch (Exception e) {
+      logger.atSevere().withCause(e).log("H2 to chronicle-map migration failed");
+      setResponse(rsp, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e.getMessage());
+      return;
+    }
+
+    logger.atInfo().log("Migration completed");
+    setResponse(rsp, HttpServletResponse.SC_OK, outputChronicleMapConfig.toText());
+  }
+
+  protected Optional<Path> getCacheDir() throws IOException {
+    String name = gerritConfig.getString("cache", null, "directory");
+    if (name == null) {
+      return Optional.empty();
+    }
+    Path loc = site.resolve(name);
+    if (!Files.exists(loc)) {
+      throw new IOException(
+          String.format("disk cache is configured but doesn't exist: %s", loc.toAbsolutePath()));
+    }
+    if (!Files.isReadable(loc)) {
+      throw new IOException(String.format("Can't read from disk cache: %s", loc.toAbsolutePath()));
+    }
+    logger.atFine().log("Enabling disk cache %s", loc.toAbsolutePath());
+    return Optional.of(loc);
+  }
+
+  private Optional<Path> getH2CacheFile(Path cacheDir, String name) {
+    Path h2CacheFile = cacheDir.resolve(String.format("%s.%s", name, H2_SUFFIX));
+    if (Files.exists(h2CacheFile)) {
+      return Optional.of(h2CacheFile);
+    }
+    return Optional.empty();
+  }
+
+  private void appendBloatedConfig(
+      Config config, H2AggregateData stats, int maxBloatFactor, int sizeMultiplier) {
+    appendToConfig(
+        config,
+        H2AggregateData.create(
+            stats.cacheName(),
+            stats.size() * sizeMultiplier,
+            stats.avgKeySize(),
+            stats.avgValueSize()));
+    config.setLong("cache", stats.cacheName(), "maxBloatFactor", maxBloatFactor);
+  }
+
+  protected static ChronicleMapCacheConfig makeChronicleMapConfig(
+      ChronicleMapCacheConfig.Factory configFactory,
+      Path cacheDir,
+      PersistentCacheDef<?, ?> in,
+      H2AggregateData stats,
+      int sizeMultiplier,
+      int maxBloatFactor) {
+    return configFactory.createWithValues(
+        in.configKey(),
+        ChronicleMapCacheFactory.fileName(cacheDir, in.name(), in.version()),
+        in.expireAfterWrite(),
+        in.refreshAfterWrite(),
+        stats.size() * sizeMultiplier,
+        stats.avgKeySize(),
+        stats.avgValueSize(),
+        maxBloatFactor);
+  }
+
+  private void doMigrate(
+      Path h2File, PersistentCacheDef<?, ?> in, ChronicleMapCacheImpl<?, ?> chronicleMapCache)
+      throws RestApiException {
+
+    String url = jdbcUrl(h2File);
+    try (Connection conn = Driver.load().connect(url, null)) {
+      PreparedStatement preparedStatement =
+          conn.prepareStatement("SELECT k, v, created FROM data WHERE version=?");
+      preparedStatement.setInt(1, in.version());
+
+      try (ResultSet r = preparedStatement.executeQuery()) {
+        while (r.next()) {
+          Object key =
+              isStringType(in.keyType())
+                  ? r.getString(1)
+                  : in.keySerializer().deserialize(r.getBytes(1));
+          Object value =
+              isStringType(in.valueType())
+                  ? r.getString(2)
+                  : in.valueSerializer().deserialize(r.getBytes(2));
+          Timestamp created = r.getTimestamp(3);
+          chronicleMapCache.putUnchecked(key, value, created);
+        }
+      }
+
+    } catch (Exception e) {
+      String message = String.format("FATAL: error migrating %s H2 cache", in.name());
+      logger.atSevere().withCause(e).log(message);
+      throw RestApiException.wrap(message, e);
+    }
+  }
+
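+  // H2 stores String keys and values as text, so they are read with
+  // getString() rather than deserialized from raw bytes.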
+  private boolean isStringType(TypeLiteral<?> typeLiteral) {
+    return typeLiteral.getRawType() == String.class;
+  }
+
+  private void setResponse(HttpServletResponse httpResponse, int statusCode, String value)
+      throws IOException {
+    httpResponse.setContentType(TEXT_PLAIN);
+    httpResponse.setStatus(statusCode);
+    PrintWriter writer = httpResponse.getWriter();
+    writer.print(value);
+  }
+
+  private boolean hasInvalidAcceptHeader(HttpServletRequest req) {
+    return req.getHeader(ACCEPT) != null
+        && !Arrays.asList("text/plain", "text/*", "*/*").contains(req.getHeader(ACCEPT));
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
new file mode 100644
index 0000000..28956ff
--- /dev/null
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/HttpModule.java
@@ -0,0 +1,34 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.googlesource.gerrit.modules.cache.chroniclemap;
+
+import com.google.gerrit.extensions.config.FactoryModule;
+import com.google.inject.servlet.ServletModule;
+
+public class HttpModule extends ServletModule {
+
+  @Override
+  protected void configureServlets() {
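+    // Bind the ChronicleMapCacheConfig factory in this (HTTP) injector, so
+    // that H2MigrationServlet can be instantiated here.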
+    install(
+        new FactoryModule() {
+          @Override
+          protected void configure() {
+            factory(ChronicleMapCacheConfig.Factory.class);
+          }
+        });
+
+    serve("/migrate").with(H2MigrationServlet.class);
+  }
+}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2Caches.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2Caches.java
deleted file mode 100644
index f3827e1..0000000
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2Caches.java
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright (C) 2021 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package com.googlesource.gerrit.modules.cache.chroniclemap;
-
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.appendToConfig;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.getCacheDir;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.getStats;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.jdbcUrl;
-
-import com.google.common.flogger.FluentLogger;
-import com.google.gerrit.common.data.GlobalCapability;
-import com.google.gerrit.extensions.annotations.RequiresCapability;
-import com.google.gerrit.metrics.DisabledMetricMaker;
-import com.google.gerrit.server.cache.PersistentCacheDef;
-import com.google.gerrit.server.cache.serialize.CacheSerializer;
-import com.google.gerrit.server.cache.serialize.StringCacheSerializer;
-import com.google.gerrit.server.config.GerritServerConfig;
-import com.google.gerrit.server.config.SitePaths;
-import com.google.gerrit.sshd.CommandMetaData;
-import com.google.gerrit.sshd.SshCommand;
-import com.google.inject.Binding;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Key;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.eclipse.jgit.lib.Config;
-import org.eclipse.jgit.lib.TextProgressMonitor;
-import org.h2.Driver;
-import org.kohsuke.args4j.Option;
-
-@RequiresCapability(GlobalCapability.ADMINISTRATE_SERVER)
-@CommandMetaData(name = "migrate-h2-caches", description = "Migrate H2 caches to Chronicle-Map")
-public class MigrateH2Caches extends SshCommand {
-  protected static final FluentLogger logger = FluentLogger.forEnclosingClass();
-  private final Injector injector;
-  private final ChronicleMapCacheConfig.Factory configFactory;
-
-  protected static int DEFAULT_SIZE_MULTIPLIER = 3;
-  protected static int DEFAULT_MAX_BLOAT_FACTOR = 3;
-  private final SitePaths site;
-  private final Config gerritConfig;
-
-  @Option(
-      name = "--size-multiplier",
-      aliases = {"-s"},
-      metaVar = "MULTIPLIER",
-      usage = "Multiplicative factor for the number of entries allowed in chronicle-map")
-  private int sizeMultiplier = DEFAULT_SIZE_MULTIPLIER;
-
-  @Option(
-      name = "--max-bloat-factor",
-      aliases = {"-m"},
-      metaVar = "FACTOR",
-      usage = "maximum number of times chronicle-map cache is allowed to grow in size")
-  private int maxBloatFactor = DEFAULT_MAX_BLOAT_FACTOR;
-
-  @Inject
-  MigrateH2Caches(
-      @GerritServerConfig Config cfg,
-      SitePaths site,
-      Injector injector,
-      ChronicleMapCacheConfig.Factory configFactory) {
-    this.injector = injector;
-    this.configFactory = configFactory;
-    this.site = site;
-    this.gerritConfig = cfg;
-  }
-
-  @Override
-  protected void run() throws Exception {
-    Optional<Path> cacheDir = getCacheDir(gerritConfig, site);
-
-    if (!cacheDir.isPresent()) {
-      throw die("Cannot run migration, cache directory is not configured");
-    }
-
-    stdout.println("Migrating H2 caches to Chronicle-Map...");
-    stdout.println("* Size multiplier: " + sizeMultiplier);
-    stdout.println("* Max Bloat Factor: " + maxBloatFactor);
-    Set<PersistentCacheDef<?, ?>> cacheDefs = getAllBoundPersistentCacheDefs();
-
-    Config outputChronicleMapConfig = new Config();
-
-    for (PersistentCacheDef<?, ?> in : cacheDefs) {
-      Optional<Path> h2CacheFile = getH2CacheFile(cacheDir.get(), in.name());
-
-      if (h2CacheFile.isPresent()) {
-        H2AggregateData stats = getStats(h2CacheFile.get());
-
-        if (!stats.isEmpty()) {
-          ChronicleMapCacheImpl<?, ?> chronicleMapCache =
-              new ChronicleMapCacheImpl<>(
-                  in,
-                  makeChronicleMapConfig(
-                      configFactory, cacheDir.get(), in, stats, sizeMultiplier, maxBloatFactor),
-                  null,
-                  new DisabledMetricMaker());
-          doMigrate(h2CacheFile.get(), in, chronicleMapCache, stats.size());
-          chronicleMapCache.close();
-          appendBloatedConfig(outputChronicleMapConfig, stats);
-        }
-      }
-    }
-    stdout.println("Complete!");
-    stdout.println();
-    stdout.println("****************************");
-    stdout.println("** Chronicle-map template **");
-    stdout.println("****************************");
-    stdout.println();
-    stdout.println(outputChronicleMapConfig.toText());
-  }
-
-  protected static ChronicleMapCacheConfig makeChronicleMapConfig(
-      ChronicleMapCacheConfig.Factory configFactory,
-      Path cacheDir,
-      PersistentCacheDef<?, ?> in,
-      H2AggregateData stats,
-      int sizeMultiplier,
-      int maxBloatFactor) {
-    return configFactory.createWithValues(
-        in.configKey(),
-        ChronicleMapCacheFactory.fileName(cacheDir, in.name(), in.version()),
-        in.expireAfterWrite(),
-        in.refreshAfterWrite(),
-        stats.size() * sizeMultiplier,
-        stats.avgKeySize(),
-        stats.avgValueSize(),
-        maxBloatFactor);
-  }
-
-  private void doMigrate(
-      Path h2File,
-      PersistentCacheDef<?, ?> in,
-      ChronicleMapCacheImpl<?, ?> chronicleMapCache,
-      long totalEntries)
-      throws UnloggedFailure {
-
-    TextProgressMonitor cacheProgress = new TextProgressMonitor(stdout);
-    cacheProgress.beginTask(String.format("[%s]", in.name()), (int) totalEntries);
-
-    String url = jdbcUrl(h2File);
-    try (Connection conn = Driver.load().connect(url, null)) {
-      PreparedStatement preparedStatement =
-          conn.prepareStatement("SELECT k, v, created FROM data WHERE version=?");
-      preparedStatement.setInt(1, in.version());
-
-      try (ResultSet r = preparedStatement.executeQuery()) {
-        while (r.next()) {
-          Object key = in.keySerializer().deserialize(getBytes(r, 1, in.keySerializer()));
-          Object value = in.valueSerializer().deserialize(getBytes(r, 2, in.valueSerializer()));
-          Timestamp created = r.getTimestamp(3);
-          chronicleMapCache.putUnchecked(key, value, created);
-          cacheProgress.update(1);
-        }
-      }
-
-    } catch (Exception e) {
-      String message = String.format("FATAL: error migrating %s H2 cache", in.name());
-      logger.atSevere().withCause(e).log(message);
-      stderr.println(message);
-      throw die(e);
-    }
-    cacheProgress.endTask();
-  }
-
-  private Set<PersistentCacheDef<?, ?>> getAllBoundPersistentCacheDefs() {
-    Set<PersistentCacheDef<?, ?>> cacheDefs = new HashSet<>();
-    for (Map.Entry<Key<?>, Binding<?>> entry : injector.getParent().getAllBindings().entrySet()) {
-      final Class<?> rawType = entry.getKey().getTypeLiteral().getRawType();
-      if ("PersistentCacheDef".equals(rawType.getSimpleName())) {
-        cacheDefs.add((PersistentCacheDef<?, ?>) entry.getValue().getProvider().get());
-      }
-    }
-    return cacheDefs;
-  }
-
-  private byte[] getBytes(ResultSet r, int columnIndex, CacheSerializer<?> serializer)
-      throws SQLException {
-    return (serializer instanceof StringCacheSerializer)
-        ? r.getString(columnIndex).getBytes()
-        : r.getBytes(columnIndex);
-  }
-
-  private Optional<Path> getH2CacheFile(Path cacheDir, String name) {
-    Path h2CacheFile = cacheDir.resolve(String.format("%s.%s", name, H2_SUFFIX));
-    if (Files.exists(h2CacheFile)) {
-      return Optional.of(h2CacheFile);
-    }
-    return Optional.empty();
-  }
-
-  private void appendBloatedConfig(Config config, H2AggregateData stats) {
-    appendToConfig(
-        config,
-        H2AggregateData.create(
-            stats.cacheName(),
-            stats.size() * sizeMultiplier,
-            stats.avgKeySize(),
-            stats.avgValueSize()));
-    config.setLong("cache", stats.cacheName(), "maxBloatFactor", maxBloatFactor);
-  }
-}
diff --git a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
index 81c2a62..e7e0074 100644
--- a/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
+++ b/src/main/java/com/googlesource/gerrit/modules/cache/chroniclemap/SSHCommandModule.java
@@ -20,6 +20,5 @@
   protected void configureCommands() {
     factory(ChronicleMapCacheConfig.Factory.class);
     command("analyze-h2-caches").to(AnalyzeH2Caches.class);
-    command("migrate-h2-caches").to(MigrateH2Caches.class);
   }
 }
diff --git a/src/main/resources/Documentation/migration.md b/src/main/resources/Documentation/migration.md
index e182d85..22ebb2c 100644
--- a/src/main/resources/Documentation/migration.md
+++ b/src/main/resources/Documentation/migration.md
@@ -1,6 +1,6 @@
 ## Migration from H2 Caches
 
-This module provides an SSH command to help converting existing cache from H2 to
+This module provides a REST API to help convert existing caches from H2 to
 chronicle-map, which requires the `Administrate Server` capability to be
 executed.
 
@@ -12,6 +12,22 @@
 database, it will contribute to the overall database load, so you should test
 accordingly.
 
+The following caches will be migrated (if they exist and contain any data):
+
+* accounts
+* change_kind
+* change_notes
+* conflicts
+* diff
+* diff_intraline
+* diff_summary
+* git_tags
+* mergeability
+* oauth_tokens
+* persisted_projects
+* pure_revert
+* web_sessions
+
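+Note that `web_sessions`, in particular, can only be migrated over HTTP,
+since that cache is bound in the HTTP injector alone.
+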
 The migration should be performed as follows:
 
 * Copy the `cache-chroniclemap.jar` file into the `plugins/` directory.
@@ -25,31 +41,16 @@
 * You can now run the migration:
 
 ```bash
-ssh -p 29418 admin@<gerrit-server> cache-chroniclemap migrate-h2-caches \
-    [--max-bloat-factor FACTOR] \
-    [--size-multiplier MULTIPLIER]
+curl -v -XPUT -u <admin> '<gerrit>/a/plugins/cache-chroniclemap/migrate?[size-multiplier=MULTIPLIER]&[max-bloat-factor=FACTOR]'
 ```
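+
+For example, a complete invocation could look like this (host and credentials
+are placeholders for your installation):
+
+```bash
+curl -v -XPUT -u admin 'https://gerrit.example.com/a/plugins/cache-chroniclemap/migrate?size-multiplier=2&max-bloat-factor=3'
+```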
 
 This might require some time, depending on the size of the H2 caches, and it
 will terminate with the output of the configuration that should be placed in
 `etc/gerrit.config` in order to leverage the newly created caches correctly.
 
-For example:
+Example output:
 
-```Migrating H2 caches to Chronicle-Map...
-   * Size multiplier: 1
-   * Max Bloat Factor: 1
-   [diff]:                 100% (216/216)
-   [persisted_projects]:   100% (3/3)
-   [diff_summary]:         100% (216/216)
-   [accounts]:             100% (2/2)
-   [mergeability]:         100% (2444/2444)
-   Complete!
-
-   ****************************
-   ** Chronicle-map template **
-   ****************************
-
+```
    [cache "diff"]
    	maxEntries = 216
    	avgKeySize = 188
@@ -75,15 +76,20 @@
    	avgKeySize = 150
    	avgValueSize = 20
    	maxBloatFactor = 1
+   [cache "web_sessions"]
+   	maxEntries = 94852
+   	avgKeySize = 68
+   	avgValueSize = 382
+   	maxBloatFactor = 1
 ```
 
-Optionally the SSH command can receive the following additional arguments:
+Optionally, the REST endpoint accepts the following additional parameters:
 
-* --max-bloat-factor (-m) FACTOR
+* max-bloat-factor=FACTOR
 
 Maximum number of times the chronicle-map cache is allowed to grow in size.
 *default: 3*
 
-*  --size-multiplier (-s) MULTIPLIER
+* size-multiplier=MULTIPLIER
+
 Multiplicative factor for the number of entries allowed in chronicle-map.
 *default: 3*
\ No newline at end of file
diff --git a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java
index 0359a45..cc600b5 100644
--- a/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java
+++ b/src/test/java/com/googlesource/gerrit/modules/cache/chroniclemap/MigrateH2CachesIT.java
@@ -14,19 +14,23 @@
 
 package com.googlesource.gerrit.modules.cache.chroniclemap;
 
+import static com.google.common.net.HttpHeaders.CONTENT_TYPE;
 import static com.google.common.truth.Truth.assertThat;
 import static com.googlesource.gerrit.modules.cache.chroniclemap.H2CacheCommand.H2_SUFFIX;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.MigrateH2Caches.DEFAULT_MAX_BLOAT_FACTOR;
-import static com.googlesource.gerrit.modules.cache.chroniclemap.MigrateH2Caches.DEFAULT_SIZE_MULTIPLIER;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2MigrationServlet.DEFAULT_MAX_BLOAT_FACTOR;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2MigrationServlet.DEFAULT_SIZE_MULTIPLIER;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2MigrationServlet.MAX_BLOAT_FACTOR_PARAM;
+import static com.googlesource.gerrit.modules.cache.chroniclemap.H2MigrationServlet.SIZE_MULTIPLIER_PARAM;
+import static org.apache.http.HttpHeaders.ACCEPT;
+import static org.eclipse.jgit.util.HttpSupport.TEXT_PLAIN;
 
-import com.google.common.base.Joiner;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.gerrit.acceptance.LightweightPluginDaemonTest;
-import com.google.gerrit.acceptance.Sandboxed;
+import com.google.gerrit.acceptance.RestResponse;
+import com.google.gerrit.acceptance.RestSession;
 import com.google.gerrit.acceptance.TestPlugin;
 import com.google.gerrit.acceptance.UseLocalDisk;
-import com.google.gerrit.acceptance.UseSsh;
 import com.google.gerrit.acceptance.WaitUtil;
 import com.google.gerrit.entities.CachedProjectConfig;
 import com.google.gerrit.entities.Project;
@@ -37,6 +41,7 @@
 import com.google.gerrit.server.cache.h2.H2CacheImpl;
 import com.google.gerrit.server.cache.proto.Cache;
 import com.google.gerrit.server.cache.serialize.ObjectIdConverter;
+import com.google.gerrit.server.config.GerritServerConfig;
 import com.google.gerrit.server.config.SitePaths;
 import com.google.gerrit.server.git.GitRepositoryManager;
 import com.google.inject.Binding;
@@ -47,78 +52,131 @@
 import java.nio.file.Path;
 import java.time.Duration;
 import java.util.Map;
+import org.apache.http.message.BasicHeader;
+import org.eclipse.jgit.lib.Config;
 import org.eclipse.jgit.lib.Repository;
 import org.junit.Before;
 import org.junit.Test;
 
-@Sandboxed
-@UseSsh
 @TestPlugin(
     name = "cache-chroniclemap",
-    sshModule = "com.googlesource.gerrit.modules.cache.chroniclemap.SSHCommandModule")
+    httpModule = "com.googlesource.gerrit.modules.cache.chroniclemap.HttpModule")
 public class MigrateH2CachesIT extends LightweightPluginDaemonTest {
   private final Duration LOAD_CACHE_WAIT_TIMEOUT = Duration.ofSeconds(4);
   private String ACCOUNTS_CACHE_NAME = "accounts";
   private String PERSISTED_PROJECTS_CACHE_NAME = "persisted_projects";
+  private String MIGRATION_ENDPOINT = "/plugins/cache-chroniclemap/migrate";
 
   @Inject protected GitRepositoryManager repoManager;
   @Inject private SitePaths sitePaths;
+  @Inject @GerritServerConfig Config cfg;
 
   private ChronicleMapCacheConfig.Factory chronicleMapCacheConfigFactory;
 
-  private String cmd = Joiner.on(" ").join("cache-chroniclemap", "migrate-h2-caches");
-
   @Before
   public void setUp() {
     chronicleMapCacheConfigFactory =
-        plugin.getSshInjector().getInstance(ChronicleMapCacheConfig.Factory.class);
+        plugin.getHttpInjector().getInstance(ChronicleMapCacheConfig.Factory.class);
   }
 
   @Test
   @UseLocalDisk
   public void shouldRunAndCompleteSuccessfullyWhenCacheDirectoryIsDefined() throws Exception {
-    String result = adminSshSession.exec(cmd);
-    adminSshSession.assertSuccess();
-    assertThat(result).contains("Complete");
+    runMigration(adminRestSession).assertOK();
   }
 
   @Test
   @UseLocalDisk
-  public void shouldOutputChronicleMapBloatedConfiguration() throws Exception {
+  public void shouldReturnTextPlain() throws Exception {
+    RestResponse result = runMigration(adminRestSession);
+    assertThat(result.getHeader(CONTENT_TYPE)).contains(TEXT_PLAIN);
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldReturnBadRequestWhenTextPlainIsNotAnAcceptedHeader() throws Exception {
+    runMigrationWithAcceptHeader(adminRestSession, "application/json").assertBadRequest();
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldReturnSuccessWhenAllTextContentsAreAccepted() throws Exception {
+    runMigrationWithAcceptHeader(adminRestSession, "text/*").assertOK();
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldReturnSuccessWhenAllContentsAreAccepted() throws Exception {
+    runMigrationWithAcceptHeader(adminRestSession, "*/*").assertOK();
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldOutputChronicleMapBloatedDefaultConfiguration() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
     waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);
 
-    String result = adminSshSession.exec(cmd);
-    adminSshSession.assertSuccess();
+    RestResponse result = runMigration(adminRestSession);
+    result.assertOK();
 
-    assertThat(result)
-        .contains(
-            "[cache \""
-                + ACCOUNTS_CACHE_NAME
-                + "\"]\n"
-                + "\tmaxEntries = "
-                + H2CacheFor(ACCOUNTS_CACHE_NAME).diskStats().size() * DEFAULT_SIZE_MULTIPLIER);
+    Config configResult = new Config();
+    configResult.fromText(result.getEntityContent());
 
-    assertThat(result)
-        .contains(
-            "[cache \""
-                + PERSISTED_PROJECTS_CACHE_NAME
-                + "\"]\n"
-                + "\tmaxEntries = "
-                + H2CacheFor(PERSISTED_PROJECTS_CACHE_NAME).diskStats().size()
-                    * DEFAULT_SIZE_MULTIPLIER);
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxEntries", 0))
+        .isEqualTo(H2CacheFor(ACCOUNTS_CACHE_NAME).diskStats().size() * DEFAULT_SIZE_MULTIPLIER);
+
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxBloatFactor", 0))
+        .isEqualTo(DEFAULT_MAX_BLOAT_FACTOR);
+
+    assertThat(configResult.getInt("cache", PERSISTED_PROJECTS_CACHE_NAME, "maxEntries", 0))
+        .isEqualTo(
+            H2CacheFor(PERSISTED_PROJECTS_CACHE_NAME).diskStats().size() * DEFAULT_SIZE_MULTIPLIER);
+
+    assertThat(configResult.getInt("cache", PERSISTED_PROJECTS_CACHE_NAME, "maxBloatFactor", 0))
+        .isEqualTo(DEFAULT_MAX_BLOAT_FACTOR);
+  }
+
+  @Test
+  @UseLocalDisk
+  public void shouldOutputChronicleMapBloatedProvidedConfiguration() throws Exception {
+    waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
+    waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);
+
+    int sizeMultiplier = 2;
+    int maxBloatFactor = 3;
+    RestResponse result = runMigration(sizeMultiplier, maxBloatFactor);
+    result.assertOK();
+
+    Config configResult = new Config();
+    configResult.fromText(result.getEntityContent());
+
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxEntries", 0))
+        .isEqualTo(H2CacheFor(ACCOUNTS_CACHE_NAME).diskStats().size() * sizeMultiplier);
+
+    assertThat(configResult.getInt("cache", ACCOUNTS_CACHE_NAME, "maxBloatFactor", 0))
+        .isEqualTo(maxBloatFactor);
+
+    assertThat(configResult.getInt("cache", PERSISTED_PROJECTS_CACHE_NAME, "maxEntries", 0))
+        .isEqualTo(H2CacheFor(PERSISTED_PROJECTS_CACHE_NAME).diskStats().size() * sizeMultiplier);
+
+    assertThat(configResult.getInt("cache", PERSISTED_PROJECTS_CACHE_NAME, "maxBloatFactor", 0))
+        .isEqualTo(maxBloatFactor);
   }
 
   @Test
   public void shouldFailWhenCacheDirectoryIsNotDefined() throws Exception {
-    adminSshSession.exec(cmd);
-    adminSshSession.assertFailure("fatal: Cannot run migration, cache directory is not configured");
+    RestResponse result = runMigration(adminRestSession);
+    result.assertBadRequest();
+    assertThat(result.getEntityContent())
+        .contains("Cannot run migration, cache directory is not configured");
   }
 
   @Test
   public void shouldFailWhenUserHasNoAdminServerCapability() throws Exception {
-    userSshSession.exec(cmd);
-    userSshSession.assertFailure("administrateServer for plugin cache-chroniclemap not permitted");
+    RestResponse result = runMigration(userRestSession);
+    result.assertForbidden();
+    assertThat(result.getEntityContent())
+        .contains("administrateServer for plugin cache-chroniclemap not permitted");
   }
 
   @Test
@@ -126,8 +184,7 @@
   public void shouldMigrateAccountsCache() throws Exception {
     waitForCacheToLoad(ACCOUNTS_CACHE_NAME);
 
-    adminSshSession.exec(cmd);
-    adminSshSession.assertSuccess();
+    runMigration(adminRestSession).assertOK();
 
     ChronicleMapCacheImpl<CachedAccountDetails.Key, CachedAccountDetails> chronicleMapCache =
         chronicleCacheFor(ACCOUNTS_CACHE_NAME);
@@ -142,8 +199,7 @@
   public void shouldMigratePersistentProjects() throws Exception {
     waitForCacheToLoad(PERSISTED_PROJECTS_CACHE_NAME);
 
-    adminSshSession.exec(cmd);
-    adminSshSession.assertSuccess();
+    runMigration(adminRestSession).assertOK();
 
     H2CacheImpl<Cache.ProjectCacheKeyProto, CachedProjectConfig> h2Cache =
         H2CacheFor(PERSISTED_PROJECTS_CACHE_NAME);
@@ -183,6 +239,26 @@
     return findClassBoundWithName(CacheLoader.class, named);
   }
 
+  private RestResponse runMigration(int sizeMultiplier, int maxBloatFactor) throws IOException {
+    return adminRestSession.put(
+        String.format(
+            "%s?%s=%d&%s=%d",
+            MIGRATION_ENDPOINT,
+            MAX_BLOAT_FACTOR_PARAM,
+            maxBloatFactor,
+            SIZE_MULTIPLIER_PARAM,
+            sizeMultiplier));
+  }
+
+  private RestResponse runMigration(RestSession restSession) throws IOException {
+    return runMigrationWithAcceptHeader(restSession, TEXT_PLAIN);
+  }
+
+  private RestResponse runMigrationWithAcceptHeader(RestSession restSession, String acceptHeader)
+      throws IOException {
+    return restSession.putWithHeader(MIGRATION_ENDPOINT, new BasicHeader(ACCEPT, acceptHeader));
+  }
+
   private <T> T findClassBoundWithName(Class<T> clazz, String named) {
     return plugin.getSysInjector().getAllBindings().entrySet().stream()
         .filter(entry -> isClassBoundWithName(entry, clazz.getSimpleName(), named))
@@ -205,7 +281,7 @@
 
     PersistentCacheDef<K, V> persistentDef = getPersistentCacheDef(cacheName);
     ChronicleMapCacheConfig config =
-        MigrateH2Caches.makeChronicleMapConfig(
+        H2MigrationServlet.makeChronicleMapConfig(
             chronicleMapCacheConfigFactory,
             cacheDirectory,
             persistentDef,