Enforce a maximum PER_THREAD_CACHE_SIZE in PerThreadCache
When calling operations that create a lot of ProjectControls (e.g.
ListProjects) we observe OOMs in our Gerrit jobs from time to time. This
commit puts a maximum limit on the cache size to prevent this from
happening.
The approach implemented is deliberately not an LRU: we want to keep
PerThreadCache simple and avoid adding a dependency on e.g. Guava's LRU cache.
Change-Id: Ic2f6e11719b52ee2112f72698e887cbf8ca254d0
diff --git a/java/com/google/gerrit/server/cache/PerThreadCache.java b/java/com/google/gerrit/server/cache/PerThreadCache.java
index 32dff38..0881d59 100644
--- a/java/com/google/gerrit/server/cache/PerThreadCache.java
+++ b/java/com/google/gerrit/server/cache/PerThreadCache.java
@@ -43,9 +43,19 @@
* <p>Lastly, this class offers a cache, that requires callers to also provide a {@code Supplier} in
* case the object is not present in the cache, while {@code CurrentUser} provides a storage where
* just retrieving stored values is a valid operation.
+ *
+ * <p>To prevent OOM errors on requests that would cache a lot of objects, this class enforces an
+ * internal limit after which no new elements are cached. All {@code get} calls are served by
+ * invoking the {@code Supplier} after that.
*/
public class PerThreadCache implements AutoCloseable {
private static final ThreadLocal<PerThreadCache> CACHE = new ThreadLocal<>();
+ /**
+ * Cache at maximum 50 values per thread. This value was chosen arbitrarily. Some endpoints (like
+ * ListProjects) break the assumption that the data cached in a request is limited. To prevent
+ * this class from accumulating an unbounded number of objects, we enforce this limit.
+ */
+ private static final int PER_THREAD_CACHE_SIZE = 50;
/**
* Unique key for key-value mappings stored in PerThreadCache. The key is based on the value's
@@ -109,7 +119,7 @@
return cache != null ? cache.get(key, loader) : loader.get();
}
- private final Map<Key<?>, Object> cache = Maps.newHashMapWithExpectedSize(10);
+ private final Map<Key<?>, Object> cache = Maps.newHashMapWithExpectedSize(PER_THREAD_CACHE_SIZE);
private PerThreadCache() {}
@@ -122,7 +132,9 @@
T value = (T) cache.get(key);
if (value == null) {
value = loader.get();
- cache.put(key, value);
+ if (cache.size() < PER_THREAD_CACHE_SIZE) {
+ cache.put(key, value);
+ }
}
return value;
}
diff --git a/javatests/com/google/gerrit/server/cache/PerThreadCacheTest.java b/javatests/com/google/gerrit/server/cache/PerThreadCacheTest.java
index ae5e911..cfb5f3f 100644
--- a/javatests/com/google/gerrit/server/cache/PerThreadCacheTest.java
+++ b/javatests/com/google/gerrit/server/cache/PerThreadCacheTest.java
@@ -83,4 +83,20 @@
PerThreadCache.create();
}
}
+
+ @Test
+ public void enforceMaxSize() {
+ try (PerThreadCache cache = PerThreadCache.create()) {
+ // Fill the cache
+ for (int i = 0; i < 50; i++) {
+ PerThreadCache.Key<String> key = PerThreadCache.Key.create(String.class, i);
+ cache.get(key, () -> "cached value");
+ }
+ // Assert that the value was not persisted
+ PerThreadCache.Key<String> key = PerThreadCache.Key.create(String.class, 1000);
+ cache.get(key, () -> "new value");
+ String value = cache.get(key, () -> "directly served");
+ assertThat(value).isEqualTo("directly served");
+ }
+ }
}