Define a skeleton NoSQL base for extension

This is a simple base that a specific NoSQL implementation can
build on top of as part of our public API.

Trivial implementations are supplied based upon a simple TreeMap in
the JVM, and persisting that map to disk.  These can be useful for
development and testing of applications built on top of the library.

Change-Id: Ida7fdfa4aca5db5cfbc1ca7b7bbec5af70b551fc
Signed-off-by: Shawn O. Pearce <sop@google.com>
diff --git a/src/main/java/com/google/gwtorm/nosql/AccessGen.java b/src/main/java/com/google/gwtorm/nosql/AccessGen.java
new file mode 100644
index 0000000..a225770
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/AccessGen.java
@@ -0,0 +1,583 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.schema.KeyModel;
+import com.google.gwtorm.schema.QueryModel;
+import com.google.gwtorm.schema.QueryParser;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.Util;
+import com.google.gwtorm.server.CodeGenSupport;
+import com.google.gwtorm.server.GeneratedClassLoader;
+
+import org.antlr.runtime.tree.Tree;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/** Generates a concrete implementation of a {@link NoSqlAccess} extension. */
+class AccessGen implements Opcodes {
+  private static final Type string = Type.getType(String.class);
+  private static final Type protobufCodec = Type.getType(ProtobufCodec.class);
+  private static final Type indexFunction = Type.getType(IndexFunction.class);
+  private static final Type object = Type.getType(Object.class);
+  private static final Type ormKey = Type.getType(Key.class);
+  private static final Type byteArray = Type.getType(byte[].class);
+  private static final Type ormException = Type.getType(OrmException.class);
+  private static final Type resultSet = Type.getType(ResultSet.class);
+  private static final Type indexKeyBuilder =
+      Type.getType(IndexKeyBuilder.class);
+
+  private static final String F_OBJECT_CODEC = "objectCodec";
+  private static final String F_KEY_INDEX = "keyIndex";
+  private static final String F_QUERY_INDEXES = "queryIndexes";
+
+  private final GeneratedClassLoader classLoader;
+  private final RelationModel model;
+  private final Class<?> modelClass;
+  private final Type schemaType;
+  private final Type accessType;
+  private final Type entityType;
+  private final KeyModel key;
+
+  private ClassWriter cw;
+  private String implClassName;
+  private String implTypeName;
+
+  AccessGen(final GeneratedClassLoader loader, final RelationModel rm,
+      final Class<? extends NoSqlSchema> schemaClazz,
+      final Class<? extends NoSqlAccess> accessClazz) throws OrmException {
+    classLoader = loader;
+    model = rm;
+
+    try {
+      modelClass =
+          Class.forName(model.getEntityTypeClassName(), true, classLoader);
+    } catch (ClassNotFoundException cnfe) {
+      throw new OrmException("Cannot locate model class", cnfe);
+    }
+
+    schemaType = Type.getType(schemaClazz);
+    accessType = Type.getType(accessClazz);
+    entityType = Type.getType(modelClass);
+
+    key = model.getPrimaryKey();
+    if (key == null) {
+      throw new OrmException("Relation " + rm.getMethodName()
+          + " has no primary key");
+    }
+  }
+
+  Class<?> create() throws OrmException {
+    init();
+    implementStaticFields();
+    implementConstructor();
+    implementGetString("getRelationName", model.getRelationName());
+    implementGetObjectCodec();
+    implementGetKeyIndex();
+    implementGetQueryIndexes();
+
+    implementPrimaryKey();
+    implementEncodeKey();
+    implementKeyQuery(key);
+
+    for (final QueryModel q : model.getQueries()) {
+      implementQuery(q);
+    }
+
+    cw.visitEnd();
+    classLoader.defineClass(implClassName, cw.toByteArray());
+
+    final Class<?> c = loadClass();
+    initObjectCodec(c);
+    initKeyIndex(c);
+    initQueryIndexes(c);
+    return c;
+  }
+
+  private void initObjectCodec(final Class<?> clazz) throws OrmException {
+    try {
+      final Field e = clazz.getDeclaredField(F_OBJECT_CODEC);
+      e.setAccessible(true);
+      e.set(null, CodecFactory.encoder(modelClass));
+    } catch (IllegalArgumentException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (IllegalStateException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (IllegalAccessException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (SecurityException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (NoSuchFieldException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void initKeyIndex(final Class<?> clazz) throws OrmException {
+    try {
+      final Field e = clazz.getDeclaredField(F_KEY_INDEX);
+      e.setAccessible(true);
+      e.set(null, new IndexFunctionGen(classLoader, toQueryModel(key),
+          modelClass).create());
+    } catch (IllegalArgumentException err) {
+      throw new OrmException("Cannot setup key IndexFunction", err);
+    } catch (IllegalStateException err) {
+      throw new OrmException("Cannot setup key IndexFunction", err);
+    } catch (IllegalAccessException err) {
+      throw new OrmException("Cannot setup key IndexFunction", err);
+    } catch (SecurityException err) {
+      throw new OrmException("Cannot setup key IndexFunction", err);
+    } catch (NoSuchFieldException err) {
+      throw new OrmException("Cannot setup key IndexFunction", err);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void initQueryIndexes(final Class<?> clazz) throws OrmException {
+    final Collection<QueryModel> queries = model.getQueries();
+    final IndexFunction[] indexes = new IndexFunction[queries.size()];
+    int p = 0;
+    for (QueryModel m : queries) {
+      indexes[p++] = new IndexFunctionGen(classLoader, m, modelClass).create();
+    }
+
+    try {
+      final Field e = clazz.getDeclaredField(F_QUERY_INDEXES);
+      e.setAccessible(true);
+      e.set(null, indexes);
+    } catch (IllegalArgumentException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (IllegalStateException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (IllegalAccessException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (SecurityException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (NoSuchFieldException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    }
+  }
+
+  private Class<?> loadClass() throws OrmException {
+    try {
+      return Class.forName(implClassName, false, classLoader);
+    } catch (ClassNotFoundException err) {
+      throw new OrmException("Cannot load generated class", err);
+    }
+  }
+
+  private void init() {
+    implClassName =
+        model.getEntityTypeClassName() + "_Access_" + model.getMethodName()
+            + "_" + Util.createRandomName();
+    implTypeName = implClassName.replace('.', '/');
+
+    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null,
+        accessType.getInternalName(), new String[] {model
+            .getAccessInterfaceName().replace('.', '/')});
+  }
+
+  private void implementStaticFields() {
+    cw.visitField(ACC_PRIVATE | ACC_STATIC, F_OBJECT_CODEC,
+        protobufCodec.getDescriptor(), null, null).visitEnd();
+    cw.visitField(ACC_PRIVATE | ACC_STATIC, F_KEY_INDEX,
+        indexFunction.getDescriptor(), null, null).visitEnd();
+    cw.visitField(ACC_PRIVATE | ACC_STATIC, F_QUERY_INDEXES,
+        Type.getType(IndexFunction[].class).getDescriptor(), null, null)
+        .visitEnd();
+  }
+
+  private void implementConstructor() {
+    final String consName = "<init>";
+    final String consDesc =
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {schemaType});
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
+    mv.visitCode();
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitMethodInsn(INVOKESPECIAL, accessType.getInternalName(), consName,
+        consDesc);
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetString(final String methodName,
+      final String returnValue) {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, methodName, Type
+            .getMethodDescriptor(string, new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitLdcInsn(returnValue);
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetObjectCodec() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getObjectCodec", Type
+            .getMethodDescriptor(protobufCodec, new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitFieldInsn(GETSTATIC, implTypeName, F_OBJECT_CODEC, protobufCodec
+        .getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetKeyIndex() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getKeyIndex", Type
+            .getMethodDescriptor(indexFunction, new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitFieldInsn(GETSTATIC, implTypeName, F_KEY_INDEX, indexFunction
+        .getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetQueryIndexes() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getQueryIndexes", Type
+            .getMethodDescriptor(Type.getType(IndexFunction[].class),
+                new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitFieldInsn(GETSTATIC, implTypeName, F_QUERY_INDEXES, Type.getType(
+        IndexFunction[].class).getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementPrimaryKey() {
+    final ColumnModel f = key.getField();
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "primaryKey", Type
+            .getMethodDescriptor(ormKey, new Type[] {object}), null, null);
+    mv.visitCode();
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitTypeInsn(CHECKCAST, entityType.getInternalName());
+    mv.visitFieldInsn(GETFIELD, entityType.getInternalName(), f.getFieldName(),
+        CodeGenSupport.toType(f).getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private QueryModel toQueryModel(KeyModel info) throws OrmException {
+    return new QueryModel(model, info.getName(), //
+        "WHERE " + info.getField().getFieldName() + "=?");
+  }
+
+  private void implementEncodeKey() throws OrmException {
+    final List<ColumnModel> pCols = Collections.singletonList(key.getField());
+    final Type argType = CodeGenSupport.toType(key.getField());
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "encodeKey", Type
+            .getMethodDescriptor(Type.VOID_TYPE, new Type[] {indexKeyBuilder,
+                ormKey}), null, null);
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 2);
+    mv.visitTypeInsn(CHECKCAST, argType.getInternalName());
+    mv.visitVarInsn(ASTORE, 2);
+
+    final QueryCGS cgs =
+        new QueryCGS(mv, new Type[] {argType}, pCols, new int[] {2}, 1);
+    for (ColumnModel f : pCols) {
+      IndexFunctionGen.encodeField(f, mv, cgs);
+    }
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementKeyQuery(KeyModel key) {
+    final Type keyType = CodeGenSupport.toType(key.getField());
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, key.getName(), Type
+            .getMethodDescriptor(entityType, new Type[] {keyType}), null,
+            new String[] {Type.getType(OrmException.class).getInternalName()});
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitMethodInsn(INVOKESPECIAL, accessType.getInternalName(), "get", Type
+        .getMethodDescriptor(object, new Type[] {ormKey}));
+    mv.visitTypeInsn(CHECKCAST, entityType.getInternalName());
+
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementQuery(final QueryModel info) throws OrmException {
+    final List<ColumnModel> pCols = info.getParameters();
+    final boolean hasLimitParam = info.hasLimitParameter();
+    final Type[] pTypes = new Type[pCols.size() + (hasLimitParam ? 1 : 0)];
+    final int[] pVars = new int[pTypes.length];
+    int nextVar = 1;
+    for (int i = 0; i < pCols.size(); i++) {
+      pTypes[i] = CodeGenSupport.toType(pCols.get(i));
+      pVars[i] = nextVar;
+      nextVar += pTypes[i].getSize();
+    }
+    if (hasLimitParam) {
+      pTypes[pTypes.length - 1] = Type.INT_TYPE;
+      pVars[pTypes.length - 1] = nextVar;
+      nextVar += Type.INT_TYPE.getSize();
+    }
+
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, info.getName(), Type
+            .getMethodDescriptor(resultSet, pTypes), null,
+            new String[] {ormException.getInternalName()});
+    mv.visitCode();
+
+    final List<Tree> ops = compareOpsOnly(info.getParseTree());
+
+    // Generate fromKey
+    //
+    final int fromBuf = nextVar++;
+    mv.visitTypeInsn(NEW, indexKeyBuilder.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, indexKeyBuilder.getInternalName(),
+        "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    mv.visitVarInsn(ASTORE, fromBuf);
+
+    QueryCGS cgs = new QueryCGS(mv, pTypes, pCols, pVars, fromBuf);
+    encodeFields(info, ops, mv, cgs, true /* fromKey */);
+
+    // Generate toKey
+    //
+    final int toBuf = nextVar++;
+    mv.visitTypeInsn(NEW, indexKeyBuilder.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, indexKeyBuilder.getInternalName(),
+        "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    mv.visitVarInsn(ASTORE, toBuf);
+
+    cgs = new QueryCGS(mv, pTypes, pCols, pVars, toBuf);
+    encodeFields(info, ops, mv, cgs, false /* fromKey */);
+    cgs.infinity();
+
+    // Make the scan call
+    //
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitLdcInsn(info.getName());
+
+    mv.visitVarInsn(ALOAD, fromBuf);
+    mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+        "toByteArray", Type.getMethodDescriptor(byteArray, new Type[] {}));
+
+    mv.visitVarInsn(ALOAD, toBuf);
+    mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+        "toByteArray", Type.getMethodDescriptor(byteArray, new Type[] {}));
+
+    // Set the limit on the number of results.
+    //
+    if (info.hasLimit()) {
+      if (hasLimitParam) {
+        mv.visitVarInsn(ILOAD, pVars[pTypes.length - 1]);
+      } else {
+        cgs.push(info.getStaticLimit());
+      }
+    } else {
+      cgs.push(0);
+    }
+
+    mv.visitMethodInsn(INVOKEVIRTUAL, accessType.getInternalName(), "scan",
+        Type.getMethodDescriptor(resultSet, new Type[] {string, byteArray,
+            byteArray, Type.INT_TYPE}));
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void encodeFields(QueryModel qm, List<Tree> query, MethodVisitor mv,
+      QueryCGS cgs, boolean fromKey) throws OrmException {
+    final boolean toKey = !fromKey;
+    Tree lastNode = null;
+
+    for(Tree node : query) {
+      switch (node.getType()) {
+        case QueryParser.GE:
+          if (fromKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.GT:
+          if (fromKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            cgs.infinity();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.EQ:
+          checkLastNode(qm, lastNode);
+          encodeField(node, mv, cgs);
+          cgs.delimiter();
+          break;
+
+        case QueryParser.LE:
+          if (toKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.LT:
+          if (toKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            cgs.nul();
+            lastNode = node;
+          }
+          break;
+
+        default:
+          throw new OrmException("Unsupported query token in "
+              + model.getMethodName() + "." + qm.getName() + ": "
+              + node.toStringTree());
+      }
+
+      cgs.nextParameter();
+    }
+  }
+
+  private void checkLastNode(QueryModel qm, Tree lastNode) throws OrmException {
+    if (lastNode != null) {
+      throw new OrmException(lastNode.getText() + " must be last operator in "
+          + model.getMethodName() + "." + qm.getName());
+    }
+  }
+
+  private void encodeField(Tree node, MethodVisitor mv, QueryCGS cgs)
+      throws OrmException {
+    ColumnModel f = ((QueryParser.Column) node.getChild(0)).getField();
+    IndexFunctionGen.encodeField(f, mv, cgs);
+  }
+
+  private List<Tree> compareOpsOnly(Tree node) throws OrmException {
+    if (node == null) {
+      return Collections.emptyList();
+    }
+
+    switch (node.getType()) {
+      case 0: // nil node used to join other nodes together
+      case QueryParser.WHERE:
+      case QueryParser.AND: {
+        List<Tree> res = new ArrayList<Tree>();
+        for (int i = 0; i < node.getChildCount(); i++) {
+          res.addAll(compareOpsOnly(node.getChild(i)));
+        }
+        return res;
+      }
+
+      case QueryParser.GT:
+      case QueryParser.GE:
+      case QueryParser.EQ:
+      case QueryParser.LE:
+      case QueryParser.LT: {
+        final Tree lhs = node.getChild(0);
+        final Tree rhs = node.getChild(1);
+        if (lhs.getType() != QueryParser.ID) {
+          throw new OrmException("Unsupported query token");
+        }
+        if (rhs.getType() == QueryParser.PLACEHOLDER) {
+          return Collections.singletonList(node);
+        }
+        break;
+      }
+
+      case QueryParser.ORDER:
+      case QueryParser.LIMIT:
+        break;
+
+      default:
+        throw new OrmException("Unsupported query token " + node.toStringTree());
+    }
+    return Collections.emptyList();
+  }
+
+  private final class QueryCGS extends IndexFunctionGen.EncodeCGS {
+    private final Type[] pTypes;
+    private final List<ColumnModel> pCols;
+    private final int[] pVars;
+    private final int bufvar;
+    private int currentp;
+
+    private QueryCGS(MethodVisitor method, Type[] pTypes,
+        List<ColumnModel> pCols, int[] pVars, int bufvar) {
+      super(method);
+      this.pTypes = pTypes;
+      this.pCols = pCols;
+      this.pVars = pVars;
+      this.bufvar = bufvar;
+    }
+
+    void nextParameter() {
+      currentp++;
+    }
+
+    @Override
+    void pushBuilder() {
+      mv.visitVarInsn(ALOAD, bufvar);
+    }
+
+    @Override
+    public void pushFieldValue() {
+      appendGetField(getFieldReference());
+    }
+
+    @Override
+    protected void appendGetField(final ColumnModel c) {
+      if (currentp < pTypes.length && pCols.get(currentp).equals(c)) {
+        loadVar(pTypes[currentp], pVars[currentp]);
+      } else {
+        super.appendGetField(c);
+      }
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/CounterShard.java b/src/main/java/com/google/gwtorm/nosql/CounterShard.java
new file mode 100644
index 0000000..4761bce
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/CounterShard.java
@@ -0,0 +1,88 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Column;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/**
+ * A single slice of an incrementing counter.
+ * <p>
+ * <b>This shard class is not thread safe.</b> Implementors using this type must
+ * perform synchronization through external mechanisms such as a row-level lock.
+ * <p>
+ * NoSQL implementations can use this object to store counters and keep track of
+ * their values within {@code nextLong(String)}. To improve allocation
+ * performance counters may be sliced into shards, with allocation coming out of
+ * a randomly selected shard, and each shard being replenished from a master
+ * shard when it {@link #isEmpty()}.
+ */
+public class CounterShard {
+  /** Standard encoder/decoder for this class. */
+  public static final ProtobufCodec<CounterShard> CODEC =
+      CodecFactory.encoder(CounterShard.class);
+
+  /** Current value in this shard, this is the next to assign out. */
+  @Column(id = 1)
+  protected long current;
+
+  /** Maximum value, the shard cannot hand out this value. */
+  @Column(id = 2)
+  protected long max;
+
+  protected CounterShard() {
+  }
+
+  /**
+   * Create a new shard with a specific starting value, with no maximum.
+   *
+   * @param next the first value this shard will hand out.
+   */
+  public CounterShard(long next) {
+    this(next, Long.MAX_VALUE);
+  }
+
+  /**
+   * Create a new shard with a specific starting point and maximum.
+   *
+   * @param next the first value this shard will hand out.
+   * @param max the highest value the shard will stop at. The shard will not
+   *        actually hand out this value.
+   */
+  public CounterShard(long next, long max) {
+    this.current = next;
+    this.max = max;
+  }
+
+  /** @return true if this shard cannot hand out any more values. */
+  public boolean isEmpty() {
+    return current == max;
+  }
+
+  /**
+   * Obtain the next value from this shard.
+   *
+   * @return the next value
+   * @throws IllegalStateException the shard {@link #isEmpty()} and cannot hand
+   *         out any more values.
+   */
+  public long next() {
+    if (isEmpty()) {
+      throw new IllegalStateException("Counter shard out of values");
+    }
+    return current++;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexFunction.java b/src/main/java/com/google/gwtorm/nosql/IndexFunction.java
index f13b050..4678500 100644
--- a/src/main/java/com/google/gwtorm/nosql/IndexFunction.java
+++ b/src/main/java/com/google/gwtorm/nosql/IndexFunction.java
@@ -18,11 +18,11 @@
  * A function to produce a NoSQL secondary index key from an object.
  * <p>
  * An index function computes a row key for a secondary index table by appending
- * the relevant values to builder's internal buffer in the order they are
+ * the relevant values to the builder's internal buffer in the order they are
  * referenced in the query.
  * <p>
- * Typically an IndexFunction would be code generated at runtime by
- * {@link IndexFunctionGen} as necessary by a NoSQL implementation.
+ * Typically an IndexFunction is automatically code generated at runtime by
+ * {@link IndexFunctionGen}.
  *
  * @param <T> type of the object the index record references.
  */
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java b/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java
index 206e238..15dfae4 100644
--- a/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java
+++ b/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java
@@ -54,8 +54,8 @@
   private String implClassName;
   private String implTypeName;
 
-  public IndexFunctionGen(final GeneratedClassLoader loader,
-      final QueryModel qm, final Class<T> t) {
+  IndexFunctionGen(final GeneratedClassLoader loader, final QueryModel qm,
+      final Class<T> t) {
     classLoader = loader;
     query = qm;
 
@@ -104,7 +104,7 @@
     return r;
   }
 
-  public IndexFunction<T> create() throws OrmException {
+  IndexFunction<T> create() throws OrmException {
     init();
     implementConstructor();
     implementGetName();
@@ -362,7 +362,7 @@
     mv.visitEnd();
   }
 
-  private static void encodeFields(final Collection<ColumnModel> myFields,
+  static void encodeFields(final Collection<ColumnModel> myFields,
       final MethodVisitor mv, final EncodeCGS cgs) throws OrmException {
     Iterator<ColumnModel> i = myFields.iterator();
     while (i.hasNext()) {
@@ -462,8 +462,8 @@
     }
   }
 
-  private static final class EncodeCGS extends CodeGenSupport {
-    private EncodeCGS(MethodVisitor method) {
+  static class EncodeCGS extends CodeGenSupport {
+    EncodeCGS(MethodVisitor method) {
       super(method);
     }
 
@@ -479,6 +479,12 @@
           "delimiter", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
     }
 
+    void nul() {
+      pushBuilder();
+      mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+          "nul", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    }
+
     void pushBuilder() {
       mv.visitVarInsn(ALOAD, 1);
     }
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java b/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java
index a670840..8c6d9f4 100644
--- a/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java
+++ b/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java
@@ -70,6 +70,16 @@
   }
 
   /**
+   * Add \0 to the string.
+   * <p>
+   * \0 can be used during searches to enforce greater than or less than clauses
+   * in a query.
+   */
+  public void nul() {
+    buf.write(0x00);
+  }
+
+  /**
    * Add a raw sequence of bytes.
    * <p>
    * The bytes 0x00 and 0xff are escaped by this method according to the
@@ -156,6 +166,18 @@
   }
 
   /**
+   * Add a byte array as-is, without escaping.
+   * <p>
+   * This should only be used if the byte array came from a prior index key and the
+   * caller is trying to create a new key with this key embedded at the end.
+   *
+   * @param bin the binary to append as-is, without further escaping.
+   */
+  public void addRaw(byte[] bin) {
+    buf.write(bin, 0, bin.length);
+  }
+
+  /**
    * Obtain a copy of the internal storage array.
    *
    * @return the current state of this, converted into a flat byte array.
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexRow.java b/src/main/java/com/google/gwtorm/nosql/IndexRow.java
new file mode 100644
index 0000000..4986e3a
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/IndexRow.java
@@ -0,0 +1,83 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Column;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/**
+ * Data value stored in a NoSQL secondary index row.
+ * <p>
+ * Instances of this object can be used inside of the data portion of a
+ * secondary index row, and may either contain the key of the primary data row,
+ * or a copy of the primary object data.
+ * <p>
+ * The {@link #timestamp} field can be used to fossil collect secondary index
+ * rows that no longer match the primary data row and which are older than the
+ * longest expected transaction. These fossil rows may have occurred due to an
+ * aborted, but partially applied transaction.
+ */
+public class IndexRow {
+  /** Standard encoder/decoder for this class. */
+  public static final ProtobufCodec<IndexRow> CODEC =
+      CodecFactory.encoder(IndexRow.class);
+
+  /**
+   * Create an index row to reference the primary data row by key.
+   *
+   * @param update time of the update.
+   * @param key the key to reference.
+   * @return the new index row.
+   */
+  public static IndexRow forKey(long update, byte[] key) {
+    IndexRow r = new IndexRow();
+    r.timestamp = update;
+    r.dataKey = key;
+    return r;
+  }
+
+  /**
+   * Clock of the last time this index row was touched.
+   * <p>
+   * Invalid rows older than a certain time interval may be subject to automatic
+   * background pruning during data retrieval operations.
+   */
+  @Column(id = 1)
+  protected long timestamp;
+
+  /** Key within the same relation that holds the actual data. */
+  @Column(id = 2, notNull = false)
+  protected byte[] dataKey;
+
+  /** Stale copy of the data. */
+  @Column(id = 3, notNull = false)
+  protected byte[] dataCopy;
+
+  /** @return the timestamp of the row. */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /** @return the primary key data; or {@code null}. */
+  public byte[] getDataKey() {
+    return dataKey;
+  }
+
+  /** @return the copy of the primary data; or {@code null}. */
+  public byte[] getDataCopy() {
+    return dataCopy;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java b/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java
new file mode 100644
index 0000000..b52a65f
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java
@@ -0,0 +1,74 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.impl.AbstractAccess;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/** Internal base class for implementations of {@link Access}. */
+public abstract class NoSqlAccess<T, K extends Key<?>> extends
+    AbstractAccess<T, K> {
+  protected NoSqlAccess(final NoSqlSchema s) {
+    // The schema handle is currently unused at this level; subclasses
+    // keep their own reference if they need it.
+  }
+
+  /**
+   * Scan a range of keys and return any matching objects.
+   * <p>
+   * All NoSQL implementations must provide their own variant of this method.
+   * <p>
+   * To fetch a single record with a scan, set {@code toKey} to the same array
+   * as {@code fromKey}, but append a trailing NUL byte (0x00). The caller
+   * should validate that the returned ResultSet contains no more than 1 row.
+   *
+   * @param indexName name of the index the scan occurs over.
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return.
+   * @return result set for the requested range. The result set may be lazily
+   *         filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  protected abstract ResultSet<T> scan(String indexName, byte[] fromKey,
+      byte[] toKey, int limit) throws OrmException;
+
+  /**
+   * Locate an index function by name.
+   * <p>
+   * Searches the query indexes first, then falls back to the primary key
+   * index if its name matches.
+   *
+   * @param indexName name of a query index, or of the primary key index.
+   * @return the matching index function; never null.
+   * @throws OrmException no index with the given name exists.
+   */
+  protected IndexFunction<T> getQueryIndex(String indexName)
+      throws OrmException {
+    for (IndexFunction<T> f : getQueryIndexes()) {
+      if (indexName.equals(f.getName())) {
+        return f;
+      }
+    }
+    if (indexName.equals(getKeyIndex().getName())) {
+      return getKeyIndex();
+    }
+    throw new OrmException("No index named " + indexName);
+  }
+
+  // -- These are all provided by AccessGen when it builds a subclass --
+
+  /** @return name of the relation this access instance operates on. */
+  protected abstract String getRelationName();
+
+  /** @return codec used to encode/decode the relation's entity objects. */
+  protected abstract ProtobufCodec<T> getObjectCodec();
+
+  /** @return index function describing the relation's primary key. */
+  protected abstract IndexFunction<T> getKeyIndex();
+
+  /** @return index functions for each of the relation's named queries. */
+  protected abstract IndexFunction<T>[] getQueryIndexes();
+
+  /** Encode {@code key} into {@code dst} using the primary key format. */
+  protected abstract void encodeKey(IndexKeyBuilder dst, K key);
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java b/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java
new file mode 100644
index 0000000..c426806
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java
@@ -0,0 +1,109 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.KeyUtil;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.SchemaFactory;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.SchemaModel;
+import com.google.gwtorm.schema.java.JavaSchemaModel;
+import com.google.gwtorm.server.GeneratedClassLoader;
+import com.google.gwtorm.server.SchemaConstructorGen;
+import com.google.gwtorm.server.SchemaGen;
+import com.google.gwtorm.server.StandardKeyEncoder;
+
+/**
+ * Base class for NoSQL typed databases.
+ * <p>
+ * Applications should use the database class to create instances of their
+ * Schema extension interface, and thus open and connect to the data store.
+ * <p>
+ * Creating a new database instance is expensive, due to the type analysis and
+ * code generation performed to implement the Schema and Access interfaces.
+ * Applications should create and cache their database instance for the life of
+ * the application.
+ * <p>
+ * Database instances are thread-safe, but returned Schema instances are not.
+ * <p>
+ * This class must be further extended by the NoSQL implementation to configure
+ * the connectivity with the data store and supply the correct subclass of
+ * {@link NoSqlSchema} that knows how to interact with the data store.
+ *
+ * @param <T> type of the application's Schema.
+ * @param <S> type of the implementation's base for Schema implementations.
+ * @param <A> type of the implementation's base for Access implementations.
+ */
+public abstract class NoSqlDatabase<T extends Schema, S extends NoSqlSchema, A extends NoSqlAccess>
+    implements SchemaFactory<T> {
+  static {
+    // Register the standard key encoder once, before any schema or
+    // access implementation is generated.
+    KeyUtil.setEncoderImpl(new StandardKeyEncoder());
+  }
+
+  // Derived model of the application's Schema interface.
+  private final SchemaModel schemaModel;
+
+  // Factory producing instances of the generated Schema implementation.
+  private final SchemaFactory<T> implFactory;
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected NoSqlDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    schemaModel = new JavaSchemaModel(appSchema);
+    final GeneratedClassLoader loader = newLoader(appSchema);
+    final Class<T> impl = generate(schemaBaseType, accessBaseType, loader);
+    implFactory = new SchemaConstructorGen<T>(loader, impl, this).create();
+  }
+
+  @Override
+  public T open() throws OrmException {
+    return implFactory.open();
+  }
+
+  /** @return the derived model of the application's schema. */
+  protected SchemaModel getSchemaModel() {
+    return schemaModel;
+  }
+
+  // SchemaGen is used without type arguments here; the generated class
+  // is cast to the application's Schema type, hence the suppression.
+  @SuppressWarnings("unchecked")
+  private Class<T> generate(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final GeneratedClassLoader loader)
+      throws OrmException {
+    return new SchemaGen(loader, schemaModel, getClass(), schemaBaseType,
+        new SchemaGen.AccessGenerator() {
+          @Override
+          public Class<?> create(GeneratedClassLoader loader, RelationModel rm)
+              throws OrmException {
+            return new AccessGen(loader, rm, schemaBaseType, accessBaseType)
+                .create();
+          }
+        }).create();
+  }
+
+  // Generated classes are defined in a loader that delegates to the
+  // application schema's own class loader.
+  private static <T> GeneratedClassLoader newLoader(final Class<T> schema) {
+    return new GeneratedClassLoader(schema.getClassLoader());
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java b/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java
new file mode 100644
index 0000000..697a6c3
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java
@@ -0,0 +1,36 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.StatementExecutor;
+import com.google.gwtorm.server.AbstractSchema;
+
+/** Internal base class for implementations of {@link Schema}. */
+public abstract class NoSqlSchema extends AbstractSchema {
+  protected NoSqlSchema(final NoSqlDatabase<?, ?, ?> d) {
+    // The database handle is currently unused at this level; subclasses
+    // keep their own reference if they need it.
+  }
+
+  @Override
+  public void pruneSchema(StatementExecutor e) throws OrmException {
+    // Assume no action is required in a default NoSQL environment.
+  }
+
+  @Override
+  public void updateSchema(StatementExecutor e) throws OrmException {
+    // Assume no action is required in a default NoSQL environment.
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java
new file mode 100644
index 0000000..7578d70
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java
@@ -0,0 +1,353 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmDuplicateKeyException;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.impl.ListResultSet;
+import com.google.gwtorm.nosql.IndexFunction;
+import com.google.gwtorm.nosql.IndexKeyBuilder;
+import com.google.gwtorm.nosql.IndexRow;
+import com.google.gwtorm.nosql.NoSqlAccess;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+
+/** Base implementation for {@link Access} in a {@link GenericDatabase}. */
+public abstract class GenericAccess<T, K extends Key<?>> extends
+    NoSqlAccess<T, K> {
+  private final GenericSchema db;
+
+  protected GenericAccess(final GenericSchema s) {
+    super(s);
+    db = s;
+  }
+
+  @Override
+  protected ResultSet<T> scan(String indexName, byte[] fromKey, byte[] toKey,
+      int limit) throws OrmException {
+    if (indexName.equals(getKeyIndex().getName())) {
+      return scanDataRow(fromKey, toKey, limit);
+
+    } else {
+      return scanIndexRow(indexName, fromKey, toKey, limit);
+    }
+  }
+
+  /**
+   * Lookup a single entity via its primary key.
+   * <p>
+   * The default implementation of this method performs a scan over the primary
+   * key with {@link #scan(String, byte[], byte[], int)}, '\0' appended onto
+   * the fromKey copy and a result limit of 2.
+   * <p>
+   * If multiple records are discovered {@link OrmDuplicateKeyException} is
+   * thrown back to the caller.
+   *
+   * @param key the primary key instance; must not be null.
+   * @return the entity; null if no entity has this key.
+   * @throws OrmException the data lookup failed.
+   * @throws OrmDuplicateKeyException more than one row matched in the scan.
+   */
+  @Override
+  public T get(K key) throws OrmException, OrmDuplicateKeyException {
+    final String primary = getKeyIndex().getName();
+
+    final IndexKeyBuilder dst = new IndexKeyBuilder();
+    encodeKey(dst, key);
+
+    final byte[] fromKey = dst.toByteArray();
+
+    dst.nul();
+    final byte[] toKey = dst.toByteArray();
+
+    Iterator<T> r = scan(primary, fromKey, toKey, 2).iterator();
+    if (!r.hasNext()) {
+      return null;
+    }
+
+    T obj = r.next();
+    if (r.hasNext()) {
+      throw new OrmDuplicateKeyException("Duplicate " + getRelationName());
+    }
+    return obj;
+  }
+
+  private ResultSet<T> scanDataRow(byte[] fromKey, byte[] toKey, int limit)
+      throws OrmException {
+    IndexKeyBuilder b;
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    b.addRaw(fromKey);
+    fromKey = b.toByteArray();
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    b.addRaw(toKey);
+    toKey = b.toByteArray();
+
+    final ArrayList<T> res = new ArrayList<T>();
+    for (Map.Entry<byte[], byte[]> ent : db.scan(fromKey, toKey)) {
+      res.add(getObjectCodec().decode(ent.getValue()));
+      if (limit > 0 && res.size() == limit) {
+        break;
+      }
+    }
+    return new ListResultSet<T>(res);
+  }
+
+  private ResultSet<T> scanIndexRow(String indexName, byte[] fromKey,
+      byte[] toKey, int limit) throws OrmException {
+    final long now = System.currentTimeMillis();
+    IndexKeyBuilder b;
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(indexName);
+    b.delimiter();
+    b.addRaw(fromKey);
+    fromKey = b.toByteArray();
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(indexName);
+    b.delimiter();
+    b.addRaw(toKey);
+    toKey = b.toByteArray();
+
+    final IndexFunction<T> idx = getQueryIndex(indexName);
+    final ArrayList<T> res = new ArrayList<T>();
+    for (Map.Entry<byte[], byte[]> ent : db.scan(fromKey, toKey)) {
+
+      // Decode the row and try to get the object data. If its
+      // not stored in this row in the secondary index we need
+      // to get the authoritative copy from the main index.
+      //
+      final IndexRow r = IndexRow.CODEC.decode(ent.getValue());
+      byte[] objData = r.getDataCopy();
+      if (objData == null) {
+        b = new IndexKeyBuilder();
+        b.add(getRelationName());
+        b.delimiter();
+        b.addRaw(r.getDataKey());
+        objData = db.get(b.toByteArray());
+      }
+
+      // If we have no data present and this row is stale enough,
+      // drop the row out of the index.
+      //
+      final byte[] idxkey = ent.getKey();
+      if (objData == null) {
+        db.maybeFossilCollectIndexRow(now, idxkey, r);
+        continue;
+      }
+
+      // Verify the object still matches the predicate of the index.
+      // If it does, include it in the result. Otherwise, maybe we
+      // should drop it from the index.
+      //
+      final T obj = getObjectCodec().decode(objData);
+      if (matches(idx, obj, idxkey)) {
+        res.add(obj);
+        if (limit > 0 && res.size() == limit) {
+          break;
+        }
+      } else {
+        db.maybeFossilCollectIndexRow(now, idxkey, r);
+      }
+    }
+    return new ListResultSet<T>(res);
+  }
+
+  @Override
+  public void insert(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      insertOne(obj);
+    }
+  }
+
+  private void insertOne(T obj) throws OrmException {
+    byte[] idx = indexRowData(obj);
+
+    for (IndexFunction<T> f : getQueryIndexes()) {
+      if (f.includes(obj)) {
+        db.upsert(indexRowKey(f, obj), idx);
+      }
+    }
+
+    db.insert(dataRowKey(obj), getObjectCodec().encode(obj).toByteArray());
+  }
+
+  @Override
+  public void update(Iterable<T> instances) throws OrmException {
+    upsert(instances);
+  }
+
+  @Override
+  public void upsert(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      upsertOne(obj);
+    }
+  }
+
+  private void upsertOne(T newObj) throws OrmException {
+    final byte[] key = dataRowKey(newObj);
+    final byte[] oldBin = db.get(key);
+    final T oldObj = oldBin != null ? getObjectCodec().decode(oldBin) : null;
+
+    writeNewIndexes(oldObj, newObj);
+    db.upsert(key, getObjectCodec().encode(newObj).toByteArray());
+    pruneOldIndexes(oldObj, newObj);
+  }
+
+  private void writeNewIndexes(T oldObj, final T newObj) throws OrmException {
+    final byte[] idx = indexRowData(newObj);
+
+    // Write any secondary index records first if they differ
+    // from what would already be there for the prior version.
+    //
+    for (IndexFunction<T> f : getQueryIndexes()) {
+      if (f.includes(newObj)) {
+        final byte[] row = indexRowKey(f, newObj);
+        if (oldObj == null || !matches(f, oldObj, row)) {
+          db.upsert(row, idx);
+        }
+      }
+    }
+  }
+
+  private void pruneOldIndexes(final T oldObj, T newObj) throws OrmException {
+    // Prune any old index records which no longer match.
+    //
+    if (oldObj != null) {
+      for (IndexFunction<T> f : getQueryIndexes()) {
+        if (f.includes(oldObj)) {
+          final byte[] k = indexRowKey(f, oldObj);
+          if (!matches(f, newObj, k)) {
+            db.delete(k);
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public void delete(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      db.delete(dataRowKey(obj));
+
+      for (IndexFunction<T> f : getQueryIndexes()) {
+        if (f.includes(obj)) {
+          db.delete(indexRowKey(f, obj));
+        }
+      }
+    }
+  }
+
+  @Override
+  public T atomicUpdate(K key, final AtomicUpdate<T> update)
+      throws OrmException {
+    final IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    encodeKey(b, key);
+
+    try {
+      final T[] res = (T[]) new Object[3];
+      db.atomicUpdate(b.toByteArray(), new AtomicUpdate<byte[]>() {
+        @Override
+        public byte[] update(byte[] data) {
+          if (data != null) {
+            final T oldObj = getObjectCodec().decode(data);
+            final T newObj = getObjectCodec().decode(data);
+            res[0] = update.update(newObj);
+            res[1] = oldObj;
+            res[2] = newObj;
+            try {
+              writeNewIndexes(oldObj, newObj);
+            } catch (OrmException err) {
+              throw new IndexException(err);
+            }
+            return getObjectCodec().encode(newObj).toByteArray();
+
+          } else {
+            res[0] = null;
+            return null;
+          }
+        }
+      });
+      if (res[0] != null) {
+        pruneOldIndexes(res[1], res[2]);
+      }
+      return res[0];
+    } catch (IndexException err) {
+      throw err.cause;
+    }
+  }
+
+  private boolean matches(IndexFunction<T> f, T obj, byte[] exp) {
+    return f.includes(obj) && Arrays.equals(exp, indexRowKey(f, obj));
+  }
+
+  private byte[] dataRowKey(T obj) {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    getKeyIndex().encode(b, obj);
+    return b.toByteArray();
+  }
+
+  private byte[] indexRowKey(IndexFunction<T> f, T obj) {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(f.getName());
+    b.delimiter();
+    f.encode(b, obj);
+    b.delimiter();
+    getKeyIndex().encode(b, obj);
+    return b.toByteArray();
+  }
+
+  private byte[] indexRowData(T obj) {
+    final long now = System.currentTimeMillis();
+
+    final IndexKeyBuilder b = new IndexKeyBuilder();
+    getKeyIndex().encode(b, obj);
+    final byte[] key = b.toByteArray();
+
+    return IndexRow.CODEC.encode(IndexRow.forKey(now, key)).toByteArray();
+  }
+
+  private static class IndexException extends RuntimeException {
+    final OrmException cause;
+
+    IndexException(OrmException err) {
+      super(err);
+      this.cause = err;
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java
new file mode 100644
index 0000000..90828f9
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java
@@ -0,0 +1,59 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.NoSqlDatabase;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Database type for NoSQL stores accessed through {@link GenericSchema}.
+ * <p>
+ * Pairs with {@link GenericSchema} and {@link GenericAccess} to supply the
+ * fossil collection policy used when pruning stale secondary index rows.
+ */
+public abstract class GenericDatabase<T extends Schema, S extends GenericSchema, A extends GenericAccess>
+    extends NoSqlDatabase<T, S, A> {
+  // 5 minutes, expressed in milliseconds.
+  private static final long DEFAULT_FOSSIL_AGE =
+      TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES);
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected GenericDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    super(schemaBaseType, accessBaseType, appSchema);
+  }
+
+  /**
+   * Default number of milliseconds a transaction can appear to be open.
+   * <p>
+   * Secondary index rows that don't match their primary data object and that
+   * are older than this age are removed from the system during a scan.
+   *
+   * @return milliseconds before considering a fossil index record is garbage
+   *         and should be pruned. By default, 5 minutes.
+   */
+  public long getMaxFossilAge() {
+    return DEFAULT_FOSSIL_AGE;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java
new file mode 100644
index 0000000..9792235
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java
@@ -0,0 +1,121 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.CounterShard;
+import com.google.gwtorm.nosql.IndexKeyBuilder;
+import com.google.gwtorm.nosql.IndexRow;
+import com.google.gwtorm.nosql.NoSqlSchema;
+
+import java.util.Map;
+
+/** Base implementation for {@link Schema} in a {@link GenericDatabase}. */
+public abstract class GenericSchema extends NoSqlSchema {
+  private final GenericDatabase<?, ?, ?> db;
+
+  protected GenericSchema(final GenericDatabase<?, ?, ?> d) {
+    super(d);
+    db = d;
+  }
+
+  /** @return the database that created this schema instance. */
+  public GenericDatabase<?, ?, ?> getDatabase() {
+    return db;
+  }
+
+  /**
+   * Allocate the next value from a named sequence pool.
+   * <p>
+   * The counter is stored under the reserved key ".sequence.&lt;poolName&gt;"
+   * and advanced with {@link #atomicUpdate(byte[], AtomicUpdate)}.
+   *
+   * @param poolName name of the sequence pool.
+   * @return the next sequence value.
+   * @throws OrmException the counter pool has been exhausted.
+   */
+  @Override
+  protected long nextLong(final String poolName) throws OrmException {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(".sequence." + poolName);
+    b.delimiter();
+    try {
+      final long[] res = new long[1];
+      atomicUpdate(b.toByteArray(), new AtomicUpdate<byte[]>() {
+        @Override
+        public byte[] update(byte[] val) {
+          CounterShard ctr;
+          if (val != null) {
+            ctr = CounterShard.CODEC.decode(val);
+          } else {
+            // First use of this pool; start counting from 1.
+            ctr = new CounterShard(1, Long.MAX_VALUE);
+          }
+
+          if (ctr.isEmpty()) {
+            // Tunnel the exhaustion out of the callback; translated to
+            // an OrmException below.
+            throw new NoMoreValues();
+          }
+
+          res[0] = ctr.next();
+          return CounterShard.CODEC.encode(ctr).toByteArray();
+        }
+      });
+      return res[0];
+    } catch (NoMoreValues err) {
+      throw new OrmException("Counter '" + poolName + "' out of values");
+    }
+  }
+
+  /** @return value stored at {@code key}; null if the row does not exist. */
+  public abstract byte[] get(byte[] key) throws OrmException;
+
+  /**
+   * Scan rows in key order.
+   *
+   * @param fromKey first key to return, inclusive.
+   * @param toKey key to stop at, exclusive.
+   * @return the matching rows, in key order.
+   * @throws OrmException the scan cannot be performed.
+   */
+  public abstract Iterable<Map.Entry<byte[], byte[]>> scan(byte[] fromKey,
+      byte[] toKey) throws OrmException;
+
+  /** Store a new row under {@code key}. */
+  public abstract void insert(byte[] key, byte[] data) throws OrmException;
+
+  /** Replace an existing row; defaults to {@link #upsert(byte[], byte[])}. */
+  public void replace(byte[] key, byte[] data) throws OrmException {
+    upsert(key, data);
+  }
+
+  /** Store a row under {@code key}, creating or replacing it. */
+  public abstract void upsert(byte[] key, byte[] data) throws OrmException;
+
+  /** Remove the row stored under {@code key}, if it exists. */
+  public abstract void delete(byte[] key) throws OrmException;
+
+  /**
+   * Atomically read and update a single row.
+   * <p>
+   * Unlike the schema atomicUpdate, this method handles missing rows.
+   * Implementations must be logically equivalent to the following, but
+   * performed atomically within the scope of the single row key:
+   *
+   * <pre>
+   * byte[] oldData = get(key);
+   * byte[] newData = update.update(oldData);
+   * if (newData != null)
+   *   upsert(key, newData);
+   * else if (oldData != null) delete(key);
+   * return newData;
+   * </pre>
+   *
+   * @param key the row key to read, update and return.
+   * @param update action to perform on the row's data element. May be passed in
+   *        null if the row doesn't exist, and should return the new row data,
+   *        or null to remove the row.
+   * @return the return value of {@code update}.
+   * @throws OrmException the database cannot perform the update.
+   */
+  public abstract byte[] atomicUpdate(byte[] key, AtomicUpdate<byte[]> update)
+      throws OrmException;
+
+  /**
+   * Delete the given index row if it is older than the database's
+   * maximum fossil age.
+   *
+   * @param now current system clock, in milliseconds.
+   * @param key data store key of the index row.
+   * @param r decoded index row being considered for collection.
+   * @throws OrmException the delete could not be performed.
+   */
+  public void maybeFossilCollectIndexRow(long now, byte[] key, IndexRow r)
+      throws OrmException {
+    if (r.getTimestamp() + db.getMaxFossilAge() <= now) {
+      delete(key);
+    }
+  }
+
+  /** Unchecked signal that a counter pool has been exhausted. */
+  private static class NoMoreValues extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java
new file mode 100644
index 0000000..3ac6755
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java
@@ -0,0 +1,321 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Map;
+
+/**
+ * Tiny NoSQL database stored on the local filesystem.
+ * <p>
+ * This is a simple NoSQL implementation intended only for development/debugging
+ * purposes. It is not capable of supporting any production traffic. Large data
+ * sets will cause the implementation to fall over, as all records are stored in
+ * memory.
+ * <p>
+ * Although some effort is made to persist data to disk during updates, and
+ * reload it during construction, durability of stored data is not guaranteed.
+ *
+ * @param <T> type of the application schema.
+ */
+public class FileDatabase<T extends Schema> extends
+    TreeMapDatabase<T, FileDatabase.LoggingSchema, FileDatabase.LoggingAccess> {
+  private static final int MAX_LOG_SIZE = 50000;
+
+  private final File heapFile;
+  private final File logFile;
+
+  private RandomAccessFile log;
+  private int logRecords;
+
+  /**
+   * Create the database and implement the application's schema interface.
+   *
+   * @param path path prefix for the data files. File suffixes will be added to
+   *        this name to name the database's various files.
+   * @param schema the application schema this database will open.
+   * @throws OrmException the schema cannot be queried, or the existing database
+   *         files are not readable.
+   */
+  public FileDatabase(final File path, final Class<T> schema)
+      throws OrmException {
+    super(LoggingSchema.class, LoggingAccess.class, schema);
+
+    heapFile = new File(path.getAbsolutePath() + ".nosql_db");
+    logFile = new File(path.getAbsolutePath() + ".nosql_log");
+
+    lock.lock();
+    try {
+      loadHeap();
+      loadLog();
+    } catch (IOException err) {
+      throw new OrmException("Cannot load existing database", err);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /** Gracefully close the database and its log file. */
+  public void close() throws OrmException {
+    lock.lock();
+    try {
+      if (log != null) {
+        try {
+          log.close();
+        } catch (IOException err) {
+          throw new OrmException("Cannot close log file", err);
+        } finally {
+          log = null;
+        }
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void loadHeap() throws IOException {
+    lock.lock();
+    try {
+      table.clear();
+
+      final DataInputStream in;
+      try {
+        in = new DataInputStream( //
+            new BufferedInputStream( //
+                new FileInputStream(heapFile)));
+      } catch (FileNotFoundException e) {
+        return;
+      }
+
+      try {
+        final int cnt = in.readInt();
+        for (int row = 0; row < cnt; row++) {
+          final byte[] key = new byte[in.readInt()];
+          final byte[] val = new byte[in.readInt()];
+          in.readFully(key);
+          in.readFully(val);
+          table.put(key, val);
+        }
+      } finally {
+        in.close();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void loadLog() throws IOException, OrmException {
+    lock.lock();
+    try {
+      logRecords = 0;
+
+      final DataInputStream in;
+      try {
+        in = new DataInputStream( //
+            new BufferedInputStream( //
+                new FileInputStream(logFile)));
+      } catch (FileNotFoundException e) {
+        return;
+      }
+
+      try {
+        for (;; logRecords++) {
+          final int op = in.read();
+          if (op < 0) {
+            break;
+          }
+
+          switch (op) {
+            case 0: {
+              final byte[] key = new byte[in.readInt()];
+              in.readFully(key);
+              table.remove(key);
+              break;
+            }
+
+            case 1: {
+              final byte[] key = new byte[in.readInt()];
+              final byte[] val = new byte[in.readInt()];
+              in.readFully(key);
+              in.readFully(val);
+              table.put(key, val);
+              break;
+            }
+
+            default:
+              throw new OrmException("Unknown log command " + op);
+          }
+        }
+      } finally {
+        in.close();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void writeLog(int op, byte[] key, byte[] val) throws OrmException {
+    if (logRecords == MAX_LOG_SIZE) {
+      compact();
+      return;
+    }
+
+    try {
+      openLog();
+
+      int sz = 1 + 4 + key.length;
+      if (op == 1) {
+        sz += 4 + val.length;
+      }
+
+      final ByteArrayOutputStream buf = new ByteArrayOutputStream(sz);
+      final DataOutputStream out = new DataOutputStream(buf);
+
+      out.write(op);
+      out.writeInt(key.length);
+      if (op == 1) {
+        out.writeInt(val.length);
+      }
+      out.write(key);
+      if (op == 1) {
+        out.write(val);
+      }
+      out.flush();
+
+      log.write(buf.toByteArray());
+      logRecords++;
+    } catch (IOException err) {
+      throw new OrmException("Cannot log operation", err);
+    }
+  }
+
+  private void compact() throws OrmException {
+    lock.lock();
+    try {
+      final File tmp = newTempFile();
+      boolean ok = false;
+      try {
+        DataOutputStream out = new DataOutputStream( //
+            new BufferedOutputStream( //
+                new FileOutputStream(tmp)));
+        try {
+          out.writeInt(table.size());
+          for (Map.Entry<byte[], byte[]> ent : table.entrySet()) {
+            out.writeInt(ent.getKey().length);
+            out.writeInt(ent.getValue().length);
+            out.write(ent.getKey());
+            out.write(ent.getValue());
+          }
+        } finally {
+          out.close();
+        }
+
+        if (!tmp.renameTo(heapFile)) {
+          throw new OrmException("Cannot replace " + heapFile);
+        }
+
+        ok = true;
+
+        openLog();
+        log.seek(0);
+        log.setLength(0);
+
+      } finally {
+        if (!ok) {
+          if (!tmp.delete()) {
+            tmp.deleteOnExit();
+          }
+        }
+      }
+    } catch (IOException err) {
+      throw new OrmException("Cannot compact database", err);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void openLog() throws IOException {
+    if (log == null) {
+      log = new RandomAccessFile(logFile, "rws");
+      log.seek(log.length());
+    }
+  }
+
+  private File newTempFile() throws IOException {
+    return File.createTempFile("heap_", "_db", heapFile.getParentFile());
+  }
+
+  public static abstract class LoggingSchema extends TreeMapSchema {
+    private final FileDatabase<?> db;
+
+    protected LoggingSchema(FileDatabase<?> db) {
+      super(db);
+      this.db = db;
+    }
+
+    @Override
+    public void insert(byte[] key, byte[] data) throws OrmException {
+      db.lock.lock();
+      try {
+        super.insert(key, data);
+        db.writeLog(1, key, data);
+      } finally {
+        db.lock.unlock();
+      }
+    }
+
+    @Override
+    public void upsert(byte[] key, byte[] data) throws OrmException {
+      db.lock.lock();
+      try {
+        super.upsert(key, data);
+        db.writeLog(1, key, data);
+      } finally {
+        db.lock.unlock();
+      }
+    }
+
+    @Override
+    public void delete(byte[] key) throws OrmException {
+      db.lock.lock();
+      try {
+        super.delete(key);
+        db.writeLog(0, key, null);
+      } finally {
+        db.lock.unlock();
+      }
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public static abstract class LoggingAccess extends TreeMapAccess {
+    protected LoggingAccess(LoggingSchema s) {
+      super(s);
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java b/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java
new file mode 100644
index 0000000..33aded6
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java
@@ -0,0 +1,38 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import java.util.Comparator;
+
+/**
+ * Orders raw row keys as unsigned byte strings, shortest-prefix first.
+ * <p>
+ * A shared singleton; the comparator is stateless.
+ */
+class HeapKeyComparator implements Comparator<byte[]> {
+  static final HeapKeyComparator INSTANCE = new HeapKeyComparator();
+
+  private HeapKeyComparator() {
+  }
+
+  @Override
+  public int compare(byte[] a, byte[] b) {
+    final int lim = Math.min(a.length, b.length);
+    for (int i = 0; i < lim; i++) {
+      // Mask to compare as unsigned values, so 0x80..0xff sort after 0x7f.
+      final int cmp = (a[i] & 0xff) - (b[i] & 0xff);
+      if (cmp != 0) {
+        return cmp;
+      }
+    }
+    // Equal common prefix: the shorter key sorts first.
+    return a.length - b.length;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java
new file mode 100644
index 0000000..a55d520
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java
@@ -0,0 +1,43 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+
+/**
+ * Trivial NoSQL database that lives entirely in the JVM heap.
+ * <p>
+ * All rows are held in a {@link java.util.TreeMap} owned by this process, so
+ * nothing survives a restart. Intended only for unit tests and experiments;
+ * see {@link FileDatabase} for a variant that also persists the map to disk.
+ *
+ * @param <T> type of the application schema.
+ * @see FileDatabase
+ */
+public class MemoryDatabase<T extends Schema> extends
+    TreeMapDatabase<T, TreeMapSchema, TreeMapAccess> {
+
+  /**
+   * Create the database and implement the application's schema interface.
+   *
+   * @param schema the application schema this database will open.
+   * @throws OrmException the schema cannot be queried.
+   */
+  public MemoryDatabase(final Class<T> schema) throws OrmException {
+    super(TreeMapSchema.class, TreeMapAccess.class, schema);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java
new file mode 100644
index 0000000..afd5397
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java
@@ -0,0 +1,27 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.nosql.generic.GenericAccess;
+
+/**
+ * Base class for generated {@link Access} implementations whose relation is
+ * stored inside a {@link TreeMapDatabase}.
+ *
+ * @param <T> type of the entity managed by the relation.
+ * @param <K> type of the entity's primary key.
+ */
+public abstract class TreeMapAccess<T, K extends Key<?>> extends
+    GenericAccess<T, K> {
+  protected TreeMapAccess(final TreeMapSchema schema) {
+    super(schema);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java
new file mode 100644
index 0000000..5bad767
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java
@@ -0,0 +1,129 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.generic.GenericDatabase;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.UnknownFieldSet;
+
+import java.io.PrintWriter;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Abstract base for toy NoSQL databases backed by an in-process TreeMap.
+ * <p>
+ * Every row lives in a single sorted map guarded by one lock, so these
+ * databases are only suitable for development and testing; they cannot hold
+ * more data than fits in this JVM's heap, and nothing is durable unless a
+ * subclass (such as {@code FileDatabase}) adds persistence.
+ *
+ * @param <T> type of the application schema.
+ */
+abstract class TreeMapDatabase<T extends Schema, S extends TreeMapSchema, A extends TreeMapAccess>
+    extends GenericDatabase<T, S, A> {
+
+  /** Lock that protects reads and writes of {@link #table}. */
+  final Lock lock;
+
+  /** The NoSQL database storage. */
+  final SortedMap<byte[], byte[]> table;
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected TreeMapDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    super(schemaBaseType, accessBaseType, appSchema);
+
+    // Fair ordering: waiting threads acquire the lock FIFO.
+    lock = new ReentrantLock(true);
+    table = new TreeMap<byte[], byte[]>(HeapKeyComparator.INSTANCE);
+  }
+
+  /**
+   * Try to print the database contents in human readable format.
+   *
+   * @param pw writer to print the database out to.
+   */
+  public void dump(PrintWriter pw) {
+    lock.lock();
+    try {
+      for (final Map.Entry<byte[], byte[]> row : table.entrySet()) {
+        final String k = format(row.getKey());
+        final String v = formatValue(row.getValue());
+
+        if (v.contains("\n")) {
+          // Indent multi-line values two spaces under their key.
+          pw.println(k + ":\n" + "  " + v.replaceAll("\n", "\n  "));
+        } else {
+          pw.println(k + ": " + v);
+        }
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /** Render a value as a protobuf message if it parses, else escape bytes. */
+  private static String formatValue(final byte[] raw) {
+    try {
+      final UnknownFieldSet proto = UnknownFieldSet.parseFrom(raw);
+      return proto.toString();
+    } catch (InvalidProtocolBufferException notProto) {
+      return format(raw);
+    }
+  }
+
+  /** Escape a raw byte string for display, one character per byte. */
+  private static String format(byte[] bin) {
+    final StringBuilder r = new StringBuilder(bin.length);
+    for (int i = 0; i < bin.length; i++) {
+      final byte b = bin[i];
+      if (b == 0x00) {
+        r.append("\\0");
+      } else if (b == 0x01) {
+        r.append("\\1");
+      } else if (b == -1) {
+        r.append("\\xff");
+      } else if (b == '\r') {
+        r.append("\\r");
+      } else {
+        r.append((char) b);
+      }
+    }
+    return r.toString();
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java
new file mode 100644
index 0000000..e9aefc9
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java
@@ -0,0 +1,142 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.OrmDuplicateKeyException;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.generic.GenericSchema;
+
+import java.util.AbstractMap.SimpleImmutableEntry;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+/** Base implementation for {@link Schema} in a {@link TreeMapDatabase}. */
+public abstract class TreeMapSchema extends GenericSchema {
+  private final TreeMapDatabase<?, ?, ?> db;
+
+  protected TreeMapSchema(final TreeMapDatabase<?, ?, ?> d) {
+    super(d);
+    db = d;
+  }
+
+  @Override
+  public void close() {
+    // Nothing to do.
+  }
+
+  @Override
+  public byte[] get(byte[] key) {
+    db.lock.lock();
+    try {
+      return db.table.get(key);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public Iterable<Map.Entry<byte[], byte[]>> scan(byte[] fromKey, byte[] toKey) {
+    db.lock.lock();
+    try {
+      final List<Map.Entry<byte[], byte[]>> res =
+          new ArrayList<Map.Entry<byte[], byte[]>>();
+
+      for (Map.Entry<byte[], byte[]> ent : entries(fromKey, toKey)) {
+        final byte[] key = ent.getKey();
+        final byte[] val = ent.getValue();
+
+        res.add(new Map.Entry<byte[], byte[]>() {
+          @Override
+          public byte[] getKey() {
+            return key;
+          }
+
+          @Override
+          public byte[] getValue() {
+            return val;
+          }
+
+          @Override
+          public byte[] setValue(byte[] value) {
+            throw new UnsupportedOperationException();
+          }
+        });
+      }
+      return res;
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  private Set<Entry<byte[], byte[]>> entries(byte[] fromKey, byte[] toKey) {
+    return db.table.subMap(fromKey, toKey).entrySet();
+  }
+
+  @Override
+  public void insert(byte[] key, byte[] data) throws OrmException {
+    db.lock.lock();
+    try {
+      if (db.table.containsKey(key)) {
+        throw new OrmDuplicateKeyException("Duplicate key");
+      } else {
+        db.table.put(key, data);
+      }
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public void upsert(byte[] key, byte[] data) throws OrmException {
+    db.lock.lock();
+    try {
+      db.table.put(key, data);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public void delete(byte[] key) throws OrmException {
+    db.lock.lock();
+    try {
+      db.table.remove(key);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public byte[] atomicUpdate(byte[] key, AtomicUpdate<byte[]> update)
+      throws OrmException {
+    db.lock.lock();
+    try {
+      final byte[] oldData = get(key);
+      final byte[] newData = update.update(oldData);
+      if (newData != null) {
+        upsert(key, newData);
+      } else if (oldData != null) {
+        delete(key);
+      }
+      return newData;
+    } finally {
+      db.lock.unlock();
+    }
+  }
+}