Merge branch 'stable'

* stable:
  MySQL: Declare VARCHAR columns as BINARY
  Allow registration of custom SqlDialects

Change-Id: Icf91aac2f14cd65b3678cac9c6f289e216c7fc3d
diff --git a/pom.xml b/pom.xml
index 6eae2e4..a5f703f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -21,7 +21,7 @@
   <groupId>gwtorm</groupId>
   <artifactId>gwtorm</artifactId>
   <packaging>jar</packaging>
-  <version>1.1.7</version>
+  <version>1.2-SNAPSHOT</version>
   <name>gwtorm</name>
   <description>Tiny ORM</description>
   <url>http://android.git.kernel.org/?p=tools/gwtorm.git</url>
@@ -334,7 +334,7 @@
     <dependency>
       <groupId>com.google.protobuf</groupId>
       <artifactId>protobuf-java</artifactId>
-      <version>2.2.0</version>
+      <version>2.3.0</version>
       <scope>provided</scope>
     </dependency>
   </dependencies>
diff --git a/src/main/antlr/com/google/gwtorm/schema/Query.g b/src/main/antlr/com/google/gwtorm/schema/Query.g
index ce13198..f3c5e8f 100644
--- a/src/main/antlr/com/google/gwtorm/schema/Query.g
+++ b/src/main/antlr/com/google/gwtorm/schema/Query.g
@@ -28,12 +28,9 @@
   GT;
   GE;
   EQ;
-  NE;
   ID;
   PLACEHOLDER;
   COMMA;
-  ASC;
-  DESC;
   LIMIT;
   CONSTANT_INTEGER;
   CONSTANT_STRING;
@@ -150,17 +147,7 @@
   ;
 
 orderBy
-  : ORDER^ BY! fieldSort (COMMA! fieldSort)*
-  ;
-
-fieldSort
-  : field sortDirection^
-  | field -> ^(ASC field)
-  ;
-
-sortDirection
-  : ASC
-  | DESC
+  : ORDER^ BY! field (COMMA! field)*
   ;
 
 limit
@@ -187,7 +174,6 @@
  | GT
  | GE
  | EQ
- | NE
  ;
 
 field
@@ -214,8 +200,6 @@
 ORDER: 'ORDER' ;
 BY:    'BY'    ;
 AND:   'AND'   ;
-ASC:   'ASC'   ;
-DESC:  'DESC'  ;
 LIMIT: 'LIMIT' ;
 TRUE:  'true'  ;
 FALSE: 'false' ;
@@ -225,7 +209,6 @@
 GT : '>'  ;
 GE : '>=' ;
 EQ : '='  ;
-NE : '!=' ;
 
 PLACEHOLDER: '?' ;
 COMMA: ',' ;
diff --git a/src/main/java/com/google/gwtorm/client/Access.java b/src/main/java/com/google/gwtorm/client/Access.java
index f535aa1..27a26f7 100644
--- a/src/main/java/com/google/gwtorm/client/Access.java
+++ b/src/main/java/com/google/gwtorm/client/Access.java
@@ -47,6 +47,19 @@
  *        implementation. Entity specific key subclasses are recommended.
  */
 public interface Access<T extends Object, K extends Key<?>> {
+  /** @return the name of this relation. */
+  String getRelationName();
+
+  /** @return the id of this relation (if defined), otherwise 0. */
+  int getRelationID();
+
+  /**
+   * Iterate through all members of the relation.
+   *
+   * @return an iterator over all members. This is most likely not fast.
+   */
+  ResultSet<T> iterateAllEntities() throws OrmException;
+
   /**
    * Obtain the primary key of an entity instance.
    *
@@ -106,17 +119,6 @@
   void insert(Iterable<T> instances) throws OrmException;
 
   /**
-   * Insert new entities into the data store.
-   *
-   * @param instances the instances to insert. The iteration occurs only once.
-   * @param txn transaction to batch the operation into. If not null the data
-   *        store changes will be delayed to {@link Transaction#commit()} is
-   *        invoked; if null the operation occurs immediately.
-   * @throws OrmException data insertion failed.
-   */
-  void insert(Iterable<T> instances, Transaction txn) throws OrmException;
-
-  /**
    * Immediately update existing entities in the data store.
    *
    * @param instances the instances to update. The iteration occurs only once.
@@ -126,18 +128,6 @@
   void update(Iterable<T> instances) throws OrmException;
 
   /**
-   * Update existing entities in the data store.
-   *
-   * @param instances the instances to update. The iteration occurs only once.
-   * @param txn transaction to batch the operation into. If not null the data
-   *        store changes will be delayed to {@link Transaction#commit()} is
-   *        invoked; if null the operation occurs immediately.
-   * @throws OrmException data modification failed.
-   * @throws UnsupportedOperationException no PrimaryKey was declared.
-   */
-  void update(Iterable<T> instances, Transaction txn) throws OrmException;
-
-  /**
    * Immediately update or insert entities in the data store.
    *
    * @param instances the instances to update. The iteration occurs only once.
@@ -147,18 +137,6 @@
   void upsert(Iterable<T> instances) throws OrmException;
 
   /**
-   * Update or insert entities in the data store.
-   *
-   * @param instances the instances to update. The iteration occurs only once.
-   * @param txn transaction to batch the operation into. If not null the data
-   *        store changes will be delayed to {@link Transaction#commit()} is
-   *        invoked; if null the operation occurs immediately.
-   * @throws OrmException data modification failed.
-   * @throws UnsupportedOperationException no PrimaryKey was declared.
-   */
-  void upsert(Iterable<T> instances, Transaction txn) throws OrmException;
-
-  /**
    * Immediately delete existing entities from the data store.
    *
    * @param keys the keys to delete. The iteration occurs only once.
@@ -177,18 +155,6 @@
   void delete(Iterable<T> instances) throws OrmException;
 
   /**
-   * Delete existing entities from the data store.
-   *
-   * @param instances the instances to delete. The iteration occurs only once.
-   * @param txn transaction to batch the operation into. If not null the data
-   *        store changes will be delayed to {@link Transaction#commit()} is
-   *        invoked; if null the operation occurs immediately.
-   * @throws OrmException data removal failed.
-   * @throws UnsupportedOperationException no PrimaryKey was declared.
-   */
-  void delete(Iterable<T> instances, Transaction txn) throws OrmException;
-
-  /**
    * Atomically update a single entity.
    * <p>
    * If the entity does not exist, the method returns {@code null} without
diff --git a/src/main/java/com/google/gwtorm/client/OrmRunnable.java b/src/main/java/com/google/gwtorm/client/OrmRunnable.java
deleted file mode 100644
index 4db3f51..0000000
--- a/src/main/java/com/google/gwtorm/client/OrmRunnable.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.client;
-
-/**
- * Runs within an isolated database transaction, retrying if necessary.
- * <p>
- * The {@link Schema} is free to invoke this runnable multiple times if an
- * {@link OrmConcurrencyException} is thrown by the run method.
- *
- * @param <T> type of object the run method returns.
- * @param <S> type of schema the run method needs to perform its work.
- */
-public interface OrmRunnable<T, S extends Schema> {
-  /**
-   * Execute the task once.
-   * <p>
-   * Implementations should read any state they need within the method, to
-   * ensure they are looking at the most current copy of the data from the
-   * database. If a method is invoked a second time to recover from a
-   * concurrency error it would need to read the data again.
-   *
-   * @param db active schema handle to query through, and make updates on.
-   * @param txn the current transaction handle. Commit is invoked by the caller.
-   * @param retry true if this is not the first attempt to execute this task.
-   * @return the return value of the function, if any.
-   * @throws OrmException any database error. {@link OrmConcurrencyException}
-   *         may cause the transaction to be retried.
-   */
-  T run(S db, Transaction txn, boolean retry) throws OrmException;
-}
diff --git a/src/main/java/com/google/gwtorm/client/OrmRuntimeException.java b/src/main/java/com/google/gwtorm/client/OrmRuntimeException.java
new file mode 100644
index 0000000..d021072
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/client/OrmRuntimeException.java
@@ -0,0 +1,32 @@
+// Copyright 2008 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.client;
+
+/**
+ * Any data store read or write error.
+ */
+public class OrmRuntimeException extends RuntimeException {
+  public OrmRuntimeException(final String message) {
+    super(message);
+  }
+
+  public OrmRuntimeException(final String message, final Throwable why) {
+    super(message, why);
+  }
+
+  public OrmRuntimeException(final Throwable why) {
+    super(why);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/client/Query.java b/src/main/java/com/google/gwtorm/client/Query.java
index 6e7bc9a..f2d99bc 100644
--- a/src/main/java/com/google/gwtorm/client/Query.java
+++ b/src/main/java/com/google/gwtorm/client/Query.java
@@ -35,10 +35,10 @@
  *
  * <pre>
  * [WHERE &lt;condition&gt; [AND &lt;condition&gt; ...]]
- * [ORDER BY &lt;property&gt; [ASC | DESC] [, &lt;property&gt; [ASC | DESC] ...]]
+ * [ORDER BY &lt;property&gt; [, &lt;property&gt; ...]]
  * [LIMIT { &lt;count&gt; | ? }]
  *
- * &lt;condition&gt; := &lt;property&gt; { &lt; | &lt;= | &gt; | &gt;= | = | != } &lt;value&gt;
+ * &lt;condition&gt; := &lt;property&gt; { &lt; | &lt;= | &gt; | &gt;= | = } &lt;value&gt;
  * &lt;value&gt; := { ? | true | false | &lt;int&gt; | &lt;string&gt; }
  * </pre>
  * <p>
diff --git a/src/main/java/com/google/gwtorm/client/Relation.java b/src/main/java/com/google/gwtorm/client/Relation.java
index 97b895e..b6bd0c5 100644
--- a/src/main/java/com/google/gwtorm/client/Relation.java
+++ b/src/main/java/com/google/gwtorm/client/Relation.java
@@ -45,4 +45,11 @@
    * @return the name of the data store table. Defaults to the method name.
    */
   String name() default "";
+
+  /**
+   * @return the unique ID for this relation. Must be unique among all
+   *         relations, and conform to Protobuf message ID rules. The ID must be
+   *         in the range [1,2^29-1] except 19000 through 19999.
+   */
+  int id();
 }
diff --git a/src/main/java/com/google/gwtorm/client/Schema.java b/src/main/java/com/google/gwtorm/client/Schema.java
index 2739177..e18133a 100644
--- a/src/main/java/com/google/gwtorm/client/Schema.java
+++ b/src/main/java/com/google/gwtorm/client/Schema.java
@@ -55,6 +55,31 @@
  * </pre>
  */
 public interface Schema {
+  /** @return true if auto flush is enabled (default). */
+  boolean isAutoFlush();
+
+  /**
+   * Set (or unset) the auto-flush flag for this connection.
+   * <p>
+   * If true writes are sent to the database by the time the method returns. If
+   * false, writes will be sent at any time, or some later point in the future.
+   * Callers should use {@link #flush()} to ensure the writes are visible, or
+   * reset the auto flush flag to true.
+   *
+   * @param autoFlush the new setting.
+   * @throws OrmException previously autoFlush was false, the new setting is
+   *         true, and flushed writes cannot be sent.
+   */
+  void setAutoFlush(boolean autoFlush) throws OrmException;
+
+  /**
+   * Ensures all modifications are now visible to others.
+   *
+   * @throws OrmException one or more modifications cannot be applied. The
+   *         writes are now inconsistent.
+   */
+  void flush() throws OrmException;
+
   /**
    * Add any missing columns, create any missing tables or sequences.
    * <p>
@@ -78,28 +103,9 @@
   void pruneSchema(StatementExecutor e) throws OrmException;
 
   /**
-   * Begin a new transaction.
-   * <p>
-   * Only one transaction can be in-flight at a time on any given Schema
-   * instance. Applications must commit or rollback a previously created
-   * transaction before beginning another transaction on the same Schema.
-   *
-   * @return the new transaction.
-   * @throws OrmException the schema has been closed or another transaction has
-   *         already been begun on this schema instance.
+   * @return access interface for each declared relation.
    */
-  Transaction beginTransaction() throws OrmException;
-
-  /**
-   * Execute a task within a transaction, restarting it if necessary.
-   *
-   * @param <T> type of return value for the task.
-   * @param <S> type of <code>this</code>.
-   * @param task the task to execute.
-   * @return the return value of the task.
-   * @throws OrmException the task could not be completed successfully.
-   */
-  <T, S extends Schema> T run(OrmRunnable<T, S> task) throws OrmException;
+  Access<?, ?>[] allRelations();
 
   /**
    * Close the schema and release all resources.
diff --git a/src/main/java/com/google/gwtorm/client/Transaction.java b/src/main/java/com/google/gwtorm/client/Transaction.java
deleted file mode 100644
index 0780ab4..0000000
--- a/src/main/java/com/google/gwtorm/client/Transaction.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.client;
-
-/**
- * An active transaction running on a {@link Schema}.
- * <p>
- * Applications must invoke {@link #commit()} to finish a transaction.
- * <p>
- * Use method on one or more {@link Access} instances to schedule changes into
- * an open transaction:
- * <ul>
- * <li>{@link Access#insert(Iterable, Transaction)}</li>
- * <li>{@link Access#update(Iterable, Transaction)}</li>
- * <li>{@link Access#delete(Iterable, Transaction)}</li>
- * <ul>
- *
- * @see Schema#beginTransaction()
- */
-public interface Transaction {
-  /**
-   * Commit this transaction, finishing all actions.
-   *
-   * @throws OrmException data store refused/rejected one or more actions.
-   */
-  void commit() throws OrmException;
-
-  /**
-   * Rollback (abort) this transaction, performing none of the actions.
-   * <p>
-   * This method has no affect if the transaction has not made any changes.
-   *
-   * @throws OrmException data store couldn't undo the transaction, as it is
-   *         already committed.
-   */
-  void rollback() throws OrmException;
-}
diff --git a/src/main/java/com/google/gwtorm/client/impl/AbstractAccess.java b/src/main/java/com/google/gwtorm/client/impl/AbstractAccess.java
index bd66e34..17ff862 100644
--- a/src/main/java/com/google/gwtorm/client/impl/AbstractAccess.java
+++ b/src/main/java/com/google/gwtorm/client/impl/AbstractAccess.java
@@ -17,16 +17,19 @@
 import com.google.gwtorm.client.Access;
 import com.google.gwtorm.client.AtomicUpdate;
 import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmConcurrencyException;
 import com.google.gwtorm.client.OrmException;
 import com.google.gwtorm.client.ResultSet;
-import com.google.gwtorm.client.Transaction;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-public abstract class AbstractAccess<E, K extends Key<?>, T extends AbstractTransaction>
+public abstract class AbstractAccess<E, K extends Key<?>>
     implements Access<E, K> {
+  private static final int MAX_TRIES = 10;
+
   public ResultSet<E> get(final Iterable<K> keys) throws OrmException {
     final ArrayList<E> r = new ArrayList<E>();
     for (final K key : keys) {
@@ -52,72 +55,29 @@
     }
   }
 
-  public final void insert(final Iterable<E> instances) throws OrmException {
-    doInsert(instances, null);
-  }
-
-  public final void insert(final Iterable<E> instances, final Transaction txn)
+  @Override
+  public E atomicUpdate(final K key, final AtomicUpdate<E> update)
       throws OrmException {
-    if (txn != null) {
-      cast(txn).queueInsert(this, instances);
-    } else {
-      insert(instances);
+    for (int attempts = 1;; attempts++) {
+      try {
+        final E obj = get(key);
+        if (obj == null) {
+          return null;
+        }
+        final E res = update.update(obj);
+        update(Collections.singleton(obj));
+        return res;
+      } catch (OrmConcurrencyException err) {
+        if (attempts < MAX_TRIES) {
+          continue;
+        }
+        throw err;
+      }
     }
   }
 
-  public final void update(final Iterable<E> instances) throws OrmException {
-    doUpdate(instances, null);
-  }
-
-  public final void update(final Iterable<E> instances, final Transaction txn)
-      throws OrmException {
-    if (txn != null) {
-      cast(txn).queueUpdate(this, instances);
-    } else {
-      update(instances);
-    }
-  }
-
-  public final void upsert(final Iterable<E> instances) throws OrmException {
-    doUpsert(instances, null);
-  }
-
-  public final void upsert(final Iterable<E> instances, final Transaction txn)
-      throws OrmException {
-    if (txn != null) {
-      cast(txn).queueUpsert(this, instances);
-    } else {
-      upsert(instances);
-    }
-  }
-
-  public final void delete(final Iterable<E> instances) throws OrmException {
-    doDelete(instances, null);
-  }
-
-  public final void delete(final Iterable<E> instances, final Transaction txn)
-      throws OrmException {
-    if (txn != null) {
-      cast(txn).queueDelete(this, instances);
-    } else {
-      delete(instances);
-    }
-  }
-
-  protected abstract void doInsert(Iterable<E> instances, T txn)
-      throws OrmException;
-
-  protected abstract void doUpdate(Iterable<E> instances, T txn)
-      throws OrmException;
-
-  protected abstract void doUpsert(Iterable<E> instances, T txn)
-      throws OrmException;
-
-  protected abstract void doDelete(Iterable<E> instances, T txn)
-      throws OrmException;
-
-  @SuppressWarnings("unchecked")
-  private T cast(final Transaction txn) {
-    return ((T) txn);
+  @Override
+  public void deleteKeys(Iterable<K> keys) throws OrmException {
+    delete(get(keys));
   }
 }
diff --git a/src/main/java/com/google/gwtorm/client/impl/AbstractResultSet.java b/src/main/java/com/google/gwtorm/client/impl/AbstractResultSet.java
new file mode 100644
index 0000000..ab5f860
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/client/impl/AbstractResultSet.java
@@ -0,0 +1,62 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.client.impl;
+
+import com.google.gwtorm.client.ResultSet;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Simple implementation of a {@link ResultSet}.
+ *
+ * @param <T> type of the object to be returned from the result set.
+ */
+public abstract class AbstractResultSet<T> implements ResultSet<T> {
+  @Override
+  public final Iterator<T> iterator() {
+    return new Iterator<T>() {
+      @Override
+      public boolean hasNext() {
+        return AbstractResultSet.this.hasNext();
+      }
+
+      @Override
+      public T next() {
+        return AbstractResultSet.this.next();
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  public List<T> toList() {
+    List<T> r = new ArrayList<T>();
+    for (T obj : this) {
+      r.add(obj);
+    }
+    return r;
+  }
+
+  /** @return true if another result remains, false otherwise. */
+  protected abstract boolean hasNext();
+
+  /** @return next result. */
+  protected abstract T next();
+}
diff --git a/src/main/java/com/google/gwtorm/client/impl/AbstractTransaction.java b/src/main/java/com/google/gwtorm/client/impl/AbstractTransaction.java
deleted file mode 100644
index def8385..0000000
--- a/src/main/java/com/google/gwtorm/client/impl/AbstractTransaction.java
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.client.impl;
-
-import com.google.gwtorm.client.Key;
-import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.Transaction;
-
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-public abstract class AbstractTransaction implements Transaction {
-  private static LinkedHashMap<Object, Action<?, Key<?>, AbstractTransaction>> newMap() {
-    return new LinkedHashMap<Object, Action<?, Key<?>, AbstractTransaction>>();
-  }
-
-  protected final Map<Object, Action<?, Key<?>, AbstractTransaction>> pendingInsert;
-  protected final Map<Object, Action<?, Key<?>, AbstractTransaction>> pendingUpdate;
-  protected final Map<Object, Action<?, Key<?>, AbstractTransaction>> pendingUpsert;
-  protected final Map<Object, Action<?, Key<?>, AbstractTransaction>> pendingDelete;
-
-  protected AbstractTransaction() {
-    pendingInsert = newMap();
-    pendingUpdate = newMap();
-    pendingUpsert = newMap();
-    pendingDelete = newMap();
-  }
-
-  public void commit() throws OrmException {
-    for (Action<?, Key<?>, AbstractTransaction> a : pendingDelete.values()) {
-      a.doDelete(this);
-    }
-    for (Action<?, Key<?>, AbstractTransaction> a : pendingInsert.values()) {
-      a.doInsert(this);
-    }
-    for (Action<?, Key<?>, AbstractTransaction> a : pendingUpdate.values()) {
-      a.doUpdate(this);
-    }
-    for (Action<?, Key<?>, AbstractTransaction> a : pendingUpsert.values()) {
-      a.doUpsert(this);
-    }
-  }
-
-  <E, K extends Key<?>, T extends AbstractTransaction> void queueInsert(
-      final AbstractAccess<E, ?, T> access, final Iterable<E> list) {
-    queue(pendingInsert, access, list);
-  }
-
-  <E, K extends Key<?>, T extends AbstractTransaction> void queueUpdate(
-      final AbstractAccess<E, ?, T> access, final Iterable<E> list) {
-    queue(pendingUpdate, access, list);
-  }
-
-  <E, K extends Key<?>, T extends AbstractTransaction> void queueUpsert(
-      final AbstractAccess<E, ?, T> access, final Iterable<E> list) {
-    queue(pendingUpsert, access, list);
-  }
-
-  <E, K extends Key<?>, T extends AbstractTransaction> void queueDelete(
-      final AbstractAccess<E, ?, T> access, final Iterable<E> list) {
-    queue(pendingDelete, access, list);
-  }
-
-  private static <E, K extends Key<?>, T extends AbstractTransaction> void queue(
-      final Map<Object, Action<?, Key<?>, AbstractTransaction>> queue,
-      final AbstractAccess<E, K, T> access, final Iterable<E> list) {
-    Action<E, K, T> c = get(queue, access);
-    if (c == null) {
-      c = new Action<E, K, T>(access);
-      put(queue, c);
-    }
-    c.addAll(list);
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <E, K extends Key<?>, T extends AbstractTransaction> Action<E, K, T> get(
-      final Map<Object, Action<?, Key<?>, AbstractTransaction>> q,
-      final AbstractAccess<E, K, T> access) {
-    return (Action<E, K, T>) q.get(access);
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <E, K extends Key<?>, T extends AbstractTransaction> void put(
-      final Map queue, Action<E, K, T> c) {
-    // This silly little method was needed to defeat the Java compiler's
-    // generic type checking. Somehow we got lost in the anonymous types
-    // from all the ? in our Map definition and the compiler just won't let
-    // us do a put into the map.
-    //
-    queue.put(c.access, c);
-  }
-
-  private static class Action<E, K extends Key<?>, T extends AbstractTransaction> {
-    private final AbstractAccess<E, K, T> access;
-    private final Set<E> instances;
-
-    Action(final AbstractAccess<E, K, T> a) {
-      access = a;
-      instances = new LinkedHashSet<E>();
-    }
-
-    void addAll(final Iterable<E> list) {
-      for (final E o : list) {
-        instances.add(o);
-      }
-    }
-
-    void doInsert(final T t) throws OrmException {
-      access.doInsert(instances, t);
-    }
-
-    void doUpdate(final T t) throws OrmException {
-      access.doUpdate(instances, t);
-    }
-
-    void doUpsert(final T t) throws OrmException {
-      access.doUpsert(instances, t);
-    }
-
-    void doDelete(final T t) throws OrmException {
-      access.doDelete(instances, t);
-    }
-  }
-}
diff --git a/src/main/java/com/google/gwtorm/jdbc/AbstractSchemaFactory.java b/src/main/java/com/google/gwtorm/jdbc/AbstractSchemaFactory.java
deleted file mode 100644
index 2db0290..0000000
--- a/src/main/java/com/google/gwtorm/jdbc/AbstractSchemaFactory.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.jdbc;
-
-import com.google.gwtorm.client.Schema;
-
-import java.sql.Connection;
-
-/**
- * Internal interface to quickly create Schema instances.
- * <p>
- * Applications should not use this interface. It is automatically implemented
- * at runtime to provide fast construction for new Schema instances within
- * {@link Database#open()}.
- *
- * @param <T> type of the application schema.
- */
-public abstract class AbstractSchemaFactory<T extends Schema> {
-  /**
-   * Create a new schema instance.
-   *
-   * @param db the database instance which created the connection.
-   * @param c the JDBC connection the instance will talk to the database on.
-   * @return the new schema instance, wrapping the connection.
-   */
-  public abstract T create(Database<T> db, Connection c);
-}
diff --git a/src/main/java/com/google/gwtorm/jdbc/gen/AccessGen.java b/src/main/java/com/google/gwtorm/jdbc/AccessGen.java
similarity index 94%
rename from src/main/java/com/google/gwtorm/jdbc/gen/AccessGen.java
rename to src/main/java/com/google/gwtorm/jdbc/AccessGen.java
index ee5ccde..5971198 100644
--- a/src/main/java/com/google/gwtorm/jdbc/gen/AccessGen.java
+++ b/src/main/java/com/google/gwtorm/jdbc/AccessGen.java
@@ -12,20 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package com.google.gwtorm.jdbc.gen;
+package com.google.gwtorm.jdbc;
 
 import com.google.gwtorm.client.Access;
 import com.google.gwtorm.client.Key;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.impl.ListResultSet;
-import com.google.gwtorm.jdbc.JdbcAccess;
-import com.google.gwtorm.jdbc.JdbcSchema;
 import com.google.gwtorm.schema.ColumnModel;
 import com.google.gwtorm.schema.KeyModel;
 import com.google.gwtorm.schema.QueryModel;
 import com.google.gwtorm.schema.RelationModel;
 import com.google.gwtorm.schema.Util;
 import com.google.gwtorm.schema.sql.SqlDialect;
+import com.google.gwtorm.server.CodeGenSupport;
+import com.google.gwtorm.server.GeneratedClassLoader;
 
 import org.objectweb.asm.ClassWriter;
 import org.objectweb.asm.Label;
@@ -42,7 +41,7 @@
 import java.util.List;
 
 /** Generates a concrete implementation of an {@link Access} extension. */
-public class AccessGen implements Opcodes {
+class AccessGen implements Opcodes {
   private static final String REL_ALIAS = "T";
 
   private static enum DmlType {
@@ -60,7 +59,6 @@
   }
 
   private final GeneratedClassLoader classLoader;
-  private final SchemaGen.RelationGen info;
   private final RelationModel model;
   private final SqlDialect dialect;
 
@@ -70,22 +68,21 @@
   private String implTypeName;
   private Type entityType;
 
-
-  public AccessGen(final GeneratedClassLoader loader,
-      final SchemaGen.RelationGen ri) {
+  AccessGen(final GeneratedClassLoader loader,
+      final RelationModel rm, final SqlDialect sd) {
     classLoader = loader;
-    info = ri;
-    model = info.model;
-    dialect = ri.getDialect();
+    model = rm;
+    dialect = sd;
     entityType =
         Type.getObjectType(model.getEntityTypeClassName().replace('.', '/'));
   }
 
-  public void defineClass() throws OrmException {
+  public Class<?> create() throws OrmException {
     init();
     implementConstructor();
     implementGetString("getRelationName", model.getRelationName());
     implementGetString("getInsertOneSql", model.getInsertOneSql(dialect));
+    implementGetRelationID();
 
     if (model.getPrimaryKey() != null) {
       if (model.getDependentColumns().isEmpty()) {
@@ -119,12 +116,20 @@
     for (final QueryModel q : model.getQueries()) {
       implementQuery(q);
     }
+    implementQuery(new QueryModel(model, "iterateAllEntities", ""));
 
     cw.visitEnd();
     classLoader.defineClass(implClassName, cw.toByteArray());
-    info.accessClassName = implClassName;
+    return loadClass();
   }
 
+  private Class<?> loadClass() throws OrmException {
+    try {
+      return Class.forName(implClassName, false, classLoader);
+    } catch (ClassNotFoundException err) {
+      throw new OrmException("Cannot load generated class", err);
+    }
+  }
 
   private void init() {
     superTypeName = Type.getInternalName(JdbcAccess.class);
@@ -168,6 +173,17 @@
     mv.visitEnd();
   }
 
+  private void implementGetRelationID() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getRelationID", Type
+            .getMethodDescriptor(Type.INT_TYPE, new Type[] {}), null, null);
+    mv.visitCode();
+    new CodeGenSupport(mv).push(model.getRelationID());
+    mv.visitInsn(IRETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
   private void implementMissingGetString(final String methodName,
       final String why) {
     final MethodVisitor mv =
@@ -641,7 +657,7 @@
     mv.visitVarInsn(ALOAD, 0);
     mv.visitVarInsn(ALOAD, psvar);
     mv.visitMethodInsn(INVOKEVIRTUAL, superTypeName, "queryList", Type
-        .getMethodDescriptor(Type.getType(ListResultSet.class),
+        .getMethodDescriptor(Type.getType(com.google.gwtorm.client.ResultSet.class),
             new Type[] {Type.getType(PreparedStatement.class)}));
     mv.visitInsn(ARETURN);
     mv.visitMaxs(-1, -1);
@@ -733,7 +749,7 @@
     mv.visitVarInsn(ALOAD, 0);
     mv.visitVarInsn(ALOAD, psvar);
     mv.visitMethodInsn(INVOKEVIRTUAL, superTypeName, "queryList", Type
-        .getMethodDescriptor(Type.getType(ListResultSet.class),
+        .getMethodDescriptor(Type.getType(com.google.gwtorm.client.ResultSet.class),
             new Type[] {Type.getType(PreparedStatement.class)}));
     mv.visitInsn(ARETURN);
     mv.visitMaxs(-1, -1);
diff --git a/src/main/java/com/google/gwtorm/jdbc/Database.java b/src/main/java/com/google/gwtorm/jdbc/Database.java
index 65d0929..c47124a 100644
--- a/src/main/java/com/google/gwtorm/jdbc/Database.java
+++ b/src/main/java/com/google/gwtorm/jdbc/Database.java
@@ -18,19 +18,17 @@
 import com.google.gwtorm.client.OrmException;
 import com.google.gwtorm.client.Schema;
 import com.google.gwtorm.client.SchemaFactory;
-import com.google.gwtorm.jdbc.gen.GeneratedClassLoader;
-import com.google.gwtorm.jdbc.gen.SchemaFactoryGen;
-import com.google.gwtorm.jdbc.gen.SchemaGen;
+import com.google.gwtorm.schema.RelationModel;
 import com.google.gwtorm.schema.SchemaModel;
 import com.google.gwtorm.schema.java.JavaSchemaModel;
 import com.google.gwtorm.schema.sql.SqlDialect;
+import com.google.gwtorm.server.GeneratedClassLoader;
+import com.google.gwtorm.server.SchemaConstructorGen;
+import com.google.gwtorm.server.SchemaGen;
 import com.google.gwtorm.server.StandardKeyEncoder;
 
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.WeakHashMap;
 
 import javax.sql.DataSource;
 
@@ -50,43 +48,13 @@
  * @param <T>
  */
 public class Database<T extends Schema> implements SchemaFactory<T> {
-  private static final Map<SchemaKey, String> schemaFactoryNames =
-      Collections.synchronizedMap(new WeakHashMap<SchemaKey, String>());
-
-  private static class SchemaKey {
-    final Class<?> schema;
-    final SqlDialect dialect;
-
-    SchemaKey(Class<?> s, SqlDialect d) {
-      schema = s;
-      dialect = d;
-    }
-
-    @Override
-    public int hashCode() {
-      return schema.hashCode() * 31 + dialect.getClass().hashCode();
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (o instanceof SchemaKey) {
-        SchemaKey a = this;
-        SchemaKey b = (SchemaKey) o;
-
-        return a.schema == b.schema
-            && a.dialect.getClass() == b.dialect.getClass();
-      }
-      return false;
-    }
-  }
-
   static {
     KeyUtil.setEncoderImpl(new StandardKeyEncoder());
   }
 
   private final DataSource dataSource;
   private final JavaSchemaModel schemaModel;
-  private final AbstractSchemaFactory<T> implFactory;
+  private final SchemaFactory<T> implFactory;
   private final SqlDialect implDialect;
 
   /**
@@ -115,35 +83,22 @@
 
     schemaModel = new JavaSchemaModel(schema);
     final GeneratedClassLoader loader = newLoader(schema);
-    final SchemaKey key = new SchemaKey(schema, dialect);
-    final String cachedName = schemaFactoryNames.get(key);
-    AbstractSchemaFactory<T> factory = null;
-    if (cachedName != null) {
-      factory = newFactory(loader, cachedName);
-    }
-    if (factory == null) {
-      final SchemaGen gen = new SchemaGen(loader, schemaModel, dialect);
-      gen.defineClass();
-      factory = new SchemaFactoryGen<T>(loader, gen).create();
-      schemaFactoryNames.put(key, factory.getClass().getName());
-    }
-    implFactory = factory;
+    final Class<T> impl = generate(dialect, loader);
+    implFactory = new SchemaConstructorGen<T>(loader, impl, this).create();
     implDialect = dialect;
   }
 
   @SuppressWarnings("unchecked")
-  private AbstractSchemaFactory<T> newFactory(final ClassLoader cl,
-      final String name) {
-    try {
-      final Class<?> ft = Class.forName(name, true, cl);
-      return (AbstractSchemaFactory<T>) ft.newInstance();
-    } catch (InstantiationException e) {
-      return null;
-    } catch (IllegalAccessException e) {
-      return null;
-    } catch (ClassNotFoundException e) {
-      return null;
-    }
+  private Class<T> generate(final SqlDialect dialect,
+      final GeneratedClassLoader loader) throws OrmException {
+    return new SchemaGen(loader, schemaModel, getClass(), JdbcSchema.class,
+        new SchemaGen.AccessGenerator() {
+          @Override
+          public Class<?> create(GeneratedClassLoader loader, RelationModel rm)
+              throws OrmException {
+            return new AccessGen(loader, rm, dialect).create();
+          }
+        }).create();
   }
 
   SqlDialect getDialect() {
@@ -163,6 +118,10 @@
    *         cause of the connection failure.
    */
   public T open() throws OrmException {
+    return implFactory.open();
+  }
+
+  Connection newConnection() throws OrmException {
     final Connection conn;
     try {
       conn = dataSource.getConnection();
@@ -181,8 +140,7 @@
       }
       throw new OrmException("Cannot force auto-commit on connection", e);
     }
-
-    return implFactory.create(this, conn);
+    return conn;
   }
 
   private static <T> GeneratedClassLoader newLoader(final Class<T> schema) {
diff --git a/src/main/java/com/google/gwtorm/jdbc/JdbcAccess.java b/src/main/java/com/google/gwtorm/jdbc/JdbcAccess.java
index e54df99..b75011a 100644
--- a/src/main/java/com/google/gwtorm/jdbc/JdbcAccess.java
+++ b/src/main/java/com/google/gwtorm/jdbc/JdbcAccess.java
@@ -15,12 +15,9 @@
 package com.google.gwtorm.jdbc;
 
 import com.google.gwtorm.client.Access;
-import com.google.gwtorm.client.AtomicUpdate;
 import com.google.gwtorm.client.Key;
 import com.google.gwtorm.client.OrmConcurrencyException;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.OrmRunnable;
-import com.google.gwtorm.client.Transaction;
 import com.google.gwtorm.client.impl.AbstractAccess;
 import com.google.gwtorm.client.impl.ListResultSet;
 
@@ -33,7 +30,7 @@
 
 /** Internal base class for implementations of {@link Access}. */
 public abstract class JdbcAccess<T, K extends Key<?>> extends
-    AbstractAccess<T, K, JdbcTransaction> {
+    AbstractAccess<T, K> {
   private final JdbcSchema schema;
 
   protected JdbcAccess(final JdbcSchema s) {
@@ -129,33 +126,29 @@
     }
   }
 
-  protected ListResultSet<T> queryList(final PreparedStatement ps)
-      throws OrmException {
+  protected com.google.gwtorm.client.ResultSet<T> queryList(
+      final PreparedStatement ps) throws OrmException {
+    final ResultSet rs;
     try {
-      try {
-        final ResultSet rs = ps.executeQuery();
-        try {
-          final ArrayList<T> r = new ArrayList<T>();
-          while (rs.next()) {
-            final T o = newEntityInstance();
-            bindOneFetch(rs, o);
-            r.add(o);
-          }
-          return new ListResultSet<T>(r);
-        } finally {
-          rs.close();
-        }
-      } finally {
+      rs = ps.executeQuery();
+      if (!rs.next()) {
+        rs.close();
         ps.close();
+        return new ListResultSet<T>(Collections.<T> emptyList());
       }
-    } catch (SQLException e) {
-      throw convertError("fetch", e);
+    } catch (SQLException err) {
+      try {
+        ps.close();
+      } catch (SQLException e) {
+        // Ignored.
+      }
+      throw convertError("fetch", err);
     }
+    return new JdbcResultSet<T, K>(this, rs, ps);
   }
 
   @Override
-  protected void doInsert(final Iterable<T> instances, final JdbcTransaction txn)
-      throws OrmException {
+  public void insert(final Iterable<T> instances) throws OrmException {
     try {
       PreparedStatement ps = null;
       try {
@@ -180,8 +173,7 @@
   }
 
   @Override
-  protected void doUpdate(final Iterable<T> instances, final JdbcTransaction txn)
-      throws OrmException {
+  public void update(final Iterable<T> instances) throws OrmException {
     try {
       PreparedStatement ps = null;
       try {
@@ -206,8 +198,7 @@
   }
 
   @Override
-  protected void doUpsert(final Iterable<T> instances, final JdbcTransaction txn)
-      throws OrmException {
+  public void upsert(final Iterable<T> instances) throws OrmException {
     // Assume update first, it will cheaply tell us if the row is missing.
     //
     Collection<T> inserts = null;
@@ -254,13 +245,12 @@
     }
 
     if (inserts != null) {
-      doInsert(inserts, txn);
+      insert(inserts);
     }
   }
 
   @Override
-  protected void doDelete(final Iterable<T> instances, final JdbcTransaction txn)
-      throws OrmException {
+  public void delete(final Iterable<T> instances) throws OrmException {
     try {
       PreparedStatement ps = null;
       try {
@@ -301,30 +291,7 @@
     }
   }
 
-  @Override
-  public T atomicUpdate(final K key, final AtomicUpdate<T> update)
-      throws OrmException {
-    return schema.run(new OrmRunnable<T, JdbcSchema>() {
-      @Override
-      public T run(JdbcSchema db, Transaction txn, boolean retry)
-          throws OrmException {
-        final T obj = get(key);
-        if (obj == null) {
-          return null;
-        }
-        final T res = update.update(obj);
-        update(Collections.singleton(obj), txn);
-        return res;
-      }
-    });
-  }
-
-  @Override
-  public void deleteKeys(Iterable<K> keys) throws OrmException {
-    delete(get(keys));
-  }
-
-  private OrmException convertError(final String op, final SQLException err) {
+  protected OrmException convertError(final String op, final SQLException err) {
     if (err.getCause() == null && err.getNextException() != null) {
       err.initCause(err.getNextException());
     }
@@ -333,8 +300,6 @@
 
   protected abstract T newEntityInstance();
 
-  protected abstract String getRelationName();
-
   protected abstract String getInsertOneSql();
 
   protected abstract String getUpdateOneSql();
diff --git a/src/main/java/com/google/gwtorm/jdbc/JdbcResultSet.java b/src/main/java/com/google/gwtorm/jdbc/JdbcResultSet.java
new file mode 100644
index 0000000..f041964
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/jdbc/JdbcResultSet.java
@@ -0,0 +1,100 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.jdbc;
+
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmRuntimeException;
+import com.google.gwtorm.client.impl.AbstractResultSet;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.NoSuchElementException;
+
+class JdbcResultSet<T, K extends Key<?>> extends AbstractResultSet<T> {
+  private final JdbcAccess<T, K> access;
+  private final ResultSet rs;
+  private final PreparedStatement ps;
+  private Boolean haveRow;
+  private boolean closed;
+
+  JdbcResultSet(JdbcAccess<T, K> jdbcAccess, ResultSet rs, PreparedStatement ps) {
+    this.access = jdbcAccess;
+    this.rs = rs;
+    this.ps = ps;
+    this.haveRow = Boolean.TRUE;
+  }
+
+  @Override
+  protected boolean hasNext() {
+    if (closed) {
+      return false;
+    }
+
+    if (haveRow == null) {
+      try {
+        if (rs.next()) {
+          haveRow = Boolean.TRUE;
+        } else {
+          haveRow = Boolean.FALSE;
+          close();
+        }
+      } catch (SQLException err) {
+        close();
+        throw new OrmRuntimeException(access.convertError("fetch", err));
+      }
+    }
+
+    return haveRow;
+  }
+
+  @Override
+  protected T next() {
+    if (!hasNext()) {
+      throw new NoSuchElementException();
+    }
+
+    final T o = access.newEntityInstance();
+    try {
+      access.bindOneFetch(rs, o);
+    } catch (SQLException err) {
+      close();
+      throw new OrmRuntimeException(access.convertError("fetch", err));
+    }
+
+    haveRow = null;
+    hasNext();
+    return o;
+  }
+
+  @Override
+  public void close() {
+    if (!closed) {
+      closed = true;
+
+      try {
+        rs.close();
+      } catch (SQLException e) {
+        // Ignore
+      }
+
+      try {
+        ps.close();
+      } catch (SQLException e) {
+        // Ignore
+      }
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/jdbc/JdbcSchema.java b/src/main/java/com/google/gwtorm/jdbc/JdbcSchema.java
index a587cf1..a4ecb8f 100644
--- a/src/main/java/com/google/gwtorm/jdbc/JdbcSchema.java
+++ b/src/main/java/com/google/gwtorm/jdbc/JdbcSchema.java
@@ -14,17 +14,15 @@
 
 package com.google.gwtorm.jdbc;
 
-import com.google.gwtorm.client.OrmConcurrencyException;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.OrmRunnable;
 import com.google.gwtorm.client.Schema;
 import com.google.gwtorm.client.StatementExecutor;
-import com.google.gwtorm.client.Transaction;
 import com.google.gwtorm.schema.ColumnModel;
 import com.google.gwtorm.schema.RelationModel;
 import com.google.gwtorm.schema.SchemaModel;
 import com.google.gwtorm.schema.SequenceModel;
 import com.google.gwtorm.schema.sql.SqlDialect;
+import com.google.gwtorm.server.AbstractSchema;
 
 import java.sql.Connection;
 import java.sql.SQLException;
@@ -33,14 +31,27 @@
 import java.util.Set;
 
 /** Internal base class for implementations of {@link Schema}. */
-public abstract class JdbcSchema implements Schema {
-  private static final int MAX_TRIES = 10;
+public abstract class JdbcSchema extends AbstractSchema {
   private final Database<?> dbDef;
   private Connection conn;
 
-  protected JdbcSchema(final Database<?> d, final Connection c) {
+  protected JdbcSchema(final Database<?> d) throws OrmException {
     dbDef = d;
-    conn = c;
+    conn = dbDef.newConnection();
+  }
+
+  @Override
+  public boolean isAutoFlush() {
+    return true; // We are always flushing.
+  }
+
+  @Override
+  public void setAutoFlush(boolean autoFlush) {
+  }
+
+  @Override
+  public void flush() {
+    // Do nothing, we flush by default during execution.
   }
 
   public final Connection getConnection() {
@@ -51,27 +62,6 @@
     return dbDef.getDialect();
   }
 
-  public <T, S extends Schema> T run(final OrmRunnable<T, S> task)
-      throws OrmException {
-    for (int attempts = 1;; attempts++) {
-      try {
-        final Transaction txn = beginTransaction();
-        try {
-          return task.run((S) this, txn, attempts > 1);
-        } finally {
-          txn.commit();
-        }
-      } catch (OrmConcurrencyException err) {
-        // If the commit failed, our implementation rolled back automatically.
-        //
-        if (attempts < MAX_TRIES) {
-          continue;
-        }
-        throw err;
-      }
-    }
-  }
-
   public void updateSchema(final StatementExecutor e) throws OrmException {
     try {
       createSequences(e);
@@ -214,12 +204,9 @@
     }
   }
 
-  protected long nextLong(final String query) throws OrmException {
-    return getDialect().nextLong(getConnection(), query);
-  }
-
-  public Transaction beginTransaction() {
-    return new JdbcTransaction(this);
+  @Override
+  protected long nextLong(final String poolName) throws OrmException {
+    return getDialect().nextLong(getConnection(), poolName);
   }
 
   public void close() {
diff --git a/src/main/java/com/google/gwtorm/jdbc/JdbcTransaction.java b/src/main/java/com/google/gwtorm/jdbc/JdbcTransaction.java
deleted file mode 100644
index 80cf804..0000000
--- a/src/main/java/com/google/gwtorm/jdbc/JdbcTransaction.java
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.jdbc;
-
-import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.Transaction;
-import com.google.gwtorm.client.impl.AbstractTransaction;
-
-import java.sql.SQLException;
-
-/** Implementation of the {@link Transaction} interface, on JDBC. */
-class JdbcTransaction extends AbstractTransaction {
-  private final JdbcSchema schema;
-  private boolean inProgress;
-  private boolean committed;
-
-  JdbcTransaction(final JdbcSchema s) {
-    schema = s;
-  }
-
-  @Override
-  public void commit() throws OrmException {
-    notCommitted();
-
-    if (!inProgress) {
-      try {
-        schema.getConnection().setAutoCommit(false);
-      } catch (SQLException e) {
-        throw new OrmException("Cannot start transaction", e);
-      }
-      inProgress = true;
-    }
-
-    try {
-      super.commit();
-    } catch (OrmException e) {
-      try {
-        rollback();
-      } catch (OrmException e2) {
-        // Ignore the cascaded rollback error.
-      }
-      throw e;
-    } catch (RuntimeException e) {
-      try {
-        rollback();
-      } catch (OrmException e2) {
-        // Ignore the cascaded rollback error.
-      }
-      throw e;
-    }
-
-    try {
-      schema.getConnection().commit();
-      committed = true;
-    } catch (SQLException e) {
-      throw new OrmException("Transaction failed", e);
-    } finally {
-      exitTransaction();
-    }
-  }
-
-  public void rollback() throws OrmException {
-    notCommitted();
-
-    if (inProgress) {
-      try {
-        schema.getConnection().rollback();
-      } catch (SQLException e) {
-        throw new OrmException("Rollback failed", e);
-      } finally {
-        exitTransaction();
-      }
-    }
-  }
-
-  private void notCommitted() throws OrmException {
-    if (committed) {
-      throw new OrmException("Transaction already committed");
-    }
-  }
-
-  private void exitTransaction() {
-    try {
-      schema.getConnection().setAutoCommit(true);
-    } catch (SQLException e) {
-    } finally {
-      inProgress = false;
-    }
-  }
-}
diff --git a/src/main/java/com/google/gwtorm/jdbc/gen/SchemaFactoryGen.java b/src/main/java/com/google/gwtorm/jdbc/gen/SchemaFactoryGen.java
deleted file mode 100644
index 094888c..0000000
--- a/src/main/java/com/google/gwtorm/jdbc/gen/SchemaFactoryGen.java
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.jdbc.gen;
-
-import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.Schema;
-import com.google.gwtorm.jdbc.AbstractSchemaFactory;
-import com.google.gwtorm.jdbc.Database;
-import com.google.gwtorm.schema.Util;
-
-import org.objectweb.asm.ClassWriter;
-import org.objectweb.asm.MethodVisitor;
-import org.objectweb.asm.Opcodes;
-import org.objectweb.asm.Type;
-
-import java.sql.Connection;
-
-/** Generates a factory to efficiently create new Schema instances. */
-public class SchemaFactoryGen<T extends Schema> implements Opcodes {
-  private final GeneratedClassLoader classLoader;
-  private final SchemaGen schemaGen;
-  private ClassWriter cw;
-  private String superTypeName;
-  private String implClassName;
-  private String implTypeName;
-
-  public SchemaFactoryGen(final GeneratedClassLoader loader, final SchemaGen gen) {
-    classLoader = loader;
-    schemaGen = gen;
-  }
-
-  public void defineClass() throws OrmException {
-    init();
-    implementEmptyConstructor();
-    implementCreate();
-    cw.visitEnd();
-    classLoader.defineClass(implClassName, cw.toByteArray());
-  }
-
-  public AbstractSchemaFactory<T> create() throws OrmException {
-    defineClass();
-    try {
-      return cast(Class.forName(implClassName, true, classLoader).newInstance());
-    } catch (InstantiationException e) {
-      throw new OrmException("Cannot create schema factory", e);
-    } catch (IllegalAccessException e) {
-      throw new OrmException("Cannot create schema factory", e);
-    } catch (ClassNotFoundException e) {
-      throw new OrmException("Cannot create schema factory", e);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private AbstractSchemaFactory<T> cast(final Object newInstance) {
-    return (AbstractSchemaFactory<T>) newInstance;
-  }
-
-  private void init() {
-    superTypeName = Type.getInternalName(AbstractSchemaFactory.class);
-    implClassName =
-        schemaGen.getSchemaClassName() + "_Factory_" + Util.createRandomName();
-    implTypeName = implClassName.replace('.', '/');
-
-    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
-    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null,
-        superTypeName, null);
-  }
-
-  private void implementEmptyConstructor() {
-    final String consName = "<init>";
-    final String consDesc =
-        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {});
-    final MethodVisitor mv;
-    mv = cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
-    mv.visitCode();
-    mv.visitVarInsn(ALOAD, 0);
-    mv.visitMethodInsn(INVOKESPECIAL, superTypeName, consName, consDesc);
-    mv.visitInsn(RETURN);
-    mv.visitMaxs(-1, -1);
-    mv.visitEnd();
-  }
-
-  private void implementCreate() {
-    final MethodVisitor mv =
-        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "create", Type
-            .getMethodDescriptor(Type.getType(Schema.class), new Type[] {
-                Type.getType(Database.class), Type.getType(Connection.class)}),
-            null, null);
-    mv.visitCode();
-
-    mv.visitTypeInsn(NEW, schemaGen.getImplTypeName());
-    mv.visitInsn(DUP);
-    mv.visitVarInsn(ALOAD, 1);
-    mv.visitVarInsn(ALOAD, 2);
-    mv.visitMethodInsn(INVOKESPECIAL, schemaGen.getImplTypeName(), "<init>",
-        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
-            Type.getType(Database.class), Type.getType(Connection.class)}));
-    mv.visitInsn(ARETURN);
-    mv.visitMaxs(-1, -1);
-    mv.visitEnd();
-  }
-}
diff --git a/src/main/java/com/google/gwtorm/jdbc/gen/SchemaGen.java b/src/main/java/com/google/gwtorm/jdbc/gen/SchemaGen.java
deleted file mode 100644
index ee1bb89..0000000
--- a/src/main/java/com/google/gwtorm/jdbc/gen/SchemaGen.java
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2008 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.google.gwtorm.jdbc.gen;
-
-import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.Schema;
-import com.google.gwtorm.jdbc.Database;
-import com.google.gwtorm.jdbc.JdbcSchema;
-import com.google.gwtorm.schema.RelationModel;
-import com.google.gwtorm.schema.SequenceModel;
-import com.google.gwtorm.schema.Util;
-import com.google.gwtorm.schema.java.JavaSchemaModel;
-import com.google.gwtorm.schema.sql.SqlDialect;
-
-import org.objectweb.asm.ClassWriter;
-import org.objectweb.asm.MethodVisitor;
-import org.objectweb.asm.Opcodes;
-import org.objectweb.asm.Type;
-
-import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Generates a concrete implementation of a {@link Schema} extension. */
-public class SchemaGen implements Opcodes {
-  private final GeneratedClassLoader classLoader;
-  private final JavaSchemaModel schema;
-  private final SqlDialect dialect;
-  private List<RelationGen> relations;
-  private ClassWriter cw;
-  private String superTypeName;
-  private String implClassName;
-  private String implTypeName;
-
-  public SchemaGen(final GeneratedClassLoader loader,
-      final JavaSchemaModel schemaModel, final SqlDialect sqlDialect) {
-    classLoader = loader;
-    schema = schemaModel;
-    dialect = sqlDialect;
-  }
-
-  public void defineClass() throws OrmException {
-    defineRelationClasses();
-
-    init();
-    implementRelationFields();
-    implementConstructor();
-    implementSequenceMethods();
-    implementRelationMethods();
-    cw.visitEnd();
-    classLoader.defineClass(getImplClassName(), cw.toByteArray());
-  }
-
-  String getSchemaClassName() {
-    return schema.getSchemaClassName();
-  }
-
-  String getImplClassName() {
-    return implClassName;
-  }
-
-  String getImplTypeName() {
-    return implTypeName;
-  }
-
-  private void defineRelationClasses() throws OrmException {
-    relations = new ArrayList<RelationGen>();
-    for (final RelationModel rel : schema.getRelations()) {
-      final RelationGen g = new RelationGen(rel);
-      relations.add(g);
-      new AccessGen(classLoader, g).defineClass();
-    }
-  }
-
-  private void init() {
-    superTypeName = Type.getInternalName(JdbcSchema.class);
-    implClassName = getSchemaClassName() + "_Schema_" + Util.createRandomName();
-    implTypeName = implClassName.replace('.', '/');
-
-    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
-    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null,
-        superTypeName, new String[] {getSchemaClassName().replace('.', '/')});
-  }
-
-  private void implementRelationFields() {
-    for (final RelationGen info : relations) {
-      info.implementField();
-    }
-  }
-
-  private void implementConstructor() {
-    final String consName = "<init>";
-    final String consDesc =
-        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
-            Type.getType(Database.class), Type.getType(Connection.class)});
-    final MethodVisitor mv =
-        cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
-    mv.visitCode();
-
-    mv.visitVarInsn(ALOAD, 0);
-    mv.visitVarInsn(ALOAD, 1);
-    mv.visitVarInsn(ALOAD, 2);
-    mv.visitMethodInsn(INVOKESPECIAL, superTypeName, consName, consDesc);
-
-    for (final RelationGen info : relations) {
-      mv.visitVarInsn(ALOAD, 0);
-      mv.visitTypeInsn(NEW, info.accessType.getInternalName());
-      mv.visitInsn(DUP);
-      mv.visitVarInsn(ALOAD, 0);
-      mv.visitMethodInsn(INVOKESPECIAL, info.accessType.getInternalName(),
-          consName, Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {Type
-              .getType(JdbcSchema.class)}));
-      mv.visitFieldInsn(PUTFIELD, implTypeName, info
-          .getAccessInstanceFieldName(), info.accessType.getDescriptor());
-    }
-
-    mv.visitInsn(RETURN);
-    mv.visitMaxs(-1, -1);
-    mv.visitEnd();
-  }
-
-  private void implementSequenceMethods() {
-    for (final SequenceModel seq : schema.getSequences()) {
-      final Type retType = Type.getType(seq.getResultType());
-      final MethodVisitor mv =
-          cw
-              .visitMethod(ACC_PUBLIC, seq.getMethodName(), Type
-                  .getMethodDescriptor(retType, new Type[] {}), null,
-                  new String[] {Type.getType(OrmException.class)
-                      .getInternalName()});
-      mv.visitCode();
-
-      mv.visitVarInsn(ALOAD, 0);
-      mv.visitLdcInsn(dialect.getNextSequenceValueSql(seq.getSequenceName()));
-      mv.visitMethodInsn(INVOKEVIRTUAL, superTypeName, "nextLong", Type
-          .getMethodDescriptor(Type.getType(Long.TYPE), new Type[] {Type
-              .getType(String.class)}));
-      if (retType.getSize() == 1) {
-        mv.visitInsn(L2I);
-        mv.visitInsn(IRETURN);
-      } else {
-        mv.visitInsn(LRETURN);
-      }
-      mv.visitMaxs(-1, -1);
-      mv.visitEnd();
-    }
-  }
-
-  private void implementRelationMethods() {
-    for (final RelationGen info : relations) {
-      info.implementMethod();
-    }
-  }
-
-  class RelationGen {
-    final RelationModel model;
-    String accessClassName;
-    Type accessType;
-
-    RelationGen(final RelationModel model) {
-      this.model = model;
-    }
-
-    SqlDialect getDialect() {
-      return SchemaGen.this.dialect;
-    }
-
-    void implementField() {
-      accessType = Type.getObjectType(accessClassName.replace('.', '/'));
-      cw.visitField(ACC_PRIVATE | ACC_FINAL, getAccessInstanceFieldName(),
-          accessType.getDescriptor(), null, null).visitEnd();
-    }
-
-    String getAccessInstanceFieldName() {
-      return "access_" + model.getMethodName();
-    }
-
-    void implementMethod() {
-      final MethodVisitor mv =
-          cw.visitMethod(ACC_PUBLIC | ACC_FINAL, model.getMethodName(), Type
-              .getMethodDescriptor(Type.getObjectType(model
-                  .getAccessInterfaceName().replace('.', '/')), new Type[] {}),
-              null, null);
-      mv.visitCode();
-      mv.visitVarInsn(ALOAD, 0);
-      mv.visitFieldInsn(GETFIELD, implTypeName, getAccessInstanceFieldName(),
-          accessType.getDescriptor());
-      mv.visitInsn(ARETURN);
-      mv.visitMaxs(-1, -1);
-      mv.visitEnd();
-    }
-  }
-}
diff --git a/src/main/java/com/google/gwtorm/nosql/AccessGen.java b/src/main/java/com/google/gwtorm/nosql/AccessGen.java
new file mode 100644
index 0000000..5344e1e
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/AccessGen.java
@@ -0,0 +1,595 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.schema.KeyModel;
+import com.google.gwtorm.schema.QueryModel;
+import com.google.gwtorm.schema.QueryParser;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.Util;
+import com.google.gwtorm.server.CodeGenSupport;
+import com.google.gwtorm.server.GeneratedClassLoader;
+
+import org.antlr.runtime.tree.Tree;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/** Generates a concrete implementation of a {@link NoSqlAccess} extension. */
+class AccessGen implements Opcodes {
+  private static final Type string = Type.getType(String.class);
+  private static final Type protobufCodec = Type.getType(ProtobufCodec.class);
+  private static final Type indexFunction = Type.getType(IndexFunction.class);
+  private static final Type object = Type.getType(Object.class);
+  private static final Type ormKey = Type.getType(Key.class);
+  private static final Type byteArray = Type.getType(byte[].class);
+  private static final Type ormException = Type.getType(OrmException.class);
+  private static final Type resultSet = Type.getType(ResultSet.class);
+  private static final Type indexKeyBuilder =
+      Type.getType(IndexKeyBuilder.class);
+
+  private static final String F_OBJECT_CODEC = "objectCodec";
+  private static final String F_INDEXES = "indexes";
+
+  private final GeneratedClassLoader classLoader;
+  private final RelationModel model;
+  private final Class<?> modelClass;
+  private final Type schemaType;
+  private final Type accessType;
+  private final Type entityType;
+  private final KeyModel key;
+
+  private ClassWriter cw;
+  private String implClassName;
+  private String implTypeName;
+
+  AccessGen(final GeneratedClassLoader loader, final RelationModel rm,
+      final Class<? extends NoSqlSchema> schemaClazz,
+      final Class<? extends NoSqlAccess> accessClazz) throws OrmException {
+    classLoader = loader;
+    model = rm;
+
+    try {
+      modelClass =
+          Class.forName(model.getEntityTypeClassName(), true, classLoader);
+    } catch (ClassNotFoundException cnfe) {
+      throw new OrmException("Cannot locate model class", cnfe);
+    }
+
+    schemaType = Type.getType(schemaClazz);
+    accessType = Type.getType(accessClazz);
+    entityType = Type.getType(modelClass);
+
+    key = model.getPrimaryKey();
+    if (key == null) {
+      throw new OrmException("Relation " + rm.getMethodName()
+          + " has no primary key");
+    }
+  }
+
+  Class<?> create() throws OrmException {
+    init();
+    implementStaticFields();
+    implementConstructor();
+    implementGetString("getRelationName", model.getRelationName());
+    implementGetRelationID();
+    implementGetObjectCodec();
+    implementGetIndexes();
+
+    implementPrimaryKey();
+    implementEncodePrimaryKey();
+    implementKeyQuery(key);
+
+    for (final QueryModel q : model.getQueries()) {
+      implementQuery(q);
+    }
+    implementQuery(new QueryModel(model, "iterateAllEntities", ""));
+
+    cw.visitEnd();
+    classLoader.defineClass(implClassName, cw.toByteArray());
+
+    final Class<?> c = loadClass();
+    initObjectCodec(c);
+    initQueryIndexes(c);
+    return c;
+  }
+
+  @SuppressWarnings("unchecked")
+  private void initObjectCodec(final Class<?> clazz) throws OrmException {
+    ProtobufCodec oc = CodecFactory.encoder(modelClass);
+    if (model.getRelationID() > 0) {
+      oc = new RelationCodec(model.getRelationID(), oc);
+    }
+
+    try {
+      final Field e = clazz.getDeclaredField(F_OBJECT_CODEC);
+      e.setAccessible(true);
+      e.set(null, oc);
+    } catch (IllegalArgumentException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (IllegalStateException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (IllegalAccessException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (SecurityException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    } catch (NoSuchFieldException err) {
+      throw new OrmException("Cannot setup ProtobufCodec", err);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void initQueryIndexes(final Class<?> clazz) throws OrmException {
+    final Collection<QueryModel> queries = model.getQueries();
+    final ArrayList<IndexFunction> indexes = new ArrayList<IndexFunction>();
+    for (QueryModel m : queries) {
+      if (needsIndexFunction(m)) {
+        indexes.add(new IndexFunctionGen(classLoader, m, modelClass).create());
+      }
+    }
+
+    try {
+      Field e = clazz.getDeclaredField(F_INDEXES);
+      e.setAccessible(true);
+      e.set(null, indexes.toArray(new IndexFunction[indexes.size()]));
+
+      for (IndexFunction f : indexes) {
+        e = clazz.getDeclaredField("index_" + f.getName());
+        e.setAccessible(true);
+        e.set(null, f);
+      }
+    } catch (IllegalArgumentException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (IllegalStateException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (IllegalAccessException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (SecurityException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    } catch (NoSuchFieldException err) {
+      throw new OrmException("Cannot setup query IndexFunctions", err);
+    }
+  }
+
+  private Class<?> loadClass() throws OrmException {
+    try {
+      return Class.forName(implClassName, false, classLoader);
+    } catch (ClassNotFoundException err) {
+      throw new OrmException("Cannot load generated class", err);
+    }
+  }
+
+  private void init() {
+    implClassName =
+        model.getEntityTypeClassName() + "_Access_" + model.getMethodName()
+            + "_" + Util.createRandomName();
+    implTypeName = implClassName.replace('.', '/');
+
+    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null,
+        accessType.getInternalName(), new String[] {model
+            .getAccessInterfaceName().replace('.', '/')});
+  }
+
+  private void implementStaticFields() {
+    cw.visitField(ACC_PRIVATE | ACC_STATIC, F_OBJECT_CODEC,
+        protobufCodec.getDescriptor(), null, null).visitEnd();
+    cw.visitField(ACC_PRIVATE | ACC_STATIC, F_INDEXES,
+        Type.getType(IndexFunction[].class).getDescriptor(), null, null)
+        .visitEnd();
+
+    for (final QueryModel q : model.getQueries()) {
+      if (needsIndexFunction(q)) {
+        cw.visitField(ACC_PRIVATE | ACC_STATIC, "index_" + q.getName(),
+            indexFunction.getDescriptor(), null, null).visitEnd();
+      }
+    }
+  }
+
+  private void implementConstructor() {
+    final String consName = "<init>";
+    final String consDesc =
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {schemaType});
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
+    mv.visitCode();
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitMethodInsn(INVOKESPECIAL, accessType.getInternalName(), consName,
+        consDesc);
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetString(final String methodName,
+      final String returnValue) {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, methodName, Type
+            .getMethodDescriptor(string, new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitLdcInsn(returnValue);
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetRelationID() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getRelationID", Type
+            .getMethodDescriptor(Type.INT_TYPE, new Type[] {}), null, null);
+    mv.visitCode();
+    new CodeGenSupport(mv).push(model.getRelationID());
+    mv.visitInsn(IRETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetObjectCodec() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getObjectCodec", Type
+            .getMethodDescriptor(protobufCodec, new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitFieldInsn(GETSTATIC, implTypeName, F_OBJECT_CODEC, protobufCodec
+        .getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementGetIndexes() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getIndexes", Type
+            .getMethodDescriptor(Type.getType(IndexFunction[].class),
+                new Type[] {}), null, null);
+    mv.visitCode();
+    mv.visitFieldInsn(GETSTATIC, implTypeName, F_INDEXES, Type.getType(
+        IndexFunction[].class).getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementPrimaryKey() {
+    final ColumnModel f = key.getField();
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "primaryKey", Type
+            .getMethodDescriptor(ormKey, new Type[] {object}), null, null);
+    mv.visitCode();
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitTypeInsn(CHECKCAST, entityType.getInternalName());
+    mv.visitFieldInsn(GETFIELD, entityType.getInternalName(), f.getFieldName(),
+        CodeGenSupport.toType(f).getDescriptor());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementEncodePrimaryKey() throws OrmException {
+    final List<ColumnModel> pCols = Collections.singletonList(key.getField());
+    final Type argType = CodeGenSupport.toType(key.getField());
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "encodePrimaryKey", Type
+            .getMethodDescriptor(Type.VOID_TYPE, new Type[] {indexKeyBuilder,
+                ormKey}), null, null);
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 2);
+    mv.visitTypeInsn(CHECKCAST, argType.getInternalName());
+    mv.visitVarInsn(ASTORE, 2);
+
+    final QueryCGS cgs =
+        new QueryCGS(mv, new Type[] {argType}, pCols, new int[] {2}, 1);
+    for (ColumnModel f : pCols) {
+      IndexFunctionGen.encodeField(f, mv, cgs);
+    }
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementKeyQuery(KeyModel key) {
+    final Type keyType = CodeGenSupport.toType(key.getField());
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, key.getName(), Type
+            .getMethodDescriptor(entityType, new Type[] {keyType}), null,
+            new String[] {Type.getType(OrmException.class).getInternalName()});
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitMethodInsn(INVOKESPECIAL, accessType.getInternalName(), "get", Type
+        .getMethodDescriptor(object, new Type[] {ormKey}));
+    mv.visitTypeInsn(CHECKCAST, entityType.getInternalName());
+
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementQuery(final QueryModel info) throws OrmException {
+    final List<ColumnModel> pCols = info.getParameters();
+    final boolean hasLimitParam = info.hasLimitParameter();
+    final Type[] pTypes = new Type[pCols.size() + (hasLimitParam ? 1 : 0)];
+    final int[] pVars = new int[pTypes.length];
+    int nextVar = 1;
+    for (int i = 0; i < pCols.size(); i++) {
+      pTypes[i] = CodeGenSupport.toType(pCols.get(i));
+      pVars[i] = nextVar;
+      nextVar += pTypes[i].getSize();
+    }
+    if (hasLimitParam) {
+      pTypes[pTypes.length - 1] = Type.INT_TYPE;
+      pVars[pTypes.length - 1] = nextVar;
+      nextVar += Type.INT_TYPE.getSize();
+    }
+
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, info.getName(), Type
+            .getMethodDescriptor(resultSet, pTypes), null,
+            new String[] {ormException.getInternalName()});
+    mv.visitCode();
+
+    final List<Tree> ops = compareOpsOnly(info.getParseTree());
+
+    // Generate fromKey
+    //
+    final int fromBuf = nextVar++;
+    mv.visitTypeInsn(NEW, indexKeyBuilder.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, indexKeyBuilder.getInternalName(),
+        "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    mv.visitVarInsn(ASTORE, fromBuf);
+
+    QueryCGS cgs = new QueryCGS(mv, pTypes, pCols, pVars, fromBuf);
+    encodeFields(info, ops, mv, cgs, true /* fromKey */);
+
+    // Generate toKey
+    //
+    final int toBuf = nextVar++;
+    mv.visitTypeInsn(NEW, indexKeyBuilder.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, indexKeyBuilder.getInternalName(),
+        "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    mv.visitVarInsn(ASTORE, toBuf);
+
+    cgs = new QueryCGS(mv, pTypes, pCols, pVars, toBuf);
+    encodeFields(info, ops, mv, cgs, false /* fromKey */);
+    cgs.infinity();
+
+    // Make the scan call
+    //
+    mv.visitVarInsn(ALOAD, 0);
+    if (needsIndexFunction(info)) {
+      mv.visitFieldInsn(GETSTATIC, implTypeName, "index_" + info.getName(),
+          indexFunction.getDescriptor());
+    }
+
+    mv.visitVarInsn(ALOAD, fromBuf);
+    mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+        "toByteArray", Type.getMethodDescriptor(byteArray, new Type[] {}));
+
+    mv.visitVarInsn(ALOAD, toBuf);
+    mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+        "toByteArray", Type.getMethodDescriptor(byteArray, new Type[] {}));
+
+    // Set the limit on the number of results.
+    //
+    if (info.hasLimit()) {
+      if (hasLimitParam) {
+        mv.visitVarInsn(ILOAD, pVars[pTypes.length - 1]);
+      } else {
+        cgs.push(info.getStaticLimit());
+      }
+    } else {
+      cgs.push(0);
+    }
+
+    // Only keep order if there is an order by clause present
+    //
+    cgs.push(info.hasOrderBy() ? 1 : 0);
+
+    if (needsIndexFunction(info)) {
+      mv.visitMethodInsn(INVOKEVIRTUAL, accessType.getInternalName(),
+          "scanIndex", Type.getMethodDescriptor(resultSet, new Type[] {
+              indexFunction, byteArray, byteArray, Type.INT_TYPE,
+              Type.BOOLEAN_TYPE}));
+    } else {
+      // No where and no order by clause? Use the primary key instead.
+      //
+      mv.visitMethodInsn(INVOKEVIRTUAL, accessType.getInternalName(),
+          "scanPrimaryKey", Type.getMethodDescriptor(resultSet, new Type[] {
+              byteArray, byteArray, Type.INT_TYPE, Type.BOOLEAN_TYPE}));
+    }
+
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private boolean needsIndexFunction(final QueryModel info) {
+    return info.hasWhere() || info.hasOrderBy();
+  }
+
+  private void encodeFields(QueryModel qm, List<Tree> query, MethodVisitor mv,
+      QueryCGS cgs, boolean fromKey) throws OrmException {
+    final boolean toKey = !fromKey;
+    Tree lastNode = null;
+
+    for (Tree node : query) {
+      switch (node.getType()) {
+        case QueryParser.GE:
+          if (fromKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.GT:
+          if (fromKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            cgs.infinity();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.EQ:
+          checkLastNode(qm, lastNode);
+          encodeField(node, mv, cgs);
+          cgs.delimiter();
+          break;
+
+        case QueryParser.LE:
+          if (toKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            lastNode = node;
+          }
+          break;
+
+        case QueryParser.LT:
+          if (toKey) {
+            checkLastNode(qm, lastNode);
+            encodeField(node, mv, cgs);
+            cgs.delimiter();
+            cgs.nul();
+            lastNode = node;
+          }
+          break;
+
+        default:
+          throw new OrmException("Unsupported query token in "
+              + model.getMethodName() + "." + qm.getName() + ": "
+              + node.toStringTree());
+      }
+
+      cgs.nextParameter();
+    }
+  }
+
+  private void checkLastNode(QueryModel qm, Tree lastNode) throws OrmException {
+    if (lastNode != null) {
+      throw new OrmException(lastNode.getText() + " must be last operator in "
+          + model.getMethodName() + "." + qm.getName());
+    }
+  }
+
+  private void encodeField(Tree node, MethodVisitor mv, QueryCGS cgs)
+      throws OrmException {
+    ColumnModel f = ((QueryParser.Column) node.getChild(0)).getField();
+    IndexFunctionGen.encodeField(f, mv, cgs);
+  }
+
+  private List<Tree> compareOpsOnly(Tree node) throws OrmException {
+    if (node == null) {
+      return Collections.emptyList();
+    }
+
+    switch (node.getType()) {
+      case 0: // nil node used to join other nodes together
+      case QueryParser.WHERE:
+      case QueryParser.AND: {
+        List<Tree> res = new ArrayList<Tree>();
+        for (int i = 0; i < node.getChildCount(); i++) {
+          res.addAll(compareOpsOnly(node.getChild(i)));
+        }
+        return res;
+      }
+
+      case QueryParser.GT:
+      case QueryParser.GE:
+      case QueryParser.EQ:
+      case QueryParser.LE:
+      case QueryParser.LT: {
+        final Tree lhs = node.getChild(0);
+        final Tree rhs = node.getChild(1);
+        if (lhs.getType() != QueryParser.ID) {
+          throw new OrmException("Unsupported query token");
+        }
+        if (rhs.getType() == QueryParser.PLACEHOLDER) {
+          return Collections.singletonList(node);
+        }
+        break;
+      }
+
+      case QueryParser.ORDER:
+      case QueryParser.LIMIT:
+        break;
+
+      default:
+        throw new OrmException("Unsupported query token " + node.toStringTree());
+    }
+    return Collections.emptyList();
+  }
+
+  private final class QueryCGS extends IndexFunctionGen.EncodeCGS {
+    private final Type[] pTypes;
+    private final List<ColumnModel> pCols;
+    private final int[] pVars;
+    private final int bufvar;
+    private int currentp;
+
+    private QueryCGS(MethodVisitor method, Type[] pTypes,
+        List<ColumnModel> pCols, int[] pVars, int bufvar) {
+      super(method);
+      this.pTypes = pTypes;
+      this.pCols = pCols;
+      this.pVars = pVars;
+      this.bufvar = bufvar;
+    }
+
+    void nextParameter() {
+      currentp++;
+    }
+
+    @Override
+    void pushBuilder() {
+      mv.visitVarInsn(ALOAD, bufvar);
+    }
+
+    @Override
+    public void pushFieldValue() {
+      appendGetField(getFieldReference());
+    }
+
+    @Override
+    protected void appendGetField(final ColumnModel c) {
+      if (currentp < pTypes.length && pCols.get(currentp).equals(c)) {
+        loadVar(pTypes[currentp], pVars[currentp]);
+      } else {
+        super.appendGetField(c);
+      }
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/CounterShard.java b/src/main/java/com/google/gwtorm/nosql/CounterShard.java
new file mode 100644
index 0000000..4761bce
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/CounterShard.java
@@ -0,0 +1,88 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Column;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/**
+ * A single slice of an incrementing counter.
+ * <p>
+ * <b>This shard class is not thread safe.</b> Implementors using this type must
+ * perform synchronization through external mechanisms such as a row-level lock.
+ * <p>
+ * NoSQL implementations can use this object to store counters and keep track of
+ * their values within {@code nextLong(String)}. To improve allocation
+ * performance counters may be sliced into shards, with allocation coming out of
+ * a randomly selected shard, and each shard being replenished from a master
+ * shard when it {@link #isEmpty()}.
+ */
+public class CounterShard {
+  /** Standard encoder/decoder for this class. */
+  public static final ProtobufCodec<CounterShard> CODEC =
+      CodecFactory.encoder(CounterShard.class);
+
+  /** Current value in this shard, this is the next to assign out. */
+  @Column(id = 1)
+  protected long current;
+
+  /** Maximum value, the shard cannot hand out this value. */
+  @Column(id = 2)
+  protected long max;
+
+  protected CounterShard() {
+  }
+
+  /**
+   * Create a new shard with a specific starting value, with no maximum.
+   *
+   * @param next the first value this shard will hand out.
+   */
+  public CounterShard(long next) {
+    this(next, Long.MAX_VALUE);
+  }
+
+  /**
+   * Create a new shard with a specific starting point and maximum.
+   *
+   * @param next the first value this shard will hand out.
+   * @param max the highest value the shard will stop at. The shard will not
+   *        actually hand out this value.
+   */
+  public CounterShard(long next, long max) {
+    this.current = next;
+    this.max = max;
+  }
+
+  /** @return true if this shard cannot hand out any more values. */
+  public boolean isEmpty() {
+    return current == max;
+  }
+
+  /**
+   * Obtain the next value from this shard.
+   *
+   * @return the next value
+   * @throws IllegalStateException the shard {@link #isEmpty()} and cannot hand
+   *         out any more values.
+   */
+  public long next() {
+    if (isEmpty()) {
+      throw new IllegalStateException("Counter shard out of values");
+    }
+    return current++;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexFunction.java b/src/main/java/com/google/gwtorm/nosql/IndexFunction.java
new file mode 100644
index 0000000..edc83b9
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/IndexFunction.java
@@ -0,0 +1,52 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+/**
+ * A function to produce a NoSQL secondary index key from an object.
+ * <p>
+ * An index function computes a row key for a secondary index table by appending
+ * the relevant values to the builder's internal buffer in the order they are
+ * referenced in the query.
+ * <p>
+ * Typically an IndexFunction is automatically code generated at runtime by
+ * {@link IndexFunctionGen}.
+ *
+ * @param <T> type of the object the index record references.
+ */
+public abstract class IndexFunction<T> {
+  /** @return name of this index, should be unique within the relation. */
+  public abstract String getName();
+
+  /**
+   * Should this object exist in the index?
+   * <p>
+   * Objects that shouldn't appear in this index are skipped because field
+   * values are currently {@code null}, or because one or more field values do
+   * not match the constants used in the query that defines the index.
+   *
+   * @param object the object to read fields from.
+   * @return true if the object should be indexed by this index.
+   */
+  public abstract boolean includes(T object);
+
+  /**
+   * Encodes the current values from the object into the index buffer.
+   *
+   * @param dst the buffer to append the indexed field value(s) onto.
+   * @param object the object to read current field values from.
+   */
+  public abstract void encode(IndexKeyBuilder dst, T object);
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java b/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java
new file mode 100644
index 0000000..15dfae4
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/IndexFunctionGen.java
@@ -0,0 +1,497 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.schema.QueryModel;
+import com.google.gwtorm.schema.QueryParser;
+import com.google.gwtorm.schema.Util;
+import com.google.gwtorm.server.CodeGenSupport;
+import com.google.gwtorm.server.GeneratedClassLoader;
+
+import org.antlr.runtime.tree.Tree;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Generates {@link IndexFunction} implementations. */
+class IndexFunctionGen<T> implements Opcodes {
+  private static final Type string = Type.getType(String.class);
+  private static final Type object = Type.getType(Object.class);
+  private static final Type indexKeyBuilder =
+      Type.getType(IndexKeyBuilder.class);
+
+  private final GeneratedClassLoader classLoader;
+  private final QueryModel query;
+  // Leaf columns composing the index key, in encoding order.
+  private final List<ColumnModel> myFields;
+  private final Class<T> pojo;
+  private final Type pojoType;
+
+  private ClassWriter cw;
+  private String superTypeName;
+  private String implClassName;
+  private String implTypeName;
+
+  IndexFunctionGen(final GeneratedClassLoader loader, final QueryModel qm,
+      final Class<T> t) {
+    classLoader = loader;
+    query = qm;
+
+    myFields = new ArrayList<ColumnModel>();
+
+    // Only add each parameter column once, but in the order used.
+    // This avoids a range test on the same column from duplicating
+    // the data in the index record.
+    //
+    for (ColumnModel m : leaves(query.getParameters())) {
+      if (!myFields.contains(m)) {
+        myFields.add(m);
+      }
+    }
+
+    // Skip ORDER BY columns that match with the parameters, then
+    // add anything else onto the end.
+    //
+    // NOTE(review): unlike the parameter loop above, this does not
+    // de-duplicate; an ORDER BY column equal to a parameter column at a
+    // different position gets appended a second time -- confirm intended.
+    //
+    int p = 0;
+    Iterator<ColumnModel> orderby = leaves(query.getOrderBy()).iterator();
+    while (p < myFields.size() && orderby.hasNext()) {
+      ColumnModel c = orderby.next();
+      if (!myFields.get(p).equals(c)) {
+        myFields.add(c);
+        break;
+      }
+      p++;
+    }
+    while (orderby.hasNext()) {
+      myFields.add(orderby.next());
+    }
+
+    pojo = t;
+    pojoType = Type.getType(pojo);
+  }
+
+  /** Expand nested columns into their leaf columns, preserving order. */
+  private List<ColumnModel> leaves(List<ColumnModel> in) {
+    ArrayList<ColumnModel> r = new ArrayList<ColumnModel>(in.size());
+    for (ColumnModel m : in) {
+      if (m.isNested()) {
+        r.addAll(m.getAllLeafColumns());
+      } else {
+        r.add(m);
+      }
+    }
+    return r;
+  }
+
+  /**
+   * Generate the bytecode, load the class, and return a new instance.
+   *
+   * @return a new instance of the generated index function.
+   * @throws OrmException the implementation could not be defined or
+   *         instantiated in the configured class loader.
+   */
+  IndexFunction<T> create() throws OrmException {
+    init();
+    implementConstructor();
+    implementGetName();
+    implementIncludes();
+    implementEncode();
+    cw.visitEnd();
+    classLoader.defineClass(implClassName, cw.toByteArray());
+
+    try {
+      final Class<?> c = Class.forName(implClassName, true, classLoader);
+      return cast(c.newInstance());
+    } catch (InstantiationException e) {
+      throw new OrmException("Cannot create new encoder", e);
+    } catch (IllegalAccessException e) {
+      throw new OrmException("Cannot create new encoder", e);
+    } catch (ClassNotFoundException e) {
+      throw new OrmException("Cannot create new encoder", e);
+    }
+  }
+
+  /** Unchecked bridge from the reflectively created instance to the typed API. */
+  @SuppressWarnings("unchecked")
+  private static <T> IndexFunction<T> cast(final Object c) {
+    return (IndexFunction<T>) c;
+  }
+
+  /** Start a ClassWriter for a new final subclass of {@link IndexFunction}. */
+  private void init() {
+    superTypeName = Type.getInternalName(IndexFunction.class);
+    // Random suffix keeps repeated generations from colliding on a name.
+    implClassName =
+        pojo.getName() + "_IndexFunction_" + query.getName() + "_"
+            + Util.createRandomName();
+    implTypeName = implClassName.replace('.', '/');
+
+    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null,
+        superTypeName, new String[] {});
+  }
+
+  /** Emit a public no-arg constructor that only calls the super constructor. */
+  private void implementConstructor() {
+    final String consName = "<init>";
+    final String consDesc =
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {});
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitMethodInsn(INVOKESPECIAL, superTypeName, consName, consDesc);
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /** Emit getName() returning the query name as a string constant. */
+  private void implementGetName() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "getName", Type
+            .getMethodDescriptor(Type.getType(String.class), new Type[] {}),
+            null, null);
+    mv.visitCode();
+    mv.visitLdcInsn(query.getName());
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /**
+   * Emit includes(Object): cast the argument to the entity type, verify every
+   * indexed field (and any enclosing nested object) is non-null, then verify
+   * constants appearing in the query's parse tree. Control falls through to
+   * return true; any failed check jumps to the shared "no" label, returning
+   * false.
+   */
+  private void implementIncludes() throws OrmException {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "includes", Type.getMethodDescriptor(
+            Type.BOOLEAN_TYPE, new Type[] {object}), null, null);
+    mv.visitCode();
+    final IncludeCGS cgs = new IncludeCGS(mv);
+    cgs.setEntityType(pojoType);
+
+    // Re-store the cast entity back into local slot 1.
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitTypeInsn(CHECKCAST, pojoType.getInternalName());
+    mv.visitVarInsn(ASTORE, 1);
+
+    Set<ColumnModel> checked = new HashSet<ColumnModel>();
+    checkNotNullFields(myFields, checked, mv, cgs);
+
+    final Tree parseTree = query.getParseTree();
+    if (parseTree != null) {
+      checkConstants(parseTree, mv, cgs);
+    }
+
+    cgs.push(1);
+    mv.visitInsn(IRETURN);
+
+    mv.visitLabel(cgs.no);
+    cgs.push(0);
+    mv.visitInsn(IRETURN);
+
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /** Emit null checks for every leaf of the given columns. */
+  private static void checkNotNullFields(Collection<ColumnModel> myFields,
+      Set<ColumnModel> checked, MethodVisitor mv, IncludeCGS cgs)
+      throws OrmException {
+    for (ColumnModel f : myFields) {
+      if (f.isNested()) {
+        checkNotNullFields(f.getNestedColumns(), checked, mv, cgs);
+      } else {
+        checkNotNullScalar(mv, checked, cgs, f);
+      }
+    }
+  }
+
+  /** Emit a null check for one scalar column, after checking its parents. */
+  private static void checkNotNullScalar(MethodVisitor mv,
+      Set<ColumnModel> checked, IncludeCGS cgs, ColumnModel f)
+      throws OrmException {
+    checkParentNotNull(f.getParent(), checked, mv, cgs);
+    cgs.setFieldReference(f);
+
+    switch (Type.getType(f.getPrimitiveType()).getSort()) {
+      case Type.BOOLEAN:
+      case Type.BYTE:
+      case Type.SHORT:
+      case Type.CHAR:
+      case Type.INT:
+      case Type.LONG:
+        // Primitive fields can never be null; nothing to verify.
+        break;
+
+      case Type.ARRAY:
+      case Type.OBJECT: {
+        if (f.getPrimitiveType() == byte[].class) {
+          cgs.pushFieldValue();
+          mv.visitJumpInsn(IFNULL, cgs.no);
+
+        } else if (f.getPrimitiveType() == String.class) {
+          cgs.pushFieldValue();
+          mv.visitJumpInsn(IFNULL, cgs.no);
+
+        } else if (f.getPrimitiveType() == java.sql.Timestamp.class
+            || f.getPrimitiveType() == java.util.Date.class
+            || f.getPrimitiveType() == java.sql.Date.class) {
+          cgs.pushFieldValue();
+          mv.visitJumpInsn(IFNULL, cgs.no);
+
+        } else {
+          throw new OrmException("Type " + f.getPrimitiveType()
+              + " not supported for field " + f.getPathToFieldName());
+        }
+        break;
+      }
+
+      default:
+        throw new OrmException("Type " + f.getPrimitiveType()
+            + " not supported for field " + f.getPathToFieldName());
+    }
+  }
+
+  /** Null-check each enclosing nested object exactly once, outermost first. */
+  private static void checkParentNotNull(ColumnModel f,
+      Set<ColumnModel> checked, MethodVisitor mv, IncludeCGS cgs) {
+    if (f != null && checked.add(f)) {
+      checkParentNotNull(f.getParent(), checked, mv, cgs);
+      cgs.setFieldReference(f);
+      cgs.pushFieldValue();
+      mv.visitJumpInsn(IFNULL, cgs.no);
+    }
+  }
+
+  /**
+   * Compile the constant tests appearing in the query's parse tree.
+   * <p>
+   * PLACEHOLDER operands are skipped (they are evaluated at runtime);
+   * ORDER BY and LIMIT nodes carry no constants to test.
+   * <p>
+   * NOTE(review): LT/LE/GT/GE with a constant operand are compiled exactly
+   * like EQ (an equality test) -- confirm this is intended.
+   */
+  private void checkConstants(Tree node, MethodVisitor mv, IncludeCGS cgs)
+      throws OrmException {
+    switch (node.getType()) {
+      // These don't impact the constant evaluation
+      case QueryParser.ORDER:
+      case QueryParser.LIMIT:
+        break;
+
+      case 0: // nil node used to join other nodes together
+      case QueryParser.WHERE:
+      case QueryParser.AND:
+        for (int i = 0; i < node.getChildCount(); i++) {
+          checkConstants(node.getChild(i), mv, cgs);
+        }
+        break;
+
+      case QueryParser.LT:
+      case QueryParser.LE:
+      case QueryParser.GT:
+      case QueryParser.GE:
+      case QueryParser.EQ: {
+        final Tree lhs = node.getChild(0);
+        final Tree rhs = node.getChild(1);
+        if (lhs.getType() != QueryParser.ID) {
+          throw new OrmException("Unsupported query token");
+        }
+
+        cgs.setFieldReference(((QueryParser.Column) lhs).getField());
+        switch (rhs.getType()) {
+          case QueryParser.PLACEHOLDER:
+            // Parameter evaluated at runtime
+            break;
+
+          case QueryParser.TRUE:
+            cgs.pushFieldValue();
+            mv.visitJumpInsn(IFEQ, cgs.no);
+            break;
+
+          case QueryParser.FALSE:
+            cgs.pushFieldValue();
+            mv.visitJumpInsn(IFNE, cgs.no);
+            break;
+
+          case QueryParser.CONSTANT_INTEGER:
+            cgs.pushFieldValue();
+            cgs.push(Integer.parseInt(rhs.getText()));
+            mv.visitJumpInsn(IF_ICMPNE, cgs.no);
+            break;
+
+          case QueryParser.CONSTANT_STRING:
+            // A char field compares by value; a String field uses equals().
+            if (cgs.getFieldReference().getPrimitiveType() == Character.TYPE) {
+              cgs.push(dequote(rhs.getText()).charAt(0));
+              cgs.pushFieldValue();
+              mv.visitJumpInsn(IF_ICMPNE, cgs.no);
+            } else {
+              mv.visitLdcInsn(dequote(rhs.getText()));
+              cgs.pushFieldValue();
+              mv.visitMethodInsn(INVOKEVIRTUAL, string.getInternalName(),
+                  "equals", Type.getMethodDescriptor(Type.BOOLEAN_TYPE,
+                      new Type[] {object}));
+              mv.visitJumpInsn(IFEQ, cgs.no);
+            }
+            break;
+        }
+        break;
+      }
+
+      default:
+        throw new OrmException("Unsupported query token " + node.toStringTree());
+    }
+  }
+
+  /** Strip the surrounding quote character from each end of a literal token. */
+  private static String dequote(String text) {
+    return text.substring(1, text.length() - 1);
+  }
+
+  /**
+   * Emit encode(IndexKeyBuilder, Object): cast the entity (local slot 2) and
+   * append each indexed field onto the builder (local slot 1), separating
+   * consecutive fields with a delimiter.
+   */
+  private void implementEncode() throws OrmException {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "encode", Type.getMethodDescriptor(
+            Type.VOID_TYPE, new Type[] {indexKeyBuilder, object}), null, null);
+    mv.visitCode();
+    final EncodeCGS cgs = new EncodeCGS(mv);
+    cgs.setEntityType(pojoType);
+
+    // Re-store the cast entity back into local slot 2.
+    mv.visitVarInsn(ALOAD, 2);
+    mv.visitTypeInsn(CHECKCAST, pojoType.getInternalName());
+    mv.visitVarInsn(ASTORE, 2);
+
+    encodeFields(myFields, mv, cgs);
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /** Append each (leaf) column to the builder, delimiting between fields. */
+  static void encodeFields(final Collection<ColumnModel> myFields,
+      final MethodVisitor mv, final EncodeCGS cgs) throws OrmException {
+    Iterator<ColumnModel> i = myFields.iterator();
+    while (i.hasNext()) {
+      ColumnModel f = i.next();
+      encodeScalar(f, mv, cgs);
+      if (i.hasNext()) {
+        cgs.delimiter();
+      }
+    }
+  }
+
+  /**
+   * Append one column, expanding a nested column into its leaves first.
+   * <p>
+   * NOTE(review): not called within this class; presumably used by the
+   * Access generator -- verify before removing.
+   */
+  static void encodeField(ColumnModel f, final MethodVisitor mv,
+      final EncodeCGS cgs) throws OrmException {
+    if (f.isNested()) {
+      encodeFields(f.getAllLeafColumns(), mv, cgs);
+    } else {
+      encodeScalar(f, mv, cgs);
+    }
+  }
+
+  /** Append one scalar column value via the matching IndexKeyBuilder.add(). */
+  private static void encodeScalar(final ColumnModel f, final MethodVisitor mv,
+      final EncodeCGS cgs) throws OrmException {
+    cgs.setFieldReference(f);
+
+    switch (Type.getType(f.getPrimitiveType()).getSort()) {
+      case Type.BOOLEAN:
+      case Type.BYTE:
+      case Type.SHORT:
+      case Type.CHAR:
+      case Type.INT:
+        // Widen to long so all integral types share add(long).
+        cgs.pushBuilder();
+        cgs.pushFieldValue();
+        mv.visitInsn(I2L);
+        mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+            "add", Type.getMethodDescriptor(Type.VOID_TYPE,
+                new Type[] {Type.LONG_TYPE}));
+        break;
+
+      case Type.LONG:
+        cgs.pushBuilder();
+        cgs.pushFieldValue();
+        mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+            "add", Type.getMethodDescriptor(Type.VOID_TYPE,
+                new Type[] {Type.LONG_TYPE}));
+        break;
+
+      case Type.ARRAY:
+      case Type.OBJECT: {
+        if (f.getPrimitiveType() == byte[].class) {
+          cgs.pushBuilder();
+          cgs.pushFieldValue();
+          mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+              "add", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {Type
+                  .getType(byte[].class)}));
+
+        } else if (f.getPrimitiveType() == String.class) {
+          cgs.pushBuilder();
+          cgs.pushFieldValue();
+          mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+              "add", Type.getMethodDescriptor(Type.VOID_TYPE,
+                  new Type[] {string}));
+
+        } else if (f.getPrimitiveType() == java.sql.Timestamp.class
+            || f.getPrimitiveType() == java.util.Date.class
+            || f.getPrimitiveType() == java.sql.Date.class) {
+          // Dates are encoded by their getTime() millisecond value.
+          cgs.pushBuilder();
+          cgs.pushFieldValue();
+          String tsType = Type.getType(f.getPrimitiveType()).getInternalName();
+          mv.visitMethodInsn(INVOKEVIRTUAL, tsType, "getTime", Type
+              .getMethodDescriptor(Type.LONG_TYPE, new Type[] {}));
+          mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+              "add", Type.getMethodDescriptor(Type.VOID_TYPE,
+                  new Type[] {Type.LONG_TYPE}));
+        } else {
+          throw new OrmException("Type " + f.getPrimitiveType()
+              + " not supported for field " + f.getPathToFieldName());
+        }
+        break;
+      }
+
+      default:
+        throw new OrmException("Type " + f.getPrimitiveType()
+            + " not supported for field " + f.getPathToFieldName());
+    }
+  }
+
+  /** Code generation support for includes(): the entity lives in slot 1. */
+  private static final class IncludeCGS extends CodeGenSupport {
+    // Shared jump target that returns false.
+    final Label no = new Label();
+
+    private IncludeCGS(MethodVisitor method) {
+      super(method);
+    }
+
+    @Override
+    public void pushEntity() {
+      mv.visitVarInsn(ALOAD, 1);
+    }
+  }
+
+  /** Code generation support for encode(): builder in slot 1, entity in slot 2. */
+  static class EncodeCGS extends CodeGenSupport {
+    EncodeCGS(MethodVisitor method) {
+      super(method);
+    }
+
+    /** Emit a call to IndexKeyBuilder.infinity(). */
+    void infinity() {
+      pushBuilder();
+      mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+          "infinity", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    }
+
+    /** Emit a call to IndexKeyBuilder.delimiter(). */
+    void delimiter() {
+      pushBuilder();
+      mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+          "delimiter", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    }
+
+    /** Emit a call to IndexKeyBuilder.nul(). */
+    void nul() {
+      pushBuilder();
+      mv.visitMethodInsn(INVOKEVIRTUAL, indexKeyBuilder.getInternalName(),
+          "nul", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+    }
+
+    /** Load the IndexKeyBuilder argument (local slot 1). */
+    void pushBuilder() {
+      mv.visitVarInsn(ALOAD, 1);
+    }
+
+    @Override
+    public void pushEntity() {
+      mv.visitVarInsn(ALOAD, 2);
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java b/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java
new file mode 100644
index 0000000..8c6d9f4
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/IndexKeyBuilder.java
@@ -0,0 +1,188 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+
+import java.io.ByteArrayOutputStream;
+import java.io.UnsupportedEncodingException;
+
+/**
+ * Encoder support for {@link IndexFunction} computed strings.
+ * <p>
+ * This class provides a string that may contain multiple values, using
+ * delimiters between fields and big-endian encoded numerics. Sorting the
+ * resulting strings using unsigned byte orderings produces a stable sorting.
+ * <p>
+ * The encoding used by this class relies on having 258 tokens. To get the extra
+ * 2 tokens within a 256 byte range, escapes are used according to the following
+ * simple table:
+ * <ul>
+ * <li>delimiter = \x00\x01
+ * <li>byte \x00 = \x00\xff
+ * <li>byte \xff = \xff\x00
+ * <li>infinity = \xff\xff
+ * </ul>
+ * <p>
+ * Integers are encoded as variable length big-endian values, skipping leading
+ * zero bytes, prefixed by the number of bytes used to encode them. Therefore 0
+ * is encoded as "\x00", and 256 is encoded as "\x02\x01\x00". Negative values
+ * are encoded in their twos complement encoding and therefore sort after the
+ * maximum positive value.
+ * <p>
+ * Strings and byte arrays supplied by the caller have their \x00 and \xff
+ * values escaped according to the table above, but are otherwise written as-is
+ * without a length prefix.
+ * <p>
+ * Callers are responsible for inserting {@link #delimiter()} markers at the
+ * appropriate positions in the sequence.
+ */
+public class IndexKeyBuilder {
+  private final ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+  /**
+   * Add a delimiter marker to the string.
+   */
+  public void delimiter() {
+    buf.write(0x00);
+    buf.write(0x01);
+  }
+
+  /**
+   * Add the special infinity symbol to the string.
+   * <p>
+   * The infinity symbol sorts after all other values in the same position.
+   */
+  public void infinity() {
+    buf.write(0xff);
+    buf.write(0xff);
+  }
+
+  /**
+   * Add \0 to the string.
+   * <p>
+   * \0 can be used during searches to enforce greater then or less then clauses
+   * in a query.
+   */
+  public void nul() {
+    buf.write(0x00);
+  }
+
+  /**
+   * Add a raw sequence of bytes.
+   * <p>
+   * The bytes 0x00 and 0xff are escaped by this method according to the
+   * encoding table described in the class documentation.
+   *
+   * @param bin array to copy from.
+   * @param pos first index to copy.
+   * @param cnt number of bytes to copy.
+   */
+  public void add(byte[] bin, int pos, int cnt) {
+    while (0 < cnt--) {
+      byte b = bin[pos++];
+      if (b == 0x00) {
+        buf.write(0x00);
+        buf.write(0xff);
+
+      } else if (b == -1) { // -1 is 0xff as a signed byte
+        buf.write(0xff);
+        buf.write(0x00);
+
+      } else {
+        buf.write(b);
+      }
+    }
+  }
+
+  /**
+   * Add a raw sequence of bytes.
+   * <p>
+   * The bytes 0x00 and 0xff are escaped by this method according to the
+   * encoding table described in the class documentation.
+   *
+   * @param bin the complete array to copy.
+   */
+  public void add(byte[] bin) {
+    add(bin, 0, bin.length);
+  }
+
+  /**
+   * Encode a string into UTF-8 and append as a sequence of bytes.
+   *
+   * @param str the string to encode and append.
+   */
+  public void add(String str) {
+    try {
+      add(str.getBytes("UTF-8"));
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException("JVM does not support UTF-8", e);
+    }
+  }
+
+  /**
+   * Add a single character as though it were part of a UTF-8 string.
+   * <p>
+   * NOTE(review): characters in the range 0x80-0xff are written as a single
+   * raw byte, which is not their UTF-8 form (compare {@link #add(String)}),
+   * and 0xff in particular is emitted without the escaping described in the
+   * class documentation -- confirm this cannot corrupt the sort order.
+   *
+   * @param ch the character to encode and append.
+   */
+  public void add(char ch) {
+    if (ch == 0x00) {
+      buf.write(0x00);
+      buf.write(0xff);
+
+    } else if (ch <= 255) {
+      buf.write(ch);
+
+    } else {
+      add(Character.toString(ch));
+    }
+  }
+
+  /**
+   * Add an integer value as a big-endian variable length integer.
+   *
+   * @param val the value to add.
+   */
+  public void add(long val) {
+    final byte[] t = new byte[9]; // 1 length byte + up to 8 value bytes
+    int i = t.length;
+    while (val != 0) {
+      t[--i] = (byte) (val & 0xff);
+      val >>>= 8;
+    }
+    // Prefix with the count of value bytes; 0 encodes as the single byte \x00.
+    t[i - 1] = (byte) (t.length - i);
+    buf.write(t, i - 1, t.length - i + 1);
+  }
+
+  /**
+   * Add a byte array as-is, without escaping.
+   * <p>
+   * This should only be used when the byte array came from a prior index key
+   * and the caller is trying to create a new key with this key embedded at the
+   * end.
+   *
+   * @param bin the binary to append as-is, without further escaping.
+   */
+  public void addRaw(byte[] bin) {
+    buf.write(bin, 0, bin.length);
+  }
+
+  /**
+   * Obtain a copy of the internal storage array.
+   *
+   * @return the current state of this, converted into a flat byte array.
+   */
+  public byte[] toByteArray() {
+    return buf.toByteArray();
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/IndexRow.java b/src/main/java/com/google/gwtorm/nosql/IndexRow.java
new file mode 100644
index 0000000..4986e3a
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/IndexRow.java
@@ -0,0 +1,83 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Column;
+import com.google.gwtorm.protobuf.CodecFactory;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/**
+ * Data value stored in a NoSQL secondary index row.
+ * <p>
+ * Instances of this object can be used inside of the data portion of a
+ * secondary index row, and may either contain the key of the primary data row,
+ * or a copy of the primary object data.
+ * <p>
+ * The {@link #timestamp} field can be used to fossil collect secondary index
+ * rows that no longer match the primary data row and which are older than the
+ * longest expected transaction. These fossil rows may have occurred due to an
+ * aborted, but partially applied transaction.
+ */
+public class IndexRow {
+  /** Standard encoder/decoder for this class. */
+  public static final ProtobufCodec<IndexRow> CODEC =
+      CodecFactory.encoder(IndexRow.class);
+
+  /**
+   * Create an index row to reference the primary data row by key.
+   *
+   * @param update time of the update.
+   * @param key the key to reference.
+   * @return the new index row.
+   */
+  public static IndexRow forKey(long update, byte[] key) {
+    IndexRow r = new IndexRow();
+    r.timestamp = update;
+    r.dataKey = key;
+    // dataCopy is left null: this row points at the primary row by key only.
+    return r;
+  }
+
+  /**
+   * Clock of the last time this index row was touched.
+   * <p>
+   * Invalid rows older than a certain time interval may be subject to automatic
+   * background pruning during data retrieval operations.
+   */
+  @Column(id = 1)
+  protected long timestamp;
+
+  /** Key within the same relation that holds the actual data. */
+  @Column(id = 2, notNull = false)
+  protected byte[] dataKey;
+
+  /** Stale copy of the data. */
+  @Column(id = 3, notNull = false)
+  protected byte[] dataCopy;
+
+  /** @return get the timestamp of the row. */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /** @return get the primary key data; or {@code null}. */
+  public byte[] getDataKey() {
+    return dataKey;
+  }
+
+  /** @return get the copy of the primary data; or {@code null}. */
+  public byte[] getDataCopy() {
+    return dataCopy;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java b/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java
new file mode 100644
index 0000000..52e84ab
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlAccess.java
@@ -0,0 +1,97 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.impl.AbstractAccess;
+import com.google.gwtorm.protobuf.ProtobufCodec;
+
+/** Internal base class for implementations of {@link Access}. */
+public abstract class NoSqlAccess<T, K extends Key<?>> extends
+    AbstractAccess<T, K> {
+  protected NoSqlAccess(final NoSqlSchema s) {
+    // NOTE(review): the schema reference is accepted but not retained here;
+    // confirm generated subclasses keep their own copy if they need it.
+  }
+
+  /**
+   * Scan a range of keys from the data rows and return any matching objects.
+   * <p>
+   * All NoSQL implementations must provide their own variant of this method.
+   * <p>
+   * To fetch a single record with a scan, set {@code toKey} to the same array
+   * as {@code fromKey}, but append a trailing NUL byte (0x00). The caller
+   * should validate that the returned ResultSet contains no more than 1 row.
+   *
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return, 0 for unlimited.
+   * @param order if true the order will be preserved, false if the result order
+   *        order can be arbitrary.
+   * @return result set for the requested range. The result set may be lazily
+   *         filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  protected abstract ResultSet<T> scanPrimaryKey(byte[] fromKey, byte[] toKey,
+      int limit, boolean order) throws OrmException;
+
+  /**
+   * Scan a range of keys and return any matching objects.
+   * <p>
+   * All NoSQL implementations must provide their own variant of this method.
+   * <p>
+   * To fetch a single record with a scan, set {@code toKey} to the same array
+   * as {@code fromKey}, but append a trailing NUL byte (0x00). The caller
+   * should validate that the returned ResultSet contains no more than 1 row.
+   *
+   * @param index definition of the index the scan occurs over.
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return, 0 for unlimited.
+   * @param order if true the order will be preserved, false if the result order
+   *        order can be arbitrary.
+   * @return result set for the requested range. The result set may be lazily
+   *         filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  protected abstract ResultSet<T> scanIndex(IndexFunction<T> index,
+      byte[] fromKey, byte[] toKey, int limit, boolean order)
+      throws OrmException;
+
+  // -- These are all provided by AccessGen when it builds a subclass --
+
+  /** @return encoder/decoder for the object data. */
+  protected abstract ProtobufCodec<T> getObjectCodec();
+
+  /**
+   * Get the indexes that support query functions.
+   * <p>
+   * This array may be a subset of the total query functions. This can occur
+   * when two or more queries can be efficiently answered by performing a range
+   * scan over the same index.
+   *
+   * @return indexes needed to support queries.
+   */
+  protected abstract IndexFunction<T>[] getIndexes();
+
+  /**
+   * Encode the primary key of the object.
+   *
+   * @param dst builder the key components will be added into.
+   * @param key the object primary key.
+   */
+  protected abstract void encodePrimaryKey(IndexKeyBuilder dst, K key);
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java b/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java
new file mode 100644
index 0000000..86f88a5
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlDatabase.java
@@ -0,0 +1,109 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.KeyUtil;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.SchemaFactory;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.SchemaModel;
+import com.google.gwtorm.schema.java.JavaSchemaModel;
+import com.google.gwtorm.server.GeneratedClassLoader;
+import com.google.gwtorm.server.SchemaConstructorGen;
+import com.google.gwtorm.server.SchemaGen;
+import com.google.gwtorm.server.StandardKeyEncoder;
+
+/**
+ * Base class for NoSQL typed databases.
+ * <p>
+ * Applications should use the database class to create instances of their
+ * Schema extension interface, and thus open and connect to the data store.
+ * <p>
+ * Creating a new database instance is expensive, due to the type analysis and
+ * code generation performed to implement the Schema and Access interfaces.
+ * Applications should create and cache their database instance for the life of
+ * the application.
+ * <p>
+ * Database instances are thread-safe, but returned Schema instances are not.
+ * <p>
+ * This class must be further extended by the NoSQL implementation to configure
+ * the connectivity with the data store and supply the correct subclass of
+ * {@link NoSqlSchema} that knows how to interact with the data store.
+ *
+ * @param <T> type of the application's Schema.
+ * @param <S> type of the implementation's base for Schema implementations.
+ * @param <A> type of the implementation's base for Access implementations.
+ */
+public abstract class NoSqlDatabase<T extends Schema, S extends NoSqlSchema, A extends NoSqlAccess>
+    implements SchemaFactory<T> {
+  static {
+    // Register the standard key encoder; runs once when this class loads.
+    KeyUtil.setEncoderImpl(new StandardKeyEncoder());
+  }
+
+  private final SchemaModel schemaModel;
+  // Generated factory that constructs Schema instances on demand.
+  private final SchemaFactory<T> implFactory;
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected NoSqlDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    schemaModel = new JavaSchemaModel(appSchema);
+    final GeneratedClassLoader loader = newLoader(appSchema);
+    final Class<T> impl = generate(schemaBaseType, accessBaseType, loader);
+    implFactory = new SchemaConstructorGen<T>(loader, impl, this).create();
+  }
+
+  @Override
+  public T open() throws OrmException {
+    // Delegate to the factory generated in the constructor.
+    return implFactory.open();
+  }
+
+  /** @return the derived model of the application's schema. */
+  public SchemaModel getSchemaModel() {
+    return schemaModel;
+  }
+
+  /** Generate the Schema implementation, with one Access class per relation. */
+  @SuppressWarnings("unchecked")
+  private Class<T> generate(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final GeneratedClassLoader loader)
+      throws OrmException {
+    return new SchemaGen(loader, schemaModel, getClass(), schemaBaseType,
+        new SchemaGen.AccessGenerator() {
+          @Override
+          public Class<?> create(GeneratedClassLoader loader, RelationModel rm)
+              throws OrmException {
+            // AccessGen emits the per-relation Access implementation.
+            return new AccessGen(loader, rm, schemaBaseType, accessBaseType)
+                .create();
+          }
+        }).create();
+  }
+
+  /** Create a class loader that can define generated classes for the schema. */
+  private static <T> GeneratedClassLoader newLoader(final Class<T> schema) {
+    return new GeneratedClassLoader(schema.getClassLoader());
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java b/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java
new file mode 100644
index 0000000..f130ca7
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/NoSqlSchema.java
@@ -0,0 +1,52 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.StatementExecutor;
+import com.google.gwtorm.server.AbstractSchema;
+
+/** Internal base class for implementations of {@link Schema}. */
+public abstract class NoSqlSchema extends AbstractSchema {
+  private boolean autoFlush = true;
+
+  protected NoSqlSchema(final NoSqlDatabase<?, ?, ?> d) {
+  }
+
+  @Override
+  public boolean isAutoFlush() {
+    return autoFlush;
+  }
+
+  @Override
+  public void setAutoFlush(boolean autoFlush) throws OrmException {
+    if (!this.autoFlush && autoFlush) {
+      flush();
+    }
+
+    this.autoFlush = autoFlush;
+  }
+
+  @Override
+  public void pruneSchema(StatementExecutor e) throws OrmException {
+    // Assume no action is required in a default NoSQL environment.
+  }
+
+  @Override
+  public void updateSchema(StatementExecutor e) throws OrmException {
+    // Assume no action is required in a default NoSQL environment.
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/RelationCodec.java b/src/main/java/com/google/gwtorm/nosql/RelationCodec.java
new file mode 100644
index 0000000..84add28
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/RelationCodec.java
@@ -0,0 +1,102 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.protobuf.ProtobufCodec;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.WireFormat;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/** Encodes a relation number in front of an object. */
+public class RelationCodec<T> extends ProtobufCodec<T> {
+  /**
+   * Pop the field number from the stream and return it.
+   *
+   * @param in the stream to pop the field number from. The caller is
+   *        responsible for making sure the underlying stream had a mark set for
+   *        at least 8 bytes so the tag can be examined, reset, and later read
+   *        again during mergeFrom or decode.
+   * @return the field number of the relation.
+   * @throws IOException the stream cannot be read.
+   */
+  public static int peekId(CodedInputStream in) throws IOException {
+    return in.readTag() >>> 3;
+  }
+
+  private final int fieldId;
+  private final ProtobufCodec<T> objectCodec;
+
+  public RelationCodec(int fieldId, ProtobufCodec<T> objectCodec) {
+    this.fieldId = fieldId;
+    this.objectCodec = objectCodec;
+  }
+
+  @Override
+  public T newInstance() {
+    return objectCodec.newInstance();
+  }
+
+  @Override
+  public int sizeof(T obj) {
+    int sz = objectCodec.sizeof(obj);
+    return CodedOutputStream.computeTagSize(fieldId) //
+        + CodedOutputStream.computeRawVarint32Size(sz) //
+        + sz;
+  }
+
+  @Override
+  public void encode(T obj, CodedOutputStream out) throws IOException {
+    int sz = objectCodec.sizeof(obj);
+    out.writeTag(fieldId, WireFormat.FieldType.MESSAGE.getWireType());
+    out.writeRawVarint32(sz);
+    objectCodec.encode(obj, out);
+  }
+
+  @Override
+  public void mergeFrom(CodedInputStream in, T obj) throws IOException {
+    boolean found = false;
+    for (;;) {
+      int tag = in.readTag();
+      if (tag == 0) {
+        if (found) {
+          break;
+        } else {
+          // Reached EOF. But we require an object in our only field.
+          throw new EOFException("Expected field " + fieldId);
+        }
+      }
+
+      if ((tag >>> 3) == fieldId) {
+        if ((tag & 0x7) == WireFormat.FieldType.MESSAGE.getWireType()) {
+          int sz = in.readRawVarint32();
+          int oldLimit = in.pushLimit(sz);
+          objectCodec.mergeFrom(in, obj);
+          in.checkLastTagWas(0);
+          in.popLimit(oldLimit);
+          found = true;
+        } else {
+          throw new InvalidProtocolBufferException("Field " + fieldId
+              + " should be length delimited (wire type 2)");
+        }
+      } else {
+        in.skipField(tag);
+      }
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/CandidateRow.java b/src/main/java/com/google/gwtorm/nosql/generic/CandidateRow.java
new file mode 100644
index 0000000..15ecbad
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/CandidateRow.java
@@ -0,0 +1,53 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.nosql.IndexRow;
+
+class CandidateRow {
+  private final byte[] indexKey;
+  private final IndexRow indexRow;
+  private byte[] objData;
+
+  CandidateRow(byte[] ik, IndexRow ir) {
+    indexKey = ik;
+    indexRow = ir;
+    objData = indexRow.getDataCopy();
+  }
+
+  byte[] getIndexKey() {
+    return indexKey;
+  }
+
+  IndexRow getIndexRow() {
+    return indexRow;
+  }
+
+  byte[] getDataKey() {
+    return indexRow.getDataKey();
+  }
+
+  boolean hasData() {
+    return objData != null;
+  }
+
+  byte[] getData() {
+    return objData;
+  }
+
+  void setData(byte[] objData) {
+    this.objData = objData;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java
new file mode 100644
index 0000000..8c4df87
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericAccess.java
@@ -0,0 +1,581 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.client.OrmConcurrencyException;
+import com.google.gwtorm.client.OrmDuplicateKeyException;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.impl.AbstractResultSet;
+import com.google.gwtorm.client.impl.ListResultSet;
+import com.google.gwtorm.nosql.IndexFunction;
+import com.google.gwtorm.nosql.IndexKeyBuilder;
+import com.google.gwtorm.nosql.IndexRow;
+import com.google.gwtorm.nosql.NoSqlAccess;
+import com.google.protobuf.ByteString;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map.Entry;
+
+/** Base implementation for {@link Access} in a {@link GenericDatabase}. */
+public abstract class GenericAccess<T, K extends Key<?>> extends
+    NoSqlAccess<T, K> {
+  /** Maximum number of results to cache to improve updates on upsert. */
+  private static final int MAX_SZ = 64;
+
+  private final GenericSchema db;
+  private LinkedHashMap<K, byte[]> cache;
+
+  protected GenericAccess(final GenericSchema s) {
+    super(s);
+    db = s;
+  }
+
+  protected LinkedHashMap<K, byte[]> cache() {
+    if (cache == null) {
+      cache = new LinkedHashMap<K, byte[]>(8) {
+        @Override
+        protected boolean removeEldestEntry(Entry<K, byte[]> entry) {
+          return MAX_SZ <= size();
+        }
+      };
+    }
+    return cache;
+  }
+
+  /**
+   * Lookup a single entity via its primary key.
+   *
+   * @param key the primary key instance; must not be null.
+   * @return the entity; null if no entity has this key.
+   * @throws OrmException the data lookup failed.
+   * @throws OrmDuplicateKeyException more than one row was identified in the
+   *         key scan.
+   */
+  @Override
+  public T get(K key) throws OrmException, OrmDuplicateKeyException {
+    byte[] bin = db.fetchRow(dataRowKey(key));
+    if (bin != null) {
+      T obj = getObjectCodec().decode(bin);
+      cache().put(primaryKey(obj), bin);
+      return obj;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public ResultSet<T> get(final Iterable<K> keys) throws OrmException {
+    final ResultSet<Row> rs = db.fetchRows(new Iterable<byte[]>() {
+      @Override
+      public Iterator<byte[]> iterator() {
+        return new Iterator<byte[]>() {
+          private final Iterator<K> i = keys.iterator();
+
+          @Override
+          public boolean hasNext() {
+            return i.hasNext();
+          }
+
+          @Override
+          public byte[] next() {
+            return dataRowKey(i.next());
+          }
+
+          @Override
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    });
+
+    final Iterator<Row> i = rs.iterator();
+    return new AbstractResultSet<T>() {
+      @Override
+      protected boolean hasNext() {
+        return i.hasNext();
+      }
+
+      @Override
+      protected T next() {
+        byte[] bin = i.next().getValue();
+        T obj = getObjectCodec().decode(bin);
+        cache().put(primaryKey(obj), bin);
+        return obj;
+      }
+
+      @Override
+      public void close() {
+        rs.close();
+      }
+    };
+  }
+
+  /**
+   * Scan a range of keys from the data rows and return any matching objects.
+   *
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return.
+   * @param order if true the order will be preserved, false if the result
+   *        order can be arbitrary.
+   * @return result set for the requested range. The result set may be lazily
+   *         filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  @Override
+  protected ResultSet<T> scanPrimaryKey(byte[] fromKey, byte[] toKey,
+      int limit, boolean order) throws OrmException {
+    IndexKeyBuilder b;
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    b.addRaw(fromKey);
+    fromKey = b.toByteArray();
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    b.addRaw(toKey);
+    toKey = b.toByteArray();
+
+    final ResultSet<Row> rs = db.scan(fromKey, toKey, limit, order);
+    final Iterator<Row> i = rs.iterator();
+
+    return new AbstractResultSet<T>() {
+      @Override
+      protected boolean hasNext() {
+        return i.hasNext();
+      }
+
+      @Override
+      protected T next() {
+        byte[] bin = i.next().getValue();
+        T obj = getObjectCodec().decode(bin);
+        cache().put(primaryKey(obj), bin);
+        return obj;
+      }
+
+      @Override
+      public void close() {
+        rs.close();
+      }
+    };
+  }
+
+  /**
+   * Scan a range of index keys and return any matching objects.
+   *
+   * @param idx the index function describing the index to scan.
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return.
+   * @param order if true the order will be preserved, false if the result
+   *        order can be arbitrary.
+   * @return result set for the requested range. The result set may be lazily
+   *         filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  @Override
+  protected ResultSet<T> scanIndex(IndexFunction<T> idx, byte[] fromKey,
+      byte[] toKey, int limit, boolean order) throws OrmException {
+    final long now = System.currentTimeMillis();
+    IndexKeyBuilder b;
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(idx.getName());
+    b.delimiter();
+    b.addRaw(fromKey);
+    fromKey = b.toByteArray();
+
+    b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(idx.getName());
+    b.delimiter();
+    b.addRaw(toKey);
+    toKey = b.toByteArray();
+
+    final ArrayList<T> res = new ArrayList<T>();
+    byte[] lastKey = fromKey;
+
+    SCAN: for (;;) {
+      List<CandidateRow> scanned;
+      if (0 < limit) {
+        scanned = new ArrayList<CandidateRow>(limit);
+      } else {
+        scanned = new ArrayList<CandidateRow>();
+      }
+
+      boolean needData = false;
+      for (Row ent : db.scan(lastKey, toKey, limit, order)) {
+        byte[] idxKey = ent.getKey();
+        IndexRow idxRow = IndexRow.CODEC.decode(ent.getValue());
+        CandidateRow row = new CandidateRow(idxKey, idxRow);
+        scanned.add(row);
+        needData |= !row.hasData();
+        lastKey = idxKey;
+      }
+
+      if (needData) {
+        // At least one row from the index didn't have a cached copy of the
+        // object stored within. For these rows we need to fetch the real
+        // data row and join it against the index information.
+        //
+        HashMap<ByteString, CandidateRow> byKey =
+            new HashMap<ByteString, CandidateRow>();
+        List<byte[]> toFetch = new ArrayList<byte[]>(scanned.size());
+
+        for (CandidateRow idxRow : scanned) {
+          if (!idxRow.hasData()) {
+            IndexKeyBuilder pk = new IndexKeyBuilder();
+            pk.add(getRelationName());
+            pk.delimiter();
+            pk.addRaw(idxRow.getDataKey());
+            byte[] key = pk.toByteArray();
+
+            byKey.put(ByteString.copyFrom(key), idxRow);
+            toFetch.add(key);
+          }
+        }
+
+        for (Row objRow : db.fetchRows(toFetch)) {
+          CandidateRow idxRow = byKey.get(ByteString.copyFrom(objRow.getKey()));
+          if (idxRow != null) {
+            idxRow.setData(objRow.getValue());
+          }
+        }
+
+        for (CandidateRow idxRow : scanned) {
+          // If we have no data present and this row is stale enough,
+          // drop the row out of the index.
+          //
+          if (!idxRow.hasData()) {
+            db.maybeFossilCollectIndexRow(now, idxRow.getIndexKey(), //
+                idxRow.getIndexRow());
+            continue;
+          }
+
+          // Verify the object still matches the predicate of the index.
+          // If it does, include it in the result. Otherwise, maybe we
+          // should drop it from the index.
+          //
+          byte[] bin = idxRow.getData();
+          final T obj = getObjectCodec().decode(bin);
+          if (matches(idx, obj, idxRow.getIndexKey())) {
+            cache().put(primaryKey(obj), bin);
+            res.add(obj);
+            if (limit > 0 && res.size() == limit) {
+              break SCAN;
+            }
+          } else {
+            db.maybeFossilCollectIndexRow(now, idxRow.getIndexKey(), //
+                idxRow.getIndexRow());
+          }
+        }
+      } else {
+        // All of the rows are using a cached copy of the object. We can
+        // simply decode and produce those without further validation.
+        //
+        for (CandidateRow idxRow : scanned) {
+          byte[] bin = idxRow.getData();
+          T obj = getObjectCodec().decode(bin);
+          cache().put(primaryKey(obj), bin);
+          res.add(obj);
+          if (limit > 0 && res.size() == limit) {
+            break SCAN;
+          }
+        }
+      }
+
+      // If we have no limit we scanned everything, so break out.
+      // If scanned < limit, we saw every index row that might be
+      // a match, and no further rows would exist.
+      //
+      if (limit == 0 || scanned.size() < limit) {
+        break SCAN;
+      }
+
+      // Otherwise we have to scan again starting after lastKey.
+      //
+      b = new IndexKeyBuilder();
+      b.addRaw(lastKey);
+      b.nul();
+      lastKey = b.toByteArray();
+    }
+
+    return new ListResultSet<T>(res);
+  }
+
+  private void maybeFlush() throws OrmException {
+    if (db.isAutoFlush()) {
+      db.flush();
+    }
+  }
+
+  @Override
+  public void insert(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      insertOne(obj);
+    }
+    maybeFlush();
+  }
+
+  private void insertOne(T nObj) throws OrmException {
+    writeNewIndexes(null, nObj);
+
+    final byte[] key = dataRowKey(primaryKey(nObj));
+    db.insert(key, getObjectCodec().encodeToByteString(nObj).toByteArray());
+  }
+
+  @Override
+  public void update(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      upsertOne(obj, true);
+    }
+    maybeFlush();
+  }
+
+  @Override
+  public void upsert(Iterable<T> instances) throws OrmException {
+    for (T obj : instances) {
+      upsertOne(obj, false);
+    }
+    maybeFlush();
+  }
+
+  private void upsertOne(T newObj, boolean mustExist) throws OrmException {
+    final byte[] key = dataRowKey(primaryKey(newObj));
+
+    T oldObj;
+    byte[] oldBin = cache().get(primaryKey(newObj));
+    if (oldBin != null) {
+      oldObj = getObjectCodec().decode(oldBin);
+    } else if (mustExist) {
+      oldBin = db.fetchRow(key);
+      if (oldBin != null) {
+        oldObj = getObjectCodec().decode(oldBin);
+      } else {
+        throw new OrmConcurrencyException();
+      }
+    } else {
+      oldObj = null;
+    }
+
+    writeNewIndexes(oldObj, newObj);
+    db.upsert(key, getObjectCodec().encodeToByteString(newObj).toByteArray());
+    pruneOldIndexes(oldObj, newObj);
+  }
+
+  /**
+   * Insert secondary index rows for an object about to be written.
+   * <p>
+   * Insert or update operations should invoke this method before the main data
+   * row is written, allowing the secondary index rows to be put into the data
+   * store before the main data row arrives. Compatible scan implementations
+   * (such as {@link #scanIndex(IndexFunction, byte[], byte[], int, boolean)}
+   * above) will ignore these rows for a short time period.
+   *
+   * @param oldObj an old copy of the object; if non-null this may be used to
+   *        avoid writing unnecessary secondary index rows that already exist.
+   * @param newObj the new (or updated) object being stored. Must not be null.
+   * @throws OrmException the data store is unable to update an index row.
+   */
+  protected void writeNewIndexes(T oldObj, T newObj) throws OrmException {
+    final byte[] idxData = indexRowData(newObj);
+    for (IndexFunction<T> f : getIndexes()) {
+      if (f.includes(newObj)) {
+        final byte[] idxKey = indexRowKey(f, newObj);
+        if (oldObj == null || !matches(f, oldObj, idxKey)) {
+          db.upsert(idxKey, idxData);
+        }
+      }
+    }
+  }
+
+  /**
+   * Remove old secondary index rows that are no longer valid for an object.
+   *
+   * @param oldObj an old copy of the object, prior to the current update taking
+   *        place. If null the method does nothing and simply returns.
+   * @param newObj the new copy of the object. Index rows that are still valid
+   *        for {@code newObj} are left alone. If null, all index rows for
+   *        {@code oldObj} are removed.
+   * @throws OrmException the data store is unable to remove an index row.
+   */
+  protected void pruneOldIndexes(final T oldObj, T newObj) throws OrmException {
+    if (oldObj != null) {
+      for (IndexFunction<T> f : getIndexes()) {
+        if (f.includes(oldObj)) {
+          final byte[] idxKey = indexRowKey(f, oldObj);
+          if (newObj == null || !matches(f, newObj, idxKey)) {
+            db.delete(idxKey);
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public void delete(Iterable<T> instances) throws OrmException {
+    for (T oldObj : instances) {
+      db.delete(dataRowKey(primaryKey(oldObj)));
+      pruneOldIndexes(oldObj, null);
+      cache().remove(primaryKey(oldObj));
+    }
+    maybeFlush();
+  }
+
+  @Override
+  public T atomicUpdate(K key, final AtomicUpdate<T> update)
+      throws OrmException {
+    final IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    encodePrimaryKey(b, key);
+
+    try {
+      final T[] res = (T[]) new Object[3];
+      db.atomicUpdate(b.toByteArray(), new AtomicUpdate<byte[]>() {
+        @Override
+        public byte[] update(byte[] data) {
+          if (data != null) {
+            final T oldObj = getObjectCodec().decode(data);
+            final T newObj = getObjectCodec().decode(data);
+            res[0] = update.update(newObj);
+            res[1] = oldObj;
+            res[2] = newObj;
+            try {
+              writeNewIndexes(oldObj, newObj);
+            } catch (OrmException err) {
+              throw new IndexException(err);
+            }
+            return getObjectCodec().encodeToByteString(newObj).toByteArray();
+
+          } else {
+            res[0] = null;
+            return null;
+          }
+        }
+      });
+      if (res[0] != null) {
+        pruneOldIndexes(res[1], res[2]);
+      }
+      return res[0];
+    } catch (IndexException err) {
+      throw err.cause;
+    }
+  }
+
+  /**
+   * Determine if an object still matches the index row.
+   * <p>
+   * This method checks that the object's fields still match the criteria
+   * necessary for it to be part of the index defined by {@code f}. It also
+   * formats the index key and validates it is still identical to {@code exp}.
+   *
+   * @param f the function that defines the index.
+   * @param obj the object instance being tested; must not be null.
+   * @param exp the index row key, as scanned from the index.
+   * @return true if the object still matches the data encoded in {@code exp}.
+   */
+  protected boolean matches(IndexFunction<T> f, T obj, byte[] exp) {
+    return f.includes(obj) && Arrays.equals(exp, indexRowKey(f, obj));
+  }
+
+  /**
+   * Generate the row key for the object's primary data row.
+   * <p>
+   * The default implementation uses the relation name, a delimiter, and then
+   * the encoded primary key.
+   *
+   * @param key key of the object.
+   * @return the object's data row key.
+   */
+  protected byte[] dataRowKey(K key) {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.delimiter();
+    encodePrimaryKey(b, key);
+    return b.toByteArray();
+  }
+
+  /**
+   * Generate the row key for an object's secondary index row.
+   * <p>
+   * The default implementation uses the relation name, '.', the index name, a
+   * delimiter, the indexed fields encoded, a delimiter, and then the encoded
+   * primary key (without the relation name prefix).
+   * <p>
+   * The object's primary key is always appended onto the end of the secondary
+   * index row key to ensure that objects with the same field values still get
+   * distinct rows in the secondary index.
+   *
+   * @param idx function that describes the index.
+   * @param obj the object the index record should reference.
+   * @return the encoded secondary index row key.
+   */
+  protected byte[] indexRowKey(IndexFunction<T> idx, T obj) {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(getRelationName());
+    b.add('.');
+    b.add(idx.getName());
+    b.delimiter();
+    idx.encode(b, obj);
+    b.delimiter();
+    encodePrimaryKey(b, primaryKey(obj));
+    return b.toByteArray();
+  }
+
+  /**
+   * Generate the data to store in a secondary index row for an object.
+   * <p>
+   * The default implementation of this method stores the encoded primary key,
+   * and the current system timestamp.
+   *
+   * @param obj the object the index record should reference.
+   * @return the encoded secondary index row data.
+   */
+  protected byte[] indexRowData(T obj) {
+    final long now = System.currentTimeMillis();
+
+    final IndexKeyBuilder b = new IndexKeyBuilder();
+    encodePrimaryKey(b, primaryKey(obj));
+    final byte[] key = b.toByteArray();
+
+    return IndexRow.CODEC.encodeToByteArray(IndexRow.forKey(now, key));
+  }
+
+  private static class IndexException extends RuntimeException {
+    final OrmException cause;
+
+    IndexException(OrmException err) {
+      super(err);
+      this.cause = err;
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java
new file mode 100644
index 0000000..3ee0f2f
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericDatabase.java
@@ -0,0 +1,86 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.NoSqlDatabase;
+import com.google.gwtorm.nosql.NoSqlSchema;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Base class for generic NoSQL typed databases.
+ * <p>
+ * The generic types provide basic NoSQL implementation assuming a handful of
+ * primitive operations are available inside of the implementation's extension
+ * of {@link GenericSchema}. All relations are stored within the same key space,
+ * using the relation name as a prefix for the row's primary or secondary key.
+ * <p>
+ * Applications should use the database class to create instances of their
+ * Schema extension interface, and thus open and connect to the data store.
+ * <p>
+ * Creating a new database instance is expensive, due to the type analysis and
+ * code generation performed to implement the Schema and Access interfaces.
+ * Applications should create and cache their database instance for the life of
+ * the application.
+ * <p>
+ * Database instances are thread-safe, but returned Schema instances are not.
+ * <p>
+ * This class must be further extended by the NoSQL implementation to configure
+ * the connectivity with the data store and supply the correct subclass of
+ * {@link NoSqlSchema} that knows how to interact with the data store.
+ *
+ * @param <T> type of the application's Schema.
+ * @param <S> type of the implementation's base for Schema implementations.
+ * @param <A> type of the implementation's base for Access implementations.
+ */
+public abstract class GenericDatabase<T extends Schema, S extends GenericSchema, A extends GenericAccess>
+    extends NoSqlDatabase<T, S, A> {
+  private static final long DEFAULT_FOSSIL_AGE =
+      TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES);
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected GenericDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    super(schemaBaseType, accessBaseType, appSchema);
+  }
+
+  /**
+   * Default number of milliseconds a transaction can appear to be open.
+   * <p>
+   * Secondary index rows that don't match their primary data object and that
+   * are older than this age are removed from the system during a scan.
+   *
+   * @return milliseconds before a fossil index record is considered garbage
+   *         and should be pruned. By default, 5 minutes.
+   */
+  public long getMaxFossilAge() {
+    return DEFAULT_FOSSIL_AGE;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java b/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java
new file mode 100644
index 0000000..81b62aa
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/GenericSchema.java
@@ -0,0 +1,332 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.OrmDuplicateKeyException;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.impl.ListResultSet;
+import com.google.gwtorm.nosql.CounterShard;
+import com.google.gwtorm.nosql.IndexKeyBuilder;
+import com.google.gwtorm.nosql.IndexRow;
+import com.google.gwtorm.nosql.NoSqlSchema;
+import com.google.gwtorm.schema.SequenceModel;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Base implementation for {@link Schema} in a {@link GenericDatabase}.
+ * <p>
+ * NoSQL implementors must extend this class and provide implementations for the
+ * abstract methods declared here. Each schema instance will wrap one thread's
+ * connection to the data store. Therefore, unlike the database, this class
+ * does not need to be thread-safe.
+ */
+public abstract class GenericSchema extends NoSqlSchema {
+  private final GenericDatabase<?, ?, ?> db;
+
+  protected GenericSchema(final GenericDatabase<?, ?, ?> d) {
+    super(d);
+    db = d;
+  }
+
+  /** @return the database that created this schema instance. */
+  public GenericDatabase<?, ?, ?> getDatabase() {
+    return db;
+  }
+
+  /**
+   * Allocate a new unique value from a pool of values.
+   * <p>
+   * This method is only required to return a unique value per invocation.
+   * Implementors may override the method to provide an implementation that
+   * returns values out of order.
+   * <p>
+   * The default implementation of this method stores a {@link CounterShard}
+   * under the row key {@code ".sequence." + poolName}, and updates it through
+   * the atomic semantics of {@link #atomicUpdate(byte[], AtomicUpdate)}. If the
+   * row does not yet exist, it is initialized and the value 1 is returned.
+   *
+   * @param poolName name of the value pool to allocate from. This is typically
+   *        the name of a sequence in the schema.
+   * @return a new unique value.
+   * @throws OrmException a unique value cannot be obtained.
+   */
+  @Override
+  protected long nextLong(final String poolName) throws OrmException {
+    IndexKeyBuilder b = new IndexKeyBuilder();
+    b.add(".sequence." + poolName);
+    b.delimiter();
+    try {
+      // Holder array: the anonymous callback below cannot assign a local
+      // variable of the enclosing method, so it exports the allocated value
+      // through this single-element array.
+      final long[] res = new long[1];
+      atomicUpdate(b.toByteArray(), new AtomicUpdate<byte[]>() {
+        @Override
+        public byte[] update(byte[] val) {
+          CounterShard ctr;
+          if (val != null) {
+            ctr = CounterShard.CODEC.decode(val);
+          } else {
+            // First use of this pool: honor the sequence's declared
+            // startWith value from the schema model, defaulting to 1.
+            long start = 1;
+            for (SequenceModel s : getDatabase().getSchemaModel()
+                .getSequences()) {
+              if (poolName.equals(s.getSequenceName())) {
+                start = s.getSequence().startWith();
+                if (start == 0) {
+                  start = 1;
+                }
+                break;
+              }
+            }
+            ctr = new CounterShard(start, Long.MAX_VALUE);
+          }
+
+          if (ctr.isEmpty()) {
+            throw new NoMoreValues();
+          }
+
+          res[0] = ctr.next();
+          return CounterShard.CODEC.encodeToByteString(ctr).toByteArray();
+        }
+      });
+      return res[0];
+    } catch (NoMoreValues err) {
+      throw new OrmException("Counter '" + poolName + "' out of values");
+    }
+  }
+
+  /**
+   * Fetch one row's data.
+   * <p>
+   * The default implementation of this method creates a pair of keys and passes
+   * them to {@link #scan(byte[], byte[], int, boolean)}. The {@code fromKey} is
+   * the supplied {@code key}, while the {@code toKey} has '\0' appended onto
+   * {@code key}. If more than one row matches in that range, the method throws
+   * an exception.
+   *
+   * @param key key of the row to fetch and return.
+   * @return the data stored under {@code key}; null if no row exists.
+   * @throws OrmDuplicateKeyException more than one row was identified in the
+   *         key scan.
+   * @throws OrmException the data store cannot process the request.
+   */
+  public byte[] fetchRow(byte[] key) throws OrmDuplicateKeyException,
+      OrmException {
+    final byte[] fromKey = key;
+    // toKey is key + '\0'; the half-open [fromKey, toKey) range can only
+    // match the one exact key.
+    final byte[] toKey = new byte[key.length + 1];
+    System.arraycopy(key, 0, toKey, 0, key.length);
+
+    ResultSet<Row> r = scan(fromKey, toKey, 2, false); // limit 2: detect duplicates
+    try {
+      Iterator<Row> i = r.iterator();
+      if (!i.hasNext()) {
+        return null;
+      }
+
+      byte[] data = i.next().getValue();
+      if (i.hasNext()) {
+        throw new OrmDuplicateKeyException("Unexpected duplicate keys");
+      }
+      return data;
+    } finally {
+      r.close();
+    }
+  }
+
+  /**
+   * Fetch multiple rows at once.
+   * <p>
+   * The default implementation of this method is a simple iteration over each
+   * key and executes a sequential fetch with {@link #fetchRow(byte[])}.
+   *
+   * @param keys keys to fetch and return.
+   * @return iteration over the rows that exist and appear in {@code keys}.
+   * @throws OrmException the data store cannot process the request.
+   */
+  public ResultSet<Row> fetchRows(Iterable<byte[]> keys) throws OrmException {
+    List<Row> r = new ArrayList<Row>();
+    for (byte[] key : keys) {
+      byte[] val = fetchRow(key);
+      if (val != null) {
+        // Rows that do not exist are silently omitted from the result.
+        r.add(new Row(key, val));
+      }
+    }
+    return new ListResultSet<Row>(r);
+  }
+
+  /**
+   * Scan a range of keys and return any matching objects.
+   * <p>
+   * To fetch a single record with a scan, set {@code toKey} to the same array
+   * as {@code fromKey}, but append a trailing NUL byte (0x00). The caller
+   * should validate that the returned ResultSet contains no more than 1 row.
+   * <p>
+   * The resulting iteration does not support remove.
+   * <p>
+   * Each iteration element is a map entry, describing the row key and the row
+   * value. The map entry's value cannot be changed.
+   *
+   * @param fromKey key to start the scan on. This is inclusive.
+   * @param toKey key to stop the scan on. This is exclusive.
+   * @param limit maximum number of results to return.
+   * @param order if true the order will be preserved, false if the result
+   *        order can be arbitrary.
+   * @return result iteration for the requested range. The result set may be
+   *         lazily filled, or filled completely.
+   * @throws OrmException an error occurred preventing the scan from completing.
+   */
+  public abstract ResultSet<Row> scan(byte[] fromKey, byte[] toKey, int limit,
+      boolean order) throws OrmException;
+
+  /**
+   * Atomically insert one row, failing if the row already exists.
+   * <p>
+   * The default implementation of this method relies upon the atomic nature of
+   * the {@link #atomicUpdate(byte[], AtomicUpdate)} primitive to test for the
+   * row's existence, and create the row only if it is not found.
+   *
+   * @param key key of the new row to insert.
+   * @param newData data of the new row.
+   * @throws OrmDuplicateKeyException another row already exists with the
+   *         specified key.
+   * @throws OrmException the data store cannot process the request right now,
+   *         for example due to a network connectivity problem.
+   */
+  public void insert(byte[] key, final byte[] newData)
+      throws OrmDuplicateKeyException, OrmException {
+    try {
+      atomicUpdate(key, new AtomicUpdate<byte[]>() {
+        @Override
+        public byte[] update(byte[] oldData) {
+          if (oldData != null) {
+            // Escape out of the atomic update via the marker exception;
+            // it is translated below into OrmDuplicateKeyException.
+            throw new KeyExists();
+          }
+          return newData;
+        }
+      });
+    } catch (KeyExists err) {
+      throw new OrmDuplicateKeyException("Duplicate key");
+    }
+  }
+
+  /**
+   * Update a single row, inserting it if it does not exist.
+   * <p>
+   * Unlike insert, this method always succeeds.
+   *
+   * @param key key of the row to update, or insert if missing.
+   * @param data data to store at this row.
+   * @throws OrmException the data store cannot process the request, for example
+   *         due to a network connectivity problem.
+   */
+  public abstract void upsert(byte[] key, byte[] data) throws OrmException;
+
+  /**
+   * Delete the row stored under the given key.
+   * <p>
+   * If the row does not exist, this method must complete successfully anyway.
+   * The intent of the caller is to ensure the row does not exist when the
+   * method completes, and a row that did not exist satisfies that intent.
+   *
+   * @param key the key to delete.
+   * @throws OrmException the data store cannot perform the removal.
+   */
+  public abstract void delete(byte[] key) throws OrmException;
+
+  /**
+   * Atomically read and update a single row.
+   * <p>
+   * Unlike schema's atomicUpdate() method, this method must handle missing
+   * rows. Implementations must be logically equivalent to the following, but
+   * performed atomically within the scope of the single row key:
+   *
+   * <pre>
+   * byte[] oldData = get(key);
+   * byte[] newData = update.update(oldData);
+   * if (newData != null) {
+   *   upsert(key, newData);
+   * } else if (oldData != null) {
+   *   remove(key);
+   * }
+   * return newData;
+   * </pre>
+   * <p>
+   * Secondary index row updates are assumed to never be part of the atomic
+   * update transaction. This is an intentional design decision to fit with many
+   * NoSQL products' limitations to support only single-row atomic updates.
+   * <p>
+   * The {@code update} method may be invoked multiple times before the
+   * operation is considered successful. This permits an implementation to
+   * perform an opportunistic update attempt, and retry the update if the same
+   * row was modified by another concurrent worker.
+   *
+   * @param key the row key to read, update and return.
+   * @param update action to perform on the row's data element. The action may
+   *        be passed null if the row doesn't exist.
+   * @throws OrmException the database cannot perform the update.
+   */
+  public abstract void atomicUpdate(byte[] key, AtomicUpdate<byte[]> update)
+      throws OrmException;
+
+  /**
+   * Check (and delete) an index row if it's a fossil.
+   * <p>
+   * As index rows are written ahead of the main data row being written out,
+   * scans sometimes see an index row that does not match the data row. These
+   * are ignored for a short period ({@link GenericDatabase#getMaxFossilAge()})
+   * to allow the primary data row to eventually get written out. If however the
+   * writer never finished the update, these index rows are stale and need to be
+   * pruned. Any index row older than the fossil age is removed by this method.
+   *
+   * @param now timestamp when the current scan started.
+   * @param key the index row key.
+   * @param row the index row data.
+   */
+  public void maybeFossilCollectIndexRow(long now, byte[] key, IndexRow row) {
+    if (row.getTimestamp() + db.getMaxFossilAge() <= now) {
+      fossilCollectIndexRow(key, row);
+    }
+  }
+
+  /**
+   * Delete the given fossil index row.
+   * <p>
+   * This method is logically the same as {@link #delete(byte[])}, but it is
+   * separated out to permit asynchronous delivery of the delete events since
+   * these are arriving during an index scan and are less time-critical than
+   * other delete operations.
+   * <p>
+   * The default implementation of this method calls {@link #delete(byte[])}.
+   *
+   * @param key index key to remove.
+   * @param row the index row data.
+   */
+  protected void fossilCollectIndexRow(byte[] key, IndexRow row) {
+    try {
+      delete(key);
+    } catch (OrmException e) {
+      // Ignore a fossil delete error.
+    }
+  }
+
+  /** Marker thrown inside insert's callback when the row already exists. */
+  private static class KeyExists extends RuntimeException {
+  }
+
+  /** Marker thrown inside nextLong's callback when the counter is exhausted. */
+  private static class NoMoreValues extends RuntimeException {
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/generic/Row.java b/src/main/java/com/google/gwtorm/nosql/generic/Row.java
new file mode 100644
index 0000000..cdeacbd
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/generic/Row.java
@@ -0,0 +1,34 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.generic;
+
+/** A pairing of a row key and its value. */
+public class Row {
+  private final byte[] key;
+  private final byte[] val;
+
+  /**
+   * Wrap a key/value pair.
+   * <p>
+   * The arrays are held by reference, not copied; callers must not modify
+   * them after construction.
+   *
+   * @param key the row key.
+   * @param val the data stored under {@code key}.
+   */
+  public Row(byte[] key, byte[] val) {
+    this.key = key;
+    this.val = val;
+  }
+
+  /** @return the row key (shared array reference; do not modify). */
+  public byte[] getKey() {
+    return key;
+  }
+
+  /** @return the row's value (shared array reference; do not modify). */
+  public byte[] getValue() {
+    return val;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java
new file mode 100644
index 0000000..5b5e71b
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/FileDatabase.java
@@ -0,0 +1,310 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Map;
+
+/**
+ * Tiny NoSQL database stored on the local filesystem.
+ * <p>
+ * This is a simple NoSQL implementation intended only for development/debugging
+ * purposes. It is not capable of supporting any production traffic. Large data
+ * sets will cause the implementation to fall over, as all records are stored in
+ * memory.
+ * <p>
+ * Although some effort is made to persist data to disk during updates, and
+ * reload it during construction, durability of stored data is not guaranteed.
+ *
+ * @param <T> type of the application schema.
+ */
+public class FileDatabase<T extends Schema> extends
+    TreeMapDatabase<T, FileDatabase.LoggingSchema, FileDatabase.LoggingAccess> {
+  private static final int MAX_LOG_SIZE = 50000;
+
+  private final File heapFile;
+  private final File logFile;
+
+  private RandomAccessFile log;
+  private int logRecords;
+
+  /**
+   * Create the database and implement the application's schema interface.
+   *
+   * @param path path prefix for the data files. File suffixes will be added to
+   *        this name to name the database's various files.
+   * @param schema the application schema this database will open.
+   * @throws OrmException the schema cannot be queried, or the existing database
+   *         files are not readable.
+   */
+  public FileDatabase(final File path, final Class<T> schema)
+      throws OrmException {
+    super(LoggingSchema.class, LoggingAccess.class, schema);
+
+    heapFile = new File(path.getAbsolutePath() + ".nosql_db");
+    logFile = new File(path.getAbsolutePath() + ".nosql_log");
+
+    lock.lock();
+    try {
+      loadHeap();
+      loadLog();
+    } catch (IOException err) {
+      throw new OrmException("Cannot load existing database", err);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /** Gracefully close the database and its log file. */
+  public void close() throws OrmException {
+    lock.lock();
+    try {
+      if (log != null) {
+        try {
+          log.close();
+        } catch (IOException err) {
+          throw new OrmException("Cannot close log file", err);
+        } finally {
+          log = null;
+        }
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void loadHeap() throws IOException {
+    lock.lock();
+    try {
+      table.clear();
+
+      final DataInputStream in;
+      try {
+        in = new DataInputStream( //
+            new BufferedInputStream( //
+                new FileInputStream(heapFile)));
+      } catch (FileNotFoundException e) {
+        return;
+      }
+
+      try {
+        final int cnt = in.readInt();
+        for (int row = 0; row < cnt; row++) {
+          final byte[] key = new byte[in.readInt()];
+          final byte[] val = new byte[in.readInt()];
+          in.readFully(key);
+          in.readFully(val);
+          table.put(key, val);
+        }
+      } finally {
+        in.close();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void loadLog() throws IOException, OrmException {
+    lock.lock();
+    try {
+      logRecords = 0;
+
+      final DataInputStream in;
+      try {
+        in = new DataInputStream( //
+            new BufferedInputStream( //
+                new FileInputStream(logFile)));
+      } catch (FileNotFoundException e) {
+        return;
+      }
+
+      try {
+        for (;; logRecords++) {
+          final int op = in.read();
+          if (op < 0) {
+            break;
+          }
+
+          switch (op) {
+            case 0: {
+              final byte[] key = new byte[in.readInt()];
+              in.readFully(key);
+              table.remove(key);
+              break;
+            }
+
+            case 1: {
+              final byte[] key = new byte[in.readInt()];
+              final byte[] val = new byte[in.readInt()];
+              in.readFully(key);
+              in.readFully(val);
+              table.put(key, val);
+              break;
+            }
+
+            default:
+              throw new OrmException("Unknown log command " + op);
+          }
+        }
+      } finally {
+        in.close();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void writeLog(int op, byte[] key, byte[] val) throws OrmException {
+    if (logRecords == MAX_LOG_SIZE) {
+      compact();
+      return;
+    }
+
+    try {
+      openLog();
+
+      int sz = 1 + 4 + key.length;
+      if (op == 1) {
+        sz += 4 + val.length;
+      }
+
+      final ByteArrayOutputStream buf = new ByteArrayOutputStream(sz);
+      final DataOutputStream out = new DataOutputStream(buf);
+
+      out.write(op);
+      out.writeInt(key.length);
+      if (op == 1) {
+        out.writeInt(val.length);
+      }
+      out.write(key);
+      if (op == 1) {
+        out.write(val);
+      }
+      out.flush();
+
+      log.write(buf.toByteArray());
+      logRecords++;
+    } catch (IOException err) {
+      throw new OrmException("Cannot log operation", err);
+    }
+  }
+
+  private void compact() throws OrmException {
+    lock.lock();
+    try {
+      final File tmp = newTempFile();
+      boolean ok = false;
+      try {
+        DataOutputStream out = new DataOutputStream( //
+            new BufferedOutputStream( //
+                new FileOutputStream(tmp)));
+        try {
+          out.writeInt(table.size());
+          for (Map.Entry<byte[], byte[]> ent : table.entrySet()) {
+            out.writeInt(ent.getKey().length);
+            out.writeInt(ent.getValue().length);
+            out.write(ent.getKey());
+            out.write(ent.getValue());
+          }
+        } finally {
+          out.close();
+        }
+
+        if (!tmp.renameTo(heapFile)) {
+          throw new OrmException("Cannot replace " + heapFile);
+        }
+
+        ok = true;
+
+        openLog();
+        log.seek(0);
+        log.setLength(0);
+
+      } finally {
+        if (!ok) {
+          if (!tmp.delete()) {
+            tmp.deleteOnExit();
+          }
+        }
+      }
+    } catch (IOException err) {
+      throw new OrmException("Cannot compact database", err);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void openLog() throws IOException {
+    if (log == null) {
+      log = new RandomAccessFile(logFile, "rws");
+      log.seek(log.length());
+    }
+  }
+
+  private File newTempFile() throws IOException {
+    return File.createTempFile("heap_", "_db", heapFile.getParentFile());
+  }
+
+  public static abstract class LoggingSchema extends TreeMapSchema {
+    private final FileDatabase<?> db;
+
+    protected LoggingSchema(FileDatabase<?> db) {
+      super(db);
+      this.db = db;
+    }
+
+    @Override
+    public void upsert(byte[] key, byte[] data) throws OrmException {
+      db.lock.lock();
+      try {
+        super.upsert(key, data);
+        db.writeLog(1, key, data);
+      } finally {
+        db.lock.unlock();
+      }
+    }
+
+    @Override
+    public void delete(byte[] key) throws OrmException {
+      db.lock.lock();
+      try {
+        super.delete(key);
+        db.writeLog(0, key, null);
+      } finally {
+        db.lock.unlock();
+      }
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public static abstract class LoggingAccess extends TreeMapAccess {
+    protected LoggingAccess(LoggingSchema s) {
+      super(s);
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java b/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java
new file mode 100644
index 0000000..33aded6
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/HeapKeyComparator.java
@@ -0,0 +1,38 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import java.util.Comparator;
+
+/**
+ * Orders raw row keys lexicographically, treating each byte as unsigned.
+ * <p>
+ * When one key is a prefix of the other, the shorter key sorts first.
+ */
+class HeapKeyComparator implements Comparator<byte[]> {
+  /** Singleton instance; the class is stateless. */
+  static final HeapKeyComparator INSTANCE = new HeapKeyComparator();
+
+  private HeapKeyComparator() {
+  }
+
+  @Override
+  public int compare(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length && i < b.length; i++) {
+      final int av = a[i] & 0xff; // treat bytes as unsigned 0..255
+      final int bv = b[i] & 0xff;
+      final int rc = av - bv;
+      if (rc != 0) {
+        return rc;
+      }
+    }
+
+    // All shared bytes equal: the shorter array orders first.
+    return a.length - b.length;
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java
new file mode 100644
index 0000000..a55d520
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/MemoryDatabase.java
@@ -0,0 +1,43 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+
+/**
+ * Toy in-memory implementation of a NoSQL database.
+ * <p>
+ * Implements a simple NoSQL database with a standard {@link java.util.TreeMap}
+ * held inside of this JVM process. All operations occur on the TreeMap, with no
+ * durability across database restarts. Therefore this implementation is only
+ * suitable for simple tests.
+ *
+ * @param <T> type of the application schema.
+ * @see FileDatabase
+ */
+public class MemoryDatabase<T extends Schema> extends
+    TreeMapDatabase<T, TreeMapSchema, TreeMapAccess> {
+
+  /**
+   * Create the database and implement the application's schema interface.
+   * <p>
+   * All storage behavior is inherited from {@link TreeMapDatabase}; this
+   * class only selects the default schema and access base types.
+   *
+   * @param schema the application schema this database will open.
+   * @throws OrmException the schema cannot be queried.
+   */
+  public MemoryDatabase(final Class<T> schema) throws OrmException {
+    super(TreeMapSchema.class, TreeMapAccess.class, schema);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java
new file mode 100644
index 0000000..afd5397
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapAccess.java
@@ -0,0 +1,27 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.Key;
+import com.google.gwtorm.nosql.generic.GenericAccess;
+
+/** Base implementation for {@link Access} in a {@link TreeMapDatabase}. */
+public abstract class TreeMapAccess<T, K extends Key<?>> extends
+    GenericAccess<T, K> {
+  /**
+   * @param s the schema instance this access wraps; all data operations are
+   *        delegated to it through {@link GenericAccess}.
+   */
+  protected TreeMapAccess(final TreeMapSchema s) {
+    super(s);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java
new file mode 100644
index 0000000..5bad767
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapDatabase.java
@@ -0,0 +1,129 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.nosql.generic.GenericDatabase;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.UnknownFieldSet;
+
+import java.io.PrintWriter;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Toy in-memory implementation of a NoSQL database.
+ * <p>
+ * Implements a simple NoSQL database with a standard {@link java.util.TreeMap}
+ * held inside of this JVM process. All operations occur on the TreeMap, with no
+ * durability across database restarts. Therefore this implementation is only
+ * suitable for simple tests.
+ * <p>
+ * Serves as the common base for {@link MemoryDatabase} and
+ * {@link FileDatabase}.
+ *
+ * @param <T> type of the application schema.
+ */
+abstract class TreeMapDatabase<T extends Schema, S extends TreeMapSchema, A extends TreeMapAccess>
+    extends GenericDatabase<T, S, A> {
+
+  /** Lock that protects reads and writes of {@link #table}. */
+  final Lock lock;
+
+  /** The NoSQL database storage. */
+  final SortedMap<byte[], byte[]> table;
+
+  /**
+   * Initialize a new database and generate the implementation.
+   *
+   * @param schemaBaseType class that the generated Schema implementation should
+   *        extend in order to provide data store connectivity.
+   * @param accessBaseType class that the generated Access implementations
+   *        should extend in order to provide single-relation access for each
+   *        schema instance.
+   * @param appSchema the application schema interface that must be implemented
+   *        and constructed on demand.
+   * @throws OrmException the schema cannot be created because of an annotation
+   *         error in the interface definitions.
+   */
+  protected TreeMapDatabase(final Class<S> schemaBaseType,
+      final Class<A> accessBaseType, final Class<T> appSchema)
+      throws OrmException {
+    super(schemaBaseType, accessBaseType, appSchema);
+
+    // Fair lock: waiting threads acquire the table in FIFO order.
+    lock = new ReentrantLock(true);
+    table = new TreeMap<byte[], byte[]>(HeapKeyComparator.INSTANCE);
+  }
+
+  /**
+   * Try to print the database contents in human readable format.
+   *
+   * @param pw writer to print the database out to.
+   */
+  public void dump(PrintWriter pw) {
+    lock.lock();
+    try {
+      for (Map.Entry<byte[], byte[]> ent : table.entrySet()) {
+        String key = format(ent.getKey());
+
+        // Try to pretty-print the value as a protobuf message first,
+        // falling back to raw byte formatting if it does not parse.
+        String val;
+        try {
+          UnknownFieldSet proto = UnknownFieldSet.parseFrom(ent.getValue());
+          val = proto.toString();
+        } catch (InvalidProtocolBufferException notProto) {
+          val = format(ent.getValue());
+        }
+
+        if (val.contains("\n")) {
+          // Indent multi-line values under their key.
+          pw.println(key + ":\n" + "  " + val.replaceAll("\n", "\n  "));
+        } else {
+          pw.println(key + ": " + val);
+        }
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Render a binary key or value as mostly-readable text.
+   * <p>
+   * Escapes a handful of special bytes (NUL, 0x01, 0xff, CR); every other
+   * byte is printed as its char equivalent.
+   */
+  private static String format(byte[] bin) {
+    StringBuilder s = new StringBuilder(bin.length);
+    for (int i = 0; i < bin.length; i++) {
+      byte b = bin[i];
+      switch (b) {
+        case 0x00:
+          s.append("\\0");
+          break;
+
+        case 0x01:
+          s.append("\\1");
+          break;
+
+        case -1: // 0xff as a signed byte
+          s.append("\\xff");
+          break;
+
+        case '\r':
+          s.append("\\r");
+          break;
+
+        default:
+          s.append((char) b);
+          break;
+      }
+    }
+    return s.toString();
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java
new file mode 100644
index 0000000..239003f
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/nosql/heap/TreeMapSchema.java
@@ -0,0 +1,109 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql.heap;
+
+import com.google.gwtorm.client.AtomicUpdate;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.ResultSet;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.impl.ListResultSet;
+import com.google.gwtorm.nosql.generic.GenericSchema;
+import com.google.gwtorm.nosql.generic.Row;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+
+/** Base implementation for {@link Schema} in a {@link TreeMapDatabase}. */
+public abstract class TreeMapSchema extends GenericSchema {
+  private final TreeMapDatabase<?, ?, ?> db;
+
+  protected TreeMapSchema(final TreeMapDatabase<?, ?, ?> d) {
+    super(d);
+    db = d;
+  }
+
+  @Override
+  public void flush() {
+    // We don't buffer writes.
+  }
+
+  @Override
+  public void close() {
+    // Nothing to do.
+  }
+
+  @Override
+  public ResultSet<Row> scan(byte[] fromKey, byte[] toKey, int limit,
+      boolean order) {
+    db.lock.lock();
+    try {
+      final List<Row> res = new ArrayList<Row>();
+      for (Map.Entry<byte[], byte[]> ent : entries(fromKey, toKey)) {
+        res.add(new Row(ent.getKey(), ent.getValue()));
+
+        if (limit > 0 && res.size() == limit) {
+          break;
+        }
+      }
+      return new ListResultSet<Row>(res);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  private Set<Entry<byte[], byte[]>> entries(byte[] fromKey, byte[] toKey) {
+    return db.table.subMap(fromKey, toKey).entrySet();
+  }
+
+  @Override
+  public void upsert(byte[] key, byte[] data) throws OrmException {
+    db.lock.lock();
+    try {
+      db.table.put(key, data);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public void delete(byte[] key) throws OrmException {
+    db.lock.lock();
+    try {
+      db.table.remove(key);
+    } finally {
+      db.lock.unlock();
+    }
+  }
+
+  @Override
+  public void atomicUpdate(byte[] key, AtomicUpdate<byte[]> update)
+      throws OrmException {
+    db.lock.lock();
+    try {
+      final byte[] oldData = fetchRow(key);
+      final byte[] newData = update.update(oldData);
+      if (newData != null) {
+        upsert(key, newData);
+      } else if (oldData != null) {
+        delete(key);
+      }
+    } finally {
+      db.lock.unlock();
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/protobuf/ByteBufferOutputStream.java b/src/main/java/com/google/gwtorm/protobuf/ByteBufferOutputStream.java
new file mode 100644
index 0000000..0316d31
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/protobuf/ByteBufferOutputStream.java
@@ -0,0 +1,36 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.protobuf;
+
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+class ByteBufferOutputStream extends OutputStream {
+  private final ByteBuffer buffer;
+
+  ByteBufferOutputStream(ByteBuffer buffer) {
+    this.buffer = buffer;
+  }
+
+  @Override
+  public void write(int b) {
+    buffer.put((byte) b);
+  }
+
+  @Override
+  public void write(byte[] src, int offset, int length) {
+    buffer.put(src, offset, length);
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/protobuf/CappedInputStream.java b/src/main/java/com/google/gwtorm/protobuf/CappedInputStream.java
new file mode 100644
index 0000000..5bb3bea
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/protobuf/CappedInputStream.java
@@ -0,0 +1,66 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.protobuf;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+class CappedInputStream extends InputStream {
+  private final InputStream src;
+  private int remaining;
+
+  CappedInputStream(InputStream src, int limit) {
+    this.src = src;
+    this.remaining = limit;
+  }
+
+  @Override
+  public int read() throws IOException {
+    if (0 < remaining) {
+      int r = src.read();
+      if (r < 0) {
+        remaining = 0;
+      } else {
+        remaining--;
+      }
+      return r;
+    } else {
+      return -1;
+    }
+  }
+
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    } else if (0 < remaining) {
+      int n = src.read(b, off, Math.min(len, remaining));
+      if (n < 0) {
+        remaining = 0;
+      } else {
+        remaining -= n;
+      }
+      return n;
+    } else {
+      return -1;
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    remaining = 0;
+    src.close();
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/protobuf/CodecFactory.java b/src/main/java/com/google/gwtorm/protobuf/CodecFactory.java
index 7db0f96..f7b2cdf 100644
--- a/src/main/java/com/google/gwtorm/protobuf/CodecFactory.java
+++ b/src/main/java/com/google/gwtorm/protobuf/CodecFactory.java
@@ -16,7 +16,7 @@
 
 import com.google.gwtorm.client.Column;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.jdbc.gen.GeneratedClassLoader;
+import com.google.gwtorm.server.GeneratedClassLoader;
 
 import java.util.Collections;
 import java.util.Map;
diff --git a/src/main/java/com/google/gwtorm/protobuf/CodecGen.java b/src/main/java/com/google/gwtorm/protobuf/CodecGen.java
index 6c79119..2aaf841 100644
--- a/src/main/java/com/google/gwtorm/protobuf/CodecGen.java
+++ b/src/main/java/com/google/gwtorm/protobuf/CodecGen.java
@@ -16,11 +16,11 @@
 
 import com.google.gwtorm.client.Column;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
-import com.google.gwtorm.jdbc.gen.GeneratedClassLoader;
 import com.google.gwtorm.schema.ColumnModel;
 import com.google.gwtorm.schema.Util;
 import com.google.gwtorm.schema.java.JavaColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
+import com.google.gwtorm.server.GeneratedClassLoader;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
@@ -32,18 +32,26 @@
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.Type;
 
-import java.io.OutputStream;
 import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.TreeSet;
 
 /** Generates {@link ProtobufCodec} implementations. */
 class CodecGen<T> implements Opcodes {
+  private static final Type illegalStateException =
+      Type.getType(IllegalStateException.class);
+  private static final Type collection =
+      Type.getType(java.util.Collection.class);
+  private static final Type iterator = Type.getType(java.util.Iterator.class);
   private static final Type string = Type.getType(String.class);
-  private static final Type byteStringOutput =
-      Type.getType(ByteString.Output.class);
+  private static final Type enumType = Type.getType(Enum.class);
   private static final Type byteString = Type.getType(ByteString.class);
   private static final Type object = Type.getType(Object.class);
   private static final Type codedOutputStream =
@@ -60,20 +68,34 @@
   private String implClassName;
   private String implTypeName;
 
+  private Map<Class<?>, NestedCodec> nestedCodecs;
+
   public CodecGen(final GeneratedClassLoader loader, final Class<T> t) {
     classLoader = loader;
     pojo = t;
     pojoType = Type.getType(pojo);
+    nestedCodecs = new HashMap<Class<?>, NestedCodec>();
   }
 
   public ProtobufCodec<T> create() throws OrmException {
     myFields = scanFields(pojo);
 
     init();
+    implementNewInstanceObject();
+    implementNewInstanceSelf();
+
+    implementSizeofObject();
+    implementSizeofSelf();
+
+    implementEncodeObject();
+    implementEncodeSelf();
+
+    implementMergeFromObject();
+    implementMergeFromSelf();
+
+    implementCodecFields();
+    implementStaticInit();
     implementConstructor();
-    implementSizeof();
-    implementEncode();
-    implementDecode();
     cw.visitEnd();
     classLoader.defineClass(implClassName, cw.toByteArray());
 
@@ -99,6 +121,10 @@
       }
       in = in.getSuperclass();
     }
+    if (col.isEmpty()) {
+      throw new OrmException(
+          "Cannot create new encoder, no @Column fields found");
+    }
     return sort(col);
   }
 
@@ -129,6 +155,33 @@
         superTypeName, new String[] {});
   }
 
+  private void implementCodecFields() {
+    for (NestedCodec other : nestedCodecs.values()) {
+      cw.visitField(ACC_PRIVATE | ACC_STATIC | ACC_FINAL, other.field,
+          other.codecType.getDescriptor(), null, null).visitEnd();
+    }
+  }
+
+  private void implementStaticInit() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "<clinit>", Type
+            .getMethodDescriptor(Type.VOID_TYPE, new Type[] {}), null, null);
+    mv.visitCode();
+
+    for (NestedCodec other : nestedCodecs.values()) {
+      mv.visitTypeInsn(NEW, other.codecType.getInternalName());
+      mv.visitInsn(DUP);
+      mv.visitMethodInsn(INVOKESPECIAL, other.codecType.getInternalName(),
+          "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+      mv.visitFieldInsn(PUTSTATIC, implTypeName, other.field, other.codecType
+          .getDescriptor());
+    }
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
   private void implementConstructor() {
     final String consName = "<init>";
     final String consDesc =
@@ -145,17 +198,66 @@
     mv.visitEnd();
   }
 
-  private void implementSizeof() throws OrmException {
+  private void implementNewInstanceObject() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "newInstance", Type.getMethodDescriptor(
+            object, new Type[] {}), null, new String[] {});
+    mv.visitCode();
+
+    mv.visitTypeInsn(NEW, pojoType.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, pojoType.getInternalName(), "<init>",
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementNewInstanceSelf() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "newInstance", Type.getMethodDescriptor(
+            pojoType, new Type[] {}), null, new String[] {});
+    mv.visitCode();
+
+    mv.visitTypeInsn(NEW, pojoType.getInternalName());
+    mv.visitInsn(DUP);
+    mv.visitMethodInsn(INVOKESPECIAL, pojoType.getInternalName(), "<init>",
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementSizeofObject() {
     final MethodVisitor mv =
         cw.visitMethod(ACC_PUBLIC, "sizeof", Type.getMethodDescriptor(
             Type.INT_TYPE, new Type[] {object}), null, new String[] {});
     mv.visitCode();
     final SizeofCGS cgs = new SizeofCGS(mv);
+    cgs.sizeVar = cgs.newLocal();
     cgs.setEntityType(pojoType);
 
+    mv.visitVarInsn(ALOAD, 0);
     mv.visitVarInsn(ALOAD, 1);
     mv.visitTypeInsn(CHECKCAST, pojoType.getInternalName());
-    mv.visitVarInsn(ASTORE, 1);
+    mv.visitMethodInsn(INVOKEVIRTUAL, implTypeName, "sizeof", Type
+        .getMethodDescriptor(Type.INT_TYPE, new Type[] {pojoType}));
+
+    mv.visitInsn(IRETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementSizeofSelf() throws OrmException {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "sizeof", Type.getMethodDescriptor(
+            Type.INT_TYPE, new Type[] {pojoType}), null, new String[] {});
+    mv.visitCode();
+    final SizeofCGS cgs = new SizeofCGS(mv);
+    cgs.sizeVar = cgs.newLocal();
+    cgs.setEntityType(pojoType);
 
     cgs.push(0);
     mv.visitVarInsn(ISTORE, cgs.sizeVar);
@@ -167,82 +269,197 @@
     mv.visitEnd();
   }
 
-  private static void sizeofMessage(final JavaColumnModel[] myFields,
+  private void sizeofMessage(final JavaColumnModel[] myFields,
       final MethodVisitor mv, final SizeofCGS cgs) throws OrmException {
     for (final JavaColumnModel f : myFields) {
       if (f.isNested()) {
+        final NestedCodec n = nestedFor(f);
         final Label end = new Label();
         cgs.setFieldReference(f);
         cgs.pushFieldValue();
         mv.visitJumpInsn(IFNULL, end);
 
-        final int oldVar = cgs.sizeVar;
-        final int msgVar = cgs.newLocal();
-        cgs.sizeVar = msgVar;
-        cgs.push(0);
-        mv.visitVarInsn(ISTORE, cgs.sizeVar);
+        final int msgSizeVar = cgs.newLocal();
+        mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+            .getDescriptor());
+        cgs.pushFieldValue();
+        mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+            "sizeof", Type.getMethodDescriptor(Type.INT_TYPE,
+                new Type[] {n.pojoType}));
+        mv.visitVarInsn(ISTORE, msgSizeVar);
 
-        sizeofMessage(sort(f.getNestedColumns()), mv, cgs);
-        cgs.sizeVar = oldVar;
-
+        cgs.preinc();
         cgs.push(f.getColumnID());
-        cgs.inc("computeTagSize", Type.INT_TYPE);
+        cgs.doinc("computeTagSize", Type.INT_TYPE);
 
-        mv.visitVarInsn(ILOAD, msgVar);
-        cgs.inc("computeRawVarint32Size", Type.INT_TYPE);
+        cgs.preinc();
+        mv.visitVarInsn(ILOAD, msgSizeVar);
+        cgs.doinc("computeRawVarint32Size", Type.INT_TYPE);
 
-        mv.visitVarInsn(ILOAD, msgVar);
-        cgs.inc();
+        cgs.preinc();
+        mv.visitVarInsn(ILOAD, msgSizeVar);
+        cgs.doinc();
 
-        cgs.freeLocal(msgVar);
+        cgs.freeLocal(msgSizeVar);
         mv.visitLabel(end);
+
+      } else if (f.isCollection()) {
+        sizeofCollection(f, mv, cgs);
+
       } else {
         sizeofScalar(mv, cgs, f);
       }
     }
   }
 
-  private static void sizeofScalar(final MethodVisitor mv, final SizeofCGS cgs,
+  @SuppressWarnings("unchecked")
+  private NestedCodec nestedFor(JavaColumnModel f) {
+    Class clazz = f.getNestedClass();
+    NestedCodec n = nestedCodecs.get(clazz);
+    if (n == null) {
+      Class<? extends ProtobufCodec> codec = null;
+      Type type = Type.getType(clazz);
+      if (f.getField() != null) {
+        final CustomCodec cc = f.getField().getAnnotation(CustomCodec.class);
+        if (cc != null) {
+          codec = cc.value();
+          type = object;
+        }
+      }
+      if (codec == null) {
+        codec = CodecFactory.encoder(clazz).getClass();
+      }
+
+      n = new NestedCodec("codec" + f.getColumnID(), codec, type);
+      nestedCodecs.put(clazz, n);
+    }
+    return n;
+  }
+
+  private void sizeofCollection(final JavaColumnModel f,
+      final MethodVisitor mv, final SizeofCGS cgs) throws OrmException {
+    final int itr = cgs.newLocal();
+    final int val = cgs.newLocal();
+    final Class<?> valClazz = (Class<?>) f.getArgumentTypes()[0];
+    final Type valType = Type.getType(valClazz);
+    final JavaColumnModel col = collectionColumn(f, valClazz);
+    final SizeofCGS ng = new SizeofCGS(mv) {
+      {
+        sizeVar = cgs.sizeVar;
+        setEntityType(valType);
+      }
+
+      @Override
+      public void pushEntity() {
+        mv.visitVarInsn(ALOAD, val);
+      }
+
+      @Override
+      protected void appendGetField(final ColumnModel c) {
+        if (c != col) {
+          super.appendGetField(c);
+        }
+      }
+
+      @Override
+      public int newLocal() {
+        return cgs.newLocal();
+      }
+
+      @Override
+      public void freeLocal(int index) {
+        cgs.freeLocal(index);
+      }
+    };
+
+    final Label end = new Label();
+    cgs.setFieldReference(f);
+    cgs.pushFieldValue();
+    mv.visitJumpInsn(IFNULL, end);
+
+    cgs.setFieldReference(f);
+    cgs.pushFieldValue();
+    mv.visitMethodInsn(INVOKEINTERFACE, collection.getInternalName(),
+        "iterator", Type.getMethodDescriptor(iterator, new Type[] {}));
+    mv.visitVarInsn(ASTORE, itr);
+
+    final Label doloop = new Label();
+    mv.visitLabel(doloop);
+    mv.visitVarInsn(ALOAD, itr);
+    mv.visitMethodInsn(INVOKEINTERFACE, iterator.getInternalName(), "hasNext",
+        Type.getMethodDescriptor(Type.BOOLEAN_TYPE, new Type[] {}));
+    mv.visitJumpInsn(IFEQ, end);
+
+    mv.visitVarInsn(ALOAD, itr);
+    mv.visitMethodInsn(INVOKEINTERFACE, iterator.getInternalName(), "next",
+        Type.getMethodDescriptor(object, new Type[] {}));
+    mv.visitTypeInsn(CHECKCAST, valType.getInternalName());
+    mv.visitVarInsn(ASTORE, val);
+
+    sizeofMessage(new JavaColumnModel[] {col}, mv, ng);
+    mv.visitJumpInsn(GOTO, doloop);
+
+    mv.visitLabel(end);
+    cgs.freeLocal(itr);
+    cgs.freeLocal(val);
+  }
+
+  private JavaColumnModel collectionColumn(final JavaColumnModel f,
+      final Class<?> valClazz) throws OrmException {
+    return new JavaColumnModel( //
+        f.getField(), //
+        f.getPathToFieldName(), //
+        f.getColumnID(), //
+        valClazz);
+  }
+
+  private void sizeofScalar(final MethodVisitor mv, final SizeofCGS cgs,
       final JavaColumnModel f) throws OrmException {
     cgs.setFieldReference(f);
 
     switch (Type.getType(f.getPrimitiveType()).getSort()) {
       case Type.BOOLEAN:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeBoolSize", Type.INT_TYPE, Type.BOOLEAN_TYPE);
+        cgs.doinc("computeBoolSize", Type.INT_TYPE, Type.BOOLEAN_TYPE);
         break;
 
       case Type.CHAR:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeUInt32Size", Type.INT_TYPE, Type.INT_TYPE);
+        cgs.doinc("computeUInt32Size", Type.INT_TYPE, Type.INT_TYPE);
         break;
 
       case Type.BYTE:
       case Type.SHORT:
       case Type.INT:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeSInt32Size", Type.INT_TYPE, Type.INT_TYPE);
+        cgs.doinc("computeSInt32Size", Type.INT_TYPE, Type.INT_TYPE);
         break;
 
       case Type.FLOAT:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeFloatSize", Type.INT_TYPE, Type.FLOAT_TYPE);
+        cgs.doinc("computeFloatSize", Type.INT_TYPE, Type.FLOAT_TYPE);
         break;
 
       case Type.DOUBLE:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeDoubleSize", Type.INT_TYPE, Type.DOUBLE_TYPE);
+        cgs.doinc("computeDoubleSize", Type.INT_TYPE, Type.DOUBLE_TYPE);
         break;
 
       case Type.LONG:
+        cgs.preinc();
         cgs.push(f.getColumnID());
         cgs.pushFieldValue();
-        cgs.inc("computeSInt64", Type.INT_TYPE, Type.LONG_TYPE);
+        cgs.doinc("computeSInt64Size", Type.INT_TYPE, Type.LONG_TYPE);
         break;
 
       case Type.ARRAY:
@@ -252,30 +469,45 @@
         mv.visitJumpInsn(IFNULL, end);
 
         if (f.getPrimitiveType() == byte[].class) {
+          cgs.preinc();
           cgs.push(f.getColumnID());
-          cgs.inc("computeTagSize", Type.INT_TYPE);
+          cgs.doinc("computeTagSize", Type.INT_TYPE);
 
+          cgs.preinc();
           cgs.pushFieldValue();
           mv.visitInsn(ARRAYLENGTH);
-          cgs.inc("computeRawVarint32Size", Type.INT_TYPE);
+          cgs.doinc("computeRawVarint32Size", Type.INT_TYPE);
 
+          cgs.preinc();
           cgs.pushFieldValue();
           mv.visitInsn(ARRAYLENGTH);
-          cgs.inc();
+          cgs.doinc();
 
         } else if (f.getPrimitiveType() == String.class) {
+          cgs.preinc();
           cgs.push(f.getColumnID());
           cgs.pushFieldValue();
-          cgs.inc("computeStringSize", Type.INT_TYPE, string);
+          cgs.doinc("computeStringSize", Type.INT_TYPE, string);
 
         } else if (f.getPrimitiveType() == java.sql.Timestamp.class
             || f.getPrimitiveType() == java.util.Date.class
             || f.getPrimitiveType() == java.sql.Date.class) {
+          cgs.preinc();
           cgs.push(f.getColumnID());
+          cgs.pushFieldValue();
           String tsType = Type.getType(f.getPrimitiveType()).getInternalName();
           mv.visitMethodInsn(INVOKEVIRTUAL, tsType, "getTime", Type
               .getMethodDescriptor(Type.LONG_TYPE, new Type[] {}));
-          cgs.inc("computeFixed64Size", Type.INT_TYPE, Type.LONG_TYPE);
+          cgs.doinc("computeFixed64Size", Type.INT_TYPE, Type.LONG_TYPE);
+
+        } else if (f.getPrimitiveType().isEnum()) {
+          cgs.preinc();
+          cgs.push(f.getColumnID());
+          cgs.pushFieldValue();
+          mv.visitMethodInsn(INVOKEVIRTUAL, enumType.getInternalName(),
+              "ordinal", //
+              Type.getMethodDescriptor(Type.INT_TYPE, new Type[] {}));
+          cgs.doinc("computeEnumSize", Type.INT_TYPE, Type.INT_TYPE);
 
         } else {
           throw new OrmException("Type " + f.getPrimitiveType()
@@ -291,83 +523,166 @@
     }
   }
 
-  private void implementEncode() throws OrmException {
+  private void implementEncodeObject() {
     final MethodVisitor mv =
         cw.visitMethod(ACC_PUBLIC, "encode", Type.getMethodDescriptor(
-            byteString, new Type[] {object}), null, new String[] {});
+            Type.VOID_TYPE, new Type[] {object, codedOutputStream}), null,
+            new String[] {});
     mv.visitCode();
     final EncodeCGS cgs = new EncodeCGS(mv);
     cgs.setEntityType(pojoType);
 
+    mv.visitVarInsn(ALOAD, 0);
     mv.visitVarInsn(ALOAD, 1);
     mv.visitTypeInsn(CHECKCAST, pojoType.getInternalName());
-    mv.visitVarInsn(ASTORE, 1);
+    mv.visitVarInsn(ALOAD, 2);
+    mv.visitMethodInsn(INVOKEVIRTUAL, implTypeName, "encode", Type
+        .getMethodDescriptor(Type.VOID_TYPE, new Type[] {pojoType,
+            codedOutputStream}));
 
-    encodeMessage(myFields, mv, cgs);
-
-    mv.visitInsn(ARETURN);
+    mv.visitInsn(RETURN);
     mv.visitMaxs(-1, -1);
     mv.visitEnd();
   }
 
-  private static void encodeMessage(final JavaColumnModel[] myFields,
+  private void implementEncodeSelf() throws OrmException {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "encode", Type.getMethodDescriptor(
+            Type.VOID_TYPE, new Type[] {pojoType, codedOutputStream}), null,
+            new String[] {});
+    mv.visitCode();
+    final EncodeCGS cgs = new EncodeCGS(mv);
+    cgs.setEntityType(pojoType);
+
+    encodeMessage(myFields, mv, cgs);
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void encodeMessage(final JavaColumnModel[] myFields,
       final MethodVisitor mv, final EncodeCGS cgs) throws OrmException {
-    final int oldVar = cgs.codedOutputStreamVar;
-    cgs.codedOutputStreamVar = cgs.newLocal();
-
-    final int strVar = cgs.newLocal();
-    mv.visitMethodInsn(INVOKESTATIC, byteString.getInternalName(), "newOutput",
-        Type.getMethodDescriptor(byteStringOutput, new Type[] {}));
-    mv.visitVarInsn(ASTORE, strVar);
-
-    mv.visitVarInsn(ALOAD, strVar);
-    mv.visitMethodInsn(INVOKESTATIC, codedOutputStream.getInternalName(),
-        "newInstance", Type.getMethodDescriptor(codedOutputStream,
-            new Type[] {Type.getType(OutputStream.class)}));
-    mv.visitVarInsn(ASTORE, cgs.codedOutputStreamVar);
-
     for (final JavaColumnModel f : myFields) {
       if (f.isNested()) {
+        final NestedCodec n = nestedFor(f);
+
         final Label end = new Label();
         cgs.setFieldReference(f);
         cgs.pushFieldValue();
         mv.visitJumpInsn(IFNULL, end);
 
-        final int v = cgs.newLocal();
-        encodeMessage(sort(f.getNestedColumns()), mv, cgs);
-        mv.visitVarInsn(ASTORE, v);
-
-        mv.visitVarInsn(ALOAD, v);
-        mv.visitMethodInsn(INVOKEVIRTUAL, byteString.getInternalName(), "size",
-            Type.getMethodDescriptor(Type.INT_TYPE, new Type[] {}));
-        mv.visitJumpInsn(IFEQ, end);
+        final int msgSizeVar = cgs.newLocal();
+        mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+            .getDescriptor());
+        cgs.pushFieldValue();
+        mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+            "sizeof", Type.getMethodDescriptor(Type.INT_TYPE,
+                new Type[] {n.pojoType}));
+        mv.visitVarInsn(ISTORE, msgSizeVar);
 
         cgs.pushCodedOutputStream();
         cgs.push(f.getColumnID());
-        mv.visitVarInsn(ALOAD, v);
-        cgs.write("writeBytes", byteString);
+        cgs.push(WireFormat.FieldType.MESSAGE.getWireType());
+        mv.visitMethodInsn(INVOKEVIRTUAL, codedOutputStream.getInternalName(),
+            "writeTag", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
+                Type.INT_TYPE, Type.INT_TYPE}));
 
-        cgs.freeLocal(v);
+        cgs.pushCodedOutputStream();
+        mv.visitVarInsn(ILOAD, msgSizeVar);
+        mv.visitMethodInsn(INVOKEVIRTUAL, codedOutputStream.getInternalName(),
+            "writeRawVarint32", Type.getMethodDescriptor(Type.VOID_TYPE,
+                new Type[] {Type.INT_TYPE}));
+
+        mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+            .getDescriptor());
+        cgs.pushFieldValue();
+        cgs.pushCodedOutputStream();
+        mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+            "encode", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
+                n.pojoType, codedOutputStream}));
+
+        cgs.freeLocal(msgSizeVar);
         mv.visitLabel(end);
+
+      } else if (f.isCollection()) {
+        encodeCollection(f, mv, cgs);
+
       } else {
         encodeScalar(mv, cgs, f);
       }
     }
-
-    cgs.pushCodedOutputStream();
-    mv.visitMethodInsn(INVOKEVIRTUAL, codedOutputStream.getInternalName(),
-        "flush", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
-
-    cgs.freeLocal(cgs.codedOutputStreamVar);
-    cgs.codedOutputStreamVar = oldVar;
-
-    mv.visitVarInsn(ALOAD, strVar);
-    mv.visitMethodInsn(INVOKEVIRTUAL, byteStringOutput.getInternalName(),
-        "toByteString", Type.getMethodDescriptor(byteString, new Type[] {}));
-    cgs.freeLocal(strVar);
   }
 
-  private static void encodeScalar(final MethodVisitor mv, final EncodeCGS cgs,
+  private void encodeCollection(final JavaColumnModel f,
+      final MethodVisitor mv, final EncodeCGS cgs) throws OrmException {
+    final int itr = cgs.newLocal();
+    final int val = cgs.newLocal();
+    final Class<?> valClazz = (Class<?>) f.getArgumentTypes()[0];
+    final Type valType = Type.getType(valClazz);
+    final JavaColumnModel col = collectionColumn(f, valClazz);
+    final EncodeCGS ng = new EncodeCGS(mv) {
+      {
+        sizeVar = cgs.sizeVar;
+        setEntityType(valType);
+      }
+
+      @Override
+      public void pushEntity() {
+        mv.visitVarInsn(ALOAD, val);
+      }
+
+      @Override
+      protected void appendGetField(final ColumnModel c) {
+        if (c != col) {
+          super.appendGetField(c);
+        }
+      }
+
+      @Override
+      public int newLocal() {
+        return cgs.newLocal();
+      }
+
+      @Override
+      public void freeLocal(int index) {
+        cgs.freeLocal(index);
+      }
+    };
+
+    final Label end = new Label();
+    cgs.setFieldReference(f);
+    cgs.pushFieldValue();
+    mv.visitJumpInsn(IFNULL, end);
+
+    cgs.setFieldReference(f);
+    cgs.pushFieldValue();
+    mv.visitMethodInsn(INVOKEINTERFACE, collection.getInternalName(),
+        "iterator", Type.getMethodDescriptor(iterator, new Type[] {}));
+    mv.visitVarInsn(ASTORE, itr);
+
+    final Label doloop = new Label();
+    mv.visitLabel(doloop);
+    mv.visitVarInsn(ALOAD, itr);
+    mv.visitMethodInsn(INVOKEINTERFACE, iterator.getInternalName(), "hasNext",
+        Type.getMethodDescriptor(Type.BOOLEAN_TYPE, new Type[] {}));
+    mv.visitJumpInsn(IFEQ, end);
+
+    mv.visitVarInsn(ALOAD, itr);
+    mv.visitMethodInsn(INVOKEINTERFACE, iterator.getInternalName(), "next",
+        Type.getMethodDescriptor(object, new Type[] {}));
+    mv.visitTypeInsn(CHECKCAST, valType.getInternalName());
+    mv.visitVarInsn(ASTORE, val);
+
+    encodeMessage(new JavaColumnModel[] {col}, mv, ng);
+    mv.visitJumpInsn(GOTO, doloop);
+
+    mv.visitLabel(end);
+    cgs.freeLocal(itr);
+    cgs.freeLocal(val);
+  }
+
+  private void encodeScalar(final MethodVisitor mv, final EncodeCGS cgs,
       final JavaColumnModel f) throws OrmException {
     cgs.setFieldReference(f);
 
@@ -463,6 +778,12 @@
                 .getMethodDescriptor(Type.LONG_TYPE, new Type[] {}));
             cgs.write("writeFixed64", Type.LONG_TYPE);
 
+          } else if (f.getPrimitiveType().isEnum()) {
+            mv.visitMethodInsn(INVOKEVIRTUAL, enumType.getInternalName(),
+                "ordinal", //
+                Type.getMethodDescriptor(Type.INT_TYPE, new Type[] {}));
+            cgs.write("writeEnum", Type.INT_TYPE);
+
           } else {
             throw new OrmException("Type " + f.getPrimitiveType()
                 + " not supported for field " + f.getPathToFieldName());
@@ -478,32 +799,45 @@
     }
   }
 
-  private void implementDecode() throws OrmException {
-    final Type retType = object;
+  private void implementMergeFromObject() {
     final MethodVisitor mv =
-        cw.visitMethod(ACC_PROTECTED, "decode", Type.getMethodDescriptor(
-            retType, new Type[] {codedInputStream}), null, new String[] {});
+        cw.visitMethod(ACC_PUBLIC, "mergeFrom", Type.getMethodDescriptor(
+            Type.VOID_TYPE, new Type[] {codedInputStream, object}), null,
+            new String[] {});
     mv.visitCode();
-    final DecodeCGS cgs = new DecodeCGS(mv);
 
-    cgs.setEntityType(pojoType);
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitVarInsn(ALOAD, 2);
+    mv.visitTypeInsn(CHECKCAST, pojoType.getInternalName());
+    mv.visitMethodInsn(INVOKEVIRTUAL, implTypeName, "mergeFrom", Type
+        .getMethodDescriptor(Type.VOID_TYPE, new Type[] {codedInputStream,
+            pojoType}));
 
-    mv.visitTypeInsn(NEW, pojoType.getInternalName());
-    mv.visitInsn(DUP);
-    mv.visitMethodInsn(INVOKESPECIAL, pojoType.getInternalName(), "<init>",
-        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
-    mv.visitVarInsn(ASTORE, cgs.objVar);
-
-    final int tagVar = cgs.newLocal();
-    decodeMessage(myFields, mv, cgs);
-
-    cgs.pushEntity();
-    mv.visitInsn(ARETURN);
+    mv.visitInsn(RETURN);
     mv.visitMaxs(-1, -1);
     mv.visitEnd();
   }
 
-  private static void decodeMessage(final JavaColumnModel[] myFields,
+  private void implementMergeFromSelf() throws OrmException {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, "mergeFrom", Type.getMethodDescriptor(
+            Type.VOID_TYPE, new Type[] {codedInputStream, pojoType}), null,
+            new String[] {});
+    mv.visitCode();
+    final DecodeCGS cgs = new DecodeCGS(mv);
+    cgs.objVar = 2;
+    cgs.tagVar = cgs.newLocal();
+    cgs.setEntityType(pojoType);
+
+    decodeMessage(myFields, mv, cgs);
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void decodeMessage(final JavaColumnModel[] myFields,
       final MethodVisitor mv, final DecodeCGS cgs) throws OrmException {
     final Label nextField = new Label();
     final Label end = new Label();
@@ -550,40 +884,7 @@
     for (int idx = 1; idx < caseTags.length; idx++) {
       final JavaColumnModel f = myFields[idx - 1];
       mv.visitLabel(caseLabels[idx]);
-      if (f.isNested()) {
-        final Label load = new Label();
-        cgs.setFieldReference(f);
-        cgs.pushFieldValue();
-        mv.visitJumpInsn(IFNONNULL, load);
-        cgs.fieldSetBegin();
-        mv.visitTypeInsn(NEW, Type.getType(f.getNestedClass())
-            .getInternalName());
-        mv.visitInsn(DUP);
-        mv.visitMethodInsn(INVOKESPECIAL, Type.getType(f.getNestedClass())
-            .getInternalName(), "<init>", Type.getMethodDescriptor(
-            Type.VOID_TYPE, new Type[] {}));
-        cgs.fieldSetEnd();
-
-        // read the length, set a new limit, decode the message, validate
-        // we stopped at the end of it as expected.
-        //
-        mv.visitLabel(load);
-        final int limitVar = cgs.newLocal();
-        cgs.pushCodedInputStream();
-        cgs.call("readRawVarint32", Type.INT_TYPE);
-        cgs.ncallInt("pushLimit", Type.INT_TYPE);
-        mv.visitVarInsn(ISTORE, limitVar);
-
-        decodeMessage(sort(f.getNestedColumns()), mv, cgs);
-
-        cgs.pushCodedInputStream();
-        mv.visitVarInsn(ILOAD, limitVar);
-        cgs.ncallInt("popLimit", Type.VOID_TYPE);
-        cgs.freeLocal(limitVar);
-
-      } else {
-        decodeScalar(mv, cgs, f);
-      }
+      decodeField(mv, cgs, f);
       mv.visitJumpInsn(GOTO, nextField);
     }
 
@@ -601,6 +902,200 @@
     cgs.ncallInt("checkLastTagWas", Type.VOID_TYPE);
   }
 
+  private void decodeField(final MethodVisitor mv, final DecodeCGS cgs,
+      final JavaColumnModel f) throws OrmException {
+    if (f.isNested()) {
+      final NestedCodec n = nestedFor(f);
+      final Label load = new Label();
+      cgs.setFieldReference(f);
+      cgs.pushFieldValue();
+      mv.visitJumpInsn(IFNONNULL, load);
+
+      // Since the field isn't initialized, construct it
+      //
+      cgs.fieldSetBegin();
+      mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+          .getDescriptor());
+      mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+          "newInstance", Type.getMethodDescriptor(n.pojoType, new Type[] {}));
+      if (object.equals(n.pojoType)) {
+        mv.visitTypeInsn(CHECKCAST, Type.getType(f.getNestedClass())
+            .getInternalName());
+      }
+      cgs.fieldSetEnd();
+
+      // read the length, set a new limit, decode the message, validate
+      // we stopped at the end of it as expected.
+      //
+      mv.visitLabel(load);
+      final int limitVar = cgs.newLocal();
+      cgs.pushCodedInputStream();
+      cgs.call("readRawVarint32", Type.INT_TYPE);
+      cgs.ncallInt("pushLimit", Type.INT_TYPE);
+      mv.visitVarInsn(ISTORE, limitVar);
+
+      mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+          .getDescriptor());
+      cgs.pushCodedInputStream();
+      cgs.pushFieldValue();
+      mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+          "mergeFrom", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
+              codedInputStream, n.pojoType}));
+
+      cgs.pushCodedInputStream();
+      mv.visitVarInsn(ILOAD, limitVar);
+      cgs.ncallInt("popLimit", Type.VOID_TYPE);
+      cgs.freeLocal(limitVar);
+
+    } else if (f.isCollection()) {
+      decodeCollection(mv, cgs, f);
+
+    } else {
+      decodeScalar(mv, cgs, f);
+    }
+  }
+
+  private void decodeCollection(final MethodVisitor mv, final DecodeCGS cgs,
+      final JavaColumnModel f) throws OrmException {
+    final Class<?> valClazz = (Class<?>) f.getArgumentTypes()[0];
+    final Type valType = Type.getType(valClazz);
+    final JavaColumnModel col = collectionColumn(f, valClazz);
+    final DecodeCGS ng = new DecodeCGS(mv) {
+      {
+        tagVar = cgs.tagVar;
+        setEntityType(valType);
+      }
+
+      @Override
+      public int newLocal() {
+        return cgs.newLocal();
+      }
+
+      @Override
+      public void freeLocal(int index) {
+        cgs.freeLocal(index);
+      }
+
+      @Override
+      protected void appendGetField(final ColumnModel c) {
+        if (c != col) {
+          super.appendGetField(c);
+        }
+      }
+
+      @Override
+      public void fieldSetBegin() {
+        if (col.isNested()) {
+          super.fieldSetBegin();
+        } else {
+          cgs.pushFieldValue();
+        }
+      }
+
+      @Override
+      public void fieldSetEnd() {
+        if (col.isNested()) {
+          super.fieldSetEnd();
+        } else {
+          mv.visitMethodInsn(INVOKEINTERFACE, collection.getInternalName(),
+              "add", Type.getMethodDescriptor(Type.BOOLEAN_TYPE,
+                  new Type[] {object}));
+          mv.visitInsn(POP);
+        }
+      }
+    };
+
+    final Label notnull = new Label();
+    cgs.setFieldReference(f);
+    cgs.pushFieldValue();
+    mv.visitJumpInsn(IFNONNULL, notnull);
+
+    // If the field is null, try to initialize it based on its declared type.
+    // If we don't know what that is, we have to throw an exception instead.
+    //
+    final Type concreteType;
+    if (!f.getNestedClass().isInterface()
+        && (f.getNestedClass().getModifiers() & Modifier.ABSTRACT) == 0) {
+      concreteType = Type.getType(f.getNestedClass());
+
+    } else if (f.getNestedClass().isAssignableFrom(ArrayList.class)) {
+      concreteType = Type.getType(ArrayList.class);
+
+    } else if (f.getNestedClass().isAssignableFrom(HashSet.class)) {
+      concreteType = Type.getType(HashSet.class);
+
+    } else if (f.getNestedClass().isAssignableFrom(TreeSet.class)) {
+      concreteType = Type.getType(TreeSet.class);
+
+    } else {
+      mv.visitTypeInsn(NEW, illegalStateException.getInternalName());
+      mv.visitInsn(DUP);
+      mv.visitLdcInsn("Field " + f.getPathToFieldName() + " not initialized");
+      mv.visitMethodInsn(INVOKESPECIAL,
+          illegalStateException.getInternalName(), "<init>", Type
+              .getMethodDescriptor(Type.VOID_TYPE, new Type[] {string}));
+      mv.visitInsn(ATHROW);
+      concreteType = null;
+    }
+    if (concreteType != null) {
+      cgs.fieldSetBegin();
+      mv.visitTypeInsn(NEW, concreteType.getInternalName());
+      mv.visitInsn(DUP);
+      mv.visitMethodInsn(INVOKESPECIAL, concreteType.getInternalName(),
+          "<init>", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+      cgs.fieldSetEnd();
+    }
+    mv.visitLabel(notnull);
+
+    if (col.isNested()) {
+      // If its nested, we have to build the object instance.
+      //
+      final NestedCodec n = nestedFor(col);
+      ng.objVar = cgs.newLocal();
+      mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+          .getDescriptor());
+      mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+          "newInstance", Type.getMethodDescriptor(n.pojoType, new Type[] {}));
+      mv.visitVarInsn(ASTORE, ng.objVar);
+
+      // read the length, set a new limit, decode the message, validate
+      // we stopped at the end of it as expected.
+      //
+      final int limitVar = cgs.newLocal();
+      cgs.pushCodedInputStream();
+      cgs.call("readRawVarint32", Type.INT_TYPE);
+      cgs.ncallInt("pushLimit", Type.INT_TYPE);
+      mv.visitVarInsn(ISTORE, limitVar);
+
+      mv.visitFieldInsn(GETSTATIC, implTypeName, n.field, n.codecType
+          .getDescriptor());
+      cgs.pushCodedInputStream();
+      mv.visitVarInsn(ALOAD, ng.objVar);
+      mv.visitMethodInsn(INVOKEVIRTUAL, n.codecType.getInternalName(),
+          "mergeFrom", Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
+              codedInputStream, n.pojoType}));
+
+      cgs.pushCodedInputStream();
+      mv.visitVarInsn(ILOAD, limitVar);
+      cgs.ncallInt("popLimit", Type.VOID_TYPE);
+      cgs.freeLocal(limitVar);
+      cgs.pushFieldValue();
+
+      mv.visitVarInsn(ALOAD, ng.objVar);
+      mv.visitMethodInsn(INVOKEINTERFACE, collection.getInternalName(), "add",
+          Type.getMethodDescriptor(Type.BOOLEAN_TYPE, new Type[] {object}));
+      mv.visitInsn(POP);
+      cgs.freeLocal(ng.objVar);
+
+    } else if (col.isCollection()) {
+      throw new OrmException("Cannot nest collection as member of another"
+          + " collection: " + f.getPathToFieldName());
+
+    } else {
+      decodeScalar(mv, ng, col);
+    }
+  }
+
   private static void decodeScalar(final MethodVisitor mv, final DecodeCGS cgs,
       final JavaColumnModel f) throws OrmException {
     cgs.setFieldReference(f);
@@ -649,8 +1144,17 @@
           mv.visitTypeInsn(NEW, tsType);
           mv.visitInsn(DUP);
           cgs.call("readFixed64", Type.LONG_TYPE);
-          mv.visitMethodInsn(INVOKESPECIAL, tsType, "<init>", Type
-              .getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+          mv.visitMethodInsn(INVOKESPECIAL, tsType, "<init>", //
+              Type.getMethodDescriptor(Type.VOID_TYPE,
+                  new Type[] {Type.LONG_TYPE}));
+
+        } else if (f.getPrimitiveType().isEnum()) {
+          Type et = Type.getType(f.getPrimitiveType());
+          mv.visitMethodInsn(INVOKESTATIC, et.getInternalName(), "values", Type
+              .getMethodDescriptor(Type.getType("[" + et.getDescriptor()),
+                  new Type[] {}));
+          cgs.call("readEnum", Type.INT_TYPE);
+          mv.visitInsn(AALOAD);
 
         } else {
           throw new OrmException("Type " + f.getPrimitiveType()
@@ -661,22 +1165,24 @@
     cgs.fieldSetEnd();
   }
 
-  private static final class SizeofCGS extends CodeGenSupport {
+  private static class SizeofCGS extends CodeGenSupport {
     int sizeVar;
 
-    private SizeofCGS(MethodVisitor method) {
+    SizeofCGS(MethodVisitor method) {
       super(method);
-      sizeVar = newLocal();
     }
 
-    void inc(String name, Type... args) {
+    void doinc(String name, Type... args) {
       mv.visitMethodInsn(INVOKESTATIC, codedOutputStream.getInternalName(),
           name, Type.getMethodDescriptor(Type.INT_TYPE, args));
-      inc();
+      doinc();
     }
 
-    void inc() {
+    void preinc() {
       mv.visitVarInsn(ILOAD, sizeVar);
+    }
+
+    void doinc() {
       mv.visitInsn(IADD);
       mv.visitVarInsn(ISTORE, sizeVar);
     }
@@ -687,15 +1193,13 @@
     }
   }
 
-  private static final class EncodeCGS extends CodeGenSupport {
-    int codedOutputStreamVar;
-
+  private static class EncodeCGS extends SizeofCGS {
     private EncodeCGS(MethodVisitor method) {
       super(method);
     }
 
     void pushCodedOutputStream() {
-      mv.visitVarInsn(ALOAD, codedOutputStreamVar);
+      mv.visitVarInsn(ALOAD, 2);
     }
 
     void write(String name, Type arg) {
@@ -703,22 +1207,15 @@
           name, Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {
               Type.INT_TYPE, arg}));
     }
-
-    @Override
-    public void pushEntity() {
-      mv.visitVarInsn(ALOAD, 1);
-    }
   }
 
-  private static final class DecodeCGS extends CodeGenSupport {
+  private static class DecodeCGS extends CodeGenSupport {
     final int codedInputStreamVar = 1;
-    final int objVar;
-    final int tagVar;
+    int objVar;
+    int tagVar;
 
-    private DecodeCGS(MethodVisitor method) {
+    DecodeCGS(MethodVisitor method) {
       super(method);
-      objVar = newLocal();
-      tagVar = newLocal();
     }
 
     void pushCodedInputStream() {
@@ -741,4 +1238,17 @@
       mv.visitVarInsn(ALOAD, objVar);
     }
   }
+
+  private static class NestedCodec {
+    final String field;
+    final Type codecType;
+    final Type pojoType;
+
+    @SuppressWarnings("unchecked")
+    NestedCodec(String field, Class impl, Type pojoType) {
+      this.field = field;
+      this.codecType = Type.getType(impl);
+      this.pojoType = pojoType;
+    }
+  }
 }
diff --git a/src/main/java/com/google/gwtorm/protobuf/CustomCodec.java b/src/main/java/com/google/gwtorm/protobuf/CustomCodec.java
new file mode 100644
index 0000000..3462a3c
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/protobuf/CustomCodec.java
@@ -0,0 +1,34 @@
+// Copyright 2008 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.protobuf;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Identity of a custom {@link ProtobufCodec} for a {@code Column}.
+ * <p>
+ * Additional annotation tagged onto a {@code Column} field that carries the
+ * name of a custom {@link ProtobufCodec} that should be used to handle that
+ * field. The field data will be treated as an opaque binary sequence, so its
+ * {@link ProtobufCodec#sizeof(Object)} method must be accurate.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.FIELD)
+public @interface CustomCodec {
+  Class<? extends ProtobufCodec<?>> value();
+}
diff --git a/src/main/java/com/google/gwtorm/protobuf/ProtobufCodec.java b/src/main/java/com/google/gwtorm/protobuf/ProtobufCodec.java
index d13e7b3..43202b1 100644
--- a/src/main/java/com/google/gwtorm/protobuf/ProtobufCodec.java
+++ b/src/main/java/com/google/gwtorm/protobuf/ProtobufCodec.java
@@ -17,6 +17,13 @@
 import com.google.gwtorm.client.Column;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
 
 /**
  * Encode and decode an arbitrary Java object as a Protobuf message.
@@ -26,21 +33,240 @@
  */
 public abstract class ProtobufCodec<T> {
   /** Encode the object into an immutable byte string. */
-  public abstract ByteString encode(T obj);
+  public ByteString encodeToByteString(T obj) {
+    return ByteString.copyFrom(encodeToByteBuffer(obj));
+  }
+
+  /** Encode the object into a heap-allocated, flipped byte buffer. */
+  public ByteBuffer encodeToByteBuffer(T obj) {
+    ByteBuffer data = ByteBuffer.allocate(sizeof(obj));
+    encode(obj, data);
+    data.flip();
+    return data;
+  }
+
+  /** Encode the object into a byte array. */
+  public byte[] encodeToByteArray(T obj) {
+    byte[] data = new byte[sizeof(obj)];
+    encode(obj, data);
+    return data;
+  }
+
+  /** Encode the object into a byte array. */
+  public void encode(T obj, final byte[] data) {
+    encode(obj, data, 0, data.length);
+  }
+
+  /** Encode the object into a byte array. */
+  public void encode(T obj, final byte[] data, int offset, int length) {
+    CodedOutputStream out = CodedOutputStream.newInstance(data, offset, length);
+    try {
+      encode(obj, out);
+      out.flush();
+    } catch (IOException err) {
+      throw new RuntimeException("Cannot encode message", err);
+    }
+  }
+
+  /** Encode the object into a ByteBuffer. */
+  public void encode(T obj, ByteBuffer buf) {
+    if (buf.hasArray()) {
+      CodedOutputStream out = CodedOutputStream.newInstance( //
+          buf.array(), //
+          buf.position(), //
+          buf.remaining());
+      try {
+        encode(obj, out);
+        out.flush();
+      } catch (IOException err) {
+        throw new RuntimeException("Cannot encode message", err);
+      }
+      buf.position(buf.position() + (buf.remaining() - out.spaceLeft()));
+
+    } else {
+      CodedOutputStream out = CodedOutputStream.newInstance(newStream(buf));
+      try {
+        encode(obj, out);
+        out.flush();
+      } catch (IOException err) {
+        throw new RuntimeException("Cannot encode message", err);
+      }
+    }
+  }
+
+  /**
+   * Encodes the object, prefixed by its encoded length.
+   * <p>
+   * The length is encoded as a raw varint with no tag.
+   *
+   * @param obj the object to encode.
+   * @param out stream that will receive the object's data.
+   * @throws IOException the stream failed to write data.
+   */
+  public void encodeWithSize(T obj, OutputStream out) throws IOException {
+    CodedOutputStream cos = CodedOutputStream.newInstance(out);
+    cos.writeRawVarint32(sizeof(obj));
+    encode(obj, cos);
+    cos.flush();
+  }
+
+  private static ByteBufferOutputStream newStream(ByteBuffer buf) {
+    return new ByteBufferOutputStream(buf);
+  }
+
+  /**
+   * Encode the object to the supplied output stream.
+   * <p>
+   * The stream {@code out} is not flushed by this method. Callers that need the
+   * entire byte representation after invoking encode must flush the stream to
+   * ensure its intermediate buffers have been written to the backing store.
+   *
+   * @param obj the object to encode.
+   * @param out the stream to encode the object onto.
+   * @throws IOException the underlying stream cannot be written to.
+   */
+  public abstract void encode(T obj, CodedOutputStream out) throws IOException;
 
   /** Compute the number of bytes of the encoded form of the object. */
   public abstract int sizeof(T obj);
 
+  /** Create a new uninitialized instance of the object type. */
+  public abstract T newInstance();
+
   /** Decode a byte string into an object instance. */
   public T decode(ByteString buf) {
-    return decode(buf.newCodedInput());
+    T obj = newInstance();
+    mergeFrom(buf, obj);
+    return obj;
   }
 
-  /** Decode a byte string into an object instance. */
-  public T decode(byte[] buf) {
-    return decode(CodedInputStream.newInstance(buf));
+  /** Decode a byte array into an object instance. */
+  public T decode(byte[] data) {
+    T obj = newInstance();
+    mergeFrom(data, obj);
+    return obj;
   }
 
-  /** Decode an object by reading it from the stream. */
-  protected abstract T decode(CodedInputStream in);
+  /** Decode a byte array into an object instance. */
+  public T decode(byte[] data, int offset, int length) {
+    T obj = newInstance();
+    mergeFrom(data, offset, length, obj);
+    return obj;
+  }
+
+  /** Decode a byte buffer into an object instance. */
+  public T decode(ByteBuffer buf) {
+    T obj = newInstance();
+    mergeFrom(buf, obj);
+    return obj;
+  }
+
+  /**
+   * Decode an object by reading it from the stream.
+   *
+   * @throws IOException the underlying stream cannot be read.
+   */
+  public T decode(CodedInputStream in) throws IOException {
+    T obj = newInstance();
+    mergeFrom(in, obj);
+    return obj;
+  }
+
+  /** Decode an object that is prefixed by its encoded length. */
+  public T decodeWithSize(InputStream in) throws IOException {
+    T obj = newInstance();
+    mergeFromWithSize(in, obj);
+    return obj;
+  }
+
+  /** Decode a byte string into an existing object instance. */
+  public void mergeFrom(ByteString buf, T obj) {
+    try {
+      mergeFrom(buf.newCodedInput(), obj);
+    } catch (IOException err) {
+      throw new RuntimeException("Cannot decode message", err);
+    }
+  }
+
+  /** Decode a byte array into an existing object instance. */
+  public void mergeFrom(byte[] data, T obj) {
+    mergeFrom(data, 0, data.length, obj);
+  }
+
+  /** Decode a byte array into an existing object instance. */
+  public void mergeFrom(byte[] data, int offset, int length, T obj) {
+    try {
+      mergeFrom(CodedInputStream.newInstance(data, offset, length), obj);
+    } catch (IOException err) {
+      throw new RuntimeException("Cannot decode message", err);
+    }
+  }
+
+  /** Decode a byte buffer into an existing object instance. */
+  public void mergeFrom(ByteBuffer buf, T obj) {
+    if (buf.hasArray()) {
+      CodedInputStream in = CodedInputStream.newInstance( //
+          buf.array(), //
+          buf.position(), //
+          buf.remaining());
+      try {
+        mergeFrom(in, obj);
+      } catch (IOException err) {
+        throw new RuntimeException("Cannot decode message", err);
+      }
+      buf.position(buf.position() + in.getTotalBytesRead());
+    } else {
+      mergeFrom(ByteString.copyFrom(buf), obj);
+    }
+  }
+
+  /** Decode an object that is prefixed by its encoded length. */
+  public void mergeFromWithSize(InputStream in, T obj) throws IOException {
+    int sz = readRawVarint32(in);
+    mergeFrom(CodedInputStream.newInstance(new CappedInputStream(in, sz)), obj);
+  }
+
+  /**
+   * Decode an input stream into an existing object instance.
+   *
+   * @throws IOException the underlying stream cannot be read.
+   */
+  public abstract void mergeFrom(CodedInputStream in, T obj) throws IOException;
+
+  private static int readRawVarint32(InputStream in) throws IOException {
+    int b = in.read();
+    if (b == -1) {
+      throw new InvalidProtocolBufferException("Truncated input");
+    }
+
+    if ((b & 0x80) == 0) {
+      return b;
+    }
+
+    int result = b & 0x7f;
+    int offset = 7;
+    for (; offset < 32; offset += 7) {
+      b = in.read();
+      if (b == -1) {
+        throw new InvalidProtocolBufferException("Truncated input");
+      }
+      result |= (b & 0x7f) << offset;
+      if ((b & 0x80) == 0) {
+        return result;
+      }
+    }
+
+    // Keep reading up to 64 bits.
+    for (; offset < 64; offset += 7) {
+      b = in.read();
+      if (b == -1) {
+        throw new InvalidProtocolBufferException("Truncated input");
+      }
+      if ((b & 0x80) == 0) {
+        return result;
+      }
+    }
+
+    throw new InvalidProtocolBufferException("Malformed varint");
+  }
 }
diff --git a/src/main/java/com/google/gwtorm/schema/ColumnModel.java b/src/main/java/com/google/gwtorm/schema/ColumnModel.java
index 58afd5d..72bf017 100644
--- a/src/main/java/com/google/gwtorm/schema/ColumnModel.java
+++ b/src/main/java/com/google/gwtorm/schema/ColumnModel.java
@@ -17,6 +17,7 @@
 import com.google.gwtorm.client.Column;
 import com.google.gwtorm.client.OrmException;
 
+import java.lang.reflect.Type;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -56,11 +57,6 @@
 
   protected void initNestedColumns(final Collection<? extends ColumnModel> col)
       throws OrmException {
-    if (col == null || col.isEmpty()) {
-      throw new OrmException("Field " + getPathToFieldName()
-          + " has no nested members inside type " + getNestedClassName());
-    }
-
     nestedColumns = new ArrayList<ColumnModel>(col);
     recomputeColumnNames();
     if (!isNotNull()) {
@@ -147,7 +143,7 @@
   }
 
   public boolean isNested() {
-    return getPrimitiveType() == null;
+    return !isCollection() && getPrimitiveType() == null;
   }
 
   public boolean isRowVersion() {
@@ -162,10 +158,14 @@
     return notNull;
   }
 
+  public abstract boolean isCollection();
+
   public abstract String getFieldName();
 
   public abstract Class<?> getPrimitiveType();
 
+  public abstract Type[] getArgumentTypes();
+
   public abstract String getNestedClassName();
 
   @Override
diff --git a/src/main/java/com/google/gwtorm/schema/QueryModel.java b/src/main/java/com/google/gwtorm/schema/QueryModel.java
index c24bc2b..f557a0f 100644
--- a/src/main/java/com/google/gwtorm/schema/QueryModel.java
+++ b/src/main/java/com/google/gwtorm/schema/QueryModel.java
@@ -31,24 +31,31 @@
 public class QueryModel {
   private final RelationModel model;
   private final String name;
-  private final Query query;
   private final Tree parsedQuery;
 
   public QueryModel(final RelationModel rel, final String queryName,
       final Query q) throws OrmException {
+    this(rel, queryName, queryTextOf(queryName, q));
+  }
+
+  private static String queryTextOf(String queryName, Query q)
+      throws OrmException {
     if (q == null) {
       throw new OrmException("Query " + queryName + " is missing "
           + Query.class.getName() + " annotation");
     }
+    return q.value();
+  }
 
+  public QueryModel(final RelationModel rel, final String queryName,
+      final String queryText) throws OrmException {
     model = rel;
     name = queryName;
-    query = q;
 
     try {
-      parsedQuery = QueryParser.parse(model, q.value());
+      parsedQuery = QueryParser.parse(model, queryText);
     } catch (QueryParseException e) {
-      throw new OrmException("Cannot parse query " + q.value(), e);
+      throw new OrmException("Cannot parse query " + queryText, e);
     }
   }
 
@@ -68,6 +75,20 @@
     return r;
   }
 
+  public List<ColumnModel> getOrderBy() {
+    final ArrayList<ColumnModel> r = new ArrayList<ColumnModel>();
+    if (parsedQuery != null) {
+      Tree node = findOrderBy(parsedQuery);
+      if (node != null) {
+        for (int i = 0; i < node.getChildCount(); i++) {
+          final Tree id = node.getChild(i);
+          r.add(((QueryParser.Column) id).getField());
+        }
+      }
+    }
+    return r;
+  }
+
   private void findParameters(final List<ColumnModel> r, final Tree node) {
     switch (node.getType()) {
       case QueryParser.WHERE:
@@ -89,7 +110,6 @@
       case QueryParser.GT:
       case QueryParser.GE:
       case QueryParser.EQ:
-      case QueryParser.NE:
         if (node.getChild(1).getType() == QueryParser.PLACEHOLDER) {
           r.add(((QueryParser.Column) node.getChild(0)).getField());
         }
@@ -103,6 +123,14 @@
     }
   }
 
+  public boolean hasWhere() {
+    return findWhere(parsedQuery) != null;
+  }
+
+  public boolean hasOrderBy() {
+    return findOrderBy(parsedQuery) != null;
+  }
+
   public boolean hasLimit() {
     return findLimit(parsedQuery) != null;
   }
@@ -117,6 +145,24 @@
     return Integer.parseInt(findLimit(parsedQuery).getChild(0).getText());
   }
 
+  private Tree findWhere(final Tree node) {
+    if (node == null) {
+      return null;
+    }
+    switch (node.getType()) {
+      case QueryParser.WHERE:
+        return node;
+      default:
+        for (int i = 0; i < node.getChildCount(); i++) {
+          final Tree r = findWhere(node.getChild(i));
+          if (r != null) {
+            return r;
+          }
+        }
+        return null;
+    }
+  }
+
   private Tree findLimit(final Tree node) {
     if (node == null) {
       return null;
@@ -135,6 +181,24 @@
     }
   }
 
+  private Tree findOrderBy(final Tree node) {
+    if (node == null) {
+      return null;
+    }
+    switch (node.getType()) {
+      case QueryParser.ORDER:
+        return node;
+      default:
+        for (int i = 0; i < node.getChildCount(); i++) {
+          final Tree r = findOrderBy(node.getChild(i));
+          if (r != null) {
+            return r;
+          }
+        }
+        return null;
+    }
+  }
+
   public String getSelectSql(final SqlDialect dialect, final String tableAlias) {
     final StringBuilder buf = new StringBuilder();
     buf.append(model.getSelectSql(dialect, tableAlias));
@@ -181,11 +245,6 @@
         fmt.buf.append(node.getText());
         format(fmt, node.getChild(1));
         break;
-      case QueryParser.NE:
-        format(fmt, node.getChild(0));
-        fmt.buf.append("<>");
-        format(fmt, node.getChild(1));
-        break;
 
       case QueryParser.ID: {
         final ColumnModel col = ((QueryParser.Column) node).getField();
@@ -220,8 +279,7 @@
       case QueryParser.ORDER:
         fmt.buf.append(" ORDER BY ");
         for (int i = 0; i < node.getChildCount(); i++) {
-          final Tree sortOrder = node.getChild(i);
-          final Tree id = sortOrder.getChild(0);
+          final Tree id = node.getChild(i);
           if (i > 0) {
             fmt.buf.append(',');
           }
@@ -232,9 +290,6 @@
               fmt.buf.append(fmt.tableAlias);
               fmt.buf.append('.');
               fmt.buf.append(cItr.next().getColumnName());
-              if (sortOrder.getType() == QueryParser.DESC) {
-                fmt.buf.append(" DESC");
-              }
               if (cItr.hasNext()) {
                 fmt.buf.append(',');
               }
@@ -243,9 +298,6 @@
             fmt.buf.append(fmt.tableAlias);
             fmt.buf.append('.');
             fmt.buf.append(col.getColumnName());
-            if (sortOrder.getType() == QueryParser.DESC) {
-              fmt.buf.append(" DESC");
-            }
           }
         }
         break;
@@ -267,7 +319,7 @@
 
   @Override
   public String toString() {
-    return "Query[" + name + " " + query.value() + "]";
+    return "Query[" + name + " " + getParseTree().toStringTree() + "]";
   }
 
   private Tree expand(final Tree node) {
@@ -276,8 +328,7 @@
       case QueryParser.LE:
       case QueryParser.GT:
       case QueryParser.GE:
-      case QueryParser.EQ:
-      case QueryParser.NE: {
+      case QueryParser.EQ: {
         final Column qpc = (QueryParser.Column) node.getChild(0);
         final ColumnModel f = qpc.getField();
         if (f.isNested()) {
diff --git a/src/main/java/com/google/gwtorm/schema/RelationModel.java b/src/main/java/com/google/gwtorm/schema/RelationModel.java
index 85c440f..f8060ce 100644
--- a/src/main/java/com/google/gwtorm/schema/RelationModel.java
+++ b/src/main/java/com/google/gwtorm/schema/RelationModel.java
@@ -104,7 +104,13 @@
     }
   }
 
-  protected void addQuery(final QueryModel q) {
+  protected void addQuery(final QueryModel q) throws OrmException {
+    for (QueryModel e : queries) {
+      if (e.getName().equals(q.getName())) {
+        throw new OrmException("Duplicate query " + q.getName() //
+            + " in " + getAccessInterfaceName());
+      }
+    }
     queries.add(q);
   }
 
@@ -117,6 +123,10 @@
     return relationName;
   }
 
+  public int getRelationID() {
+    return relation.id();
+  }
+
   public Collection<ColumnModel> getDependentFields() {
     final ArrayList<ColumnModel> r = new ArrayList<ColumnModel>();
     for (final ColumnModel c : fieldsByFieldName.values()) {
@@ -342,6 +352,7 @@
     r.append("Relation[\n");
     r.append("  method: " + getMethodName() + "\n");
     r.append("  table:  " + getRelationName() + "\n");
+    r.append("  id:     " + getRelationID() + "\n");
     r.append("  access: " + getAccessInterfaceName() + "\n");
     r.append("  entity: " + getEntityTypeClassName() + "\n");
     r.append("]");
diff --git a/src/main/java/com/google/gwtorm/schema/Util.java b/src/main/java/com/google/gwtorm/schema/Util.java
index ff34f0a..1ca5e5d 100644
--- a/src/main/java/com/google/gwtorm/schema/Util.java
+++ b/src/main/java/com/google/gwtorm/schema/Util.java
@@ -67,7 +67,7 @@
     if (type == null || type == Void.TYPE) {
       return false;
     }
-    if (type.isPrimitive()) {
+    if (type.isPrimitive() || type.isEnum()) {
       return true;
     }
     if (type == String.class) {
diff --git a/src/main/java/com/google/gwtorm/schema/java/JavaColumnModel.java b/src/main/java/com/google/gwtorm/schema/java/JavaColumnModel.java
index 984da77..9aebf5b 100644
--- a/src/main/java/com/google/gwtorm/schema/java/JavaColumnModel.java
+++ b/src/main/java/com/google/gwtorm/schema/java/JavaColumnModel.java
@@ -22,16 +22,25 @@
 
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
 import java.util.ArrayList;
 import java.util.List;
 
 
 public class JavaColumnModel extends ColumnModel {
   private final Field field;
+  private final String fieldName;
+  private final Class<?> primitiveType;
+  private final Type genericType;
 
-  public JavaColumnModel(final Field columnField) throws OrmException {
-    field = columnField;
-    initName(field.getName(), field.getAnnotation(Column.class));
+  public JavaColumnModel(final Field f) throws OrmException {
+    field = f;
+    fieldName = field.getName();
+    primitiveType = field.getType();
+    genericType = field.getGenericType();
+
+    initName(fieldName, field.getAnnotation(Column.class));
 
     if (Modifier.isPrivate(field.getModifiers())) {
       throw new OrmException("Field " + field.getName() + " of "
@@ -48,9 +57,24 @@
           + field.getDeclaringClass().getName() + " must have type 'int'");
     }
 
+    initNested();
+  }
+
+  public JavaColumnModel(Field f, final String fieldPath, final int columnId,
+      final Class<?> columnType) throws OrmException {
+    this.field = f;
+    this.fieldName = fieldPath;
+    this.columnName = fieldPath;
+    this.columnId = columnId;
+    this.primitiveType = columnType;
+    this.genericType = null;
+    initNested();
+  }
+
+  private void initNested() throws OrmException {
     if (isNested()) {
       final List<JavaColumnModel> col = new ArrayList<JavaColumnModel>();
-      Class<?> in = field.getType();
+      Class<?> in = primitiveType;
       while (in != null) {
         for (final Field f : in.getDeclaredFields()) {
           if (f.getAnnotation(Column.class) != null) {
@@ -65,24 +89,42 @@
 
   @Override
   public String getFieldName() {
-    return field.getName();
+    return fieldName;
   }
 
   @Override
   public Class<?> getPrimitiveType() {
-    return isPrimitive() ? field.getType() : null;
+    return isPrimitive() ? primitiveType : null;
+  }
+
+  @Override
+  public Type[] getArgumentTypes() {
+    if (genericType instanceof ParameterizedType) {
+      ParameterizedType pt = (ParameterizedType) genericType;
+      return pt.getActualTypeArguments();
+    }
+    return new Type[0];
   }
 
   @Override
   public String getNestedClassName() {
-    return isPrimitive() ? null : field.getType().getName();
+    return isPrimitive() ? null : primitiveType.getName();
+  }
+
+  @Override
+  public boolean isCollection() {
+    return java.util.Collection.class.isAssignableFrom(primitiveType);
   }
 
   public Class<?> getNestedClass() {
-    return field.getType();
+    return primitiveType;
+  }
+
+  public Field getField() {
+    return field;
   }
 
   private boolean isPrimitive() {
-    return Util.isSqlPrimitive(field.getType());
+    return Util.isSqlPrimitive(primitiveType);
   }
 }
diff --git a/src/main/java/com/google/gwtorm/schema/java/JavaSchemaModel.java b/src/main/java/com/google/gwtorm/schema/java/JavaSchemaModel.java
index 53eabc9..390ef3e 100644
--- a/src/main/java/com/google/gwtorm/schema/java/JavaSchemaModel.java
+++ b/src/main/java/com/google/gwtorm/schema/java/JavaSchemaModel.java
@@ -18,9 +18,11 @@
 import com.google.gwtorm.client.Relation;
 import com.google.gwtorm.client.Schema;
 import com.google.gwtorm.client.Sequence;
+import com.google.gwtorm.schema.RelationModel;
 import com.google.gwtorm.schema.SchemaModel;
 import com.google.gwtorm.schema.SequenceModel;
 
+import java.io.PrintWriter;
 import java.lang.reflect.Method;
 
 
@@ -55,6 +57,20 @@
     }
   }
 
+  public RelationModel getRelation(String name) {
+    for (RelationModel m : getRelations()) {
+      if (m.getMethodName().equals(name)) {
+        return m;
+      }
+    }
+    throw new IllegalArgumentException("No relation named " + name);
+  }
+
+  public void generateProto(PrintWriter out) {
+    ProtoFileGenerator pfg = new ProtoFileGenerator(schema.getSimpleName(), getRelations());
+    pfg.print(out);
+  }
+
   @Override
   public String getSchemaClassName() {
     return schema.getName();
diff --git a/src/main/java/com/google/gwtorm/schema/java/ProtoFileGenerator.java b/src/main/java/com/google/gwtorm/schema/java/ProtoFileGenerator.java
new file mode 100644
index 0000000..66f617e
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/schema/java/ProtoFileGenerator.java
@@ -0,0 +1,209 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.schema.java;
+
+import com.google.gwtorm.client.Column;
+import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.schema.RelationModel;
+
+import org.objectweb.asm.Type;
+
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+
+class ProtoFileGenerator {
+  private static final Comparator<ColumnModel> COLUMN_COMPARATOR =
+      new Comparator<ColumnModel>() {
+        @Override
+        public int compare(ColumnModel o1, ColumnModel o2) {
+          return o1.getColumnID() - o2.getColumnID();
+        }
+      };
+
+  private static final Comparator<RelationModel> RELATION_COMPARATOR =
+      new Comparator<RelationModel>() {
+        @Override
+        public int compare(RelationModel o1, RelationModel o2) {
+          return o1.getRelationID() - o2.getRelationID();
+        }
+      };
+
+  private final Collection<RelationModel> rels;
+  private final String schemaName;
+  private final HashSet<String> seen;
+  private final HashSet<String> collisions;
+
+  ProtoFileGenerator(String schemaName, Collection<RelationModel> relations) {
+    this.schemaName = schemaName;
+    this.rels = relations;
+    this.seen = new HashSet<String>();
+    this.collisions = new HashSet<String>();
+  }
+
+  void print(PrintWriter out) {
+    seen.clear();
+    collisions.clear();
+
+    for (RelationModel r : rels) {
+      for (ColumnModel c : r.getColumns()) {
+        if (c.isNested()) {
+          String type = getShortClassName(c);
+          if (seen.contains(type)) {
+            collisions.add(type);
+          } else {
+            seen.add(type);
+          }
+        }
+      }
+    }
+
+    seen.clear();
+    for (RelationModel r : rels) {
+      generateMessage(r, out);
+    }
+
+    out.print("message " + schemaName + " {\n");
+
+    for (RelationModel r : sortRelations(rels)) {
+      out.print("\toptional " + getMessageName(r) + " "
+          + r.getRelationName().toLowerCase() + " = " + r.getRelationID()
+          + ";\n");
+    }
+
+    out.print("}\n");
+  }
+
+  private void generateMessage(RelationModel rel, PrintWriter out) {
+    List<ColumnModel> cols = sortColumns(rel.getFields());
+    for (ColumnModel c : cols) {
+      generateMessage(c, out);
+    }
+
+    out.print("message " + getMessageName(rel) + " {\n");
+    for (ColumnModel c : cols) {
+      out.append("\toptional " + getType(c) + " " + getName(c) + " = "
+          + c.getColumnID() + ";\n");
+    }
+    out.print("}\n\n");
+  }
+
+  private void generateMessage(ColumnModel parent, PrintWriter out) {
+    // Handle base cases
+    if (!parent.isNested()) {
+      return;
+    } else if (seen.contains(parent.getNestedClassName())) {
+      return;
+    }
+
+    List<ColumnModel> children = sortColumns(parent.getNestedColumns());
+    for (ColumnModel child : children) {
+      generateMessage(child, out);
+    }
+
+    out.print("message " + getType(parent) + " {\n");
+    for (ColumnModel child : children) {
+      out.append("\toptional " + getType(child) + " " + getName(child) + " = "
+          + child.getColumnID() + ";\n");
+    }
+    out.print("}\n\n");
+
+    seen.add(parent.getNestedClassName());
+  }
+
+  private String getType(ColumnModel cm) {
+    if (cm.isNested()) {
+      String type = getShortClassName(cm);
+      if (collisions.contains(type)) {
+        return cm.getNestedClassName().replace('.', '_').replace('$', '_');
+      } else {
+        return type;
+      }
+    } else {
+      return toProtoType(cm.getPrimitiveType());
+    }
+  }
+
+  private static String getName(ColumnModel cm) {
+    if (cm.getColumnName().equals(Column.NONE)) {
+      return cm.getFieldName();
+    } else {
+      return cm.getColumnName();
+    }
+  }
+
+  private static String getShortClassName(ColumnModel cm) {
+    String tmp = cm.getNestedClassName();
+    return tmp.substring(tmp.lastIndexOf('.') + 1).replace('$', '_');
+  }
+
+  private static String getMessageName(RelationModel r) {
+    String typeName = r.getEntityTypeClassName();
+    return typeName.substring(typeName.lastIndexOf('.') + 1);
+  }
+
+  private static List<ColumnModel> sortColumns(Collection<ColumnModel> cols) {
+    ArrayList<ColumnModel> list = new ArrayList<ColumnModel>(cols);
+    Collections.sort(list, COLUMN_COMPARATOR);
+    return list;
+  }
+
+  private static List<RelationModel> sortRelations(
+      Collection<RelationModel> rels) {
+    ArrayList<RelationModel> list = new ArrayList<RelationModel>(rels);
+    Collections.sort(list, RELATION_COMPARATOR);
+    return list;
+  }
+
+  private static String toProtoType(Class<?> clazz) {
+    switch (Type.getType(clazz).getSort()) {
+      case Type.BOOLEAN:
+        return "bool";
+      case Type.CHAR:
+        return "uint32";
+      case Type.BYTE:
+      case Type.SHORT:
+      case Type.INT:
+        return "sint32";
+      case Type.FLOAT:
+        return "float";
+      case Type.DOUBLE:
+        return "double";
+      case Type.LONG:
+        return "sint64";
+      case Type.ARRAY:
+      case Type.OBJECT: {
+        if (clazz == byte[].class) {
+          return "bytes";
+        } else if (clazz == String.class) {
+          return "string";
+        } else if (clazz == java.sql.Timestamp.class) {
+          return "fixed64";
+        } else {
+          throw new RuntimeException("Type " + clazz
+              + " not supported on protobuf!");
+        }
+      }
+
+      default:
+        throw new RuntimeException("Type " + clazz
+            + " not supported on protobuf!");
+    }
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/schema/sql/DialectH2.java b/src/main/java/com/google/gwtorm/schema/sql/DialectH2.java
index 5d0fa59..5277459 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/DialectH2.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/DialectH2.java
@@ -47,7 +47,7 @@
   }
 
   @Override
-  public String getNextSequenceValueSql(final String seqname) {
+  protected String getNextSequenceValueSql(final String seqname) {
     return "SELECT NEXT VALUE FOR " + seqname;
   }
 
diff --git a/src/main/java/com/google/gwtorm/schema/sql/DialectMySQL.java b/src/main/java/com/google/gwtorm/schema/sql/DialectMySQL.java
index fed1c61..86afa6a 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/DialectMySQL.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/DialectMySQL.java
@@ -96,7 +96,7 @@
   }
 
   @Override
-  public String getNextSequenceValueSql(final String seqname) {
+  protected String getNextSequenceValueSql(final String seqname) {
     return seqname;
   }
 
diff --git a/src/main/java/com/google/gwtorm/schema/sql/DialectPostgreSQL.java b/src/main/java/com/google/gwtorm/schema/sql/DialectPostgreSQL.java
index 32ea333..5a469ec 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/DialectPostgreSQL.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/DialectPostgreSQL.java
@@ -67,7 +67,7 @@
   }
 
   @Override
-  public String getNextSequenceValueSql(final String seqname) {
+  protected String getNextSequenceValueSql(final String seqname) {
     return "SELECT nextval('" + seqname + "')";
   }
 
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlBooleanTypeInfo.java b/src/main/java/com/google/gwtorm/schema/sql/SqlBooleanTypeInfo.java
index afea6b3..57f8fe8 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlBooleanTypeInfo.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlBooleanTypeInfo.java
@@ -14,8 +14,8 @@
 
 package com.google.gwtorm.schema.sql;
 
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
 import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
 
 import org.objectweb.asm.Label;
 import org.objectweb.asm.Opcodes;
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlByteArrayTypeInfo.java b/src/main/java/com/google/gwtorm/schema/sql/SqlByteArrayTypeInfo.java
index 12ed53a..eb0f840 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlByteArrayTypeInfo.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlByteArrayTypeInfo.java
@@ -14,8 +14,8 @@
 
 package com.google.gwtorm.schema.sql;
 
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
 import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
 
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.Type;
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlCharTypeInfo.java b/src/main/java/com/google/gwtorm/schema/sql/SqlCharTypeInfo.java
index 890600f..510993a 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlCharTypeInfo.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlCharTypeInfo.java
@@ -14,8 +14,8 @@
 
 package com.google.gwtorm.schema.sql;
 
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
 import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
 
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.Type;
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlDialect.java b/src/main/java/com/google/gwtorm/schema/sql/SqlDialect.java
index e1ea7c3..cb1e77d 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlDialect.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlDialect.java
@@ -150,8 +150,9 @@
     return new OrmException(op + " failure on " + entity, err);
   }
 
-  public long nextLong(final Connection conn, final String query)
+  public long nextLong(final Connection conn, final String poolName)
       throws OrmException {
+    final String query = getNextSequenceValueSql(poolName);
     try {
       final Statement st = conn.createStatement();
       try {
@@ -325,5 +326,5 @@
   public abstract void renameColumn(StatementExecutor e, String tableName,
       String fromColumn, ColumnModel col) throws OrmException;
 
-  public abstract String getNextSequenceValueSql(String seqname);
+  protected abstract String getNextSequenceValueSql(String seqname);
 }
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlStringTypeInfo.java b/src/main/java/com/google/gwtorm/schema/sql/SqlStringTypeInfo.java
index 2ca7522..2c32f1e 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlStringTypeInfo.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlStringTypeInfo.java
@@ -15,8 +15,8 @@
 package com.google.gwtorm.schema.sql;
 
 import com.google.gwtorm.client.Column;
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
 import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
 
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.Type;
diff --git a/src/main/java/com/google/gwtorm/schema/sql/SqlTypeInfo.java b/src/main/java/com/google/gwtorm/schema/sql/SqlTypeInfo.java
index ba9064e..f238541 100644
--- a/src/main/java/com/google/gwtorm/schema/sql/SqlTypeInfo.java
+++ b/src/main/java/com/google/gwtorm/schema/sql/SqlTypeInfo.java
@@ -14,8 +14,8 @@
 
 package com.google.gwtorm.schema.sql;
 
-import com.google.gwtorm.jdbc.gen.CodeGenSupport;
 import com.google.gwtorm.schema.ColumnModel;
+import com.google.gwtorm.server.CodeGenSupport;
 
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.Type;
diff --git a/src/main/java/com/google/gwtorm/server/AbstractSchema.java b/src/main/java/com/google/gwtorm/server/AbstractSchema.java
new file mode 100644
index 0000000..11a934c
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/server/AbstractSchema.java
@@ -0,0 +1,36 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.server;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+
+/** Base implementation any generated schema must implement. */
+public abstract class AbstractSchema implements Schema {
+  /**
+   * Obtain the next unique value from a pool of available numbers.
+   * <p>
+   * Frequently the next number will be just an increment of a global counter,
+   * but may be spread across multiple counter ranges to increase concurrency.
+   *
+   * @param poolName unique name of the counter within the schema. The
+   *        underlying storage system should use this to identify the counter
+   *        pool to obtain the next value from.
+   * @return a new unique value.
+   * @throws OrmException a value cannot be reserved for the caller, or the pool
+   *         has been exhausted and no new values are available.
+   */
+  protected abstract long nextLong(String poolName) throws OrmException;
+}
diff --git a/src/main/java/com/google/gwtorm/jdbc/gen/CodeGenSupport.java b/src/main/java/com/google/gwtorm/server/CodeGenSupport.java
similarity index 98%
rename from src/main/java/com/google/gwtorm/jdbc/gen/CodeGenSupport.java
rename to src/main/java/com/google/gwtorm/server/CodeGenSupport.java
index 2d78ccc..71756ee 100644
--- a/src/main/java/com/google/gwtorm/jdbc/gen/CodeGenSupport.java
+++ b/src/main/java/com/google/gwtorm/server/CodeGenSupport.java
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package com.google.gwtorm.jdbc.gen;
+package com.google.gwtorm.server;
 
 import com.google.gwtorm.schema.ColumnModel;
 
@@ -197,7 +197,7 @@
     return Type.getObjectType(n.replace('.', '/'));
   }
 
-  static Type toType(final ColumnModel c) {
+  public static Type toType(final ColumnModel c) {
     if (c.isSqlPrimitive()) {
       return Type.getType(c.getPrimitiveType());
     }
diff --git a/src/main/java/com/google/gwtorm/jdbc/gen/GeneratedClassLoader.java b/src/main/java/com/google/gwtorm/server/GeneratedClassLoader.java
similarity index 98%
rename from src/main/java/com/google/gwtorm/jdbc/gen/GeneratedClassLoader.java
rename to src/main/java/com/google/gwtorm/server/GeneratedClassLoader.java
index d56d9e0..5bfdc02 100644
--- a/src/main/java/com/google/gwtorm/jdbc/gen/GeneratedClassLoader.java
+++ b/src/main/java/com/google/gwtorm/server/GeneratedClassLoader.java
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package com.google.gwtorm.jdbc.gen;
+package com.google.gwtorm.server;
 
 import com.google.gwtorm.client.OrmException;
 
diff --git a/src/main/java/com/google/gwtorm/server/SchemaConstructorGen.java b/src/main/java/com/google/gwtorm/server/SchemaConstructorGen.java
new file mode 100644
index 0000000..1761348
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/server/SchemaConstructorGen.java
@@ -0,0 +1,143 @@
+// Copyright 2008 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.server;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.client.SchemaFactory;
+import com.google.gwtorm.schema.Util;
+
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+
+/** Generates a factory to efficiently create new Schema instances. */
+public class SchemaConstructorGen<T extends Schema> implements Opcodes {
+  private static final String CTX = "schemaArg";
+
+  private final GeneratedClassLoader classLoader;
+  private final Class<T> schemaImpl;
+  private final Object schemaArg;
+  private ClassWriter cw;
+  private String implClassName;
+  private String implTypeName;
+
+  public SchemaConstructorGen(final GeneratedClassLoader loader,
+      final Class<T> c, final Object f) {
+    classLoader = loader;
+    schemaImpl = c;
+    schemaArg = f;
+  }
+
+  public void defineClass() throws OrmException {
+    init();
+    declareFactoryField();
+    implementConstructor();
+    implementNewInstance();
+    cw.visitEnd();
+    classLoader.defineClass(implClassName, cw.toByteArray());
+  }
+
+
+  public SchemaFactory<T> create() throws OrmException {
+    defineClass();
+    try {
+      final Class<?> c = Class.forName(implClassName, true, classLoader);
+      final Constructor<?> n = c.getDeclaredConstructors()[0];
+      return cast(n.newInstance(new Object[] {schemaArg}));
+    } catch (InstantiationException e) {
+      throw new OrmException("Cannot create schema factory", e);
+    } catch (IllegalAccessException e) {
+      throw new OrmException("Cannot create schema factory", e);
+    } catch (ClassNotFoundException e) {
+      throw new OrmException("Cannot create schema factory", e);
+    } catch (IllegalArgumentException e) {
+      throw new OrmException("Cannot create schema factory", e);
+    } catch (InvocationTargetException e) {
+      throw new OrmException("Cannot create schema factory", e);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private SchemaFactory<T> cast(final Object newInstance) {
+    return (SchemaFactory<T>) newInstance;
+  }
+
+  private void init() {
+    implClassName =
+        schemaImpl.getName() + "_Factory_" + Util.createRandomName();
+    implTypeName = implClassName.replace('.', '/');
+
+    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null, Type
+        .getInternalName(Object.class), new String[] {Type
+        .getInternalName(SchemaFactory.class)});
+  }
+
+  private void declareFactoryField() {
+    cw.visitField(ACC_PRIVATE | ACC_FINAL, CTX,
+        Type.getType(schemaArg.getClass()).getDescriptor(), null, null)
+        .visitEnd();
+  }
+
+  private void implementConstructor() {
+    final Type ft = Type.getType(schemaArg.getClass());
+    final String consName = "<init>";
+    final String consDesc =
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {ft});
+    final MethodVisitor mv;
+    mv = cw.visitMethod(ACC_PUBLIC, consName, consDesc, null, null);
+    mv.visitCode();
+
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitMethodInsn(INVOKESPECIAL, Type.getInternalName(Object.class),
+        consName, Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {}));
+
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitFieldInsn(PUTFIELD, implTypeName, CTX, ft.getDescriptor());
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  private void implementNewInstance() {
+    final Type ft = Type.getType(schemaArg.getClass());
+    final String typeName = Type.getType(schemaImpl).getInternalName();
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "open", Type
+            .getMethodDescriptor(Type.getType(Schema.class), new Type[] {}),
+            null, null);
+    mv.visitCode();
+
+    Constructor<?> c = schemaImpl.getDeclaredConstructors()[0];
+    Type argType = Type.getType(c.getParameterTypes()[0]);
+
+    mv.visitTypeInsn(NEW, typeName);
+    mv.visitInsn(DUP);
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitFieldInsn(GETFIELD, implTypeName, CTX, ft.getDescriptor());
+    mv.visitMethodInsn(INVOKESPECIAL, typeName, "<init>", Type
+        .getMethodDescriptor(Type.VOID_TYPE, new Type[] {argType}));
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+}
diff --git a/src/main/java/com/google/gwtorm/server/SchemaGen.java b/src/main/java/com/google/gwtorm/server/SchemaGen.java
new file mode 100644
index 0000000..24854bf
--- /dev/null
+++ b/src/main/java/com/google/gwtorm/server/SchemaGen.java
@@ -0,0 +1,268 @@
+// Copyright 2008 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.server;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.client.Schema;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.SchemaModel;
+import com.google.gwtorm.schema.SequenceModel;
+import com.google.gwtorm.schema.Util;
+
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+/** Generates a concrete implementation of a {@link Schema} extension. */
+public class SchemaGen<S extends AbstractSchema> implements Opcodes {
+  public interface AccessGenerator {
+    Class<?> create(GeneratedClassLoader loader, RelationModel rm)
+        throws OrmException;
+  }
+
+  private final GeneratedClassLoader classLoader;
+  private final SchemaModel schema;
+  private final Class<?> databaseClass;
+  private final Class<S> schemaSuperClass;
+  private final AccessGenerator accessGen;
+  private List<RelationGen> relations;
+  private ClassWriter cw;
+  private String implClassName;
+  private String implTypeName;
+
+  public SchemaGen(final GeneratedClassLoader loader,
+      final SchemaModel schemaModel, final Class<?> databaseType,
+      final Class<S> superType, final AccessGenerator ag) {
+    classLoader = loader;
+    schema = schemaModel;
+    databaseClass = databaseType;
+    schemaSuperClass = superType;
+    accessGen = ag;
+  }
+
+  public Class<Schema> create() throws OrmException {
+    defineRelationClasses();
+
+    init();
+    implementRelationFields();
+    implementConstructor();
+    implementSequenceMethods();
+    implementRelationMethods();
+    implementAllRelationsMethod();
+
+    cw.visitEnd();
+    classLoader.defineClass(getImplClassName(), cw.toByteArray());
+    return loadClass();
+  }
+
+  @SuppressWarnings("unchecked")
+  private Class<Schema> loadClass() throws OrmException {
+    try {
+      final Class<?> c = Class.forName(getImplClassName(), false, classLoader);
+      return (Class<Schema>) c;
+    } catch (ClassNotFoundException err) {
+      throw new OrmException("Cannot load generated class", err);
+    }
+  }
+
+  String getSchemaClassName() {
+    return schema.getSchemaClassName();
+  }
+
+  String getImplClassName() {
+    return implClassName;
+  }
+
+  String getImplTypeName() {
+    return implTypeName;
+  }
+
+  private void defineRelationClasses() throws OrmException {
+    relations = new ArrayList<RelationGen>();
+    for (final RelationModel rel : schema.getRelations()) {
+      final Class<?> a = accessGen.create(classLoader, rel);
+      relations.add(new RelationGen(rel, a));
+    }
+
+    Collections.sort(relations, new Comparator<RelationGen>() {
+      @Override
+      public int compare(RelationGen a, RelationGen b) {
+        int cmp = a.model.getRelationID() - b.model.getRelationID();
+        if (cmp == 0) {
+          cmp = a.model.getRelationName().compareTo(b.model.getRelationName());
+        }
+        return cmp;
+      }
+    });
+  }
+
+  private void init() {
+    implClassName = getSchemaClassName() + "_Schema_" + Util.createRandomName();
+    implTypeName = implClassName.replace('.', '/');
+
+    cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+    cw.visit(V1_3, ACC_PUBLIC | ACC_FINAL | ACC_SUPER, implTypeName, null, Type
+        .getInternalName(schemaSuperClass), new String[] {getSchemaClassName()
+        .replace('.', '/')});
+  }
+
+  private void implementRelationFields() {
+    for (final RelationGen info : relations) {
+      info.implementField();
+    }
+  }
+
+  /**
+   * Emit the public constructor: invokes the superclass constructor with the
+   * database argument, then instantiates one Access implementation per
+   * relation and stores it into the matching private field.
+   */
+  private void implementConstructor() {
+    final String consName = "<init>";
+    final Type superType = Type.getType(schemaSuperClass);
+    final Type dbType = Type.getType(databaseClass);
+
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC, consName, Type.getMethodDescriptor(
+            Type.VOID_TYPE, new Type[] {dbType}), null, null);
+    mv.visitCode();
+
+    // super(database);
+    // NOTE(review): assumes getDeclaredConstructors()[0] is the constructor
+    // taking the database argument — the JDK does not guarantee declaration
+    // order here; confirm schemaSuperClass has exactly one constructor.
+    mv.visitVarInsn(ALOAD, 0);
+    mv.visitVarInsn(ALOAD, 1);
+    mv.visitMethodInsn(INVOKESPECIAL, superType.getInternalName(), consName,
+        Type.getMethodDescriptor(Type.VOID_TYPE, new Type[] {Type
+            .getType(schemaSuperClass.getDeclaredConstructors()[0]
+                .getParameterTypes()[0])}));
+
+    // this.access_<relation> = new <AccessImpl>(this);
+    for (final RelationGen info : relations) {
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitTypeInsn(NEW, info.accessType.getInternalName());
+      mv.visitInsn(DUP);
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKESPECIAL, info.accessType.getInternalName(),
+          consName, Type.getMethodDescriptor(Type.VOID_TYPE,
+              new Type[] {superType}));
+      mv.visitFieldInsn(PUTFIELD, implTypeName, info
+          .getAccessInstanceFieldName(), info.accessType.getDescriptor());
+    }
+
+    mv.visitInsn(RETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /**
+   * Emit one method per declared sequence. Each delegates to the superclass
+   * {@code nextLong(String)} with the sequence name, narrowing the long
+   * result with L2I when the declared return type occupies a single stack
+   * slot (i.e. an int-like type).
+   */
+  private void implementSequenceMethods() {
+    for (final SequenceModel seq : schema.getSequences()) {
+      final Type retType = Type.getType(seq.getResultType());
+      final MethodVisitor mv =
+          cw
+              .visitMethod(ACC_PUBLIC, seq.getMethodName(), Type
+                  .getMethodDescriptor(retType, new Type[] {}), null,
+                  new String[] {Type.getType(OrmException.class)
+                      .getInternalName()});
+      mv.visitCode();
+
+      // return (ret) nextLong("<sequenceName>");
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitLdcInsn(seq.getSequenceName());
+      mv.visitMethodInsn(INVOKEVIRTUAL, Type.getInternalName(schemaSuperClass),
+          "nextLong", Type.getMethodDescriptor(Type.getType(Long.TYPE),
+              new Type[] {Type.getType(String.class)}));
+      if (retType.getSize() == 1) {
+        mv.visitInsn(L2I);
+        mv.visitInsn(IRETURN);
+      } else {
+        mv.visitInsn(LRETURN);
+      }
+      mv.visitMaxs(-1, -1);
+      mv.visitEnd();
+    }
+  }
+
+  /** Emit one accessor method per relation, returning its Access field. */
+  private void implementRelationMethods() {
+    for (final RelationGen info : relations) {
+      info.implementMethod();
+    }
+  }
+
+  /**
+   * Emit {@code allRelations()}: allocates an {@code Access[]} sized to the
+   * relation count and fills each slot by invoking the corresponding
+   * relation accessor method on {@code this}, preserving the sorted order
+   * established by {@code defineRelationClasses}.
+   */
+  private void implementAllRelationsMethod() {
+    final MethodVisitor mv =
+        cw.visitMethod(ACC_PUBLIC | ACC_FINAL, "allRelations", Type
+            .getMethodDescriptor(Type.getType(Access[].class), new Type[] {}),
+            null, null);
+    mv.visitCode();
+
+    // Local slot 1 holds the result array.
+    final int r = 1;
+    CodeGenSupport cgs = new CodeGenSupport(mv);
+    cgs.push(relations.size());
+    mv.visitTypeInsn(ANEWARRAY, Type.getType(Access.class).getInternalName());
+    mv.visitVarInsn(ASTORE, r);
+
+    int index = 0;
+    for (RelationGen info : relations) {
+      // r[index++] = this.<relationMethod>();
+      mv.visitVarInsn(ALOAD, r);
+      cgs.push(index++);
+
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKEVIRTUAL, getImplTypeName(), info.model
+          .getMethodName(), info.getDescriptor());
+
+      mv.visitInsn(AASTORE);
+    }
+
+    mv.visitVarInsn(ALOAD, r);
+    mv.visitInsn(ARETURN);
+    mv.visitMaxs(-1, -1);
+    mv.visitEnd();
+  }
+
+  /**
+   * Pairs a relation's model with its generated Access implementation and
+   * emits the field and accessor method for it on the enclosing class.
+   */
+  private class RelationGen {
+    final RelationModel model;
+    final Type accessType;
+
+    RelationGen(final RelationModel model, final Class<?> accessClass) {
+      this.model = model;
+      this.accessType = Type.getType(accessClass);
+    }
+
+    /** Declare the private final field that caches the Access instance. */
+    void implementField() {
+      cw.visitField(ACC_PRIVATE | ACC_FINAL, getAccessInstanceFieldName(),
+          accessType.getDescriptor(), null, null).visitEnd();
+    }
+
+    /** @return name of the generated field, e.g. {@code access_people}. */
+    String getAccessInstanceFieldName() {
+      return "access_" + model.getMethodName();
+    }
+
+    /** Emit the relation accessor: simply returns the cached field. */
+    void implementMethod() {
+      final MethodVisitor mv =
+          cw.visitMethod(ACC_PUBLIC | ACC_FINAL, model.getMethodName(),
+              getDescriptor(), null, null);
+      mv.visitCode();
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitFieldInsn(GETFIELD, implTypeName, getAccessInstanceFieldName(),
+          accessType.getDescriptor());
+      mv.visitInsn(ARETURN);
+      mv.visitMaxs(-1, -1);
+      mv.visitEnd();
+    }
+
+    /** @return method descriptor: no args, returns the Access interface. */
+    String getDescriptor() {
+      return Type.getMethodDescriptor(Type.getObjectType(model
+          .getAccessInterfaceName().replace('.', '/')), new Type[] {});
+    }
+  }
+}
diff --git a/src/test/java/com/google/gwtorm/data/PersonAccess.java b/src/test/java/com/google/gwtorm/data/PersonAccess.java
index 7df107e..594d588 100644
--- a/src/test/java/com/google/gwtorm/data/PersonAccess.java
+++ b/src/test/java/com/google/gwtorm/data/PersonAccess.java
@@ -30,10 +30,6 @@
   @Query("WHERE age > ? ORDER BY age")
   ResultSet<TestPerson> olderThan(int age) throws OrmException;
 
-  @Query("WHERE name != ? AND age > ? ORDER BY name DESC")
-  ResultSet<TestPerson> notPerson(TestPerson.Key key, int age)
-      throws OrmException;
-
   @Query("WHERE name = 'bob' LIMIT ?")
   ResultSet<TestPerson> firstNBob(int n) throws OrmException;
 
diff --git a/src/test/java/com/google/gwtorm/data/PhoneBookDb.java b/src/test/java/com/google/gwtorm/data/PhoneBookDb.java
index 15439b8..cf6225c 100644
--- a/src/test/java/com/google/gwtorm/data/PhoneBookDb.java
+++ b/src/test/java/com/google/gwtorm/data/PhoneBookDb.java
@@ -19,10 +19,10 @@
 import com.google.gwtorm.client.Sequence;
 
 public interface PhoneBookDb extends Schema {
-  @Relation
+  @Relation(id = 1)
   PersonAccess people();
 
-  @Relation
+  @Relation(id = 2)
   AddressAccess addresses();
 
   @Sequence
diff --git a/src/test/java/com/google/gwtorm/data/PhoneBookDb2.java b/src/test/java/com/google/gwtorm/data/PhoneBookDb2.java
index 908dc92..9b19bc8 100644
--- a/src/test/java/com/google/gwtorm/data/PhoneBookDb2.java
+++ b/src/test/java/com/google/gwtorm/data/PhoneBookDb2.java
@@ -18,6 +18,6 @@
 import com.google.gwtorm.client.Schema;
 
 public interface PhoneBookDb2 extends Schema {
-  @Relation
+  @Relation(id = 1)
   PersonAccess2 people();
 }
diff --git a/src/test/java/com/google/gwtorm/data/TestPerson.java b/src/test/java/com/google/gwtorm/data/TestPerson.java
index 3f6f3f9..bafeaf9 100644
--- a/src/test/java/com/google/gwtorm/data/TestPerson.java
+++ b/src/test/java/com/google/gwtorm/data/TestPerson.java
@@ -81,4 +81,18 @@
   public void unregister() {
     registered = false;
   }
+
+  /**
+   * Hash by name only. equals() also compares age and registered, but equal
+   * instances always share a name, so the equals/hashCode contract holds.
+   */
+  @Override
+  public int hashCode() {
+    return name.hashCode();
+  }
+
+  /** Value equality over all three persisted fields: name, age, registered. */
+  @Override
+  public boolean equals(Object other) {
+    if (other instanceof TestPerson) {
+      TestPerson p = (TestPerson) other;
+      return name.equals(p.name) && age == p.age && registered == p.registered;
+    }
+    return false;
+  }
 }
diff --git a/src/test/java/com/google/gwtorm/nosql/IndexFunctionTest.java b/src/test/java/com/google/gwtorm/nosql/IndexFunctionTest.java
new file mode 100644
index 0000000..862456a
--- /dev/null
+++ b/src/test/java/com/google/gwtorm/nosql/IndexFunctionTest.java
@@ -0,0 +1,200 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.data.PhoneBookDb;
+import com.google.gwtorm.data.TestPerson;
+import com.google.gwtorm.schema.QueryModel;
+import com.google.gwtorm.schema.RelationModel;
+import com.google.gwtorm.schema.java.JavaSchemaModel;
+import com.google.gwtorm.server.GeneratedClassLoader;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests IndexFunction implementations generated by IndexFunctionGen from
+ * query strings against the PhoneBookDb "people" relation: both the
+ * includes() predicate and the index key bytes written via encode().
+ */
+@SuppressWarnings("unchecked")
+public class IndexFunctionTest extends TestCase {
+  private JavaSchemaModel schema;
+  private RelationModel people;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    schema = new JavaSchemaModel(PhoneBookDb.class);
+    people = schema.getRelation("people");
+  }
+
+  public void testPersonByName() throws Exception {
+    IndexFunction<TestPerson> idx = index("testMyQuery", "WHERE name=?");
+    assertEquals("testMyQuery", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("bob"), 12);
+    assertTrue(idx.includes(p));
+    idx.encode(b, p);
+    // Single-field key: just the raw name bytes.
+    assertEquals(new byte[] {'b', 'o', 'b'}, b);
+  }
+
+  public void testPersonByNameAge() throws Exception {
+    IndexFunction<TestPerson> idx = index("nameAge", "WHERE name=? AND age=?");
+    assertEquals("nameAge", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("hm"), 42);
+    assertTrue(idx.includes(p));
+    idx.encode(b, p);
+    // name bytes, delimiter 0x00 0x01, then varint-style encoded age.
+    assertEquals(new byte[] {'h', 'm', 0x00, 0x01, 0x01, 42}, b);
+
+    // A null name must exclude the row from the index entirely.
+    p = new TestPerson(new TestPerson.Key(null), 0);
+    assertFalse(idx.includes(p));
+
+    // includes() must not depend on any prior builder state.
+    b = new IndexKeyBuilder();
+    assertFalse(idx.includes(p));
+  }
+
+  public void testPersonByNameAge_OrderByName() throws Exception {
+    // ORDER BY a column already in the WHERE clause adds nothing new.
+    IndexFunction<TestPerson> idx =
+        index("nameAge", "WHERE name=? AND age=? ORDER BY name");
+    assertEquals("nameAge", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("qy"), 42);
+    assertTrue(idx.includes(p));
+    idx.encode(b, p);
+    assertEquals(new byte[] {'q', 'y', 0x00, 0x01, 0x01, 42}, b);
+  }
+
+  public void testPersonByNameAge_OrderByRegistered() throws Exception {
+    // ORDER BY on a new column appends it to the key after the WHERE columns.
+    IndexFunction<TestPerson> idx =
+        index("nameAge", "WHERE name=? AND age=? ORDER BY registered");
+    assertEquals("nameAge", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("q"), 42);
+    p.register();
+    assertTrue(idx.includes(p));
+    idx.encode(b, p);
+    assertEquals(new byte[] {'q', 0x00, 0x01, // name
+        0x01, 42, 0x00, 0x01, // age
+        0x01, 0x01 // registered
+        }, b);
+  }
+
+  public void testPersonByNameRange_OrderByName() throws Exception {
+    // Range predicates on the sort column collapse to a single key field.
+    IndexFunction<TestPerson> idx =
+        index("nameSuggest", "WHERE name >= ? AND name <= ? ORDER BY name");
+    assertEquals("nameSuggest", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("q"), 42);
+    assertTrue(idx.includes(p));
+    idx.encode(b, p);
+    assertEquals(new byte[] {'q'}, b);
+  }
+
+  public void testOnlyRegistered() throws Exception {
+    // A constant predicate filters membership but is not part of the key.
+    IndexFunction<TestPerson> idx =
+        index("isregistered", "WHERE registered = true ORDER BY name");
+    assertEquals("isregistered", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("q"), 42);
+    assertFalse(idx.includes(p));
+    p.register();
+    assertTrue(idx.includes(p));
+
+    idx.encode(b, p);
+    assertEquals(new byte[] {'q'}, b);
+  }
+
+  public void testOnlyAge42() throws Exception {
+    IndexFunction<TestPerson> idx =
+        index("isOldEnough", "WHERE age = 42 ORDER BY name");
+    assertEquals("isOldEnough", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("q"), 32);
+    assertFalse(idx.includes(p));
+
+    p = new TestPerson(new TestPerson.Key("q"), 42);
+    assertTrue(idx.includes(p));
+
+    idx.encode(b, p);
+    assertEquals(new byte[] {'q'}, b);
+  }
+
+  public void testOnlyBob() throws Exception {
+    // Constant match on the full key: nothing left to encode into the index.
+    IndexFunction<TestPerson> idx = index("isbob", "WHERE name.name = 'bob'");
+    assertEquals("isbob", idx.getName());
+
+    IndexKeyBuilder b;
+    TestPerson p;
+
+    b = new IndexKeyBuilder();
+    p = new TestPerson(new TestPerson.Key("q"), 42);
+    assertFalse(idx.includes(p));
+
+    p = new TestPerson(new TestPerson.Key("bob"), 42);
+    assertTrue(idx.includes(p));
+
+    idx.encode(b, p);
+    assertEquals(new byte[] {}, b);
+  }
+
+  /** Generate an IndexFunction for the given query over TestPerson. */
+  private IndexFunction<TestPerson> index(String name, String query)
+      throws OrmException {
+    final QueryModel qm = new QueryModel(people, name, query);
+    return new IndexFunctionGen(new GeneratedClassLoader(TestPerson.class
+        .getClassLoader()), qm, TestPerson.class).create();
+  }
+
+  /** Compare expected bytes to the builder's output, hex-encoded so a failure message is readable. */
+  private static void assertEquals(byte[] exp, IndexKeyBuilder ic) {
+    assertEquals(toString(exp), toString(ic.toByteArray()));
+  }
+
+  /** Hex-encode a byte array, two lowercase digits per byte. */
+  private static String toString(byte[] bin) {
+    StringBuilder dst = new StringBuilder(bin.length * 2);
+    for (byte b : bin) {
+      dst.append(hexchar[(b >>> 4) & 0x0f]);
+      dst.append(hexchar[b & 0x0f]);
+    }
+    return dst.toString();
+  }
+
+  private static final char[] hexchar =
+      {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', //
+          'a', 'b', 'c', 'd', 'e', 'f'};
+}
diff --git a/src/test/java/com/google/gwtorm/nosql/IndexKeyBuilderTest.java b/src/test/java/com/google/gwtorm/nosql/IndexKeyBuilderTest.java
new file mode 100644
index 0000000..e0f8028
--- /dev/null
+++ b/src/test/java/com/google/gwtorm/nosql/IndexKeyBuilderTest.java
@@ -0,0 +1,82 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests the byte-level encoding rules of IndexKeyBuilder: integer prefix
+ * encoding, field delimiters, byte-escaping of 0x00/0xff, and the infinity
+ * marker, as observed through toByteArray().
+ */
+public class IndexKeyBuilderTest extends TestCase {
+  public void testInt() {
+    IndexKeyBuilder ib;
+
+    // Integers are length-prefixed: leading byte is the magnitude byte count.
+    ib = new IndexKeyBuilder();
+    ib.add(0);
+    assertEquals(new byte[] {0x00}, ib);
+
+    ib = new IndexKeyBuilder();
+    ib.add(1);
+    assertEquals(new byte[] {0x01, 0x01}, ib);
+
+    ib = new IndexKeyBuilder();
+    ib.add(256);
+    assertEquals(new byte[] {0x02, 0x01, 0x00}, ib);
+  }
+
+  public void testDelimiter() {
+    IndexKeyBuilder ib = new IndexKeyBuilder();
+    ib.delimiter();
+    assertEquals(new byte[] {0x00, 0x01}, ib);
+  }
+
+  public void testStringASCII() {
+    // Plain ASCII passes through unmodified.
+    IndexKeyBuilder ib = new IndexKeyBuilder();
+    ib.add("hi");
+    assertEquals(new byte[] {'h', 'i'}, ib);
+  }
+
+  public void testStringNUL() {
+    // NUL is escaped as 0x00 0xff so it cannot collide with the delimiter.
+    IndexKeyBuilder ib = new IndexKeyBuilder();
+    ib.add("\0");
+    assertEquals(new byte[] {0x00, (byte) 0xff}, ib);
+  }
+
+  public void testStringFF() {
+    // 0xff is escaped as 0xff 0x00 so it sorts below the infinity marker.
+    IndexKeyBuilder ib = new IndexKeyBuilder();
+    ib.add(new byte[] {(byte) 0xff});
+    assertEquals(new byte[] {(byte) 0xff, 0x00}, ib);
+  }
+
+  public void testInfinity() {
+    IndexKeyBuilder ib = new IndexKeyBuilder();
+    ib.infinity();
+    assertEquals(new byte[] {(byte) 0xff, (byte) 0xff}, ib);
+  }
+
+  /** Compare expected bytes to the builder's output, hex-encoded so a failure message is readable. */
+  private static void assertEquals(byte[] exp, IndexKeyBuilder ic) {
+    assertEquals(toString(exp), toString(ic.toByteArray()));
+  }
+
+  /** Hex-encode a byte array, two lowercase digits per byte. */
+  private static String toString(byte[] bin) {
+    StringBuilder dst = new StringBuilder(bin.length * 2);
+    for (byte b : bin) {
+      dst.append(hexchar[(b >>> 4) & 0x0f]);
+      dst.append(hexchar[b & 0x0f]);
+    }
+    return dst.toString();
+  }
+
+  private static final char[] hexchar =
+      {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', //
+          'a', 'b', 'c', 'd', 'e', 'f'};
+}
diff --git a/src/test/java/com/google/gwtorm/nosql/NoSqlPhoneBookTest.java b/src/test/java/com/google/gwtorm/nosql/NoSqlPhoneBookTest.java
new file mode 100644
index 0000000..8532809
--- /dev/null
+++ b/src/test/java/com/google/gwtorm/nosql/NoSqlPhoneBookTest.java
@@ -0,0 +1,250 @@
+// Copyright 2008 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.gwtorm.nosql;
+
+import com.google.gwtorm.client.Access;
+import com.google.gwtorm.client.OrmConcurrencyException;
+import com.google.gwtorm.client.OrmException;
+import com.google.gwtorm.data.PersonAccess;
+import com.google.gwtorm.data.PhoneBookDb;
+import com.google.gwtorm.data.TestPerson;
+import com.google.gwtorm.jdbc.JdbcExecutor;
+import com.google.gwtorm.jdbc.JdbcSchema;
+import com.google.gwtorm.nosql.heap.MemoryDatabase;
+
+import junit.framework.TestCase;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * End-to-end CRUD tests for the NoSQL backend, running the PhoneBookDb
+ * schema against the in-memory MemoryDatabase implementation. Every schema
+ * opened through {@link #open()} is tracked and closed in tearDown.
+ */
+public class NoSqlPhoneBookTest extends TestCase {
+  protected MemoryDatabase<PhoneBookDb> db;
+  private List<PhoneBookDb> openSchemas;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+    db = new MemoryDatabase<PhoneBookDb>(PhoneBookDb.class);
+    openSchemas = new ArrayList<PhoneBookDb>();
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    // Close everything open() handed out, then drop the list so a second
+    // tearDown (or a late open) cannot double-close.
+    if (openSchemas != null) {
+      for (PhoneBookDb schema : openSchemas) {
+        schema.close();
+      }
+      openSchemas = null;
+    }
+    super.tearDown();
+  }
+
+  /** Open a schema handle and register it for closing in tearDown. */
+  protected PhoneBookDb open() throws OrmException {
+    final PhoneBookDb r = db.open();
+    if (r != null) {
+      openSchemas.add(r);
+    }
+    return r;
+  }
+
+  public void testCreateDatabaseHandle() throws Exception {
+    assertNotNull(db);
+  }
+
+  public void testOpenSchema() throws Exception {
+    // Each open() must yield a distinct handle.
+    final PhoneBookDb schema1 = open();
+    assertNotNull(schema1);
+
+    final PhoneBookDb schema2 = open();
+    assertNotNull(schema2);
+    assertNotSame(schema1, schema2);
+  }
+
+  public void testGetPeopleAccess() throws Exception {
+    // Relation name and ID must match the @Relation(id = 1) declaration.
+    final PhoneBookDb schema = open();
+    assertNotNull(schema.people());
+    assertEquals("people", schema.people().getRelationName());
+    assertEquals(1, schema.people().getRelationID());
+  }
+
+  public void testGetAddressAccess() throws Exception {
+    // Relation name and ID must match the @Relation(id = 2) declaration.
+    final PhoneBookDb schema = open();
+    assertNotNull(schema.addresses());
+    assertEquals("addresses", schema.addresses().getRelationName());
+    assertEquals(2, schema.addresses().getRelationID());
+  }
+
+  public void testGetAllRelations() throws Exception {
+    // allRelations() returns the same instances, ordered by relation ID.
+    final PhoneBookDb schema = open();
+    Access<?, ?>[] all = schema.allRelations();
+    assertNotNull(all);
+    assertEquals(2, all.length);
+    assertSame(schema.people(), all[0]);
+    assertSame(schema.addresses(), all[1]);
+  }
+
+  public void testNextAddressId() throws Exception {
+    final PhoneBookDb schema = open();
+    final int a = schema.nextAddressId();
+    final int b = schema.nextAddressId();
+    assertTrue(a != b);
+  }
+
+  public void testPersonPrimaryKey() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson.Key key = new TestPerson.Key("Bob");
+    final TestPerson bob = new TestPerson(key, 18);
+    assertSame(key, schema.people().primaryKey(bob));
+  }
+
+  public void testInsertOnePerson() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob));
+
+    // Reads return a distinct copy, never the inserted instance itself.
+    TestPerson copy = schema.people().all().toList().get(0);
+    assertNotSame(copy, bob);
+    assertEquals(bob.name(), copy.name());
+  }
+
+  public void testGetOnePerson() throws Exception {
+    final PhoneBookDb schema = open();
+    final PersonAccess sp = schema.people();
+    final TestPerson p1 = new TestPerson(new TestPerson.Key("Bob"), 18);
+    sp.insert(Collections.singleton(p1));
+
+    final TestPerson p2 = sp.get(sp.primaryKey(p1));
+    assertNotNull(p2);
+    assertNotSame(p1, p2);
+    assertEquals(sp.primaryKey(p1), sp.primaryKey(p2));
+  }
+
+  public void testGetOnePersonIterator() throws Exception {
+    final PhoneBookDb schema = open();
+    final PersonAccess sp = schema.people();
+    final TestPerson p1 = new TestPerson(new TestPerson.Key("Bob"), 18);
+    sp.insert(Collections.singleton(p1));
+
+    final List<TestPerson> list =
+        sp.get(Collections.singleton(sp.primaryKey(p1))).toList();
+    assertNotNull(list);
+    assertEquals(1, list.size());
+
+    final TestPerson p2 = list.get(0);
+    assertNotNull(p2);
+    assertNotSame(p1, p2);
+    assertEquals(sp.primaryKey(p1), sp.primaryKey(p2));
+  }
+
+  public void testInsertManyPeople() throws Exception {
+    final PhoneBookDb schema = open();
+    final ArrayList<TestPerson> all = new ArrayList<TestPerson>();
+    all.add(new TestPerson(new TestPerson.Key("Bob"), 18));
+    all.add(new TestPerson(new TestPerson.Key("Mary"), 22));
+    all.add(new TestPerson(new TestPerson.Key("Zak"), 33));
+    schema.people().insert(all);
+  }
+
+  public void testDeleteOnePerson() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob));
+    schema.people().delete(Collections.singleton(bob));
+  }
+
+  public void testUpdateOnePerson() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob));
+    bob.growOlder();
+    schema.people().update(Collections.singleton(bob));
+  }
+
+  public void testUpdateNoPerson() throws Exception {
+    // Updating a row that was never inserted must fail loudly.
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    try {
+      schema.people().update(Collections.singleton(bob));
+      fail("Update of missing person succeeded");
+    } catch (OrmConcurrencyException e) {
+      assertEquals("Concurrent modification detected", e.getMessage());
+    }
+  }
+
+  public void testFetchOnePerson() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob));
+
+    final List<TestPerson> all = schema.people().all().toList();
+    assertNotNull(all);
+    assertEquals(1, all.size());
+    assertNotSame(bob, all.get(0));
+    assertEquals(bob.name(), all.get(0).name());
+    assertEquals(bob.age(), all.get(0).age());
+    assertEquals(bob.isRegistered(), all.get(0).isRegistered());
+  }
+
+  public void testFetchOnePersonByName() throws Exception {
+    final PhoneBookDb schema = open();
+    final TestPerson bob1 = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob1));
+
+    final TestPerson bob2 =
+        schema.people().get(new TestPerson.Key(bob1.name()));
+    assertNotNull(bob2);
+    assertNotSame(bob1, bob2);
+    assertEquals(bob1.name(), bob2.name());
+    assertEquals(bob1.age(), bob2.age());
+    assertEquals(bob1.isRegistered(), bob2.isRegistered());
+  }
+
+  public void testFetchByAge() throws Exception {
+    final PhoneBookDb schema = open();
+    final ArrayList<TestPerson> all = new ArrayList<TestPerson>();
+    all.add(new TestPerson(new TestPerson.Key("Bob"), 18));
+    all.add(new TestPerson(new TestPerson.Key("Mary"), 22));
+    all.add(new TestPerson(new TestPerson.Key("Zak"), 33));
+    schema.people().insert(all);
+
+    // olderThan is declared "ORDER BY age": Mary (22) then Zak (33).
+    final List<TestPerson> r = schema.people().olderThan(20).toList();
+    assertEquals(2, r.size());
+    assertEquals(all.get(1).name(), r.get(0).name());
+    assertEquals(all.get(2).name(), r.get(1).name());
+  }
+
+  public void testBooleanType() throws Exception {
+    // A boolean column must survive insert and both update directions.
+    final PhoneBookDb schema = open();
+    final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
+    schema.people().insert(Collections.singleton(bob));
+
+    assertEquals(bob.isRegistered(), schema.people().all().toList().get(0)
+        .isRegistered());
+
+    bob.register();
+    schema.people().update(Collections.singleton(bob));
+
+    assertEquals(bob.isRegistered(), schema.people().all().toList().get(0)
+        .isRegistered());
+
+    bob.unregister();
+    schema.people().update(Collections.singleton(bob));
+
+    assertEquals(bob.isRegistered(), schema.people().all().toList().get(0)
+        .isRegistered());
+  }
+}
diff --git a/src/test/java/com/google/gwtorm/protobuf/ProtobufEncoderTest.java b/src/test/java/com/google/gwtorm/protobuf/ProtobufEncoderTest.java
index 15fc754..7c9ce10 100644
--- a/src/test/java/com/google/gwtorm/protobuf/ProtobufEncoderTest.java
+++ b/src/test/java/com/google/gwtorm/protobuf/ProtobufEncoderTest.java
@@ -14,39 +14,52 @@
 
 package com.google.gwtorm.protobuf;
 
+import com.google.gwtorm.client.Column;
 import com.google.gwtorm.data.TestAddress;
 import com.google.gwtorm.data.TestPerson;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
 import junit.framework.TestCase;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 public class ProtobufEncoderTest extends TestCase {
+  /**
+   * Reference protobuf wire encoding of a registered TestPerson whose
+   * name is "testing" (0x74..0x67 below), shared by several tests.
+   */
+  private static final byte[] testingBin = new byte[] {
+  //
+      // name
+      0x0a, 0x09,
+      // name.name
+      0x0a, 0x07, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, //
+      // age
+      0x10, (byte) 0x96, 0x01, //
+      // registered (true)
+      0x18, 0x01 //
+      //
+      };
+
   @SuppressWarnings("cast")
   public void testPerson() throws UnsupportedEncodingException {
     final ProtobufCodec<TestPerson> e = CodecFactory.encoder(TestPerson.class);
-    final byte[] bin = new byte[] {
-    //
-        // name
-        0x0a, 0x09,
-        // name.name
-        0x0a, 0x07, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, //
-        // age
-        0x10, (byte) 0x96, 0x01, //
-        // registered (true)
-        0x18, 0x01 //
-        //
-        };
-    TestPerson p = e.decode(bin);
+
+    TestPerson p = e.decode(testingBin);
     assertNotNull(p);
     assertTrue(p instanceof TestPerson);
     assertEquals("testing", p.name());
     assertEquals(75, p.age());
     assertTrue(p.isRegistered());
 
-    final byte[] out = e.encode(p).toByteArray();
-    assertEquals(new String(bin, "ISO-8859-1"), new String(out, "ISO-8859-1"));
-    assertEquals(bin.length, e.sizeof(p));
+    final byte[] out = e.encodeToByteArray(p);
+    assertEquals(asString(testingBin), asString(out));
+    assertEquals(testingBin.length, e.sizeof(p));
   }
 
   public void testAddress() {
@@ -60,11 +73,227 @@
     TestPerson p = new TestPerson(k, 42);
     TestAddress b = new TestAddress(new TestAddress.Key(k, "ny"), "ny");
 
-    byte[] act = e.encode(b).toByteArray();
+    byte[] act = e.encodeToByteArray(b);
 
     TestAddress c = e.decode(act);
     assertEquals(c.location(), b.location());
     assertEquals(c.city(), b.city());
     assertEquals(c.key(), b.key());
   }
+
+  public void testDecodeEmptiesByteBuffer() {
+    ProtobufCodec<TestPerson> e = CodecFactory.encoder(TestPerson.class);
+    ByteBuffer buf = ByteBuffer.wrap(testingBin);
+    // decode(ByteBuffer) must consume every byte it was handed.
+    TestPerson p = e.decode(buf);
+    assertEquals(0, buf.remaining());
+    assertEquals(testingBin.length, buf.position());
+  }
+
+  public void testEncodeFillsByteBuffer() throws UnsupportedEncodingException {
+    ProtobufCodec<TestPerson> e = CodecFactory.encoder(TestPerson.class);
+
+    TestPerson p = new TestPerson(new TestPerson.Key("testing"), 75);
+    p.register();
+
+    // sizeof must agree with the reference encoding length.
+    int sz = e.sizeof(p);
+    assertEquals(testingBin.length, sz);
+
+    // encode must fill an exactly-sized buffer completely.
+    ByteBuffer buf = ByteBuffer.allocate(sz);
+    e.encode(p, buf);
+    assertEquals(0, buf.remaining());
+    assertEquals(sz, buf.position());
+
+    buf.flip();
+    byte[] act = new byte[sz];
+    buf.get(act);
+
+    assertEquals(asString(testingBin), asString(act));
+  }
+
+  public void testEncodeNonArrayByteBuffer()
+      throws UnsupportedEncodingException {
+    ProtobufCodec<TestPerson> e = CodecFactory.encoder(TestPerson.class);
+
+    TestPerson p = new TestPerson(new TestPerson.Key("testing"), 75);
+    p.register();
+
+    int sz = e.sizeof(p);
+    assertEquals(testingBin.length, sz);
+
+    // Exercise the code path for buffers without a backing byte[] array.
+    ByteBuffer buf = ByteBuffer.allocateDirect(sz);
+    assertFalse("direct ByteBuffer has no array", buf.hasArray());
+
+    e.encode(p, buf);
+    assertEquals(0, buf.remaining());
+    assertEquals(sz, buf.position());
+
+    buf.flip();
+    byte[] act = new byte[sz];
+    buf.get(act);
+
+    assertEquals(asString(testingBin), asString(act));
+  }
+
+  public void testStringList() throws UnsupportedEncodingException {
+    ProtobufCodec<StringList> e = CodecFactory.encoder(StringList.class);
+
+    StringList list = new StringList();
+    list.list = new ArrayList<String>();
+    list.list.add("moe");
+    list.list.add("larry");
+
+    // Round-trip preserves elements and order; wire form repeats field 2
+    // (tag 0x12) once per element.
+    byte[] act = e.encodeToByteArray(list);
+    StringList other = e.decode(act);
+    assertNotNull(other.list);
+    assertEquals(list.list, other.list);
+    assertEquals(asString(new byte[] { //
+        //
+            0x12, 0x03, 'm', 'o', 'e', //
+            0x12, 0x05, 'l', 'a', 'r', 'r', 'y' //
+        }), asString(act));
+  }
+
+  public void testStringSet() throws UnsupportedEncodingException {
+    ProtobufCodec<StringSet> e = CodecFactory.encoder(StringSet.class);
+
+    StringSet list = new StringSet();
+    list.list = new TreeSet<String>();
+    list.list.add("larry");
+    list.list.add("moe");
+
+    // SortedSet encodes in its natural order ("larry" before "moe") and
+    // decodes back into an equal set.
+    byte[] act = e.encodeToByteArray(list);
+    StringSet other = e.decode(act);
+    assertNotNull(other.list);
+    assertEquals(list.list, other.list);
+    assertEquals(asString(new byte[] { //
+        //
+            0x0a, 0x05, 'l', 'a', 'r', 'r', 'y', //
+            0x0a, 0x03, 'm', 'o', 'e' //
+        }), asString(act));
+  }
+
+  public void testPersonList() {
+    ProtobufCodec<PersonList> e = CodecFactory.encoder(PersonList.class);
+
+    PersonList list = new PersonList();
+    list.people = new ArrayList<TestPerson>();
+    list.people.add(new TestPerson(new TestPerson.Key("larry"), 1 << 16));
+    list.people.add(new TestPerson(new TestPerson.Key("curly"), 1));
+    list.people.add(new TestPerson(new TestPerson.Key("moe"), -1));
+
+    // Nested-message list round-trip; relies on TestPerson.equals().
+    PersonList other = e.decode(e.encodeToByteArray(list));
+    assertNotNull(other.people);
+    assertEquals(list.people, other.people);
+  }
+
+  public void testCustomEncoderList() {
+    ProtobufCodec<ItemList> e = CodecFactory.encoder(ItemList.class);
+
+    ItemList list = new ItemList();
+    list.list = new ArrayList<Item>();
+    list.list.add(new Item());
+    list.list.add(new Item());
+
+    // Elements use the @CustomCodec (ItemCodec); only the count survives
+    // since Item has no state of its own.
+    ItemList other = e.decode(e.encodeToByteArray(list));
+    assertNotNull(other.list);
+    assertEquals(2, other.list.size());
+  }
+
+  public void testEnumEncoder() throws UnsupportedEncodingException {
+    // Sanity-check the ordinal, since the wire value below depends on it.
+    assertEquals(1, ThingWithEnum.Type.B.ordinal());
+    assertSame(ThingWithEnum.Type.B, ThingWithEnum.Type.values()[1]);
+
+    ProtobufCodec<ThingWithEnum> e = CodecFactory.encoder(ThingWithEnum.class);
+
+    ThingWithEnum thing = new ThingWithEnum();
+    thing.type = ThingWithEnum.Type.B;
+
+    ThingWithEnum other = e.decode(e.encodeToByteArray(thing));
+    assertNotNull(other.type);
+    assertSame(thing.type, other.type);
+
+    // Field 1 varint (tag 0x08) carrying the enum's ordinal value.
+    byte[] act = e.encodeToByteArray(thing);
+    byte[] exp = {0x08, 0x01};
+    assertEquals(asString(exp), asString(act));
+  }
+
+  public void testEncodeToStream()throws IOException {
+    ProtobufCodec<ThingWithEnum> e = CodecFactory.encoder(ThingWithEnum.class);
+
+    ThingWithEnum thing = new ThingWithEnum();
+    thing.type = ThingWithEnum.Type.B;
+
+    // encodeWithSize prefixes the message with its length (0x02 here).
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    e.encodeWithSize(thing, out);
+    byte[] exp = {0x02, 0x08, 0x01};
+    assertEquals(asString(exp), asString(out.toByteArray()));
+
+    // decodeWithSize must stop at the framed length and leave trailing
+    // bytes ('\n') unread in the stream.
+    byte[] exp2 = {0x02, 0x08, 0x01, '\n'};
+    ByteArrayInputStream in = new ByteArrayInputStream(exp2);
+    ThingWithEnum other = e.decodeWithSize(in);
+    assertEquals('\n', in.read());
+    assertEquals(-1, in.read());
+    assertNotNull(other.type);
+    assertSame(thing.type, other.type);
+  }
+
+  /** Map bytes 1:1 to chars (ISO-8859-1) so mismatches show in assertion text. */
+  private static String asString(byte[] bin)
+      throws UnsupportedEncodingException {
+    return new String(bin, "ISO-8859-1");
+  }
+
+  // --- Test fixtures: simple message classes built from @Column fields ---
+
+  /** Repeated nested-message field. */
+  static class PersonList {
+    @Column(id = 5)
+    public List<TestPerson> people;
+  }
+
+  /** Repeated string field. */
+  static class StringList {
+    @Column(id = 2)
+    List<String> list;
+  }
+
+  /** Repeated string field backed by a sorted set. */
+  static class StringSet {
+    @Column(id = 1)
+    SortedSet<String> list;
+  }
+
+  /** Stateless element type, serialized only via ItemCodec. */
+  static class Item {
+  }
+
+  /**
+   * Hand-written codec for Item: writes a single untagged bool byte per
+   * instance, ignores it on read, and reports a fixed size of 1.
+   */
+  static class ItemCodec extends ProtobufCodec<Item> {
+    @Override
+    public void encode(Item obj, CodedOutputStream out) throws IOException {
+      out.writeBoolNoTag(true);
+    }
+
+    @Override
+    public void mergeFrom(CodedInputStream in, Item obj) throws IOException {
+      in.readBool();
+    }
+
+    @Override
+    public Item newInstance() {
+      return new Item();
+    }
+
+    @Override
+    public int sizeof(Item obj) {
+      return 1;
+    }
+  }
+
+  /** List whose elements are serialized by the custom ItemCodec. */
+  static class ItemList {
+    @Column(id = 2)
+    @CustomCodec(ItemCodec.class)
+    List<Item> list;
+  }
+
+  /** Message with a single enum-valued field. */
+  static class ThingWithEnum {
+    static enum Type {
+      A, B;
+    }
+
+    @Column(id = 1)
+    Type type;
+  }
 }
diff --git a/src/test/java/com/google/gwtorm/schema/QueryParserTest.java b/src/test/java/com/google/gwtorm/schema/QueryParserTest.java
index c050ede..8eb9936 100644
--- a/src/test/java/com/google/gwtorm/schema/QueryParserTest.java
+++ b/src/test/java/com/google/gwtorm/schema/QueryParserTest.java
@@ -20,6 +20,7 @@
 
 import org.antlr.runtime.tree.Tree;
 
+import java.lang.reflect.Type;
 import java.util.ArrayList;
 import java.util.Collection;
 
@@ -47,6 +48,16 @@
     public Class<?> getPrimitiveType() {
       return String.class;
     }
+
+    @Override
+    public Type[] getArgumentTypes() {
+      return new Type[0];
+    }
+
+    @Override
+    public boolean isCollection() {
+      return false;
+    }
   }
 
   protected Tree parse(final String str) throws QueryParseException {
@@ -120,33 +131,27 @@
     assertEquals(1, t.getChildCount());
 
     final Tree a = t.getChild(0);
-    assertEquals(QueryParser.ASC, a.getType());
-    assertEquals(1, a.getChildCount());
-    assertEquals(QueryParser.ID, a.getChild(0).getType());
-    assertTrue(a.getChild(0) instanceof QueryParser.Column);
-    assertEquals("a", a.getChild(0).getText());
+    assertEquals(QueryParser.ID, a.getType());
+    assertTrue(a instanceof QueryParser.Column);
+    assertEquals("a", a.getText());
   }
 
   public void testOrderByAB() throws QueryParseException {
-    final Tree t = parse("ORDER BY a DESC, b ASC");
+    final Tree t = parse("ORDER BY a, b");
     assertNotNull(t);
     assertEquals(QueryParser.ORDER, t.getType());
     assertEquals(2, t.getChildCount());
     {
       final Tree a = t.getChild(0);
-      assertEquals(QueryParser.DESC, a.getType());
-      assertEquals(1, a.getChildCount());
-      assertEquals(QueryParser.ID, a.getChild(0).getType());
-      assertTrue(a.getChild(0) instanceof QueryParser.Column);
-      assertEquals("a", a.getChild(0).getText());
+      assertEquals(QueryParser.ID, a.getType());
+      assertTrue(a instanceof QueryParser.Column);
+      assertEquals("a", a.getText());
     }
     {
       final Tree b = t.getChild(1);
-      assertEquals(QueryParser.ASC, b.getType());
-      assertEquals(1, b.getChildCount());
-      assertEquals(QueryParser.ID, b.getChild(0).getType());
-      assertTrue(b.getChild(0) instanceof QueryParser.Column);
-      assertEquals("b", b.getChild(0).getText());
+      assertEquals(QueryParser.ID, b.getType());
+      assertTrue(b instanceof QueryParser.Column);
+      assertEquals("b", b.getText());
     }
   }
 
@@ -166,10 +171,7 @@
       assertEquals(QueryParser.ORDER, o.getType());
       assertEquals(1, o.getChildCount());
 
-      final Tree a = o.getChild(0);
-      assertEquals(QueryParser.ASC, a.getType());
-      assertEquals(1, a.getChildCount());
-      final Tree aId = a.getChild(0);
+      final Tree aId = o.getChild(0);
       assertEquals(QueryParser.ID, aId.getType());
       assertTrue(aId instanceof QueryParser.Column);
       assertEquals("a", aId.getText());
diff --git a/src/test/java/com/google/gwtorm/server/PhoneBookDbTestCase.java b/src/test/java/com/google/gwtorm/server/PhoneBookDbTestCase.java
index 2ffdebd..55fd759 100644
--- a/src/test/java/com/google/gwtorm/server/PhoneBookDbTestCase.java
+++ b/src/test/java/com/google/gwtorm/server/PhoneBookDbTestCase.java
@@ -14,9 +14,9 @@
 
 package com.google.gwtorm.server;
 
+import com.google.gwtorm.client.Access;
 import com.google.gwtorm.client.OrmConcurrencyException;
 import com.google.gwtorm.client.OrmException;
-import com.google.gwtorm.client.Transaction;
 import com.google.gwtorm.data.PersonAccess;
 import com.google.gwtorm.data.PhoneBookDb;
 import com.google.gwtorm.data.TestPerson;
@@ -101,11 +101,24 @@
   public void testGetPeopleAccess() throws Exception {
     final PhoneBookDb schema = open();
     assertNotNull(schema.people());
+    assertEquals("people", schema.people().getRelationName());
+    assertEquals(1, schema.people().getRelationID());
   }
 
   public void testGetAddressAccess() throws Exception {
     final PhoneBookDb schema = open();
     assertNotNull(schema.addresses());
+    assertEquals("addresses", schema.addresses().getRelationName());
+    assertEquals(2, schema.addresses().getRelationID());
+  }
+
+  public void testGetAllRelations() throws Exception {
+    final PhoneBookDb schema = open();
+    Access<?, ?>[] all = schema.allRelations();
+    assertNotNull(all);
+    assertEquals(2, all.length);
+    assertSame(schema.people(), all[0]);
+    assertSame(schema.addresses(), all[1]);
   }
 
   public void testCreateSchema() throws Exception {
@@ -197,34 +210,6 @@
     st.close();
   }
 
-  public void testInsertManyPeopleByTransaction() throws Exception {
-    final PhoneBookDb schema = openAndCreate();
-    final Transaction txn = schema.beginTransaction();
-    final ArrayList<TestPerson> all = new ArrayList<TestPerson>();
-    all.add(new TestPerson(new TestPerson.Key("Bob"), 18));
-    all.add(new TestPerson(new TestPerson.Key("Mary"), 22));
-    all.add(new TestPerson(new TestPerson.Key("Zak"), 33));
-    schema.people().insert(all, txn);
-
-    final Statement st = statement(schema);
-    ResultSet rs;
-
-    rs = st.executeQuery("SELECT name,age FROM people ORDER BY name");
-    assertFalse(rs.next());
-    rs.close();
-
-    txn.commit();
-    rs = st.executeQuery("SELECT name,age FROM people ORDER BY name");
-    for (int rowIdx = 0; rowIdx < all.size(); rowIdx++) {
-      assertTrue(rs.next());
-      assertEquals(all.get(rowIdx).name(), rs.getString(1));
-      assertEquals(all.get(rowIdx).age(), rs.getInt(2));
-    }
-    assertFalse(rs.next());
-    rs.close();
-    st.close();
-  }
-
   public void testDeleteOnePerson() throws Exception {
     final PhoneBookDb schema = openAndCreate();
     final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);
@@ -308,21 +293,6 @@
     assertEquals(all.get(2).name(), r.get(1).name());
   }
 
-  public void testFetchNotPerson() throws Exception {
-    final PhoneBookDb schema = openAndCreate();
-    final ArrayList<TestPerson> all = new ArrayList<TestPerson>();
-    all.add(new TestPerson(new TestPerson.Key("Bob"), 18));
-    all.add(new TestPerson(new TestPerson.Key("Mary"), 22));
-    all.add(new TestPerson(new TestPerson.Key("Zak"), 33));
-    schema.people().insert(all);
-
-    final List<TestPerson> r =
-        schema.people().notPerson(new TestPerson.Key("Mary"), 10).toList();
-    assertEquals(2, r.size());
-    assertEquals(all.get(2).name(), r.get(0).name());
-    assertEquals(all.get(0).name(), r.get(1).name());
-  }
-
   public void testBooleanType() throws Exception {
     final PhoneBookDb schema = openAndCreate();
     final TestPerson bob = new TestPerson(new TestPerson.Key("Bob"), 18);