diff --git a/.project b/.project
index 72e4b9760eb09bf1c1d2a57c35da9b7101061e42..bb2fb71390c0d0a76bd480c24ecdc133f4bc878d 100644
--- a/.project
+++ b/.project
@@ -7,6 +7,7 @@
 	<buildSpec>
 		<buildCommand>
 			<name>com.googlecode.goclipse.goBuilder</name>
+			<triggers>clean,full,incremental,</triggers>
 			<arguments>
 			</arguments>
 		</buildCommand>
diff --git a/DOCKER/build.sh b/DOCKER/build.sh
index 996dbc5b7602232ffe50fdb692f81b57eb39a9e7..2a810588b2fe8ebaea700224e7dac440d5f72d5b 100755
--- a/DOCKER/build.sh
+++ b/DOCKER/build.sh
@@ -25,4 +25,4 @@ else
   docker build -t $image_base:$branch -f DOCKER/Dockerfile .
 fi
 
-cd $start
+cd $start
\ No newline at end of file
diff --git a/DOCKER/chain_api.sh b/DOCKER/chain_api.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8fecb978619334a8394d279d28d00e45e2ff48d1
--- /dev/null
+++ b/DOCKER/chain_api.sh
@@ -0,0 +1,4 @@
+#! /bin/bash
+
+echo "Running chain $CHAIN_ID (via ErisDB API)"
+erisdb $TMROOT
diff --git a/DOCKER/start.sh b/DOCKER/start.sh
index b120888df173954304141aedfe89ac991c34708b..7f3194e58a43d069eaf8f3256238efb78fec52ff 100755
--- a/DOCKER/start.sh
+++ b/DOCKER/start.sh
@@ -119,7 +119,6 @@ export ECM_PATH  # set by Dockerfile
 export MINTX_NODE_ADDR=$NODE_ADDR
 export MINTX_SIGN_ADDR=keys:4767
 
-
 # print the version
 bash $ECM_PATH/version.sh
 
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 4420887e10eb7359e0d377d22ca9253cdbeb2aa3..c1596e21f360b65ac7fac591ba089bdc2fd59490 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/eris-ltd/eris-db",
-	"GoVersion": "go1.4.2",
+	"GoVersion": "go1.5.1",
 	"Packages": [
 		"./..."
 	],
@@ -24,23 +24,19 @@
 			"Comment": "v1.0rc1-104-g1a7ab6e",
 			"Rev": "1a7ab6e4d5fdc72d6df30ef562102ae6e0d18518"
 		},
-		{
-			"ImportPath": "github.com/google/go-snappy/snappy",
-			"Rev": "eaa750b9bf4dcb7cb20454be850613b66cda3273"
-		},
 		{
 			"ImportPath": "github.com/gorilla/websocket",
-			"Rev": "a3ec486e6a7a41858210b0fc5d7b5df593b3c4a3"
+			"Rev": "1f87405cd9755fc388e111c4003caca4a2f52fa6"
 		},
 		{
 			"ImportPath": "github.com/inconshreveable/log15/stack",
-			"Comment": "v2.3-42-gee210fc",
-			"Rev": "ee210fc98cc7756aa0cf55d8d554148828e8e658"
+			"Comment": "v2.3-38-g352fceb",
+			"Rev": "352fceb48e895bd1dd0b9f5d3ae8f8516c49af0f"
 		},
 		{
 			"ImportPath": "github.com/inconshreveable/log15/term",
-			"Comment": "v2.3-42-gee210fc",
-			"Rev": "ee210fc98cc7756aa0cf55d8d554148828e8e658"
+			"Comment": "v2.3-38-g352fceb",
+			"Rev": "352fceb48e895bd1dd0b9f5d3ae8f8516c49af0f"
 		},
 		{
 			"ImportPath": "github.com/manucorporat/sse",
@@ -48,7 +44,7 @@
 		},
 		{
 			"ImportPath": "github.com/mattn/go-colorable",
-			"Rev": "d67e0b7d1797975196499f79bcc322c08b9f218b"
+			"Rev": "043ae16291351db8465272edf465c9f388161627"
 		},
 		{
 			"ImportPath": "github.com/naoina/go-stringutil",
@@ -56,7 +52,7 @@
 		},
 		{
 			"ImportPath": "github.com/naoina/toml",
-			"Rev": "5667c316ee9576e9d5bca793ce4ec813a88ce7d3"
+			"Rev": "7b2dffbeaee47506726f29e36d19cf4ee90d361b"
 		},
 		{
 			"ImportPath": "github.com/sfreiberg/gotwilio",
@@ -79,7 +75,11 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "a06509502ca32565bdf74afc1e573050023f261c"
+			"Rev": "63c9e642efad852f49e20a6f90194cae112fd2ac"
+		},
+		{
+			"ImportPath": "github.com/syndtr/gosnappy/snappy",
+			"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
 		},
 		{
 			"ImportPath": "github.com/tendermint/ed25519",
@@ -87,8 +87,8 @@
 		},
 		{
 			"ImportPath": "github.com/tendermint/log15",
-			"Comment": "v2.3-36-gc65281b",
-			"Rev": "c65281bb703b7612f60558e75b07c434c06e2636"
+			"Comment": "v2.3-36-g6e46075",
+			"Rev": "6e460758f10ef42a4724b8e4a82fee59aaa0e41d"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/account",
@@ -215,16 +215,16 @@
 		},
 		{
 			"ImportPath": "golang.org/x/net/context",
-			"Rev": "10576091dc82c9c109dddfb5ed77bdbbc87a9af8"
+			"Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/netutil",
-			"Rev": "10576091dc82c9c109dddfb5ed77bdbbc87a9af8"
+			"Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
 		},
 		{
 			"ImportPath": "gopkg.in/bluesuncorp/validator.v5",
-			"Comment": "v5.8",
-			"Rev": "c06d47f593d786142436a43334f724d819093c04"
+			"Comment": "v5.12",
+			"Rev": "d5acf1dac43705f8bfbb71d878e290e2bed3950b"
 		},
 		{
 			"ImportPath": "gopkg.in/fatih/set.v0",
diff --git a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/decode.go b/Godeps/_workspace/src/github.com/google/go-snappy/snappy/decode.go
deleted file mode 100644
index 552a17bfb0500fe0617753c9ca7df0263ec363db..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/decode.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-)
-
-var (
-	// ErrCorrupt reports that the input is invalid.
-	ErrCorrupt = errors.New("snappy: corrupt input")
-	// ErrUnsupported reports that the input isn't supported.
-	ErrUnsupported = errors.New("snappy: unsupported input")
-)
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
-	v, _, err := decodedLen(src)
-	return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
-	v, n := binary.Uvarint(src)
-	if n == 0 {
-		return 0, 0, ErrCorrupt
-	}
-	if uint64(int(v)) != v {
-		return 0, 0, errors.New("snappy: decoded block is too large")
-	}
-	return int(v), n, nil
-}
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-// It is valid to pass a nil dst.
-func Decode(dst, src []byte) ([]byte, error) {
-	dLen, s, err := decodedLen(src)
-	if err != nil {
-		return nil, err
-	}
-	if len(dst) < dLen {
-		dst = make([]byte, dLen)
-	}
-
-	var d, offset, length int
-	for s < len(src) {
-		switch src[s] & 0x03 {
-		case tagLiteral:
-			x := uint(src[s] >> 2)
-			switch {
-			case x < 60:
-				s += 1
-			case x == 60:
-				s += 2
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-1])
-			case x == 61:
-				s += 3
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-2]) | uint(src[s-1])<<8
-			case x == 62:
-				s += 4
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
-			case x == 63:
-				s += 5
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
-			}
-			length = int(x + 1)
-			if length <= 0 {
-				return nil, errors.New("snappy: unsupported literal length")
-			}
-			if length > len(dst)-d || length > len(src)-s {
-				return nil, ErrCorrupt
-			}
-			copy(dst[d:], src[s:s+length])
-			d += length
-			s += length
-			continue
-
-		case tagCopy1:
-			s += 2
-			if s > len(src) {
-				return nil, ErrCorrupt
-			}
-			length = 4 + int(src[s-2])>>2&0x7
-			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
-
-		case tagCopy2:
-			s += 3
-			if s > len(src) {
-				return nil, ErrCorrupt
-			}
-			length = 1 + int(src[s-3])>>2
-			offset = int(src[s-2]) | int(src[s-1])<<8
-
-		case tagCopy4:
-			return nil, errors.New("snappy: unsupported COPY_4 tag")
-		}
-
-		end := d + length
-		if offset > d || end > len(dst) {
-			return nil, ErrCorrupt
-		}
-		for ; d < end; d++ {
-			dst[d] = dst[d-offset]
-		}
-	}
-	if d != dLen {
-		return nil, ErrCorrupt
-	}
-	return dst[:d], nil
-}
-
-// NewReader returns a new Reader that decompresses from r, using the framing
-// format described at
-// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
-func NewReader(r io.Reader) *Reader {
-	return &Reader{
-		r:       r,
-		decoded: make([]byte, maxUncompressedChunkLen),
-		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
-	}
-}
-
-// Reader is an io.Reader than can read Snappy-compressed bytes.
-type Reader struct {
-	r       io.Reader
-	err     error
-	decoded []byte
-	buf     []byte
-	// decoded[i:j] contains decoded bytes that have not yet been passed on.
-	i, j       int
-	readHeader bool
-}
-
-// Reset discards any buffered data, resets all state, and switches the Snappy
-// reader to read from r. This permits reusing a Reader rather than allocating
-// a new one.
-func (r *Reader) Reset(reader io.Reader) {
-	r.r = reader
-	r.err = nil
-	r.i = 0
-	r.j = 0
-	r.readHeader = false
-}
-
-func (r *Reader) readFull(p []byte) (ok bool) {
-	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
-		if r.err == io.ErrUnexpectedEOF {
-			r.err = ErrCorrupt
-		}
-		return false
-	}
-	return true
-}
-
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-	for {
-		if r.i < r.j {
-			n := copy(p, r.decoded[r.i:r.j])
-			r.i += n
-			return n, nil
-		}
-		if !r.readFull(r.buf[:4]) {
-			return 0, r.err
-		}
-		chunkType := r.buf[0]
-		if !r.readHeader {
-			if chunkType != chunkTypeStreamIdentifier {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			r.readHeader = true
-		}
-		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
-		if chunkLen > len(r.buf) {
-			r.err = ErrUnsupported
-			return 0, r.err
-		}
-
-		// The chunk types are specified at
-		// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
-		switch chunkType {
-		case chunkTypeCompressedData:
-			// Section 4.2. Compressed data (chunk type 0x00).
-			if chunkLen < checksumSize {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			buf := r.buf[:chunkLen]
-			if !r.readFull(buf) {
-				return 0, r.err
-			}
-			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
-			buf = buf[checksumSize:]
-
-			n, err := DecodedLen(buf)
-			if err != nil {
-				r.err = err
-				return 0, r.err
-			}
-			if n > len(r.decoded) {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			if _, err := Decode(r.decoded, buf); err != nil {
-				r.err = err
-				return 0, r.err
-			}
-			if crc(r.decoded[:n]) != checksum {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			r.i, r.j = 0, n
-			continue
-
-		case chunkTypeUncompressedData:
-			// Section 4.3. Uncompressed data (chunk type 0x01).
-			if chunkLen < checksumSize {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			buf := r.buf[:checksumSize]
-			if !r.readFull(buf) {
-				return 0, r.err
-			}
-			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
-			// Read directly into r.decoded instead of via r.buf.
-			n := chunkLen - checksumSize
-			if !r.readFull(r.decoded[:n]) {
-				return 0, r.err
-			}
-			if crc(r.decoded[:n]) != checksum {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			r.i, r.j = 0, n
-			continue
-
-		case chunkTypeStreamIdentifier:
-			// Section 4.1. Stream identifier (chunk type 0xff).
-			if chunkLen != len(magicBody) {
-				r.err = ErrCorrupt
-				return 0, r.err
-			}
-			if !r.readFull(r.buf[:len(magicBody)]) {
-				return 0, r.err
-			}
-			for i := 0; i < len(magicBody); i++ {
-				if r.buf[i] != magicBody[i] {
-					r.err = ErrCorrupt
-					return 0, r.err
-				}
-			}
-			continue
-		}
-
-		if chunkType <= 0x7f {
-			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
-			r.err = ErrUnsupported
-			return 0, r.err
-
-		} else {
-			// Section 4.4 Padding (chunk type 0xfe).
-			// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
-			if !r.readFull(r.buf[:chunkLen]) {
-				return 0, r.err
-			}
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client.go
index 93db8ddc320df9489edf64271ef2188ab793d93e..c25d24f80421a422f130045a04754d90d261f4f9 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/client.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client.go
@@ -5,11 +5,8 @@
 package websocket
 
 import (
-	"bytes"
 	"crypto/tls"
 	"errors"
-	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -130,11 +127,6 @@ func parseURL(s string) (*url.URL, error) {
 		u.Opaque = s[i:]
 	}
 
-	if strings.Contains(u.Host, "@") {
-		// WebSocket URIs do not contain user information.
-		return nil, errMalformedURL
-	}
-
 	return &u, nil
 }
 
@@ -163,8 +155,7 @@ var DefaultDialer *Dialer
 //
 // If the WebSocket handshake fails, ErrBadHandshake is returned along with a
 // non-nil *http.Response so that callers can handle redirects, authentication,
-// etcetera. The response body may not contain the entire response and does not
-// need to be closed by the application.
+// etc.
 func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
 	u, err := parseURL(urlStr)
 	if err != nil {
@@ -233,33 +224,8 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
 		requestHeader = h
 	}
 
-	if len(requestHeader["Host"]) > 0 {
-		// This can be used to supply a Host: header which is different from
-		// the dial address.
-		u.Host = requestHeader.Get("Host")
-
-		// Drop "Host" header
-		h := http.Header{}
-		for k, v := range requestHeader {
-			if k == "Host" {
-				continue
-			}
-			h[k] = v
-		}
-		requestHeader = h
-	}
-
 	conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize)
-
 	if err != nil {
-		if err == ErrBadHandshake {
-			// Before closing the network connection on return from this
-			// function, slurp up some of the response to aid application
-			// debugging.
-			buf := make([]byte, 1024)
-			n, _ := io.ReadFull(resp.Body, buf)
-			resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
-		}
 		return nil, resp, err
 	}
 
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go
index 749ef20509ccb54418fee9f78e1d57067d1e4a08..8c608f68c4b41fbd6c920ffcc5c0f31ab482dcb4 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go
@@ -8,13 +8,11 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"reflect"
-	"strings"
 	"testing"
 	"time"
 )
@@ -36,22 +34,22 @@ var cstDialer = Dialer{
 
 type cstHandler struct{ *testing.T }
 
-type cstServer struct {
+type Server struct {
 	*httptest.Server
 	URL string
 }
 
-func newServer(t *testing.T) *cstServer {
-	var s cstServer
+func newServer(t *testing.T) *Server {
+	var s Server
 	s.Server = httptest.NewServer(cstHandler{t})
-	s.URL = makeWsProto(s.Server.URL)
+	s.URL = "ws" + s.Server.URL[len("http"):]
 	return &s
 }
 
-func newTLSServer(t *testing.T) *cstServer {
-	var s cstServer
+func newTLSServer(t *testing.T) *Server {
+	var s Server
 	s.Server = httptest.NewTLSServer(cstHandler{t})
-	s.URL = makeWsProto(s.Server.URL)
+	s.URL = "ws" + s.Server.URL[len("http"):]
 	return &s
 }
 
@@ -99,10 +97,6 @@ func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func makeWsProto(s string) string {
-	return "ws" + strings.TrimPrefix(s, "http")
-}
-
 func sendRecv(t *testing.T, ws *Conn) {
 	const message = "Hello World!"
 	if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil {
@@ -163,7 +157,6 @@ func TestDialTLS(t *testing.T) {
 }
 
 func xTestDialTLSBadCert(t *testing.T) {
-	// This test is deactivated because of noisy logging from the net/http package.
 	s := newTLSServer(t)
 	defer s.Close()
 
@@ -254,70 +247,3 @@ func TestHandshake(t *testing.T) {
 	}
 	sendRecv(t, ws)
 }
-
-func TestRespOnBadHandshake(t *testing.T) {
-	const expectedStatus = http.StatusGone
-	const expectedBody = "This is the response body."
-
-	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(expectedStatus)
-		io.WriteString(w, expectedBody)
-	}))
-	defer s.Close()
-
-	ws, resp, err := cstDialer.Dial(makeWsProto(s.URL), nil)
-	if err == nil {
-		ws.Close()
-		t.Fatalf("Dial: nil")
-	}
-
-	if resp == nil {
-		t.Fatalf("resp=nil, err=%v", err)
-	}
-
-	if resp.StatusCode != expectedStatus {
-		t.Errorf("resp.StatusCode=%d, want %d", resp.StatusCode, expectedStatus)
-	}
-
-	p, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		t.Fatalf("ReadFull(resp.Body) returned error %v", err)
-	}
-
-	if string(p) != expectedBody {
-		t.Errorf("resp.Body=%s, want %s", p, expectedBody)
-	}
-}
-
-// If the Host header is specified in `Dial()`, the server must receive it as
-// the `Host:` header.
-func TestHostHeader(t *testing.T) {
-	s := newServer(t)
-	defer s.Close()
-
-	specifiedHost := make(chan string, 1)
-	origHandler := s.Server.Config.Handler
-
-	// Capture the request Host header.
-	s.Server.Config.Handler = http.HandlerFunc(
-		func(w http.ResponseWriter, r *http.Request) {
-			specifiedHost <- r.Host
-			origHandler.ServeHTTP(w, r)
-		})
-
-	ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Host": {"testhost"}})
-	if err != nil {
-		t.Fatalf("Dial: %v", err)
-	}
-	defer ws.Close()
-
-	if resp.StatusCode != http.StatusSwitchingProtocols {
-		t.Fatalf("resp.StatusCode = %v, want http.StatusSwitchingProtocols", resp.StatusCode)
-	}
-
-	if gotHost := <-specifiedHost; gotHost != "testhost" {
-		t.Fatalf("gotHost = %q, want \"testhost\"", gotHost)
-	}
-
-	sendRecv(t, ws)
-}
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go
index 07a9cb453ed7353dbbfa098a6a7020b8d82b4615..d2f2ebd798b2f1834fa2d19b6508b82eb2a77b01 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go
@@ -20,7 +20,6 @@ var parseURLTests = []struct {
 	{"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}},
 	{"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}},
 	{"ss://example.com/a/b", nil},
-	{"ws://webmaster@example.com/", nil},
 }
 
 func TestParseURL(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go b/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go
index e719f1ce63eee37e332a244960df9b126cd407a3..86c35e5fc06136f5a3d712dcc282640883cd08e7 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go
@@ -801,7 +801,7 @@ func (c *Conn) SetPingHandler(h func(string) error) {
 	c.handlePing = h
 }
 
-// SetPongHandler sets the handler for pong messages received from the peer.
+// SetPongHandler sets then handler for pong messages received from the peer.
 // The default pong handler does nothing.
 func (c *Conn) SetPongHandler(h func(string) error) {
 	if h == nil {
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go b/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go
index f52925dd11a50a9b5d01fe85c73f0e774f31fc7e..0d2bd912b3e475e4acff7291130d6b76f9a08d93 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go
@@ -24,7 +24,7 @@
 //      ... Use conn to send and receive messages.
 //  }
 //
-// Call the connection's WriteMessage and ReadMessage methods to send and
+// Call the connection WriteMessage and ReadMessages methods to send and
 // receive messages as a slice of bytes. This snippet of code shows how to echo
 // messages using these methods:
 //
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json.go
index 18e62f2256cbff262fe9273884b59e853c03a0bb..e0668f25e15fc65279662c32269ea677127e91d8 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/json.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/json.go
@@ -6,7 +6,6 @@ package websocket
 
 import (
 	"encoding/json"
-	"io"
 )
 
 // WriteJSON is deprecated, use c.WriteJSON instead.
@@ -46,12 +45,5 @@ func (c *Conn) ReadJSON(v interface{}) error {
 	if err != nil {
 		return err
 	}
-	err = json.NewDecoder(r).Decode(v)
-	if err == io.EOF {
-		// Decode returns io.EOF when the message is empty or all whitespace.
-		// Convert to io.ErrUnexpectedEOF so that application can distinguish
-		// between an error reading the JSON value and the connection closing.
-		err = io.ErrUnexpectedEOF
-	}
-	return err
+	return json.NewDecoder(r).Decode(v)
 }
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go
index 1b7a5ec8bd08f5ec4fd101f53a2b9d51476711f9..2edb28d2f876298f0e7dce0a37db827503eb8c96 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go
@@ -6,8 +6,6 @@ package websocket
 
 import (
 	"bytes"
-	"encoding/json"
-	"io"
 	"reflect"
 	"testing"
 )
@@ -38,60 +36,6 @@ func TestJSON(t *testing.T) {
 	}
 }
 
-func TestPartialJsonRead(t *testing.T) {
-	var buf bytes.Buffer
-	c := fakeNetConn{&buf, &buf}
-	wc := newConn(c, true, 1024, 1024)
-	rc := newConn(c, false, 1024, 1024)
-
-	var v struct {
-		A int
-		B string
-	}
-	v.A = 1
-	v.B = "hello"
-
-	messageCount := 0
-
-	// Partial JSON values.
-
-	data, err := json.Marshal(v)
-	if err != nil {
-		t.Fatal(err)
-	}
-	for i := len(data) - 1; i >= 0; i-- {
-		if err := wc.WriteMessage(TextMessage, data[:i]); err != nil {
-			t.Fatal(err)
-		}
-		messageCount++
-	}
-
-	// Whitespace.
-
-	if err := wc.WriteMessage(TextMessage, []byte(" ")); err != nil {
-		t.Fatal(err)
-	}
-	messageCount++
-
-	// Close.
-
-	if err := wc.WriteMessage(CloseMessage, FormatCloseMessage(CloseNormalClosure, "")); err != nil {
-		t.Fatal(err)
-	}
-
-	for i := 0; i < messageCount; i++ {
-		err := rc.ReadJSON(&v)
-		if err != io.ErrUnexpectedEOF {
-			t.Error("read", i, err)
-		}
-	}
-
-	err = rc.ReadJSON(&v)
-	if err != io.EOF {
-		t.Error("final", err)
-	}
-}
-
 func TestDeprecatedJSON(t *testing.T) {
 	var buf bytes.Buffer
 	c := fakeNetConn{&buf, &buf}
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/server.go b/Godeps/_workspace/src/github.com/gorilla/websocket/server.go
index e56a004933ad31a9846eda5f8942f11e40990ddd..349e5b997ab951174e5dfa6d9b7a27f19e662e1d 100644
--- a/Godeps/_workspace/src/github.com/gorilla/websocket/server.go
+++ b/Godeps/_workspace/src/github.com/gorilla/websocket/server.go
@@ -98,11 +98,11 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
 	}
 
 	if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
-		return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'")
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: connection header != upgrade")
 	}
 
 	if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
-		return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'")
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: upgrade != websocket")
 	}
 
 	checkOrigin := u.CheckOrigin
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
index 87df7d5b0290d5297cf32cfc482e7282e3886c16..c0b201a5308991fe68a003c3684805fcb34fa556 100644
--- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
@@ -3,7 +3,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build linux,!appengine darwin freebsd openbsd
+// +build linux,!appengine darwin freebsd
 
 package term
 
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
deleted file mode 100644
index 571ece3d139223c8fc9687e1ff140697d8cc39ae..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package term
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/naoina/toml/decode.go b/Godeps/_workspace/src/github.com/naoina/toml/decode.go
index d9cb862404ae19d9d58a42d6401dbe2f6a32c22f..4bc86a4e47eedcc4d4e7db680c5f1a308f3bd1ec 100644
--- a/Godeps/_workspace/src/github.com/naoina/toml/decode.go
+++ b/Godeps/_workspace/src/github.com/naoina/toml/decode.go
@@ -2,8 +2,6 @@ package toml
 
 import (
 	"fmt"
-	"io"
-	"io/ioutil"
 	"reflect"
 	"strconv"
 	"strings"
@@ -51,29 +49,6 @@ func Unmarshal(data []byte, v interface{}) error {
 	return nil
 }
 
-// A Decoder reads and decodes TOML from an input stream.
-type Decoder struct {
-	r io.Reader
-}
-
-// NewDecoder returns a new Decoder that reads from r.
-// Note that it reads all from r before parsing it.
-func NewDecoder(r io.Reader) *Decoder {
-	return &Decoder{
-		r: r,
-	}
-}
-
-// Decode parses the TOML data from its input and stores it in the value pointed to by v.
-// See the documentation for Unmarshal for details about the conversion of TOML into a Go value.
-func (d *Decoder) Decode(v interface{}) error {
-	b, err := ioutil.ReadAll(d.r)
-	if err != nil {
-		return err
-	}
-	return Unmarshal(b, v)
-}
-
 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
 // The input can be assumed to be a valid encoding of a TOML value.
diff --git a/Godeps/_workspace/src/github.com/naoina/toml/encode.go b/Godeps/_workspace/src/github.com/naoina/toml/encode.go
index 94302f44d04d6166a3ee5e6401ed16f84f24e5db..7465a7a2b996ea2a35f4f16e278dd91c785e5ff9 100644
--- a/Godeps/_workspace/src/github.com/naoina/toml/encode.go
+++ b/Godeps/_workspace/src/github.com/naoina/toml/encode.go
@@ -2,7 +2,6 @@ package toml
 
 import (
 	"fmt"
-	"io"
 	"reflect"
 	"strconv"
 	"time"
@@ -44,29 +43,6 @@ func Marshal(v interface{}) ([]byte, error) {
 	return marshal(nil, "", reflect.ValueOf(v), false, false)
 }
 
-// A Encoder writes TOML to an output stream.
-type Encoder struct {
-	w io.Writer
-}
-
-// NewEncoder returns a new Encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		w: w,
-	}
-}
-
-// Encode writes the TOML of v to the stream.
-// See the documentation for Marshal for details about the conversion of Go values to TOML.
-func (e *Encoder) Encode(v interface{}) error {
-	b, err := Marshal(v)
-	if err != nil {
-		return err
-	}
-	_, err = e.w.Write(b)
-	return err
-}
-
 // Marshaler is the interface implemented by objects that can marshal themshelves into valid TOML.
 type Marshaler interface {
 	MarshalTOML() ([]byte, error)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
deleted file mode 100644
index 175e2220323c8428957d1cad24aa9e8604f8a23e..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !go1.2
-
-package cache
-
-import (
-	"math/rand"
-	"testing"
-)
-
-func BenchmarkLRUCache(b *testing.B) {
-	c := NewCache(NewLRU(10000))
-
-	b.SetParallelism(10)
-	b.RunParallel(func(pb *testing.PB) {
-		r := rand.New(rand.NewSource(time.Now().UnixNano()))
-
-		for pb.Next() {
-			key := uint64(r.Intn(1000000))
-			c.Get(0, key, func() (int, Value) {
-				return 1, key
-			}).Release()
-		}
-	})
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
index c2a50156f0472c99d3b60022781435fc18cf8947..5575583dcef4c151badda08a1d9009888884157e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -552,3 +552,19 @@ func TestLRUCache_Close(t *testing.T) {
 		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
 	}
 }
+
+func BenchmarkLRUCache(b *testing.B) {
+	c := NewCache(NewLRU(10000))
+
+	b.SetParallelism(10)
+	b.RunParallel(func(pb *testing.PB) {
+		r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+		for pb.Next() {
+			key := uint64(r.Intn(1000000))
+			c.Get(0, key, func() (int, Value) {
+				return 1, key
+			}).Release()
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
index 365d311f6f0d408ff5da18ebf9dd0a803565dca3..03c6a06867327b8ab74a9c91acc6bc0200efb05b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
@@ -63,14 +63,13 @@ type DB struct {
 	journalAckC  chan error
 
 	// Compaction.
-	tcompCmdC        chan cCmd
-	tcompPauseC      chan chan<- struct{}
-	mcompCmdC        chan cCmd
-	compErrC         chan error
-	compPerErrC      chan error
-	compErrSetC      chan error
-	compWriteLocking bool
-	compStats        []cStats
+	tcompCmdC   chan cCmd
+	tcompPauseC chan chan<- struct{}
+	mcompCmdC   chan cCmd
+	compErrC    chan error
+	compPerErrC chan error
+	compErrSetC chan error
+	compStats   []cStats
 
 	// Close.
 	closeW sync.WaitGroup
@@ -109,44 +108,28 @@ func openDB(s *session) (*DB, error) {
 		closeC: make(chan struct{}),
 	}
 
-	// Read-only mode.
-	readOnly := s.o.GetReadOnly()
-
-	if readOnly {
-		// Recover journals (read-only mode).
-		if err := db.recoverJournalRO(); err != nil {
-			return nil, err
-		}
-	} else {
-		// Recover journals.
-		if err := db.recoverJournal(); err != nil {
-			return nil, err
-		}
+	if err := db.recoverJournal(); err != nil {
+		return nil, err
+	}
 
-		// Remove any obsolete files.
-		if err := db.checkAndCleanFiles(); err != nil {
-			// Close journal.
-			if db.journal != nil {
-				db.journal.Close()
-				db.journalWriter.Close()
-			}
-			return nil, err
+	// Remove any obsolete files.
+	if err := db.checkAndCleanFiles(); err != nil {
+		// Close journal.
+		if db.journal != nil {
+			db.journal.Close()
+			db.journalWriter.Close()
 		}
-
+		return nil, err
 	}
 
 	// Doesn't need to be included in the wait group.
 	go db.compactionError()
 	go db.mpoolDrain()
 
-	if readOnly {
-		db.SetReadOnly()
-	} else {
-		db.closeW.Add(3)
-		go db.tCompaction()
-		go db.mCompaction()
-		go db.jWriter()
-	}
+	db.closeW.Add(3)
+	go db.tCompaction()
+	go db.mCompaction()
+	go db.jWriter()
 
 	s.logf("db@open done T·%v", time.Since(start))
 
@@ -292,7 +275,7 @@ func recoverTable(s *session, o *opt.Options) error {
 		// We will drop corrupted table.
 		strict = o.GetStrict(opt.StrictRecovery)
 
-		rec   = &sessionRecord{}
+		rec   = &sessionRecord{numLevel: o.GetNumLevel()}
 		bpool = util.NewBufferPool(o.GetBlockSize() + 5)
 	)
 	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
@@ -364,14 +347,12 @@ func recoverTable(s *session, o *opt.Options) error {
 			return err
 		}
 		iter := tr.NewIterator(nil, nil)
-		if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
-			itererr.SetErrorCallback(func(err error) {
-				if errors.IsCorrupted(err) {
-					s.logf("table@recovery block corruption @%d %q", file.Num(), err)
-					tcorruptedBlock++
-				}
-			})
-		}
+		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
+			if errors.IsCorrupted(err) {
+				s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+				tcorruptedBlock++
+			}
+		})
 
 		// Scan the table.
 		for iter.Next() {
@@ -467,136 +448,132 @@ func recoverTable(s *session, o *opt.Options) error {
 }
 
 func (db *DB) recoverJournal() error {
-	// Get all journals and sort it by file number.
-	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+	// Get all tables and sort it by file number.
+	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	files(allJournalFiles).sort()
+	journalFiles := files(journalFiles_)
+	journalFiles.sort()
 
-	// Journals that will be recovered.
-	var recJournalFiles []storage.File
-	for _, jf := range allJournalFiles {
-		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
-			recJournalFiles = append(recJournalFiles, jf)
+	// Discard older journal.
+	prev := -1
+	for i, file := range journalFiles {
+		if file.Num() >= db.s.stJournalNum {
+			if prev >= 0 {
+				i--
+				journalFiles[i] = journalFiles[prev]
+			}
+			journalFiles = journalFiles[i:]
+			break
+		} else if file.Num() == db.s.stPrevJournalNum {
+			prev = i
+		}
+	}
+
+	var jr *journal.Reader
+	var of storage.File
+	var mem *memdb.DB
+	batch := new(Batch)
+	cm := newCMem(db.s)
+	buf := new(util.Buffer)
+	// Options.
+	strict := db.s.o.GetStrict(opt.StrictJournal)
+	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
+	writeBuffer := db.s.o.GetWriteBuffer()
+	recoverJournal := func(file storage.File) error {
+		db.logf("journal@recovery recovering @%d", file.Num())
+		reader, err := file.Open()
+		if err != nil {
+			return err
 		}
-	}
+		defer reader.Close()
 
-	var (
-		of  storage.File // Obsolete file.
-		rec = &sessionRecord{}
-	)
-
-	// Recover journals.
-	if len(recJournalFiles) > 0 {
-		db.logf("journal@recovery F·%d", len(recJournalFiles))
-
-		// Mark file number as used.
-		db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
-
-		var (
-			// Options.
-			strict      = db.s.o.GetStrict(opt.StrictJournal)
-			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
-			writeBuffer = db.s.o.GetWriteBuffer()
-
-			jr    *journal.Reader
-			mdb   = memdb.New(db.s.icmp, writeBuffer)
-			buf   = &util.Buffer{}
-			batch = &Batch{}
-		)
-
-		for _, jf := range recJournalFiles {
-			db.logf("journal@recovery recovering @%d", jf.Num())
+		// Create/reset journal reader instance.
+		if jr == nil {
+			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
+		} else {
+			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
+		}
 
-			fr, err := jf.Open()
-			if err != nil {
+		// Flush memdb and remove obsolete journal file.
+		if of != nil {
+			if mem.Len() > 0 {
+				if err := cm.flush(mem, 0); err != nil {
+					return err
+				}
+			}
+			if err := cm.commit(file.Num(), db.seq); err != nil {
 				return err
 			}
+			cm.reset()
+			of.Remove()
+			of = nil
+		}
 
-			// Create or reset journal reader instance.
-			if jr == nil {
-				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
-			} else {
-				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+		// Replay journal to memdb.
+		mem.Reset()
+		for {
+			r, err := jr.Next()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				return errors.SetFile(err, file)
 			}
 
-			// Flush memdb and remove obsolete journal file.
-			if of != nil {
-				if mdb.Len() > 0 {
-					if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
-						fr.Close()
-						return err
-					}
+			buf.Reset()
+			if _, err := buf.ReadFrom(r); err != nil {
+				if err == io.ErrUnexpectedEOF {
+					// This is error returned due to corruption, with strict == false.
+					continue
+				} else {
+					return errors.SetFile(err, file)
 				}
-
-				rec.setJournalNum(jf.Num())
-				rec.setSeqNum(db.seq)
-				if err := db.s.commit(rec); err != nil {
-					fr.Close()
-					return err
+			}
+			if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
+				if strict || !errors.IsCorrupted(err) {
+					return errors.SetFile(err, file)
+				} else {
+					db.s.logf("journal error: %v (skipped)", err)
+					// We won't apply sequence number as it might be corrupted.
+					continue
 				}
-				rec.resetAddedTables()
-
-				of.Remove()
-				of = nil
 			}
 
-			// Replay journal to memdb.
-			mdb.Reset()
-			for {
-				r, err := jr.Next()
-				if err != nil {
-					if err == io.EOF {
-						break
-					}
+			// Save sequence number.
+			db.seq = batch.seq + uint64(batch.Len())
 
-					fr.Close()
-					return errors.SetFile(err, jf)
+			// Flush it if large enough.
+			if mem.Size() >= writeBuffer {
+				if err := cm.flush(mem, 0); err != nil {
+					return err
 				}
+				mem.Reset()
+			}
+		}
 
-				buf.Reset()
-				if _, err := buf.ReadFrom(r); err != nil {
-					if err == io.ErrUnexpectedEOF {
-						// This is error returned due to corruption, with strict == false.
-						continue
-					}
-
-					fr.Close()
-					return errors.SetFile(err, jf)
-				}
-				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
-					if !strict && errors.IsCorrupted(err) {
-						db.s.logf("journal error: %v (skipped)", err)
-						// We won't apply sequence number as it might be corrupted.
-						continue
-					}
-
-					fr.Close()
-					return errors.SetFile(err, jf)
-				}
+		of = file
+		return nil
+	}
 
-				// Save sequence number.
-				db.seq = batch.seq + uint64(batch.Len())
+	// Recover all journals.
+	if len(journalFiles) > 0 {
+		db.logf("journal@recovery F·%d", len(journalFiles))
 
-				// Flush it if large enough.
-				if mdb.Size() >= writeBuffer {
-					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
-						fr.Close()
-						return err
-					}
+		// Mark file number as used.
+		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
 
-					mdb.Reset()
-				}
+		mem = memdb.New(db.s.icmp, writeBuffer)
+		for _, file := range journalFiles {
+			if err := recoverJournal(file); err != nil {
+				return err
 			}
-
-			fr.Close()
-			of = jf
 		}
 
-		// Flush the last memdb.
-		if mdb.Len() > 0 {
-			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+		// Flush the last journal.
+		if mem.Len() > 0 {
+			if err := cm.flush(mem, 0); err != nil {
 				return err
 			}
 		}
@@ -608,10 +585,8 @@ func (db *DB) recoverJournal() error {
 	}
 
 	// Commit.
-	rec.setJournalNum(db.journalFile.Num())
-	rec.setSeqNum(db.seq)
-	if err := db.s.commit(rec); err != nil {
-		// Close journal on error.
+	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
+		// Close journal.
 		if db.journal != nil {
 			db.journal.Close()
 			db.journalWriter.Close()
@@ -627,103 +602,6 @@ func (db *DB) recoverJournal() error {
 	return nil
 }
 
-func (db *DB) recoverJournalRO() error {
-	// Get all journals and sort it by file number.
-	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
-	if err != nil {
-		return err
-	}
-	files(allJournalFiles).sort()
-
-	// Journals that will be recovered.
-	var recJournalFiles []storage.File
-	for _, jf := range allJournalFiles {
-		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
-			recJournalFiles = append(recJournalFiles, jf)
-		}
-	}
-
-	var (
-		// Options.
-		strict      = db.s.o.GetStrict(opt.StrictJournal)
-		checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
-		writeBuffer = db.s.o.GetWriteBuffer()
-
-		mdb = memdb.New(db.s.icmp, writeBuffer)
-	)
-
-	// Recover journals.
-	if len(recJournalFiles) > 0 {
-		db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
-
-		var (
-			jr    *journal.Reader
-			buf   = &util.Buffer{}
-			batch = &Batch{}
-		)
-
-		for _, jf := range recJournalFiles {
-			db.logf("journal@recovery recovering @%d", jf.Num())
-
-			fr, err := jf.Open()
-			if err != nil {
-				return err
-			}
-
-			// Create or reset journal reader instance.
-			if jr == nil {
-				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
-			} else {
-				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
-			}
-
-			// Replay journal to memdb.
-			for {
-				r, err := jr.Next()
-				if err != nil {
-					if err == io.EOF {
-						break
-					}
-
-					fr.Close()
-					return errors.SetFile(err, jf)
-				}
-
-				buf.Reset()
-				if _, err := buf.ReadFrom(r); err != nil {
-					if err == io.ErrUnexpectedEOF {
-						// This is error returned due to corruption, with strict == false.
-						continue
-					}
-
-					fr.Close()
-					return errors.SetFile(err, jf)
-				}
-				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
-					if !strict && errors.IsCorrupted(err) {
-						db.s.logf("journal error: %v (skipped)", err)
-						// We won't apply sequence number as it might be corrupted.
-						continue
-					}
-
-					fr.Close()
-					return errors.SetFile(err, jf)
-				}
-
-				// Save sequence number.
-				db.seq = batch.seq + uint64(batch.Len())
-			}
-
-			fr.Close()
-		}
-	}
-
-	// Set memDB.
-	db.mem = &memDB{db: db, DB: mdb, ref: 1}
-
-	return nil
-}
-
 func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
 	ikey := newIkey(key, seq, ktSeek)
 
@@ -734,7 +612,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
 		}
 		defer m.decref()
 
-		mk, mv, me := m.Find(ikey)
+		mk, mv, me := m.mdb.Find(ikey)
 		if me == nil {
 			ukey, _, kt, kerr := parseIkey(mk)
 			if kerr != nil {
@@ -772,7 +650,7 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er
 		}
 		defer m.decref()
 
-		mk, _, me := m.Find(ikey)
+		mk, _, me := m.mdb.Find(ikey)
 		if me == nil {
 			ukey, _, kt, kerr := parseIkey(mk)
 			if kerr != nil {
@@ -904,7 +782,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 
 	const prefix = "leveldb."
 	if !strings.HasPrefix(name, prefix) {
-		return "", ErrNotFound
+		return "", errors.New("leveldb: GetProperty: unknown property: " + name)
 	}
 	p := name[len(prefix):]
 
@@ -918,7 +796,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 		var rest string
 		n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
 		if n != 1 || int(level) >= db.s.o.GetNumLevel() {
-			err = ErrNotFound
+			err = errors.New("leveldb: GetProperty: invalid property: " + name)
 		} else {
 			value = fmt.Sprint(v.tLen(int(level)))
 		}
@@ -957,7 +835,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 	case p == "aliveiters":
 		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
 	default:
-		err = ErrNotFound
+		err = errors.New("leveldb: GetProperty: unknown property: " + name)
 	}
 
 	return
@@ -1020,9 +898,6 @@ func (db *DB) Close() error {
 	var err error
 	select {
 	case err = <-db.compErrC:
-		if err == ErrReadOnly {
-			err = nil
-		}
 	default:
 	}
 
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index 2c2f409322e134a286f0d01edb1e98b697e92837..2f14a588fec1b697d89073853761c3184047ea88 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
 )
 
@@ -61,8 +62,58 @@ func (p *cStatsStaging) stopTimer() {
 	}
 }
 
+type cMem struct {
+	s     *session
+	level int
+	rec   *sessionRecord
+}
+
+func newCMem(s *session) *cMem {
+	return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
+}
+
+func (c *cMem) flush(mem *memdb.DB, level int) error {
+	s := c.s
+
+	// Write memdb to table.
+	iter := mem.NewIterator(nil)
+	defer iter.Release()
+	t, n, err := s.tops.createFrom(iter)
+	if err != nil {
+		return err
+	}
+
+	// Pick level.
+	if level < 0 {
+		v := s.version()
+		level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
+		v.release()
+	}
+	c.rec.addTableFile(level, t)
+
+	s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
+
+	c.level = level
+	return nil
+}
+
+func (c *cMem) reset() {
+	c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
+}
+
+func (c *cMem) commit(journal, seq uint64) error {
+	c.rec.setJournalNum(journal)
+	c.rec.setSeqNum(seq)
+
+	// Commit changes.
+	return c.s.commit(c.rec)
+}
+
 func (db *DB) compactionError() {
-	var err error
+	var (
+		err     error
+		wlocked bool
+	)
 noerr:
 	// No error.
 	for {
@@ -70,7 +121,7 @@ noerr:
 		case err = <-db.compErrSetC:
 			switch {
 			case err == nil:
-			case err == ErrReadOnly, errors.IsCorrupted(err):
+			case errors.IsCorrupted(err):
 				goto hasperr
 			default:
 				goto haserr
@@ -88,7 +139,7 @@ haserr:
 			switch {
 			case err == nil:
 				goto noerr
-			case err == ErrReadOnly, errors.IsCorrupted(err):
+			case errors.IsCorrupted(err):
 				goto hasperr
 			default:
 			}
@@ -104,9 +155,9 @@ hasperr:
 		case db.compPerErrC <- err:
 		case db.writeLockC <- struct{}{}:
 			// Hold write lock, so that write won't pass-through.
-			db.compWriteLocking = true
+			wlocked = true
 		case _, _ = <-db.closeC:
-			if db.compWriteLocking {
+			if wlocked {
 				// We should release the lock or Close will hang.
 				<-db.writeLockC
 			}
@@ -236,18 +287,21 @@ func (db *DB) compactionExitTransact() {
 }
 
 func (db *DB) memCompaction() {
-	mdb := db.getFrozenMem()
-	if mdb == nil {
+	mem := db.getFrozenMem()
+	if mem == nil {
 		return
 	}
-	defer mdb.decref()
+	defer mem.decref()
+
+	c := newCMem(db.s)
+	stats := new(cStatsStaging)
 
-	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
+	db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
 
 	// Don't compact empty memdb.
-	if mdb.Len() == 0 {
-		db.logf("memdb@flush skipping")
-		// drop frozen memdb
+	if mem.mdb.Len() == 0 {
+		db.logf("mem@flush skipping")
+		// drop frozen mem
 		db.dropFrozenMem()
 		return
 	}
@@ -263,20 +317,13 @@ func (db *DB) memCompaction() {
 		return
 	}
 
-	var (
-		rec        = &sessionRecord{}
-		stats      = &cStatsStaging{}
-		flushLevel int
-	)
-
-	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
+	db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
 		stats.startTimer()
-		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
-		stats.stopTimer()
-		return
+		defer stats.stopTimer()
+		return c.flush(mem.mdb, -1)
 	}, func() error {
-		for _, r := range rec.addedTables {
-			db.logf("memdb@flush revert @%d", r.num)
+		for _, r := range c.rec.addedTables {
+			db.logf("mem@flush revert @%d", r.num)
 			f := db.s.getTableFile(r.num)
 			if err := f.Remove(); err != nil {
 				return err
@@ -285,23 +332,20 @@ func (db *DB) memCompaction() {
 		return nil
 	})
 
-	db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
+	db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
 		stats.startTimer()
-		rec.setJournalNum(db.journalFile.Num())
-		rec.setSeqNum(db.frozenSeq)
-		err = db.s.commit(rec)
-		stats.stopTimer()
-		return
+		defer stats.stopTimer()
+		return c.commit(db.journalFile.Num(), db.frozenSeq)
 	}, nil)
 
-	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
+	db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
 
-	for _, r := range rec.addedTables {
+	for _, r := range c.rec.addedTables {
 		stats.write += r.size
 	}
-	db.compStats[flushLevel].add(stats)
+	db.compStats[c.level].add(stats)
 
-	// Drop frozen memdb.
+	// Drop frozen mem.
 	db.dropFrozenMem()
 
 	// Resume table compaction.
@@ -513,7 +557,7 @@ func (b *tableCompactionBuilder) revert() error {
 func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
 	defer c.release()
 
-	rec := &sessionRecord{}
+	rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
 	rec.addCompPtr(c.level, c.imax)
 
 	if !noTrivial && c.trivial() {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
index 453db5b1d20190957631cc2898b96ae72df1f2df..626369271785310282175947f10a2b473a47c5d4 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -8,7 +8,6 @@ package leveldb
 
 import (
 	"errors"
-	"math/rand"
 	"runtime"
 	"sync"
 	"sync/atomic"
@@ -40,11 +39,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 	ti := v.getIterators(slice, ro)
 	n := len(ti) + 2
 	i := make([]iterator.Iterator, 0, n)
-	emi := em.NewIterator(slice)
+	emi := em.mdb.NewIterator(slice)
 	emi.SetReleaser(&memdbReleaser{m: em})
 	i = append(i, emi)
 	if fm != nil {
-		fmi := fm.NewIterator(slice)
+		fmi := fm.mdb.NewIterator(slice)
 		fmi.SetReleaser(&memdbReleaser{m: fm})
 		i = append(i, fmi)
 	}
@@ -81,10 +80,6 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
 	return iter
 }
 
-func (db *DB) iterSamplingRate() int {
-	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
-}
-
 type dir int
 
 const (
@@ -103,21 +98,11 @@ type dbIter struct {
 	seq    uint64
 	strict bool
 
-	smaplingGap int
-	dir         dir
-	key         []byte
-	value       []byte
-	err         error
-	releaser    util.Releaser
-}
-
-func (i *dbIter) sampleSeek() {
-	ikey := i.iter.Key()
-	i.smaplingGap -= len(ikey) + len(i.iter.Value())
-	for i.smaplingGap < 0 {
-		i.smaplingGap += i.db.iterSamplingRate()
-		i.db.sampleSeek(ikey)
-	}
+	dir      dir
+	key      []byte
+	value    []byte
+	err      error
+	releaser util.Releaser
 }
 
 func (i *dbIter) setErr(err error) {
@@ -190,7 +175,6 @@ func (i *dbIter) Seek(key []byte) bool {
 func (i *dbIter) next() bool {
 	for {
 		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
-			i.sampleSeek()
 			if seq <= i.seq {
 				switch kt {
 				case ktDel:
@@ -241,7 +225,6 @@ func (i *dbIter) prev() bool {
 	if i.iter.Valid() {
 		for {
 			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
-				i.sampleSeek()
 				if seq <= i.seq {
 					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
@@ -283,7 +266,6 @@ func (i *dbIter) Prev() bool {
 	case dirForward:
 		for i.iter.Prev() {
 			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
-				i.sampleSeek()
 				if i.icmp.uCompare(ukey, i.key) < 0 {
 					goto cont
 				}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
index 87cd33adc6b81ff39e6e2eb7a7b9dcf9823e8d4e..ddad4d4a9bcb7b4631c37910443719bd8e033ada 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -15,8 +15,8 @@ import (
 )
 
 type memDB struct {
-	db *DB
-	*memdb.DB
+	db  *DB
+	mdb *memdb.DB
 	ref int32
 }
 
@@ -27,12 +27,12 @@ func (m *memDB) incref() {
 func (m *memDB) decref() {
 	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
 		// Only put back memdb with std capacity.
-		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
-			m.Reset()
-			m.db.mpoolPut(m.DB)
+		if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.mdb.Reset()
+			m.db.mpoolPut(m.mdb)
 		}
 		m.db = nil
-		m.DB = nil
+		m.mdb = nil
 	} else if ref < 0 {
 		panic("negative memdb ref")
 	}
@@ -48,15 +48,6 @@ func (db *DB) addSeq(delta uint64) {
 	atomic.AddUint64(&db.seq, delta)
 }
 
-func (db *DB) sampleSeek(ikey iKey) {
-	v := db.s.version()
-	if v.sampleSeek(ikey) {
-		// Trigger table compaction.
-		db.compSendTrigger(db.tcompCmdC)
-	}
-	v.release()
-}
-
 func (db *DB) mpoolPut(mem *memdb.DB) {
 	defer func() {
 		recover()
@@ -126,7 +117,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
 	}
 	mem = &memDB{
 		db:  db,
-		DB:  mdb,
+		mdb: mdb,
 		ref: 2,
 	}
 	db.mem = mem
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
index 108fdc00b3d6cd6b072b81135c76ce45329c6f21..b2e0fb36f2eca1271313ef1a5c3df75f0c2f9ca3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
@@ -405,21 +405,19 @@ func (h *dbHarness) compactRange(min, max string) {
 	t.Log("DB range compaction done")
 }
 
-func (h *dbHarness) sizeOf(start, limit string) uint64 {
-	sz, err := h.db.SizeOf([]util.Range{
+func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+	t := h.t
+	db := h.db
+
+	s, err := db.SizeOf([]util.Range{
 		{[]byte(start), []byte(limit)},
 	})
 	if err != nil {
-		h.t.Error("SizeOf: got error: ", err)
+		t.Error("SizeOf: got error: ", err)
 	}
-	return sz.Sum()
-}
-
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
-	sz := h.sizeOf(start, limit)
-	if sz < low || sz > hi {
-		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
-			shorten(start), shorten(limit), low, hi, sz)
+	if s.Sum() < low || s.Sum() > hi {
+		t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
+			shorten(start), shorten(limit), low, hi, s.Sum())
 	}
 }
 
@@ -2445,7 +2443,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		rec := &sessionRecord{}
+		rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
 		rec.addTableFile(i, tf)
 		if err := s.commit(rec); err != nil {
 			t.Fatal(err)
@@ -2455,7 +2453,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
 	// Build grandparent.
 	v := s.version()
 	c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-	rec := &sessionRecord{}
+	rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
 	b := &tableCompactionBuilder{
 		s:         s,
 		c:         c,
@@ -2479,7 +2477,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
 	// Build level-1.
 	v = s.version()
 	c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
-	rec = &sessionRecord{}
+	rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
 	b = &tableCompactionBuilder{
 		s:         s,
 		c:         c,
@@ -2523,7 +2521,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
 	// Compaction with transient error.
 	v = s.version()
 	c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-	rec = &sessionRecord{}
+	rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
 	b = &tableCompactionBuilder{
 		s:         s,
 		c:         c,
@@ -2579,123 +2577,3 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
 	}
 	v.release()
 }
-
-func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
-	const (
-		vSize = 200 * opt.KiB
-		tSize = 100 * opt.MiB
-		mIter = 100
-		n     = tSize / vSize
-	)
-
-	h := newDbHarnessWopt(t, &opt.Options{
-		Compression:       opt.NoCompression,
-		DisableBlockCache: true,
-	})
-	defer h.close()
-
-	key := func(x int) string {
-		return fmt.Sprintf("v%06d", x)
-	}
-
-	// Fill.
-	value := strings.Repeat("x", vSize)
-	for i := 0; i < n; i++ {
-		h.put(key(i), value)
-	}
-	h.compactMem()
-
-	// Delete all.
-	for i := 0; i < n; i++ {
-		h.delete(key(i))
-	}
-	h.compactMem()
-
-	var (
-		limit = n / limitDiv
-
-		startKey = key(0)
-		limitKey = key(limit)
-		maxKey   = key(n)
-		slice    = &util.Range{Limit: []byte(limitKey)}
-
-		initialSize0 = h.sizeOf(startKey, limitKey)
-		initialSize1 = h.sizeOf(limitKey, maxKey)
-	)
-
-	t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
-
-	for r := 0; true; r++ {
-		if r >= mIter {
-			t.Fatal("taking too long to compact")
-		}
-
-		// Iterates.
-		iter := h.db.NewIterator(slice, h.ro)
-		for iter.Next() {
-		}
-		if err := iter.Error(); err != nil {
-			t.Fatalf("Iter err: %v", err)
-		}
-		iter.Release()
-
-		// Wait compaction.
-		h.waitCompaction()
-
-		// Check size.
-		size0 := h.sizeOf(startKey, limitKey)
-		size1 := h.sizeOf(limitKey, maxKey)
-		t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
-		if size0 < initialSize0/10 {
-			break
-		}
-	}
-
-	if initialSize1 > 0 {
-		h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
-	}
-}
-
-func TestDB_IterTriggeredCompaction(t *testing.T) {
-	testDB_IterTriggeredCompaction(t, 1)
-}
-
-func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
-	testDB_IterTriggeredCompaction(t, 2)
-}
-
-func TestDB_ReadOnly(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("foo", "v1")
-	h.put("bar", "v2")
-	h.compactMem()
-
-	h.put("xfoo", "v1")
-	h.put("xbar", "v2")
-
-	t.Log("Trigger read-only")
-	if err := h.db.SetReadOnly(); err != nil {
-		h.close()
-		t.Fatalf("SetReadOnly error: %v", err)
-	}
-
-	h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
-
-	ro := func(key, value, wantValue string) {
-		if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
-			t.Fatalf("unexpected error: %v", err)
-		}
-		h.getVal(key, wantValue)
-	}
-
-	ro("foo", "vx", "v1")
-
-	h.o.ReadOnly = true
-	h.reopenDB()
-
-	ro("foo", "vx", "v1")
-	ro("bar", "vx", "v2")
-	h.assertNumKeys(4)
-}
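
The folded-in sizeAssert above is the only surviving harness helper; the rest of this file's hunks drop the iterator-triggered-compaction and read-only tests, whose supporting code disappears from db_write.go and errors.go below. As a rough sketch of the same size-range check against the vendored API (the helper name and error text here are ours, not goleveldb's):

package main

import (
	"fmt"

	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)

// assertSizeRange sums the approximate on-disk size of one key range and
// checks it against [low, hi], mirroring the test harness's sizeAssert.
func assertSizeRange(db *leveldb.DB, start, limit string, low, hi uint64) error {
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte(start), Limit: []byte(limit)},
	})
	if err != nil {
		return err
	}
	if sum := sizes.Sum(); sum < low || sum > hi {
		return fmt.Errorf("size of %q..%q = %d, want %d-%d", start, limit, sum, low, hi)
	}
	return nil
}

func main() {
	db, err := leveldb.OpenFile("/tmp/size-example", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println(assertSizeRange(db, "a", "z", 0, 1<<30)) // <nil> on an empty DB
}
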
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
index 476ec3a8384aa6bc98390a5bb70b86256054d0f8..0f7ba29892ba34ba2badbd5d84d876a1e4ef9a74 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -63,24 +63,24 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
 	return
 }
 
-func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
+func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
 	delayed := false
 	flush := func() (retry bool) {
 		v := db.s.version()
 		defer v.release()
-		mdb = db.getEffectiveMem()
+		mem = db.getEffectiveMem()
 		defer func() {
 			if retry {
-				mdb.decref()
-				mdb = nil
+				mem.decref()
+				mem = nil
 			}
 		}()
-		mdbFree = mdb.Free()
+		nn = mem.mdb.Free()
 		switch {
 		case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
 			delayed = true
 			time.Sleep(time.Millisecond)
-		case mdbFree >= n:
+		case nn >= n:
 			return false
 		case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
 			delayed = true
@@ -90,15 +90,15 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
 			}
 		default:
 			// Allow memdb to grow if it has no entry.
-			if mdb.Len() == 0 {
-				mdbFree = n
+			if mem.mdb.Len() == 0 {
+				nn = n
 			} else {
-				mdb.decref()
-				mdb, err = db.rotateMem(n)
+				mem.decref()
+				mem, err = db.rotateMem(n)
 				if err == nil {
-					mdbFree = mdb.Free()
+					nn = mem.mdb.Free()
 				} else {
-					mdbFree = 0
+					nn = 0
 				}
 			}
 			return false
@@ -157,18 +157,18 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
 		}
 	}()
 
-	mdb, mdbFree, err := db.flush(b.size())
+	mem, memFree, err := db.flush(b.size())
 	if err != nil {
 		return
 	}
-	defer mdb.decref()
+	defer mem.decref()
 
 	// Calculate maximum size of the batch.
 	m := 1 << 20
 	if x := b.size(); x <= 128<<10 {
 		m = x + (128 << 10)
 	}
-	m = minInt(m, mdbFree)
+	m = minInt(m, memFree)
 
 	// Merge with other batch.
 drain:
@@ -197,7 +197,7 @@ drain:
 		select {
 		case db.journalC <- b:
 			// Write into memdb
-			if berr := b.memReplay(mdb.DB); berr != nil {
+			if berr := b.memReplay(mem.mdb); berr != nil {
 				panic(berr)
 			}
 		case err = <-db.compPerErrC:
@@ -211,7 +211,7 @@ drain:
 		case err = <-db.journalAckC:
 			if err != nil {
 				// Revert memdb if error detected
-				if berr := b.revertMemReplay(mdb.DB); berr != nil {
+				if berr := b.revertMemReplay(mem.mdb); berr != nil {
 					panic(berr)
 				}
 				return
@@ -225,7 +225,7 @@ drain:
 		if err != nil {
 			return
 		}
-		if berr := b.memReplay(mdb.DB); berr != nil {
+		if berr := b.memReplay(mem.mdb); berr != nil {
 			panic(berr)
 		}
 	}
@@ -233,7 +233,7 @@ drain:
 	// Set last seq number.
 	db.addSeq(uint64(b.Len()))
 
-	if b.size() >= mdbFree {
+	if b.size() >= memFree {
 		db.rotateMem(0)
 	}
 	return
@@ -249,7 +249,8 @@ func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
 	return db.Write(b, wo)
 }
 
-// Delete deletes the value for the given key.
+// Delete deletes the value for the given key. Deleting a key that the DB
+// does not contain returns no error.
 //
 // It is safe to modify the contents of the arguments after Delete returns.
 func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
@@ -289,9 +290,9 @@ func (db *DB) CompactRange(r util.Range) error {
 	}
 
 	// Check for overlaps in memdb.
-	mdb := db.getEffectiveMem()
-	defer mdb.decref()
-	if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+	mem := db.getEffectiveMem()
+	defer mem.decref()
+	if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
 		// Memdb compaction.
 		if _, err := db.rotateMem(0); err != nil {
 			<-db.writeLockC
@@ -308,31 +309,3 @@ func (db *DB) CompactRange(r util.Range) error {
 	// Table compaction.
 	return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
 }
-
-// SetReadOnly makes DB read-only. It will stay read-only until reopened.
-func (db *DB) SetReadOnly() error {
-	if err := db.ok(); err != nil {
-		return err
-	}
-
-	// Lock writer.
-	select {
-	case db.writeLockC <- struct{}{}:
-		db.compWriteLocking = true
-	case err := <-db.compPerErrC:
-		return err
-	case _, _ = <-db.closeC:
-		return ErrClosed
-	}
-
-	// Set compaction read-only.
-	select {
-	case db.compErrSetC <- ErrReadOnly:
-	case perr := <-db.compPerErrC:
-		return perr
-	case _, _ = <-db.closeC:
-		return ErrClosed
-	}
-
-	return nil
-}
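
For context on the renamed flush results above: Write caps how much batch merging it will do at 1 MiB, lets batches of up to 128 KiB grow by another 128 KiB, and never exceeds the memdb's free bytes. A standalone sketch of that arithmetic (illustrative only):

package main

import "fmt"

// maxMergeSize reproduces the cap Write computes before merging batches:
// m = min(1 MiB, or size+128 KiB for small batches; memdb free bytes).
func maxMergeSize(batchSize, memFree int) int {
	m := 1 << 20 // 1 MiB ceiling
	if batchSize <= 128<<10 {
		m = batchSize + 128<<10 // small batches get 128 KiB of headroom
	}
	if m > memFree {
		m = memFree // never merge past the memdb's free space
	}
	return m
}

func main() {
	fmt.Println(maxMergeSize(4<<10, 2<<20))   // 135168: headroom applies
	fmt.Println(maxMergeSize(512<<10, 1<<19)) // 524288: clamped to memFree
}
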
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
index f45199d11d6f916f8d7a105c83a01c9a60735792..d5ba768413b5d23b0ddbb4a4343a232e7bdd8d52 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -12,7 +12,6 @@ import (
 
 var (
 	ErrNotFound         = errors.ErrNotFound
-	ErrReadOnly         = errors.New("leveldb: read-only mode")
 	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
 	ErrIterReleased     = errors.New("leveldb: iterator released")
 	ErrClosed           = errors.New("leveldb: closed")
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go
similarity index 98%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
rename to Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go
index 0dd60fd829bb83fbe497a9838e6c73e462729c6b..e76657e5ec3ec065e80fc4335cb5dffb92a98277 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go
@@ -4,7 +4,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// +build !go1.2
+// +build go1.3
 
 package leveldb
 
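The corrected build constraint matters more than the rename: release tags are cumulative, so "go1.3" selects Go 1.3 and newer, while the old "!go1.2" matched only toolchains older than Go 1.2 and silently excluded these benchmarks everywhere else. A minimal file using the same convention:

// Release tags are cumulative: a Go 1.5 toolchain defines go1.1 through
// go1.5, so the constraint below compiles this file on Go 1.3 and newer.

// +build go1.3

package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Println("built with", runtime.Version()) // always go1.3+ here
}
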
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
index c90c3561d25aa7dc789351348fbf6041cfaf75d8..67c7254c482f543afc91ef6ca2353cd488a39c87 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -206,7 +206,6 @@ func (p *DB) randHeight() (h int) {
 	return
 }
 
-// Must hold RW-lock if prev == true, as it use shared prevNode slice.
 func (p *DB) findGE(key []byte, prev bool) (int, bool) {
 	node := 0
 	h := p.maxHeight - 1
@@ -303,7 +302,7 @@ func (p *DB) Put(key []byte, value []byte) error {
 	node := len(p.nodeData)
 	p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
 	for i, n := range p.prevNode[:h] {
-		m := n + nNext + i
+		m := n + 4 + i
 		p.nodeData = append(p.nodeData, p.nodeData[m])
 		p.nodeData[m] = node
 	}
@@ -435,22 +434,20 @@ func (p *DB) Len() int {
 
 // Reset resets the DB to initial empty state. Allows reuse of the buffer.
 func (p *DB) Reset() {
-	p.mu.Lock()
 	p.rnd = rand.New(rand.NewSource(0xdeadbeef))
 	p.maxHeight = 1
 	p.n = 0
 	p.kvSize = 0
 	p.kvData = p.kvData[:0]
-	p.nodeData = p.nodeData[:nNext+tMaxHeight]
+	p.nodeData = p.nodeData[:4+tMaxHeight]
 	p.nodeData[nKV] = 0
 	p.nodeData[nKey] = 0
 	p.nodeData[nVal] = 0
 	p.nodeData[nHeight] = tMaxHeight
 	for n := 0; n < tMaxHeight; n++ {
-		p.nodeData[nNext+n] = 0
+		p.nodeData[4+n] = 0
 		p.prevNode[n] = 0
 	}
-	p.mu.Unlock()
 }
 
 // New creates a new initialized in-memory key/value DB. The capacity
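
The memdb hunks above inline the node-layout constant (nNext == 4): a skip-list node in nodeData is [kvOffset, keyLen, valLen, height] followed by height next pointers, so next pointer i of the node at offset n lives at n+4+i. A toy reconstruction of the layout (not the vendored code):

package main

import "fmt"

// Field offsets within a memdb skip-list node; nNext is the literal 4
// that these hunks substitute for the removed named constant.
const (
	nKV     = iota // offset of key/value bytes in kvData
	nKey           // key length
	nVal           // value length
	nHeight        // tower height
	nNext          // == 4: first of `height` next pointers
)

func main() {
	// One node of height 2 at offset 0: kvOffset=0, keyLen=3, valLen=5.
	nodeData := []int{0, 3, 5, 2, 0, 0}
	node := 0
	for i := 0; i < nodeData[node+nHeight]; i++ {
		fmt.Printf("next[%d] is nodeData[%d]\n", i, node+nNext+i)
	}
}
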
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
index 09cf9b6cffedfe9ef4f2efaaddcdce462c5fd37f..abf0f7ea72ba55afa228e6986c92583d13a89572 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -34,11 +34,10 @@ var (
 	DefaultCompactionTotalSize           = 10 * MiB
 	DefaultCompactionTotalSizeMultiplier = 10.0
 	DefaultCompressionType               = SnappyCompression
-	DefaultIteratorSamplingRate          = 1 * MiB
-	DefaultMaxMemCompationLevel          = 2
-	DefaultNumLevel                      = 7
 	DefaultOpenFilesCacher               = LRUCacher
 	DefaultOpenFilesCacheCapacity        = 500
+	DefaultMaxMemCompationLevel          = 2
+	DefaultNumLevel                      = 7
 	DefaultWriteBuffer                   = 4 * MiB
 	DefaultWriteL0PauseTrigger           = 12
 	DefaultWriteL0SlowdownTrigger        = 8
@@ -154,7 +153,7 @@ type Options struct {
 	BlockCacher Cacher
 
 	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
-	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
+	// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
 	//
 	// The default value is 8MiB.
 	BlockCacheCapacity int
@@ -250,11 +249,6 @@ type Options struct {
 	// The default value (DefaultCompression) uses snappy compression.
 	Compression Compression
 
-	// DisableBufferPool allows disable use of util.BufferPool functionality.
-	//
-	// The default value is false.
-	DisableBufferPool bool
-
 	// DisableBlockCache allows disable use of cache.Cache functionality on
 	// 'sorted table' block.
 	//
@@ -294,13 +288,6 @@ type Options struct {
 	// The default value is nil.
 	Filter filter.Filter
 
-	// IteratorSamplingRate defines approximate gap (in bytes) between read
-	// sampling of an iterator. The samples will be used to determine when
-	// compaction should be triggered.
-	//
-	// The default is 1MiB.
-	IteratorSamplingRate int
-
 	// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
 	// will be pushed into if it doesn't create overlap. This should be less than
 	// NumLevel. Use -1 for level-0.
@@ -321,16 +308,11 @@ type Options struct {
 	OpenFilesCacher Cacher
 
 	// OpenFilesCacheCapacity defines the capacity of the open files caching.
-	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
+	// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
 	//
 	// The default value is 500.
 	OpenFilesCacheCapacity int
 
-	// If true then opens DB in read-only mode.
-	//
-	// The default value is false.
-	ReadOnly bool
-
 	// Strict defines the DB strict level.
 	Strict Strict
 
@@ -373,9 +355,9 @@ func (o *Options) GetBlockCacher() Cacher {
 }
 
 func (o *Options) GetBlockCacheCapacity() int {
-	if o == nil || o.BlockCacheCapacity == 0 {
+	if o == nil || o.BlockCacheCapacity <= 0 {
 		return DefaultBlockCacheCapacity
-	} else if o.BlockCacheCapacity < 0 {
+	} else if o.BlockCacheCapacity == -1 {
 		return 0
 	}
 	return o.BlockCacheCapacity
@@ -482,20 +464,6 @@ func (o *Options) GetCompression() Compression {
 	return o.Compression
 }
 
-func (o *Options) GetDisableBufferPool() bool {
-	if o == nil {
-		return false
-	}
-	return o.DisableBufferPool
-}
-
-func (o *Options) GetDisableBlockCache() bool {
-	if o == nil {
-		return false
-	}
-	return o.DisableBlockCache
-}
-
 func (o *Options) GetDisableCompactionBackoff() bool {
 	if o == nil {
 		return false
@@ -524,19 +492,12 @@ func (o *Options) GetFilter() filter.Filter {
 	return o.Filter
 }
 
-func (o *Options) GetIteratorSamplingRate() int {
-	if o == nil || o.IteratorSamplingRate <= 0 {
-		return DefaultIteratorSamplingRate
-	}
-	return o.IteratorSamplingRate
-}
-
 func (o *Options) GetMaxMemCompationLevel() int {
 	level := DefaultMaxMemCompationLevel
 	if o != nil {
 		if o.MaxMemCompationLevel > 0 {
 			level = o.MaxMemCompationLevel
-		} else if o.MaxMemCompationLevel < 0 {
+		} else if o.MaxMemCompationLevel == -1 {
 			level = 0
 		}
 	}
@@ -564,21 +525,14 @@ func (o *Options) GetOpenFilesCacher() Cacher {
 }
 
 func (o *Options) GetOpenFilesCacheCapacity() int {
-	if o == nil || o.OpenFilesCacheCapacity == 0 {
+	if o == nil || o.OpenFilesCacheCapacity <= 0 {
 		return DefaultOpenFilesCacheCapacity
-	} else if o.OpenFilesCacheCapacity < 0 {
+	} else if o.OpenFilesCacheCapacity == -1 {
 		return 0
 	}
 	return o.OpenFilesCacheCapacity
 }
 
-func (o *Options) GetReadOnly() bool {
-	if o == nil {
-		return false
-	}
-	return o.ReadOnly
-}
-
 func (o *Options) GetStrict(strict Strict) bool {
 	if o == nil || o.Strict == 0 {
 		return DefaultStrict&strict != 0
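
One reading of the reverted capacity getters above is worth noting: because the "<= 0" test comes first, the "== -1" branch is unreachable, so -1 yields the default capacity rather than the zero the doc comment promises. A stand-alone rendering of that logic:

package main

import "fmt"

const defaultBlockCacheCapacity = 8 << 20 // 8 MiB, per the option's docs

// getBlockCacheCapacity mirrors the getter as written in this revision.
// Since -1 satisfies capacity <= 0, the == -1 branch is dead code and -1
// does not actually disable the cache.
func getBlockCacheCapacity(capacity int) int {
	if capacity <= 0 {
		return defaultBlockCacheCapacity
	} else if capacity == -1 {
		return 0 // unreachable
	}
	return capacity
}

func main() {
	fmt.Println(getBlockCacheCapacity(0))  // 8388608 (default)
	fmt.Println(getBlockCacheCapacity(-1)) // 8388608 as well, not 0
}
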
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
index cb07209eb0544b2c02d6af299c8dbd0886bb46ff..e8c81572a542c1b0b14698e5b761beeca27e7d53 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
@@ -11,8 +11,10 @@ import (
 	"io"
 	"os"
 	"sync"
+	"sync/atomic"
 
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
@@ -125,16 +127,11 @@ func (s *session) recover() (err error) {
 		return
 	}
 	defer reader.Close()
+	strict := s.o.GetStrict(opt.StrictManifest)
+	jr := journal.NewReader(reader, dropper{s, m}, strict, true)
 
-	var (
-		// Options.
-		numLevel = s.o.GetNumLevel()
-		strict   = s.o.GetStrict(opt.StrictManifest)
-
-		jr      = journal.NewReader(reader, dropper{s, m}, strict, true)
-		rec     = &sessionRecord{}
-		staging = s.stVersion.newStaging()
-	)
+	staging := s.stVersion.newStaging()
+	rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
 	for {
 		var r io.Reader
 		r, err = jr.Next()
@@ -146,7 +143,7 @@ func (s *session) recover() (err error) {
 			return errors.SetFile(err, m)
 		}
 
-		err = rec.decode(r, numLevel)
+		err = rec.decode(r)
 		if err == nil {
 			// save compact pointers
 			for _, r := range rec.compPtrs {
@@ -209,3 +206,250 @@ func (s *session) commit(r *sessionRecord) (err error) {
 
 	return
 }
+
+// Pick a compaction based on current state; need external synchronization.
+func (s *session) pickCompaction() *compaction {
+	v := s.version()
+
+	var level int
+	var t0 tFiles
+	if v.cScore >= 1 {
+		level = v.cLevel
+		cptr := s.stCompPtrs[level]
+		tables := v.tables[level]
+		for _, t := range tables {
+			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+				t0 = append(t0, t)
+				break
+			}
+		}
+		if len(t0) == 0 {
+			t0 = append(t0, tables[0])
+		}
+	} else {
+		if p := atomic.LoadPointer(&v.cSeek); p != nil {
+			ts := (*tSet)(p)
+			level = ts.level
+			t0 = append(t0, ts.table)
+		} else {
+			v.release()
+			return nil
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+	v := s.version()
+
+	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
+	if len(t0) == 0 {
+		v.release()
+		return nil
+	}
+
+	// Avoid compacting too much in one shot in case the range is large.
+	// But we cannot do this for level-0 since level-0 files can overlap
+	// and we must not pick one file and drop another older file if the
+	// two files overlap.
+	if level > 0 {
+		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
+		total := uint64(0)
+		for i, t := range t0 {
+			total += t.size
+			if total >= limit {
+				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+				t0 = t0[:i+1]
+				break
+			}
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
+func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
+	c := &compaction{
+		s:             s,
+		v:             v,
+		level:         level,
+		tables:        [2]tFiles{t0, nil},
+		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
+		tPtrs:         make([]int, s.o.GetNumLevel()),
+	}
+	c.expand()
+	c.save()
+	return c
+}
+
+// compaction represents a compaction state.
+type compaction struct {
+	s *session
+	v *version
+
+	level         int
+	tables        [2]tFiles
+	maxGPOverlaps uint64
+
+	gp                tFiles
+	gpi               int
+	seenKey           bool
+	gpOverlappedBytes uint64
+	imin, imax        iKey
+	tPtrs             []int
+	released          bool
+
+	snapGPI               int
+	snapSeenKey           bool
+	snapGPOverlappedBytes uint64
+	snapTPtrs             []int
+}
+
+func (c *compaction) save() {
+	c.snapGPI = c.gpi
+	c.snapSeenKey = c.seenKey
+	c.snapGPOverlappedBytes = c.gpOverlappedBytes
+	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+func (c *compaction) restore() {
+	c.gpi = c.snapGPI
+	c.seenKey = c.snapSeenKey
+	c.gpOverlappedBytes = c.snapGPOverlappedBytes
+	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+func (c *compaction) release() {
+	if !c.released {
+		c.released = true
+		c.v.release()
+	}
+}
+
+// Expand compacted tables; need external synchronization.
+func (c *compaction) expand() {
+	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
+	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
+
+	t0, t1 := c.tables[0], c.tables[1]
+	imin, imax := t0.getRange(c.s.icmp)
+	// We expand t0 here just in case a ukey hops across tables.
+	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
+	if len(t0) != len(c.tables[0]) {
+		imin, imax = t0.getRange(c.s.icmp)
+	}
+	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+	// Get entire range covered by compaction.
+	amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+	// See if we can grow the number of inputs in "level" without
+	// changing the number of "level+1" files we pick up.
+	if len(t1) > 0 {
+		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
+		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+			xmin, xmax := exp0.getRange(c.s.icmp)
+			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+			if len(exp1) == len(t1) {
+				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+				imin, imax = xmin, xmax
+				t0, t1 = exp0, exp1
+				amin, amax = append(t0, t1...).getRange(c.s.icmp)
+			}
+		}
+	}
+
+	// Compute the set of grandparent files that overlap this compaction
+	// (parent == level+1; grandparent == level+2)
+	if c.level+2 < c.s.o.GetNumLevel() {
+		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+	}
+
+	c.tables[0], c.tables[1] = t0, t1
+	c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+	for level, tables := range c.v.tables[c.level+2:] {
+		for c.tPtrs[level] < len(tables) {
+			t := tables[c.tPtrs[level]]
+			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+				// We've advanced far enough.
+				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					// Key falls in this file's range, so definitely not base level.
+					return false
+				}
+				break
+			}
+			c.tPtrs[level]++
+		}
+	}
+	return true
+}
+
+func (c *compaction) shouldStopBefore(ikey iKey) bool {
+	for ; c.gpi < len(c.gp); c.gpi++ {
+		gp := c.gp[c.gpi]
+		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+			break
+		}
+		if c.seenKey {
+			c.gpOverlappedBytes += gp.size
+		}
+	}
+	c.seenKey = true
+
+	if c.gpOverlappedBytes > c.maxGPOverlaps {
+		// Too much overlap for current output; start new output.
+		c.gpOverlappedBytes = 0
+		return true
+	}
+	return false
+}
+
+// Creates an iterator.
+func (c *compaction) newIterator() iterator.Iterator {
+	// Creates iterator slice.
+	icap := len(c.tables)
+	if c.level == 0 {
+		// Special case for level-0
+		icap = len(c.tables[0]) + 1
+	}
+	its := make([]iterator.Iterator, 0, icap)
+
+	// Options.
+	ro := &opt.ReadOptions{
+		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
+	}
+
+	for i, tables := range c.tables {
+		if len(tables) == 0 {
+			continue
+		}
+
+		// Level-0 is not sorted and its files may overlap each other.
+		if c.level+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
+			}
+		} else {
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+			its = append(its, it)
+		}
+	}
+
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
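
shouldStopBefore above ends a compaction output table once the bytes it overlaps in the grandparent level exceed maxGPOverlaps, which bounds the cost of future compactions of that output. A simplified sketch of the accounting (sizes only, no keys or iterators):

package main

import "fmt"

// splitPoints walks grandparent table sizes in order and reports where a
// new output table should start, mimicking gpOverlappedBytes above.
func splitPoints(gpSizes []uint64, maxOverlap uint64) []int {
	var cuts []int
	var overlapped uint64
	for i, size := range gpSizes {
		overlapped += size
		if overlapped > maxOverlap {
			cuts = append(cuts, i) // too much overlap: cut a new output
			overlapped = 0
		}
	}
	return cuts
}

func main() {
	// Five 4-byte grandparent tables against a 10-byte overlap budget.
	fmt.Println(splitPoints([]uint64{4, 4, 4, 4, 4}, 10)) // [2]
}
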
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
deleted file mode 100644
index 0c785f4d31ee454f618163a137413874682a07bf..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	"sync/atomic"
-
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
-)
-
-func (s *session) pickMemdbLevel(umin, umax []byte) int {
-	v := s.version()
-	defer v.release()
-	return v.pickMemdbLevel(umin, umax)
-}
-
-func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
-	// Create sorted table.
-	iter := mdb.NewIterator(nil)
-	defer iter.Release()
-	t, n, err := s.tops.createFrom(iter)
-	if err != nil {
-		return level, err
-	}
-
-	// Pick level and add to record.
-	if level < 0 {
-		level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
-	}
-	rec.addTableFile(level, t)
-
-	s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
-	return level, nil
-}
-
-// Pick a compaction based on current state; need external synchronization.
-func (s *session) pickCompaction() *compaction {
-	v := s.version()
-
-	var level int
-	var t0 tFiles
-	if v.cScore >= 1 {
-		level = v.cLevel
-		cptr := s.stCompPtrs[level]
-		tables := v.tables[level]
-		for _, t := range tables {
-			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
-				t0 = append(t0, t)
-				break
-			}
-		}
-		if len(t0) == 0 {
-			t0 = append(t0, tables[0])
-		}
-	} else {
-		if p := atomic.LoadPointer(&v.cSeek); p != nil {
-			ts := (*tSet)(p)
-			level = ts.level
-			t0 = append(t0, ts.table)
-		} else {
-			v.release()
-			return nil
-		}
-	}
-
-	return newCompaction(s, v, level, t0)
-}
-
-// Create compaction from given level and range; need external synchronization.
-func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
-	v := s.version()
-
-	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
-	if len(t0) == 0 {
-		v.release()
-		return nil
-	}
-
-	// Avoid compacting too much in one shot in case the range is large.
-	// But we cannot do this for level-0 since level-0 files can overlap
-	// and we must not pick one file and drop another older file if the
-	// two files overlap.
-	if level > 0 {
-		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
-		total := uint64(0)
-		for i, t := range t0 {
-			total += t.size
-			if total >= limit {
-				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
-				t0 = t0[:i+1]
-				break
-			}
-		}
-	}
-
-	return newCompaction(s, v, level, t0)
-}
-
-func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
-	c := &compaction{
-		s:             s,
-		v:             v,
-		level:         level,
-		tables:        [2]tFiles{t0, nil},
-		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
-		tPtrs:         make([]int, s.o.GetNumLevel()),
-	}
-	c.expand()
-	c.save()
-	return c
-}
-
-// compaction represent a compaction state.
-type compaction struct {
-	s *session
-	v *version
-
-	level         int
-	tables        [2]tFiles
-	maxGPOverlaps uint64
-
-	gp                tFiles
-	gpi               int
-	seenKey           bool
-	gpOverlappedBytes uint64
-	imin, imax        iKey
-	tPtrs             []int
-	released          bool
-
-	snapGPI               int
-	snapSeenKey           bool
-	snapGPOverlappedBytes uint64
-	snapTPtrs             []int
-}
-
-func (c *compaction) save() {
-	c.snapGPI = c.gpi
-	c.snapSeenKey = c.seenKey
-	c.snapGPOverlappedBytes = c.gpOverlappedBytes
-	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
-}
-
-func (c *compaction) restore() {
-	c.gpi = c.snapGPI
-	c.seenKey = c.snapSeenKey
-	c.gpOverlappedBytes = c.snapGPOverlappedBytes
-	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
-}
-
-func (c *compaction) release() {
-	if !c.released {
-		c.released = true
-		c.v.release()
-	}
-}
-
-// Expand compacted tables; need external synchronization.
-func (c *compaction) expand() {
-	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
-	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
-
-	t0, t1 := c.tables[0], c.tables[1]
-	imin, imax := t0.getRange(c.s.icmp)
-	// We expand t0 here just incase ukey hop across tables.
-	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
-	if len(t0) != len(c.tables[0]) {
-		imin, imax = t0.getRange(c.s.icmp)
-	}
-	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
-	// Get entire range covered by compaction.
-	amin, amax := append(t0, t1...).getRange(c.s.icmp)
-
-	// See if we can grow the number of inputs in "level" without
-	// changing the number of "level+1" files we pick up.
-	if len(t1) > 0 {
-		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
-		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
-			xmin, xmax := exp0.getRange(c.s.icmp)
-			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
-			if len(exp1) == len(t1) {
-				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
-					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
-					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
-				imin, imax = xmin, xmax
-				t0, t1 = exp0, exp1
-				amin, amax = append(t0, t1...).getRange(c.s.icmp)
-			}
-		}
-	}
-
-	// Compute the set of grandparent files that overlap this compaction
-	// (parent == level+1; grandparent == level+2)
-	if c.level+2 < c.s.o.GetNumLevel() {
-		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
-	}
-
-	c.tables[0], c.tables[1] = t0, t1
-	c.imin, c.imax = imin, imax
-}
-
-// Check whether compaction is trivial.
-func (c *compaction) trivial() bool {
-	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
-}
-
-func (c *compaction) baseLevelForKey(ukey []byte) bool {
-	for level, tables := range c.v.tables[c.level+2:] {
-		for c.tPtrs[level] < len(tables) {
-			t := tables[c.tPtrs[level]]
-			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
-				// We've advanced far enough.
-				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
-					// Key falls in this file's range, so definitely not base level.
-					return false
-				}
-				break
-			}
-			c.tPtrs[level]++
-		}
-	}
-	return true
-}
-
-func (c *compaction) shouldStopBefore(ikey iKey) bool {
-	for ; c.gpi < len(c.gp); c.gpi++ {
-		gp := c.gp[c.gpi]
-		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
-			break
-		}
-		if c.seenKey {
-			c.gpOverlappedBytes += gp.size
-		}
-	}
-	c.seenKey = true
-
-	if c.gpOverlappedBytes > c.maxGPOverlaps {
-		// Too much overlap for current output; start new output.
-		c.gpOverlappedBytes = 0
-		return true
-	}
-	return false
-}
-
-// Creates an iterator.
-func (c *compaction) newIterator() iterator.Iterator {
-	// Creates iterator slice.
-	icap := len(c.tables)
-	if c.level == 0 {
-		// Special case for level-0.
-		icap = len(c.tables[0]) + 1
-	}
-	its := make([]iterator.Iterator, 0, icap)
-
-	// Options.
-	ro := &opt.ReadOptions{
-		DontFillCache: true,
-		Strict:        opt.StrictOverride,
-	}
-	strict := c.s.o.GetStrict(opt.StrictCompaction)
-	if strict {
-		ro.Strict |= opt.StrictReader
-	}
-
-	for i, tables := range c.tables {
-		if len(tables) == 0 {
-			continue
-		}
-
-		// Level-0 is not sorted and may overlaps each other.
-		if c.level+i == 0 {
-			for _, t := range tables {
-				its = append(its, c.s.tops.newIterator(t, nil, ro))
-			}
-		} else {
-			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
-			its = append(its, it)
-		}
-	}
-
-	return iterator.NewMergedIterator(its, c.s.icmp, strict)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
index b774bafac504552d75253d0943739e7e8229ef42..5fad6105b36eda1a7237e2007878e0110e955607 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -52,6 +52,8 @@ type dtRecord struct {
 }
 
 type sessionRecord struct {
+	numLevel int
+
 	hasRec         int
 	comparer       string
 	journalNum     uint64
@@ -228,7 +230,7 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
 	return x
 }
 
-func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
+func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
 	if p.err != nil {
 		return 0
 	}
@@ -236,14 +238,14 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) i
 	if p.err != nil {
 		return 0
 	}
-	if x >= uint64(numLevel) {
+	if x >= uint64(p.numLevel) {
 		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
 		return 0
 	}
 	return int(x)
 }
 
-func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
+func (p *sessionRecord) decode(r io.Reader) error {
 	br, ok := r.(byteReader)
 	if !ok {
 		br = bufio.NewReader(r)
@@ -284,13 +286,13 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
 				p.setSeqNum(x)
 			}
 		case recCompPtr:
-			level := p.readLevel("comp-ptr.level", br, numLevel)
+			level := p.readLevel("comp-ptr.level", br)
 			ikey := p.readBytes("comp-ptr.ikey", br)
 			if p.err == nil {
 				p.addCompPtr(level, iKey(ikey))
 			}
 		case recAddTable:
-			level := p.readLevel("add-table.level", br, numLevel)
+			level := p.readLevel("add-table.level", br)
 			num := p.readUvarint("add-table.num", br)
 			size := p.readUvarint("add-table.size", br)
 			imin := p.readBytes("add-table.imin", br)
@@ -299,7 +301,7 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
 				p.addTable(level, num, size, imin, imax)
 			}
 		case recDelTable:
-			level := p.readLevel("del-table.level", br, numLevel)
+			level := p.readLevel("del-table.level", br)
 			num := p.readUvarint("del-table.num", br)
 			if p.err == nil {
 				p.delTable(level, num)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
index 7a18cf448a7524a4d891bec7dccb150b48f70c06..ba3864b20828dd0452324765ddba8c780b91c73d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -19,8 +19,8 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
 	if err != nil {
 		return
 	}
-	v2 := &sessionRecord{}
-	err = v.decode(b, opt.DefaultNumLevel)
+	v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
+	err = v.decode(b)
 	if err != nil {
 		return
 	}
@@ -34,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
 
 func TestSessionRecord_EncodeDecode(t *testing.T) {
 	big := uint64(1) << 50
-	v := &sessionRecord{}
+	v := &sessionRecord{numLevel: opt.DefaultNumLevel}
 	i := uint64(0)
 	test := func() {
 		res, err := decodeEncode(v)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
index 740f3447e21286a394507a1c3fbf53df1d8022f6..23f3d80fab2b57e0fb7ac3ac3d42b3743e32d478 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -182,7 +182,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
 		defer v.release()
 	}
 	if rec == nil {
-		rec = &sessionRecord{}
+		rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
 	}
 	s.fillRecord(rec, true)
 	v.fillRecord(rec)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
index a068113fd81ffef9cd6177a8468253f2bd3f9810..768dfc88ec200c74acadd8ef02acc6f399a02b42 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
@@ -42,8 +42,6 @@ type tsOp uint
 const (
 	tsOpOpen tsOp = iota
 	tsOpCreate
-	tsOpReplace
-	tsOpRemove
 	tsOpRead
 	tsOpReadAt
 	tsOpWrite
@@ -243,10 +241,6 @@ func (tf tsFile) Replace(newfile storage.File) (err error) {
 	if err != nil {
 		return
 	}
-	if tf.shouldErr(tsOpReplace) {
-		err = errors.New("leveldb.testStorage: emulated create error")
-		return
-	}
 	err = tf.File.Replace(newfile.(tsFile).File)
 	if err != nil {
 		ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
@@ -264,10 +258,6 @@ func (tf tsFile) Remove() (err error) {
 	if err != nil {
 		return
 	}
-	if tf.shouldErr(tsOpRemove) {
-		err = errors.New("leveldb.testStorage: emulated create error")
-		return
-	}
 	err = tf.File.Remove()
 	if err != nil {
 		ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
index 57fce1314b202eea9e06bcc473c385ad3aa2489d..357e51845d1618453e65117b1f80f09726ee0a23 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
@@ -441,26 +441,22 @@ func newTableOps(s *session) *tOps {
 	var (
 		cacher cache.Cacher
 		bcache *cache.Cache
-		bpool  *util.BufferPool
 	)
 	if s.o.GetOpenFilesCacheCapacity() > 0 {
 		cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
 	}
-	if !s.o.GetDisableBlockCache() {
+	if !s.o.DisableBlockCache {
 		var bcacher cache.Cacher
 		if s.o.GetBlockCacheCapacity() > 0 {
 			bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
 		}
 		bcache = cache.NewCache(bcacher)
 	}
-	if !s.o.GetDisableBufferPool() {
-		bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
-	}
 	return &tOps{
 		s:      s,
 		cache:  cache.NewCache(cacher),
 		bcache: bcache,
-		bpool:  bpool,
+		bpool:  util.NewBufferPool(s.o.GetBlockSize() + 5),
 	}
 }
 
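With DisableBufferPool gone, newTableOps above always builds a pool sized to the block size plus the 5-byte block trailer (a compression-type byte and a 4-byte checksum). A simplified sync.Pool stand-in for util.BufferPool (the vendored pool is considerably more elaborate):

package main

import (
	"fmt"
	"sync"
)

const blockTrailerLen = 5 // 1-byte compression type + 4-byte checksum

// bufferPool recycles byte slices big enough for a table block + trailer.
type bufferPool struct{ p sync.Pool }

func newBufferPool(blockSize int) *bufferPool {
	return &bufferPool{p: sync.Pool{
		New: func() interface{} { return make([]byte, blockSize+blockTrailerLen) },
	}}
}

func (b *bufferPool) Get() []byte  { return b.p.Get().([]byte) }
func (b *bufferPool) Put(s []byte) { b.p.Put(s) }

func main() {
	pool := newBufferPool(4 << 10) // default block size is 4 KiB
	buf := pool.Get()
	fmt.Println(len(buf)) // 4101
	pool.Put(buf)
}
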
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
index 0ed58fb51be12eec4c780a98f1fa84510590b417..1bfc3978768c2126690c6aa9f707532539e1a8f0 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -14,7 +14,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/google/go-snappy/snappy"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy"
 
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
index f656f42199b1916facf2016f28df7c0f12df9e13..51f9fc7173978511abb9ab83a1e91bfd5b60197e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -12,7 +12,7 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/google/go-snappy/snappy"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy"
 
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
index 25ae21da97e390fb12f92a32ae44053cf247e2ba..e8268286f1938ed25cfcb5371cd7708a59f6b273 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
@@ -136,8 +136,9 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
 		if !tseek {
 			if tset == nil {
 				tset = &tSet{level, t}
-			} else {
+			} else if tset.table.consumeSeek() <= 0 {
 				tseek = true
+				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
 			}
 		}
 
@@ -202,28 +203,6 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
 		return true
 	})
 
-	if tseek && tset.table.consumeSeek() <= 0 {
-		tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
-	}
-
-	return
-}
-
-func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
-	var tset *tSet
-
-	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
-		if tset == nil {
-			tset = &tSet{level, t}
-			return true
-		} else {
-			if tset.table.consumeSeek() <= 0 {
-				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
-			}
-			return false
-		}
-	}, nil)
-
 	return
 }
 
@@ -300,7 +279,7 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
 	return
 }
 
-func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
+func (v *version) pickLevel(umin, umax []byte) (level int) {
 	if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
 		var overlaps tFiles
 		maxLevel := v.s.o.GetMaxMemCompationLevel()
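
The get hunk above moves the seek-compaction trigger inline: once a table's seek budget is spent, a compare-and-swap publishes it as the candidate in v.cSeek, and only the first publisher wins. A reduced sketch of that publication step:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// tSet is a stripped-down stand-in for the version's {level, table} pair.
type tSet struct{ level, table int }

// publishSeekCompaction mimics the CAS above: it succeeds only while no
// other table has been scheduled for seek compaction.
func publishSeekCompaction(cSeek *unsafe.Pointer, ts *tSet) bool {
	return atomic.CompareAndSwapPointer(cSeek, nil, unsafe.Pointer(ts))
}

func main() {
	var cSeek unsafe.Pointer
	fmt.Println(publishSeekCompaction(&cSeek, &tSet{0, 1})) // true: scheduled
	fmt.Println(publishSeekCompaction(&cSeek, &tSet{1, 2})) // false: lost the race
}
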
diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
new file mode 100644
index 0000000000000000000000000000000000000000..d93c1b9dbfd7cea5fe7b86520548181d3729fa94
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
@@ -0,0 +1,124 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// ErrCorrupt reports that the input is invalid.
+var ErrCorrupt = errors.New("snappy: corrupt input")
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n == 0 {
+		return 0, 0, ErrCorrupt
+	}
+	if uint64(int(v)) != v {
+		return 0, 0, errors.New("snappy: decoded block is too large")
+	}
+	return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if len(dst) < dLen {
+		dst = make([]byte, dLen)
+	}
+
+	var d, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint(src[s] >> 2)
+			switch {
+			case x < 60:
+				s += 1
+			case x == 60:
+				s += 2
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-1])
+			case x == 61:
+				s += 3
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-2]) | uint(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+			}
+			length = int(x + 1)
+			if length <= 0 {
+				return nil, errors.New("snappy: unsupported literal length")
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return nil, ErrCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+		case tagCopy2:
+			s += 3
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(src[s-2]) | int(src[s-1])<<8
+
+		case tagCopy4:
+			return nil, errors.New("snappy: unsupported COPY_4 tag")
+		}
+
+		end := d + length
+		if offset > d || end > len(dst) {
+			return nil, ErrCorrupt
+		}
+		for ; d < end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != dLen {
+		return nil, ErrCorrupt
+	}
+	return dst[:d], nil
+}
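
A hedged usage sketch for the vendored block codec, pairing this Decode with the package's Encode (shown in the next file); the import path is the Godeps one used throughout this patch:

package main

import (
	"bytes"
	"fmt"

	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy"
)

func main() {
	src := bytes.Repeat([]byte("compressible "), 100)

	encoded, err := snappy.Encode(nil, src) // nil dst: Encode allocates
	if err != nil {
		panic(err)
	}
	decoded, err := snappy.Decode(nil, encoded) // nil dst is valid here too
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), len(encoded), bytes.Equal(src, decoded))
}
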
diff --git a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/encode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
similarity index 69%
rename from Godeps/_workspace/src/github.com/google/go-snappy/snappy/encode.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
index dda372422d437441e1932607737295eae4bd30fd..b2371db11c8f0c15a4be374eed72f96bd42b864c 100644
--- a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/encode.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
@@ -6,7 +6,6 @@ package snappy
 
 import (
 	"encoding/binary"
-	"io"
 )
 
 // We limit how far copy back-references can go, the same as the C++ code.
@@ -173,86 +172,3 @@ func MaxEncodedLen(srcLen int) int {
 	// This last factor dominates the blowup, so the final estimate is:
 	return 32 + srcLen + srcLen/6
 }
-
-// NewWriter returns a new Writer that compresses to w, using the framing
-// format described at
-// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
-func NewWriter(w io.Writer) *Writer {
-	return &Writer{
-		w:   w,
-		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
-	}
-}
-
-// Writer is an io.Writer than can write Snappy-compressed bytes.
-type Writer struct {
-	w           io.Writer
-	err         error
-	enc         []byte
-	buf         [checksumSize + chunkHeaderSize]byte
-	wroteHeader bool
-}
-
-// Reset discards the writer's state and switches the Snappy writer to write to
-// w. This permits reusing a Writer rather than allocating a new one.
-func (w *Writer) Reset(writer io.Writer) {
-	w.w = writer
-	w.err = nil
-	w.wroteHeader = false
-}
-
-// Write satisfies the io.Writer interface.
-func (w *Writer) Write(p []byte) (n int, errRet error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if !w.wroteHeader {
-		copy(w.enc, magicChunk)
-		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
-			w.err = err
-			return n, err
-		}
-		w.wroteHeader = true
-	}
-	for len(p) > 0 {
-		var uncompressed []byte
-		if len(p) > maxUncompressedChunkLen {
-			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
-		} else {
-			uncompressed, p = p, nil
-		}
-		checksum := crc(uncompressed)
-
-		// Compress the buffer, discarding the result if the improvement
-		// isn't at least 12.5%.
-		chunkType := uint8(chunkTypeCompressedData)
-		chunkBody, err := Encode(w.enc, uncompressed)
-		if err != nil {
-			w.err = err
-			return n, err
-		}
-		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
-			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
-		}
-
-		chunkLen := 4 + len(chunkBody)
-		w.buf[0] = chunkType
-		w.buf[1] = uint8(chunkLen >> 0)
-		w.buf[2] = uint8(chunkLen >> 8)
-		w.buf[3] = uint8(chunkLen >> 16)
-		w.buf[4] = uint8(checksum >> 0)
-		w.buf[5] = uint8(checksum >> 8)
-		w.buf[6] = uint8(checksum >> 16)
-		w.buf[7] = uint8(checksum >> 24)
-		if _, err = w.w.Write(w.buf[:]); err != nil {
-			w.err = err
-			return n, err
-		}
-		if _, err = w.w.Write(chunkBody); err != nil {
-			w.err = err
-			return n, err
-		}
-		n += len(uncompressed)
-	}
-	return n, nil
-}
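
With the framing-format Writer dropped from this vendored copy, only block-level Encode remains. Callers that want to avoid Encode's internal allocation can presize the destination with MaxEncodedLen, whose bound per the comment above is 32 + n + n/6:

package main

import (
	"fmt"

	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy"
)

func main() {
	src := make([]byte, 1<<16)
	// Worst case for 64 KiB of input: 32 + 65536 + 65536/6 = 76490 bytes.
	buf := make([]byte, snappy.MaxEncodedLen(len(src)))
	encoded, err := snappy.Encode(buf, src) // reuses buf's backing array
	if err != nil {
		panic(err)
	}
	fmt.Println(len(encoded) <= len(buf)) // true
}
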
diff --git a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
similarity index 68%
rename from Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
index 043bf3d81a949af84bafdf12a7b1e06f93e75bb9..2f1b790d0b7170df134cdf3f95786120a1f54dae 100644
--- a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
@@ -8,10 +8,6 @@
 // The C++ snappy implementation is at http://code.google.com/p/snappy/
 package snappy
 
-import (
-	"hash/crc32"
-)
-
 /*
 Each encoded block begins with the varint-encoded length of the decoded data,
 followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@@ -40,29 +36,3 @@ const (
 	tagCopy2   = 0x02
 	tagCopy4   = 0x03
 )
-
-const (
-	checksumSize    = 4
-	chunkHeaderSize = 4
-	magicChunk      = "\xff\x06\x00\x00" + magicBody
-	magicBody       = "sNaPpY"
-	// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
-	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
-	maxUncompressedChunkLen = 65536
-)
-
-const (
-	chunkTypeCompressedData   = 0x00
-	chunkTypeUncompressedData = 0x01
-	chunkTypePadding          = 0xfe
-	chunkTypeStreamIdentifier = 0xff
-)
-
-var crcTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc implements the checksum specified in section 3 of
-// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
-func crc(b []byte) uint32 {
-	c := crc32.Update(0, crcTable, b)
-	return uint32(c>>15|c<<17) + 0xa282ead8
-}
diff --git a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
similarity index 57%
rename from Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy_test.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
index 0623385b71d22ebe18ae8b5aae94e64d0825580d..7ba839244e9bb8b75ec49b2083eb428549190784 100644
--- a/Godeps/_workspace/src/github.com/google/go-snappy/snappy/snappy_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
@@ -18,10 +18,7 @@ import (
 	"testing"
 )
 
-var (
-	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
-	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
-)
+var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
 
 func roundtrip(b, ebuf, dbuf []byte) error {
 	e, err := Encode(ebuf, b)
@@ -58,11 +55,11 @@ func TestSmallCopy(t *testing.T) {
 }
 
 func TestSmallRand(t *testing.T) {
-	rng := rand.New(rand.NewSource(27354294))
+	rand.Seed(27354294)
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i := range b {
-			b[i] = uint8(rng.Uint32())
+		for i, _ := range b {
+			b[i] = uint8(rand.Uint32())
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
 			t.Fatal(err)
@@ -73,7 +70,7 @@ func TestSmallRand(t *testing.T) {
 func TestSmallRegular(t *testing.T) {
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i := range b {
+		for i, _ := range b {
 			b[i] = uint8(i%10 + 'a')
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
@@ -82,120 +79,6 @@ func TestSmallRegular(t *testing.T) {
 	}
 }
 
-func cmp(a, b []byte) error {
-	if len(a) != len(b) {
-		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
-	}
-	for i := range a {
-		if a[i] != b[i] {
-			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
-		}
-	}
-	return nil
-}
-
-func TestFramingFormat(t *testing.T) {
-	// src is comprised of alternating 1e5-sized sequences of random
-	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
-	// because it is larger than maxUncompressedChunkLen (64k).
-	src := make([]byte, 1e6)
-	rng := rand.New(rand.NewSource(1))
-	for i := 0; i < 10; i++ {
-		if i%2 == 0 {
-			for j := 0; j < 1e5; j++ {
-				src[1e5*i+j] = uint8(rng.Intn(256))
-			}
-		} else {
-			for j := 0; j < 1e5; j++ {
-				src[1e5*i+j] = uint8(i)
-			}
-		}
-	}
-
-	buf := new(bytes.Buffer)
-	if _, err := NewWriter(buf).Write(src); err != nil {
-		t.Fatalf("Write: encoding: %v", err)
-	}
-	dst, err := ioutil.ReadAll(NewReader(buf))
-	if err != nil {
-		t.Fatalf("ReadAll: decoding: %v", err)
-	}
-	if err := cmp(dst, src); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestReaderReset(t *testing.T) {
-	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
-	buf := new(bytes.Buffer)
-	if _, err := NewWriter(buf).Write(gold); err != nil {
-		t.Fatalf("Write: %v", err)
-	}
-	encoded, invalid, partial := buf.String(), "invalid", "partial"
-	r := NewReader(nil)
-	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
-		if s == partial {
-			r.Reset(strings.NewReader(encoded))
-			if _, err := r.Read(make([]byte, 101)); err != nil {
-				t.Errorf("#%d: %v", i, err)
-				continue
-			}
-			continue
-		}
-		r.Reset(strings.NewReader(s))
-		got, err := ioutil.ReadAll(r)
-		switch s {
-		case encoded:
-			if err != nil {
-				t.Errorf("#%d: %v", i, err)
-				continue
-			}
-			if err := cmp(got, gold); err != nil {
-				t.Errorf("#%d: %v", i, err)
-				continue
-			}
-		case invalid:
-			if err == nil {
-				t.Errorf("#%d: got nil error, want non-nil", i)
-				continue
-			}
-		}
-	}
-}
-
-func TestWriterReset(t *testing.T) {
-	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
-	var gots, wants [][]byte
-	const n = 20
-	w, failed := NewWriter(nil), false
-	for i := 0; i <= n; i++ {
-		buf := new(bytes.Buffer)
-		w.Reset(buf)
-		want := gold[:len(gold)*i/n]
-		if _, err := w.Write(want); err != nil {
-			t.Errorf("#%d: Write: %v", i, err)
-			failed = true
-			continue
-		}
-		got, err := ioutil.ReadAll(NewReader(buf))
-		if err != nil {
-			t.Errorf("#%d: ReadAll: %v", i, err)
-			failed = true
-			continue
-		}
-		gots = append(gots, got)
-		wants = append(wants, want)
-	}
-	if failed {
-		return
-	}
-	for i := range gots {
-		if err := cmp(gots[i], wants[i]); err != nil {
-			t.Errorf("#%d: %v", i, err)
-		}
-	}
-}
-
 func benchDecode(b *testing.B, src []byte) {
 	encoded, err := Encode(nil, src)
 	if err != nil {
@@ -219,7 +102,7 @@ func benchEncode(b *testing.B, src []byte) {
 	}
 }
 
-func readFile(b testing.TB, filename string) []byte {
+func readFile(b *testing.B, filename string) []byte {
 	src, err := ioutil.ReadFile(filename)
 	if err != nil {
 		b.Fatalf("failed reading %s: %s", filename, err)
@@ -261,7 +144,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
 func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
 
 // testFiles' values are copied directly from
-// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
+// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
 // The label field is unused in snappy-go.
 var testFiles = []struct {
 	label    string
@@ -269,36 +152,29 @@ var testFiles = []struct {
 }{
 	{"html", "html"},
 	{"urls", "urls.10K"},
-	{"jpg", "fireworks.jpeg"},
-	{"jpg_200", "fireworks.jpeg"},
-	{"pdf", "paper-100k.pdf"},
+	{"jpg", "house.jpg"},
+	{"pdf", "mapreduce-osdi-1.pdf"},
 	{"html4", "html_x_4"},
+	{"cp", "cp.html"},
+	{"c", "fields.c"},
+	{"lsp", "grammar.lsp"},
+	{"xls", "kennedy.xls"},
 	{"txt1", "alice29.txt"},
 	{"txt2", "asyoulik.txt"},
 	{"txt3", "lcet10.txt"},
 	{"txt4", "plrabn12.txt"},
+	{"bin", "ptt5"},
+	{"sum", "sum"},
+	{"man", "xargs.1"},
 	{"pb", "geo.protodata"},
 	{"gaviota", "kppkn.gtb"},
 }
 
 // The test data files are present at this canonical URL.
-const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
+const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
 
 func downloadTestdata(basename string) (errRet error) {
-	filename := filepath.Join(*testdata, basename)
-	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
-		return nil
-	}
-
-	if !*download {
-		return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
-	}
-	// Download the official snappy C++ implementation reference test data
-	// files for benchmarking.
-	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
-		return fmt.Errorf("failed to create testdata: %s", err)
-	}
-
+	filename := filepath.Join("testdata", basename)
 	f, err := os.Create(filename)
 	if err != nil {
 		return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -309,27 +185,36 @@ func downloadTestdata(basename string) (errRet error) {
 			os.Remove(filename)
 		}
 	}()
-	url := baseURL + basename
-	resp, err := http.Get(url)
+	resp, err := http.Get(baseURL + basename)
 	if err != nil {
-		return fmt.Errorf("failed to download %s: %s", url, err)
+		return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
 	}
 	defer resp.Body.Close()
-	if s := resp.StatusCode; s != http.StatusOK {
-		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
-	}
 	_, err = io.Copy(f, resp.Body)
 	if err != nil {
-		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
+		return fmt.Errorf("failed to write %s: %s", filename, err)
 	}
 	return nil
 }
 
 func benchFile(b *testing.B, n int, decode bool) {
-	if err := downloadTestdata(testFiles[n].filename); err != nil {
-		b.Fatalf("failed to download testdata: %s", err)
+	filename := filepath.Join("testdata", testFiles[n].filename)
+	if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
+		if !*download {
+			b.Fatal("test data not found; skipping benchmark without the -download flag")
+		}
+		// Download the official snappy C++ implementation reference test data
+		// files for benchmarking.
+		if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
+			b.Fatalf("failed to create testdata: %s", err)
+		}
+		for _, tf := range testFiles {
+			if err := downloadTestdata(tf.filename); err != nil {
+				b.Fatalf("failed to download testdata: %s", err)
+			}
+		}
 	}
-	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
+	data := readFile(b, filename)
 	if decode {
 		benchDecode(b, data)
 	} else {
@@ -350,6 +235,12 @@ func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
 func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
 func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
 func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
+func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
+func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
+func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
+func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
+func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
+func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
 func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
 func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
 func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
@@ -362,3 +253,9 @@ func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
 func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
 func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
 func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
+func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
+func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
+func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
+func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
+func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
+func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }
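The rewritten benchFile above folds the stat-then-download check into the benchmark itself and fetches every reference file in one pass. As a standalone illustration of that lazy-download pattern — hypothetical helper names, not part of the vendored package — a sketch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
)

// ensureFile downloads url into path only when the file is missing or
// empty, mirroring the stat-then-fetch check benchFile now performs.
func ensureFile(path, url string) error {
	if stat, err := os.Stat(path); err == nil && stat.Size() != 0 {
		return nil // already present, nothing to do
	}
	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
		return fmt.Errorf("failed to create %s: %s", filepath.Dir(path), err)
	}
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", path, err)
	}
	defer f.Close()

	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", url, err)
	}
	defer resp.Body.Close()

	if _, err := io.Copy(f, resp.Body); err != nil {
		return fmt.Errorf("failed to write %s: %s", path, err)
	}
	return nil
}

func main() {
	if err := ensureFile(filepath.Join("testdata", "html"), "https://snappy.googlecode.com/svn/trunk/testdata/html"); err != nil {
		fmt.Println(err)
	}
}
```

Note that this older gosnappy revision also drops the HTTP status check the replaced code performed, so a 404 error page would be written to disk as though it were test data.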
diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/logger.go b/Godeps/_workspace/src/github.com/tendermint/log15/logger.go
index 325448d218b1d476af1efca14ec11845ff8a6dfc..8ddf3ffd05e32b43d93bffa8748119de9665a512 100644
--- a/Godeps/_workspace/src/github.com/tendermint/log15/logger.go
+++ b/Godeps/_workspace/src/github.com/tendermint/log15/logger.go
@@ -30,7 +30,7 @@ func (l Lvl) String() string {
 	case LvlInfo:
 		return "info"
 	case LvlNotice:
-		return "notice"
+		return "note"
 	case LvlWarn:
 		return "warn"
 	case LvlError:
@@ -50,7 +50,7 @@ func LvlFromString(lvlString string) (Lvl, error) {
 		return LvlDebug, nil
 	case "info":
 		return LvlInfo, nil
-	case "notice", "note":
+	case "note", "notice":
 		return LvlNotice, nil
 	case "warn":
 		return LvlWarn, nil
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
index faf67722a0f975f8b60ee314e3ef2b84f0dc1a5f..05345fc5e5fb856499057fef5d1b9ea59bdb69ef 100644
--- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
+++ b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
@@ -375,7 +375,7 @@ func TestAllocs(t *testing.T) {
 				<-c.Done()
 			},
 			limit:      8,
-			gccgoLimit: 13,
+			gccgoLimit: 15,
 		},
 		{
 			desc: "WithCancel(bg)",
@@ -536,7 +536,7 @@ func testLayers(t *testing.T, seed int64, testTimeout bool) {
 	if testTimeout {
 		select {
 		case <-ctx.Done():
-		case <-time.After(timeout + timeout/10):
+		case <-time.After(timeout + 100*time.Millisecond):
 			errorf("ctx should have timed out")
 		}
 		checkValues("after timeout")
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go
new file mode 100644
index 0000000000000000000000000000000000000000..48610e3627701c05e1cbf8e12db219af43cbd108
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package ctxhttp
+
+import "net/http"
+
+func canceler(client *http.Client, req *http.Request) func() {
+	ch := make(chan struct{})
+	req.Cancel = ch
+
+	return func() {
+		close(ch)
+	}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
new file mode 100644
index 0000000000000000000000000000000000000000..56bcbadb85fcc2a69db540797f41511cf16e5a6d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package ctxhttp
+
+import "net/http"
+
+type requestCanceler interface {
+	CancelRequest(*http.Request)
+}
+
+func canceler(client *http.Client, req *http.Request) func() {
+	rc, ok := client.Transport.(requestCanceler)
+	if !ok {
+		return func() {}
+	}
+	return func() {
+		rc.CancelRequest(req)
+	}
+}
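The two new cancelreq files select a cancellation mechanism at compile time via build tags: on Go 1.5+ the transport watches a channel assigned to req.Cancel, while older toolchains fall back to the Transport's CancelRequest method. A minimal sketch of the Go 1.5 channel mechanism in isolation (example.com is a stand-in URL):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com", nil)
	if err != nil {
		panic(err)
	}
	cancel := make(chan struct{})
	req.Cancel = cancel // Go 1.5+: the transport watches this channel

	go func() {
		time.Sleep(50 * time.Millisecond)
		close(cancel) // abort the request mid-flight
	}()

	_, err = http.DefaultClient.Do(req)
	fmt.Println("request ended with:", err) // expect a cancellation error
}
```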
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000000000000000000000000000000000000..0e3c1ebfe17f2fa98d633ee0f7f08cca862a1b19
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
+	cancel := canceler(client, req)
+
+	type responseAndError struct {
+		resp *http.Response
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	go func() {
+		resp, err := client.Do(req)
+		result <- responseAndError{resp, err}
+	}()
+
+	select {
+	case <-ctx.Done():
+		cancel()
+		return nil, ctx.Err()
+	case r := <-result:
+		return r.resp, r.err
+	}
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
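Taken together, Do races client.Do against ctx.Done and invokes the version-appropriate canceler when the context wins. A minimal consumer sketch, assuming the vendored import paths used above (example.com is a placeholder):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp"
)

func main() {
	// Give the whole request one second; Do returns ctx.Err() if the
	// deadline expires before the response arrives.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, nil, "http://example.com") // nil client => http.DefaultClient
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("fetched %d bytes\n", len(body))
}
```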
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a02a2a97b6248c99260c7aed855d46c19e7d331f
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ctxhttp
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+const (
+	requestDuration = 100 * time.Millisecond
+	requestBody     = "ok"
+)
+
+func TestNoTimeout(t *testing.T) {
+	ctx := context.Background()
+	resp, err := doRequest(ctx)
+
+	if resp == nil || err != nil {
+		t.Fatalf("error received from client: %v %v", err, resp)
+	}
+}
+func TestCancel(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		time.Sleep(requestDuration / 2)
+		cancel()
+	}()
+
+	resp, err := doRequest(ctx)
+
+	if resp != nil || err == nil {
+		t.Fatalf("expected error, didn't get one. resp: %v", resp)
+	}
+	if err != ctx.Err() {
+		t.Fatalf("expected error from context but got: %v", err)
+	}
+}
+
+func TestCancelAfterRequest(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	resp, err := doRequest(ctx)
+
+	// Cancel before reading the body.
+	// Request.Body should still be readable after the context is canceled.
+	cancel()
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil || string(b) != requestBody {
+		t.Fatalf("could not read body: %q %v", b, err)
+	}
+}
+
+func doRequest(ctx context.Context) (*http.Response, error) {
+	var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		time.Sleep(requestDuration)
+		w.Write([]byte(requestBody))
+	})
+
+	serv := httptest.NewServer(okHandler)
+	defer serv.Close()
+
+	return Get(ctx, nil, serv.URL)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/netutil/listen_test.go b/Godeps/_workspace/src/golang.org/x/net/netutil/listen_test.go
index ac87e0ee4a9a688622541088d0f77f9d8ffcd03e..c0d5bc2a70f6877a7a5c8cdc9855b5d0f0175ce9 100644
--- a/Godeps/_workspace/src/golang.org/x/net/netutil/listen_test.go
+++ b/Godeps/_workspace/src/golang.org/x/net/netutil/listen_test.go
@@ -20,17 +20,20 @@ import (
 	"sync/atomic"
 	"testing"
 	"time"
+
+	"golang.org/x/net/internal/nettest"
 )
 
 func TestLimitListener(t *testing.T) {
-	const (
-		max = 5
-		num = 200
-	)
+	const max = 5
+	attempts := (nettest.MaxOpenFiles() - max) / 2
+	if attempts > 256 { // maximum length of accept queue is 128 by default
+		attempts = 256
+	}
 
 	l, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Listen: %v", err)
+		t.Fatal(err)
 	}
 	defer l.Close()
 	l = LimitListener(l, max)
@@ -47,14 +50,14 @@ func TestLimitListener(t *testing.T) {
 
 	var wg sync.WaitGroup
 	var failed int32
-	for i := 0; i < num; i++ {
+	for i := 0; i < attempts; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
 			c := http.Client{Timeout: 3 * time.Second}
 			r, err := c.Get("http://" + l.Addr().String())
 			if err != nil {
-				t.Logf("Get: %v", err)
+				t.Log(err)
 				atomic.AddInt32(&failed, 1)
 				return
 			}
@@ -66,8 +69,8 @@ func TestLimitListener(t *testing.T) {
 
 	// We expect some Gets to fail as the kernel's accept queue is filled,
 	// but most should succeed.
-	if failed >= num/2 {
-		t.Errorf("too many Gets failed: %v", failed)
+	if int(failed) >= attempts/2 {
+		t.Errorf("%d requests failed within %d attempts", failed, attempts)
 	}
 }
 
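The test now derives its connection count from the process's file-descriptor budget instead of a fixed 200, which keeps it from exhausting descriptors on constrained machines. For context, a minimal sketch of what LimitListener itself is for — capping concurrent accepted connections on a server:

```go
package main

import (
	"fmt"
	"net"
	"net/http"

	"golang.org/x/net/netutil"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// Cap the server at 5 simultaneous connections; further accepts
	// queue in the kernel until a slot frees up, which is exactly the
	// pressure the updated test provokes.
	l = netutil.LimitListener(l, 5)
	fmt.Println("serving on", l.Addr())
	http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
}
```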
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.gitignore b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.gitignore
index 415902041c853f9d77290ef92c94043b0d30abfc..7e9b50032d207cedcd10de2796dc4203d9889332 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.gitignore
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.gitignore
@@ -23,4 +23,6 @@ _testmain.go
 *.test
 *.prof
 *.test
-*.out
\ No newline at end of file
+*.out
+cover.html
+README.html
\ No newline at end of file
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.travis.yml b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.travis.yml
deleted file mode 100644
index 68398d9f152d5b07e90a55adc00e5006436e58dd..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-notificaitons:
-  email:
-    recipients: bluesuncorp01@gmail.com
-    on_success: change
-    on_failure: always
-
-go:
-  - 1.2
-  - 1.3
-  - 1.4
-  - tip
\ No newline at end of file
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/README.md b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/README.md
index c1e9d00cfddae372708ada9681bf588026df55f9..1a78bec3c3f0b07dcb9b7a56083558362817b247 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/README.md
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/README.md
@@ -1,33 +1,143 @@
 Package validator
 ================
-[![Build Status](https://travis-ci.org/bluesuncorp/validator.svg?branch=v5.1)](https://travis-ci.org/bluesuncorp/validator)
-[![GoDoc](https://godoc.org/gopkg.in/bluesuncorp/validator.v5?status.svg)](https://godoc.org/gopkg.in/bluesuncorp/validator.v5)
+
+[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/487382/badge.svg)](https://semaphoreci.com/joeybloggs/validator)
+[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v5)](https://coveralls.io/r/go-playground/validator?branch=v5)
+[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v5?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v5)
 
 Package validator implements value validations for structs and individual fields based on tags.
-It is also capable of Cross Field and Cross Struct validations.
+
+It has the following **unique** features:
+
+-   Cross Field and Cross Struct validations.  
+-   Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.  
+-   Handles type interface by determining its underlying type prior to validation.  
 
 Installation
-============
+------------
 
 Use go get.
 
-	go get gopkg.in/bluesuncorp/validator.v5
+	go get gopkg.in/go-playground/validator.v5
 
 or to update
 
-	go get -u gopkg.in/bluesuncorp/validator.v5
+	go get -u gopkg.in/go-playground/validator.v5
 
 Then import the validator package into your own code.
 
-	import "gopkg.in/bluesuncorp/validator.v5"
+	import "gopkg.in/go-playground/validator.v5"
 
 Usage and documentation
-=======================
+------
+
+Please see http://godoc.org/gopkg.in/go-playground/validator.v5 for detailed usage docs.
+
+##### Example:
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/go-playground/validator.v5"
+)
+
+// User contains user information
+type User struct {
+	FirstName      string     `validate:"required"`
+	LastName       string     `validate:"required"`
+	Age            uint8      `validate:"gte=0,lte=130"`
+	Email          string     `validate:"required,email"`
+	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
+	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
+}
+
+// Address houses a users address information
+type Address struct {
+	Street string `validate:"required"`
+	City   string `validate:"required"`
+	Planet string `validate:"required"`
+	Phone  string `validate:"required"`
+}
+
+var validate *validator.Validate
+
+func main() {
+
+	validate = validator.New("validate", validator.BakedInValidators)
+
+	address := &Address{
+		Street: "Eavesdown Docks",
+		Planet: "Persphone",
+		Phone:  "none",
+	}
+
+	user := &User{
+		FirstName:      "Badger",
+		LastName:       "Smith",
+		Age:            135,
+		Email:          "Badger.Smith@gmail.com",
+		FavouriteColor: "#000",
+		Addresses:      []*Address{address},
+	}
+
+	// returns nil or *StructErrors
+	errs := validate.Struct(user)
+
+	if errs != nil {
+
+		// err will be of type *FieldError
+		err := errs.Errors["Age"]
+		fmt.Println(err.Error()) // output: Field validation for "Age" failed on the "lte" tag
+		fmt.Println(err.Field)   // output: Age
+		fmt.Println(err.Tag)     // output: lte
+		fmt.Println(err.Kind)    // output: uint8
+		fmt.Println(err.Type)    // output: uint8
+		fmt.Println(err.Param)   // output: 130
+		fmt.Println(err.Value)   // output: 135
+
+		// or if you prefer you can use the Flatten function
+		// NOTE: I find this useful when using a more static approach of checking field errors.
+		// The above is best for passing to some generic code that parses the errors, e.g. I pass
+		// errs to a routine which loops through the errors, creates and translates the error
+		// messages into the user's locale and returns a map[string]string of field to error,
+		// which I then use within the HTML rendering.
+
+		flat := errs.Flatten()
+		fmt.Println(flat) // output: map[Age:Field validation for "Age" failed on the "lte" tag Addresses[0].Address.City:Field validation for "City" failed on the "required" tag]
+		err = flat["Addresses[0].Address.City"]
+		fmt.Println(err.Field) // output: City
+		fmt.Println(err.Tag)   // output: required
+		fmt.Println(err.Kind)  // output: string
+		fmt.Println(err.Type)  // output: string
+		fmt.Println(err.Param) // output:
+		fmt.Println(err.Value) // output:
+
+		// from here you can create your own error messages in whatever language you wish
+		return
+	}
+
+	// save user to database
+}
+```
 
-Please see http://godoc.org/gopkg.in/bluesuncorp/validator.v5 for detailed usage docs.
+Benchmarks
+------
+###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3
+```
+$ go test -cpu=4 -bench=. -benchmem=true
+PASS
+BenchmarkValidateField-4	 		 3000000	       429 ns/op	     192 B/op	       2 allocs/op
+BenchmarkValidateStructSimple-4	  	  500000	      2877 ns/op	     657 B/op	      10 allocs/op
+BenchmarkTemplateParallelSimple-4	  500000	      3097 ns/op	     657 B/op	      10 allocs/op
+BenchmarkValidateStructLarge-4	  	  100000	     15228 ns/op	    4350 B/op	      62 allocs/op
+BenchmarkTemplateParallelLarge-4	  100000	     14257 ns/op	    4354 B/op	      62 allocs/op
+```
 
 How to Contribute
-=================
+------
 
 There will always be a development branch for each version i.e. `v1-development`. In order to contribute, 
 please make your pull requests against those branches.
@@ -40,5 +150,5 @@ I strongly encourage everyone whom creates a custom validation function to contr
 help make this package even better.
 
 License
-=======
+------
 Distributed under MIT License, please see license file in code for more details.
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/baked_in.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/baked_in.go
index 22746adae10d34f704f7cc9f63203de3264bc4ec..82868ccfd0f09d83819f9d23b86eae002a2e8ce0 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/baked_in.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/baked_in.go
@@ -424,10 +424,12 @@ func hasValue(top interface{}, current interface{}, field interface{}, param str
 	st := reflect.ValueOf(field)
 
 	switch st.Kind() {
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return field != nil && int64(st.Len()) > 0
-
+	case reflect.Invalid:
+		return false
+	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+		return !st.IsNil()
+	case reflect.Array:
+		return field != reflect.Zero(reflect.TypeOf(field)).Interface()
 	default:
 		return field != nil && field != reflect.Zero(reflect.TypeOf(field)).Interface()
 	}
@@ -592,7 +594,7 @@ func isGte(top interface{}, current interface{}, field interface{}, param string
 	case reflect.String:
 		p := asInt(param)
 
-		return int64(len(st.String())) >= p
+		return int64(utf8.RuneCountInString(st.String())) >= p
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		p := asInt(param)
@@ -637,7 +639,7 @@ func isGt(top interface{}, current interface{}, field interface{}, param string)
 	case reflect.String:
 		p := asInt(param)
 
-		return int64(len(st.String())) > p
+		return int64(utf8.RuneCountInString(st.String())) > p
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		p := asInt(param)
@@ -681,7 +683,7 @@ func hasLengthOf(top interface{}, current interface{}, field interface{}, param
 	case reflect.String:
 		p := asInt(param)
 
-		return int64(len(st.String())) == p
+		return int64(utf8.RuneCountInString(st.String())) == p
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		p := asInt(param)
@@ -875,7 +877,7 @@ func isLte(top interface{}, current interface{}, field interface{}, param string
 	case reflect.String:
 		p := asInt(param)
 
-		return int64(len(st.String())) <= p
+		return int64(utf8.RuneCountInString(st.String())) <= p
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		p := asInt(param)
@@ -920,7 +922,7 @@ func isLt(top interface{}, current interface{}, field interface{}, param string)
 	case reflect.String:
 		p := asInt(param)
 
-		return int64(len(st.String())) < p
+		return int64(utf8.RuneCountInString(st.String())) < p
 
 	case reflect.Slice, reflect.Map, reflect.Array:
 		p := asInt(param)
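These baked_in.go changes switch every string length comparison from len (bytes) to utf8.RuneCountInString (runes), so tags like len, gt, gte, lt and lte now count characters rather than UTF-8 bytes; the reworked hasValue likewise tests nilable kinds with IsNil instead of a length check. A tiny sketch of the byte/rune distinction the change addresses:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo" // 5 runes, 6 bytes: é occupies two bytes in UTF-8

	// Before this change a tag such as len=5 compared against the byte
	// length, so multi-byte strings failed validation spuriously.
	fmt.Println(len(s))                    // 6
	fmt.Println(utf8.RuneCountInString(s)) // 5
}
```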
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/benchmarks_test.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/benchmarks_test.go
index 25172092b937f1c26596c7adc7551f82bef5fcd8..ee836c288be215917335d3ddb28914e3f3b2426a 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/benchmarks_test.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/benchmarks_test.go
@@ -24,23 +24,23 @@ func BenchmarkValidateStructSimple(b *testing.B) {
 	}
 }
 
-// func BenchmarkTemplateParallelSimple(b *testing.B) {
+func BenchmarkTemplateParallelSimple(b *testing.B) {
 
-// 	type Foo struct {
-// 		StringValue string `validate:"min=5,max=10"`
-// 		IntValue    int    `validate:"min=5,max=10"`
-// 	}
+	type Foo struct {
+		StringValue string `validate:"min=5,max=10"`
+		IntValue    int    `validate:"min=5,max=10"`
+	}
 
-// 	validFoo := &Foo{StringValue: "Foobar", IntValue: 7}
-// 	invalidFoo := &Foo{StringValue: "Fo", IntValue: 3}
+	validFoo := &Foo{StringValue: "Foobar", IntValue: 7}
+	invalidFoo := &Foo{StringValue: "Fo", IntValue: 3}
 
-// 	b.RunParallel(func(pb *testing.PB) {
-// 		for pb.Next() {
-// 			validate.Struct(validFoo)
-// 			validate.Struct(invalidFoo)
-// 		}
-// 	})
-// }
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			validate.Struct(validFoo)
+			validate.Struct(invalidFoo)
+		}
+	})
+}
 
 func BenchmarkValidateStructLarge(b *testing.B) {
 
@@ -101,63 +101,63 @@ func BenchmarkValidateStructLarge(b *testing.B) {
 	}
 }
 
-// func BenchmarkTemplateParallelLarge(b *testing.B) {
-
-// 	tFail := &TestString{
-// 		Required:  "",
-// 		Len:       "",
-// 		Min:       "",
-// 		Max:       "12345678901",
-// 		MinMax:    "",
-// 		Lt:        "0123456789",
-// 		Lte:       "01234567890",
-// 		Gt:        "1",
-// 		Gte:       "1",
-// 		OmitEmpty: "12345678901",
-// 		Sub: &SubTest{
-// 			Test: "",
-// 		},
-// 		Anonymous: struct {
-// 			A string `validate:"required"`
-// 		}{
-// 			A: "",
-// 		},
-// 		Iface: &Impl{
-// 			F: "12",
-// 		},
-// 	}
-
-// 	tSuccess := &TestString{
-// 		Required:  "Required",
-// 		Len:       "length==10",
-// 		Min:       "min=1",
-// 		Max:       "1234567890",
-// 		MinMax:    "12345",
-// 		Lt:        "012345678",
-// 		Lte:       "0123456789",
-// 		Gt:        "01234567890",
-// 		Gte:       "0123456789",
-// 		OmitEmpty: "",
-// 		Sub: &SubTest{
-// 			Test: "1",
-// 		},
-// 		SubIgnore: &SubTest{
-// 			Test: "",
-// 		},
-// 		Anonymous: struct {
-// 			A string `validate:"required"`
-// 		}{
-// 			A: "1",
-// 		},
-// 		Iface: &Impl{
-// 			F: "123",
-// 		},
-// 	}
-
-// 	b.RunParallel(func(pb *testing.PB) {
-// 		for pb.Next() {
-// 			validate.Struct(tSuccess)
-// 			validate.Struct(tFail)
-// 		}
-// 	})
-// }
+func BenchmarkTemplateParallelLarge(b *testing.B) {
+
+	tFail := &TestString{
+		Required:  "",
+		Len:       "",
+		Min:       "",
+		Max:       "12345678901",
+		MinMax:    "",
+		Lt:        "0123456789",
+		Lte:       "01234567890",
+		Gt:        "1",
+		Gte:       "1",
+		OmitEmpty: "12345678901",
+		Sub: &SubTest{
+			Test: "",
+		},
+		Anonymous: struct {
+			A string `validate:"required"`
+		}{
+			A: "",
+		},
+		Iface: &Impl{
+			F: "12",
+		},
+	}
+
+	tSuccess := &TestString{
+		Required:  "Required",
+		Len:       "length==10",
+		Min:       "min=1",
+		Max:       "1234567890",
+		MinMax:    "12345",
+		Lt:        "012345678",
+		Lte:       "0123456789",
+		Gt:        "01234567890",
+		Gte:       "0123456789",
+		OmitEmpty: "",
+		Sub: &SubTest{
+			Test: "1",
+		},
+		SubIgnore: &SubTest{
+			Test: "",
+		},
+		Anonymous: struct {
+			A string `validate:"required"`
+		}{
+			A: "1",
+		},
+		Iface: &Impl{
+			F: "123",
+		},
+	}
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			validate.Struct(tSuccess)
+			validate.Struct(tFail)
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/doc.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/doc.go
index 89142e0751e120d75529a1fd6c785ede43ab057d..74db756f954c5304994be5ed36a62dee4d1f229d 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/doc.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/doc.go
@@ -167,16 +167,44 @@ Here is a list of the current built in validators:
 		inside of you program you know the struct will be valid, but need to
 		verify it has been assigned.
 
+	exists
+		Is a special tag without a validation function attached. It is used when a field
+		is a Pointer, Interface or Invalid and you wish to validate that it exists.
+		Example: if you want to ensure a bool exists, define the bool as a pointer
+		and use exists; it will ensure there is a value. required could not be used
+		here as it would fail when the bool was false. exists fails if the value is
+		a Pointer, Interface or Invalid and is nil. (Usage: exists)
+
 	omitempty
 		Allows conditional validation, for example if a field is not set with
 		a value (Determined by the required validator) then other validation
 		such as min or max won't run, but if a value is set validation will run.
 		(Usage: omitempty)
 
+	dive
+		This tells the validator to dive into a slice, array or map and validate that
+		level of the slice, array or map with the validation tags that follow.
+		Multidimensional nesting is also supported, each level you wish to dive will
+		require another dive tag. (Usage: dive)
+		Example: [][]string with validation tag "gt=0,dive,len=1,dive,required"
+		gt=0 will be applied to []
+		len=1 will be applied to []string
+		required will be applied to string
+		Example2: [][]string with validation tag "gt=0,dive,dive,required"
+		gt=0 will be applied to []
+		[]string will be spared validation
+		required will be applied to string
+		NOTE: in Example2, if the required validation failed but all others passed,
+		the FieldErrors in the middle of the hierarchy will have their IsPlaceholderErr
+		field set to true. If a FieldError has IsSliceOrArray=true or IsMap=true then
+		the FieldError is a Slice/Array or Map field, and if IsPlaceholderErr=true it
+		contains errors within its SliceOrArrayErrs or MapErrs fields.
+
 	required
-		This validates that the value is not the data types default value.
+		This validates that the value is not the data type's default zero value.
 		For numbers ensures value is not zero. For strings ensures value is
-		not "". For slices, arrays, and maps, ensures the length is not zero.
+		not "". For slices, maps, pointers, interfaces, channels and functions
+		ensures the value is not nil.
 		(Usage: required)
 
 	len
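To make the new doc.go tags concrete, here is a small hypothetical struct exercising both dive and exists (the field names are invented for illustration; the API calls match the README example above):

```go
package main

import (
	"fmt"

	"gopkg.in/go-playground/validator.v5"
)

// Form is a hypothetical struct exercising the new tags.
type Form struct {
	// gt=0 applies to the slice itself; after dive, required applies
	// to each element.
	Tags []string `validate:"gt=0,dive,required"`
	// exists fails only when the pointer is nil, so a *bool can still
	// carry a legitimate false value.
	Agreed *bool `validate:"exists"`
}

func main() {
	validate := validator.New("validate", validator.BakedInValidators)

	f := &Form{Tags: []string{"ok", ""}} // second element fails required; Agreed is nil

	if errs := validate.Struct(f); errs != nil {
		for field, err := range errs.Flatten() {
			fmt.Println(field, "failed on the", err.Tag, "tag")
		}
	}
}
```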
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples/simple.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples/simple.go
new file mode 100644
index 0000000000000000000000000000000000000000..c36bd6868f5b65304f232a1902f2105ea86f7344
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples/simple.go
@@ -0,0 +1,85 @@
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/go-playground/validator.v5"
+)
+
+// User contains user information
+type User struct {
+	FirstName      string     `validate:"required"`
+	LastName       string     `validate:"required"`
+	Age            uint8      `validate:"gte=0,lte=130"`
+	Email          string     `validate:"required,email"`
+	FavouriteColor string     `validate:"hexcolor|rgb|rgba"`
+	Addresses      []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
+}
+
+// Address houses a users address information
+type Address struct {
+	Street string `validate:"required"`
+	City   string `validate:"required"`
+	Planet string `validate:"required"`
+	Phone  string `validate:"required"`
+}
+
+var validate *validator.Validate
+
+func main() {
+
+	validate = validator.New("validate", validator.BakedInValidators)
+
+	address := &Address{
+		Street: "Eavesdown Docks",
+		Planet: "Persphone",
+		Phone:  "none",
+	}
+
+	user := &User{
+		FirstName:      "Badger",
+		LastName:       "Smith",
+		Age:            135,
+		Email:          "Badger.Smith@gmail.com",
+		FavouriteColor: "#000",
+		Addresses:      []*Address{address},
+	}
+
+	// returns nil or *StructErrors
+	errs := validate.Struct(user)
+
+	if errs != nil {
+
+		// err will be of type *FieldError
+		err := errs.Errors["Age"]
+		fmt.Println(err.Error()) // output: Field validation for "Age" failed on the "lte" tag
+		fmt.Println(err.Field)   // output: Age
+		fmt.Println(err.Tag)     // output: lte
+		fmt.Println(err.Kind)    // output: uint8
+		fmt.Println(err.Type)    // output: uint8
+		fmt.Println(err.Param)   // output: 130
+		fmt.Println(err.Value)   // output: 135
+
+		// or if you prefer you can use the Flatten function
+		// NOTE: I find this useful when using a more static approach of checking field errors.
+		// The above is best for passing to some generic code that parses the errors, e.g. I pass
+		// errs to a routine which loops through the errors, creates and translates the error
+		// messages into the user's locale and returns a map[string]string of field to error,
+		// which I then use within the HTML rendering.
+
+		flat := errs.Flatten()
+		fmt.Println(flat) // output: map[Age:Field validation for "Age" failed on the "lte" tag Addresses[0].Address.City:Field validation for "City" failed on the "required" tag]
+		err = flat["Addresses[0].Address.City"]
+		fmt.Println(err.Field) // output: City
+		fmt.Println(err.Tag)   // output: required
+		fmt.Println(err.Kind)  // output: string
+		fmt.Println(err.Type)  // output: string
+		fmt.Println(err.Param) // output:
+		fmt.Println(err.Value) // output:
+
+		// from here you can create your own error messages in whatever language you wish
+		return
+	}
+
+	// save user to database
+}
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples_test.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples_test.go
index c5dd3517d1812ba8559a31783860971df2079555..0acdaa0c2d35a7ead14e7f02e846dd36bd2ea2f1 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples_test.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/examples_test.go
@@ -3,7 +3,7 @@ package validator_test
 import (
 	"fmt"
 
-	"../validator"
+	"gopkg.in/go-playground/validator.v5"
 )
 
 func ExampleValidate_new() {
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator.go
index 5f0c18afa0100c890106b93a0ffa307dc6ec25d4..f195647baed6d1d24c03f8c0c9259cd1b654c74d 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator.go
@@ -20,56 +20,31 @@ import (
 )
 
 const (
-	utf8HexComma    = "0x2C"
-	tagSeparator    = ","
-	orSeparator     = "|"
-	noValidationTag = "-"
-	tagKeySeparator = "="
-	structOnlyTag   = "structonly"
-	omitempty       = "omitempty"
-	fieldErrMsg     = "Field validation for \"%s\" failed on the \"%s\" tag"
-	structErrMsg    = "Struct:%s\n"
+	utf8HexComma        = "0x2C"
+	tagSeparator        = ","
+	orSeparator         = "|"
+	noValidationTag     = "-"
+	tagKeySeparator     = "="
+	structOnlyTag       = "structonly"
+	omitempty           = "omitempty"
+	required            = "required"
+	fieldErrMsg         = "Field validation for \"%s\" failed on the \"%s\" tag"
+	sliceErrMsg         = "Field validation for \"%s\" failed at index \"%d\" with error(s): %s"
+	mapErrMsg           = "Field validation for \"%s\" failed on key \"%v\" with error(s): %s"
+	structErrMsg        = "Struct:%s\n"
+	diveTag             = "dive"
+	existsTag           = "exists"
+	arrayIndexFieldName = "%s[%d]"
+	mapIndexFieldName   = "%s[%v]"
 )
 
-var structPool *pool
+var structPool *sync.Pool
 
-// Pool holds a channelStructErrors.
-type pool struct {
-	pool chan *StructErrors
-}
-
-// NewPool creates a new pool of Clients.
-func newPool(max int) *pool {
-	return &pool{
-		pool: make(chan *StructErrors, max),
-	}
-}
-
-// Borrow a StructErrors from the pool.
-func (p *pool) Borrow() *StructErrors {
-	var c *StructErrors
-
-	select {
-	case c = <-p.pool:
-	default:
-		c = &StructErrors{
-			Errors:       map[string]*FieldError{},
-			StructErrors: map[string]*StructErrors{},
-		}
-	}
-
-	return c
-}
-
-// Return returns a StructErrors to the pool.
-func (p *pool) Return(c *StructErrors) {
-
-	// c.Struct = ""
-
-	select {
-	case p.pool <- c:
-	default:
-		// let it go, let it go...
+// returns new *StructErrors to the pool
+func newStructErrors() interface{} {
+	return &StructErrors{
+		Errors:       map[string]*FieldError{},
+		StructErrors: map[string]*StructErrors{},
 	}
 }
 
@@ -79,13 +54,22 @@ type cachedTags struct {
 }
 
 type cachedField struct {
-	index  int
-	name   string
-	tags   []*cachedTags
-	tag    string
-	kind   reflect.Kind
-	typ    reflect.Type
-	isTime bool
+	index          int
+	name           string
+	tags           []*cachedTags
+	tag            string
+	kind           reflect.Kind
+	typ            reflect.Type
+	isTime         bool
+	isSliceOrArray bool
+	isMap          bool
+	isTimeSubtype  bool
+	sliceSubtype   reflect.Type
+	mapSubtype     reflect.Type
+	sliceSubKind   reflect.Kind
+	mapSubKind     reflect.Kind
+	dive           bool
+	diveTag        string
 }
 
 type cachedStruct struct {
@@ -138,20 +122,123 @@ var fieldsCache = &fieldsCacheMap{m: map[string][]*cachedTags{}}
 // FieldError contains a single field's validation error along
 // with other properties that may be needed for error message creation
 type FieldError struct {
-	Field string
-	Tag   string
-	Kind  reflect.Kind
-	Type  reflect.Type
-	Param string
-	Value interface{}
+	Field            string
+	Tag              string
+	Kind             reflect.Kind
+	Type             reflect.Type
+	Param            string
+	Value            interface{}
+	IsPlaceholderErr bool
+	IsSliceOrArray   bool
+	IsMap            bool
+	SliceOrArrayErrs map[int]error         // could be a *FieldError or *StructErrors
+	MapErrs          map[interface{}]error // could be a *FieldError or *StructErrors
 }
 
 // This is intended for use in development + debugging and not intended to be a production error message.
 // it also allows FieldError to be used as an Error interface
 func (e *FieldError) Error() string {
+
+	if e.IsPlaceholderErr {
+
+		buff := bytes.NewBufferString("")
+
+		if e.IsSliceOrArray {
+
+			for j, err := range e.SliceOrArrayErrs {
+				buff.WriteString("\n")
+				buff.WriteString(fmt.Sprintf(sliceErrMsg, e.Field, j, "\n"+err.Error()))
+			}
+
+		} else if e.IsMap {
+
+			for key, err := range e.MapErrs {
+				buff.WriteString(fmt.Sprintf(mapErrMsg, e.Field, key, "\n"+err.Error()))
+			}
+		}
+
+		return strings.TrimSpace(buff.String())
+	}
+
 	return fmt.Sprintf(fieldErrMsg, e.Field, e.Tag)
 }
 
+// Flatten flattens the FieldError hierarchical structure into a flat namespace style field name
+// for those that want/need it.
+// This is now needed because of the new dive functionality
+func (e *FieldError) Flatten() map[string]*FieldError {
+
+	errs := map[string]*FieldError{}
+
+	if e.IsPlaceholderErr {
+
+		if e.IsSliceOrArray {
+			for key, err := range e.SliceOrArrayErrs {
+
+				fe, ok := err.(*FieldError)
+
+				if ok {
+
+					if flat := fe.Flatten(); flat != nil && len(flat) > 0 {
+						for k, v := range flat {
+							if fe.IsPlaceholderErr {
+								errs[fmt.Sprintf("[%#v]%s", key, k)] = v
+							} else {
+								errs[fmt.Sprintf("[%#v]", key)] = v
+							}
+
+						}
+					}
+				} else {
+
+					se := err.(*StructErrors)
+
+					if flat := se.Flatten(); flat != nil && len(flat) > 0 {
+						for k, v := range flat {
+							errs[fmt.Sprintf("[%#v].%s.%s", key, se.Struct, k)] = v
+						}
+					}
+				}
+			}
+		}
+
+		if e.IsMap {
+			for key, err := range e.MapErrs {
+
+				fe, ok := err.(*FieldError)
+
+				if ok {
+
+					if flat := fe.Flatten(); flat != nil && len(flat) > 0 {
+						for k, v := range flat {
+							if fe.IsPlaceholderErr {
+								errs[fmt.Sprintf("[%#v]%s", key, k)] = v
+							} else {
+								errs[fmt.Sprintf("[%#v]", key)] = v
+							}
+						}
+					}
+				} else {
+
+					se := err.(*StructErrors)
+
+					if flat := se.Flatten(); flat != nil && len(flat) > 0 {
+						for k, v := range flat {
+							errs[fmt.Sprintf("[%#v].%s.%s", key, se.Struct, k)] = v
+						}
+					}
+				}
+			}
+		}
+
+		return errs
+	}
+
+	errs[e.Field] = e
+
+	return errs
+}
+
 // StructErrors is hierarchical list of field and struct validation errors
 // for a non hierarchical representation please see the Flatten method for StructErrors
 type StructErrors struct {
@@ -178,7 +265,7 @@ func (e *StructErrors) Error() string {
 		buff.WriteString(err.Error())
 	}
 
-	return buff.String()
+	return strings.TrimSpace(buff.String())
 }
 
 // Flatten flattens the StructErrors hierarchical structure into a flat namespace style field name
@@ -193,7 +280,17 @@ func (e *StructErrors) Flatten() map[string]*FieldError {
 
 	for _, f := range e.Errors {
 
-		errs[f.Field] = f
+		if flat := f.Flatten(); flat != nil && len(flat) > 0 {
+
+			for k, fe := range flat {
+
+				if f.IsPlaceholderErr {
+					errs[f.Field+k] = fe
+				} else {
+					errs[k] = fe
+				}
+			}
+		}
 	}
 
 	for key, val := range e.StructErrors {
@@ -231,7 +328,7 @@ type Validate struct {
 // New creates a new Validate instance for use.
 func New(tagName string, funcs map[string]Func) *Validate {
 
-	structPool = newPool(10)
+	structPool = &sync.Pool{New: newStructErrors}
 
 	return &Validate{
 		tagName:         tagName,
@@ -251,9 +348,8 @@ func (v *Validate) SetTag(tagName string) {
 // nearly all cases. only increase if you have a deeply nested struct structure.
 // NOTE: this method is not thread-safe
 // NOTE: this is only here to keep compatibility with v5, in v6 the method will be removed
-// and the max pool size will be passed into the New function
 func (v *Validate) SetMaxStructPoolSize(max int) {
-	structPool = newPool(max)
+	structPool = &sync.Pool{New: newStructErrors}
 }
 
 // AddFunction adds a validation Func to a Validate's map of validators denoted by the key
@@ -312,10 +408,9 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 		structName = structType.Name()
 		numFields = structValue.NumField()
 		cs = &cachedStruct{name: structName, children: numFields}
-		structCache.Set(structType, cs)
 	}
 
-	validationErrors := structPool.Borrow()
+	validationErrors := structPool.Get().(*StructErrors)
 	validationErrors.Struct = structName
 
 	for i := 0; i < numFields; i++ {
@@ -340,7 +435,7 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 
 			typeField = structType.Field(i)
 
-			cField = &cachedField{index: i, tag: typeField.Tag.Get(v.tagName)}
+			cField = &cachedField{index: i, tag: typeField.Tag.Get(v.tagName), isTime: (valueField.Type() == reflect.TypeOf(time.Time{}) || valueField.Type() == reflect.TypeOf(&time.Time{}))}
 
 			if cField.tag == noValidationTag {
 				cs.children--
@@ -373,9 +468,7 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 				continue
 			}
 
-			if cField.isTime || valueField.Type() == reflect.TypeOf(time.Time{}) {
-
-				cField.isTime = true
+			if cField.isTime {
 
 				if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil {
 					validationErrors.Errors[fieldError.Field] = fieldError
@@ -390,6 +483,62 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 					continue
 				}
 
+				if (valueField.Kind() == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() {
+
+					if strings.Contains(cField.tag, omitempty) {
+						goto CACHEFIELD
+					}
+
+					tags := strings.Split(cField.tag, tagSeparator)
+
+					if len(tags) > 0 {
+
+						var param string
+						vals := strings.SplitN(tags[0], tagKeySeparator, 2)
+
+						if len(vals) > 1 {
+							param = vals[1]
+						}
+
+						validationErrors.Errors[cField.name] = &FieldError{
+							Field: cField.name,
+							Tag:   vals[0],
+							Param: param,
+							Value: valueField.Interface(),
+							Kind:  valueField.Kind(),
+							Type:  valueField.Type(),
+						}
+
+						goto CACHEFIELD
+					}
+				}
+
+				// if we get here, the field is interface and could be a struct or a field
+				// and we need to check the inner type and validate
+				if cField.kind == reflect.Interface {
+
+					valueField = valueField.Elem()
+
+					if valueField.Kind() == reflect.Ptr && !valueField.IsNil() {
+						valueField = valueField.Elem()
+					}
+
+					if valueField.Kind() == reflect.Struct {
+						goto VALIDATESTRUCT
+					}
+
+					// sending nil for cField as it was type interface and could be anything
+					// each time and so must be calculated each time and can't be cached reliably
+					if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, nil); fieldError != nil {
+						validationErrors.Errors[fieldError.Field] = fieldError
+						// free up memory reference
+						fieldError = nil
+					}
+
+					goto CACHEFIELD
+				}
+
+			VALIDATESTRUCT:
 				if structErrors := v.structRecursive(top, valueField.Interface(), valueField.Interface()); structErrors != nil {
 					validationErrors.StructErrors[cField.name] = structErrors
 					// free up memory map no longer needed
@@ -397,22 +546,48 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 				}
 			}
 
-		default:
+		case reflect.Slice, reflect.Array:
+			cField.isSliceOrArray = true
+			cField.sliceSubtype = cField.typ.Elem()
+			cField.isTimeSubtype = (cField.sliceSubtype == reflect.TypeOf(time.Time{}) || cField.sliceSubtype == reflect.TypeOf(&time.Time{}))
+			cField.sliceSubKind = cField.sliceSubtype.Kind()
 
 			if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil {
 				validationErrors.Errors[fieldError.Field] = fieldError
 				// free up memory reference
 				fieldError = nil
 			}
+
+		case reflect.Map:
+			cField.isMap = true
+			cField.mapSubtype = cField.typ.Elem()
+			cField.isTimeSubtype = (cField.mapSubtype == reflect.TypeOf(time.Time{}) || cField.mapSubtype == reflect.TypeOf(&time.Time{}))
+			cField.mapSubKind = cField.mapSubtype.Kind()
+
+			if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil {
+				validationErrors.Errors[fieldError.Field] = fieldError
+				// free up memory reference
+				fieldError = nil
+			}
+
+		default:
+			if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil {
+				validationErrors.Errors[fieldError.Field] = fieldError
+				// free up memory reference
+				fieldError = nil
+			}
 		}
 
+	CACHEFIELD:
 		if !isCached {
 			cs.fields = append(cs.fields, cField)
 		}
 	}
 
+	structCache.Set(structType, cs)
+
 	if len(validationErrors.Errors) == 0 && len(validationErrors.StructErrors) == 0 {
-		structPool.Return(validationErrors)
+		structPool.Put(validationErrors)
 		return nil
 	}
 
@@ -421,13 +596,11 @@ func (v *Validate) structRecursive(top interface{}, current interface{}, s inter
 
 // Field allows validation of a single field, still using tag style validation to check multiple errors
 func (v *Validate) Field(f interface{}, tag string) *FieldError {
-
 	return v.FieldWithValue(nil, f, tag)
 }
 
 // FieldWithValue allows validation of a single field, possibly even against another fields value, still using tag style validation to check multiple errors
 func (v *Validate) FieldWithValue(val interface{}, f interface{}, tag string) *FieldError {
-
 	return v.fieldWithNameAndValue(nil, val, f, tag, "", true, nil)
 }
 
@@ -435,9 +608,10 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 
 	var cField *cachedField
 	var isCached bool
+	var valueField reflect.Value
 
 	// This is a double check if coming from validate.Struct but need to be here in case function is called directly
-	if tag == noValidationTag {
+	if tag == noValidationTag || tag == "" {
 		return nil
 	}
 
@@ -445,25 +619,51 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 		return nil
 	}
 
+	valueField = reflect.ValueOf(f)
+
 	if cacheField == nil {
-		valueField := reflect.ValueOf(f)
 
 		if valueField.Kind() == reflect.Ptr && !valueField.IsNil() {
 			valueField = valueField.Elem()
 			f = valueField.Interface()
 		}
 
-		cField = &cachedField{name: name, kind: valueField.Kind(), tag: tag, typ: valueField.Type()}
+		cField = &cachedField{name: name, kind: valueField.Kind(), tag: tag}
+
+		if cField.kind != reflect.Invalid {
+			cField.typ = valueField.Type()
+		}
+
+		switch cField.kind {
+		case reflect.Slice, reflect.Array:
+			isSingleField = false // cached tags mean nothing because it will be split up while diving
+			cField.isSliceOrArray = true
+			cField.sliceSubtype = cField.typ.Elem()
+			cField.isTimeSubtype = (cField.sliceSubtype == reflect.TypeOf(time.Time{}) || cField.sliceSubtype == reflect.TypeOf(&time.Time{}))
+			cField.sliceSubKind = cField.sliceSubtype.Kind()
+		case reflect.Map:
+			isSingleField = false // cached tags mean nothing because it will be split up while diving
+			cField.isMap = true
+			cField.mapSubtype = cField.typ.Elem()
+			cField.isTimeSubtype = (cField.mapSubtype == reflect.TypeOf(time.Time{}) || cField.mapSubtype == reflect.TypeOf(&time.Time{}))
+			cField.mapSubKind = cField.mapSubtype.Kind()
+		}
 	} else {
 		cField = cacheField
 	}
 
 	switch cField.kind {
+	case reflect.Invalid:
+		return &FieldError{
+			Field: cField.name,
+			Tag:   cField.tag,
+			Kind:  cField.kind,
+		}
 
-	case reflect.Struct, reflect.Interface, reflect.Invalid:
+	case reflect.Struct, reflect.Interface:
 
 		if cField.typ != reflect.TypeOf(time.Time{}) {
-			panic("Invalid field passed to ValidateFieldWithTag")
+			panic("Invalid field passed to fieldWithNameAndValue")
 		}
 	}
 
@@ -477,6 +677,13 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 
 			for _, t := range strings.Split(tag, tagSeparator) {
 
+				if t == diveTag {
+
+					cField.dive = true
+					cField.diveTag = strings.TrimLeft(strings.SplitN(tag, diveTag, 2)[1], ",")
+					break
+				}
+
 				orVals := strings.Split(t, orSeparator)
 				cTag := &cachedTags{isOrVal: len(orVals) > 1, keyVals: make([][]string, len(orVals))}
 				cField.tags = append(cField.tags, cTag)
@@ -516,7 +723,22 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 
 			for _, val := range cTag.keyVals {
 
+				// if (idxField.Kind() == reflect.Ptr || idxField.Kind() == reflect.Interface) && idxField.IsNil() {
+				// if val[0] == existsTag {
+				// 	if (cField.kind == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() {
+				// 		fieldErr = &FieldError{
+				// 			Field: name,
+				// 			Tag:   val[0],
+				// 			Value: f,
+				// 			Param: val[1],
+				// 		}
+				// 		err = errors.New(fieldErr.Tag)
+				// 	}
+
+				// } else {
+
 				fieldErr, err = v.fieldWithNameAndSingleTag(val, current, f, val[0], val[1], name)
+				// }
 
 				if err == nil {
 					return nil
@@ -534,6 +756,18 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 			return fieldErr
 		}
 
+		if cTag.keyVals[0][0] == existsTag {
+			if (cField.kind == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() {
+				return &FieldError{
+					Field: name,
+					Tag:   cTag.keyVals[0][0],
+					Value: f,
+					Param: cTag.keyVals[0][1],
+				}
+			}
+			continue
+		}
+
 		if fieldErr, err = v.fieldWithNameAndSingleTag(val, current, f, cTag.keyVals[0][0], cTag.keyVals[0][1], name); err != nil {
 
 			fieldErr.Kind = cField.kind
@@ -543,9 +777,231 @@ func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f
 		}
 	}
 
+	if cField.dive {
+
+		if cField.isSliceOrArray {
+
+			if errs := v.traverseSliceOrArray(val, current, valueField, cField); errs != nil && len(errs) > 0 {
+
+				return &FieldError{
+					Field:            cField.name,
+					Kind:             cField.kind,
+					Type:             cField.typ,
+					Value:            f,
+					IsPlaceholderErr: true,
+					IsSliceOrArray:   true,
+					SliceOrArrayErrs: errs,
+				}
+			}
+
+		} else if cField.isMap {
+			if errs := v.traverseMap(val, current, valueField, cField); errs != nil && len(errs) > 0 {
+
+				return &FieldError{
+					Field:            cField.name,
+					Kind:             cField.kind,
+					Type:             cField.typ,
+					Value:            f,
+					IsPlaceholderErr: true,
+					IsMap:            true,
+					MapErrs:          errs,
+				}
+			}
+		} else {
+			// throw error, if not a slice or map then should not have gotten here
+			panic("dive error! can't dive on a non slice or map")
+		}
+	}
+
 	return nil
 }
 
+func (v *Validate) traverseMap(val interface{}, current interface{}, valueField reflect.Value, cField *cachedField) map[interface{}]error {
+
+	errs := map[interface{}]error{}
+
+	for _, key := range valueField.MapKeys() {
+
+		idxField := valueField.MapIndex(key)
+
+		if cField.mapSubKind == reflect.Ptr && !idxField.IsNil() {
+			idxField = idxField.Elem()
+			cField.mapSubKind = idxField.Kind()
+		}
+
+		switch cField.mapSubKind {
+		case reflect.Struct, reflect.Interface:
+
+			if cField.isTimeSubtype {
+
+				if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil {
+					errs[key.Interface()] = fieldError
+				}
+
+				continue
+			}
+
+			if (idxField.Kind() == reflect.Ptr || idxField.Kind() == reflect.Interface) && idxField.IsNil() {
+
+				if strings.Contains(cField.diveTag, omitempty) {
+					continue
+				}
+
+				tags := strings.Split(cField.diveTag, tagSeparator)
+
+				if len(tags) > 0 {
+
+					var param string
+					vals := strings.SplitN(tags[0], tagKeySeparator, 2)
+
+					if len(vals) > 1 {
+						param = vals[1]
+					}
+
+					errs[key.Interface()] = &FieldError{
+						Field: fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()),
+						Tag:   vals[0],
+						Param: param,
+						Value: idxField.Interface(),
+						Kind:  idxField.Kind(),
+						Type:  cField.mapSubtype,
+					}
+				}
+
+				continue
+			}
+
+			// if we get here, the field is interface and could be a struct or a field
+			// and we need to check the inner type and validate
+			if idxField.Kind() == reflect.Interface {
+
+				idxField = idxField.Elem()
+
+				if idxField.Kind() == reflect.Ptr && !idxField.IsNil() {
+					idxField = idxField.Elem()
+				}
+
+				if idxField.Kind() == reflect.Struct {
+					goto VALIDATESTRUCT
+				}
+
+				// sending nil for cField as it was type interface and could be anything
+				// each time and so must be calculated each time and can't be cached reliably
+				if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil {
+					errs[key.Interface()] = fieldError
+				}
+
+				continue
+			}
+
+		VALIDATESTRUCT:
+			if structErrors := v.structRecursive(val, current, idxField.Interface()); structErrors != nil {
+				errs[key.Interface()] = structErrors
+			}
+
+		default:
+			if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil {
+				errs[key.Interface()] = fieldError
+			}
+		}
+	}
+
+	return errs
+}
+
+func (v *Validate) traverseSliceOrArray(val interface{}, current interface{}, valueField reflect.Value, cField *cachedField) map[int]error {
+
+	errs := map[int]error{}
+
+	for i := 0; i < valueField.Len(); i++ {
+
+		idxField := valueField.Index(i)
+
+		if cField.sliceSubKind == reflect.Ptr && !idxField.IsNil() {
+			idxField = idxField.Elem()
+			cField.sliceSubKind = idxField.Kind()
+		}
+
+		switch cField.sliceSubKind {
+		case reflect.Struct, reflect.Interface:
+
+			if cField.isTimeSubtype {
+
+				if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil {
+					errs[i] = fieldError
+				}
+
+				continue
+			}
+
+			if (idxField.Kind() == reflect.Ptr || idxField.Kind() == reflect.Interface) && idxField.IsNil() {
+
+				if strings.Contains(cField.diveTag, omitempty) {
+					continue
+				}
+
+				tags := strings.Split(cField.diveTag, tagSeparator)
+
+				if len(tags) > 0 {
+
+					var param string
+					vals := strings.SplitN(tags[0], tagKeySeparator, 2)
+
+					if len(vals) > 1 {
+						param = vals[1]
+					}
+
+					errs[i] = &FieldError{
+						Field: fmt.Sprintf(arrayIndexFieldName, cField.name, i),
+						Tag:   vals[0],
+						Param: param,
+						Value: idxField.Interface(),
+						Kind:  idxField.Kind(),
+						Type:  cField.sliceSubtype,
+					}
+				}
+
+				continue
+			}
+
+			// if we get here, the field is interface and could be a struct or a field
+			// and we need to check the inner type and validate
+			if idxField.Kind() == reflect.Interface {
+
+				idxField = idxField.Elem()
+
+				if idxField.Kind() == reflect.Ptr && !idxField.IsNil() {
+					idxField = idxField.Elem()
+				}
+
+				if idxField.Kind() == reflect.Struct {
+					goto VALIDATESTRUCT
+				}
+
+				// sending nil for cField as it was type interface and could be anything
+				// each time and so must be calculated each time and can't be cached reliably
+				if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil {
+					errs[i] = fieldError
+				}
+
+				continue
+			}
+
+		VALIDATESTRUCT:
+			if structErrors := v.structRecursive(val, current, idxField.Interface()); structErrors != nil {
+				errs[i] = structErrors
+			}
+
+		default:
+			if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil {
+				errs[i] = fieldError
+			}
+		}
+	}
+
+	return errs
+}
+
 func (v *Validate) fieldWithNameAndSingleTag(val interface{}, current interface{}, f interface{}, key string, param string, name string) (*FieldError, error) {
 
 	// OK to continue because we checked it's existance before getting into this loop
@@ -553,6 +1009,10 @@ func (v *Validate) fieldWithNameAndSingleTag(val interface{}, current interface{
 		return nil, nil
 	}
 
 	valFunc, ok := v.validationFuncs[key]
 	if !ok {
 		panic(fmt.Sprintf("Undefined validation function on field %s", name))
diff --git a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator_test.go b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator_test.go
index a2d5394a54ff7fd5ddd4db55ec88ac82ab35795a..0752932e13175ef597a18744612c88c34e687e4e 100644
--- a/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator_test.go
+++ b/Godeps/_workspace/src/gopkg.in/bluesuncorp/validator.v5/validator_test.go
@@ -1,6 +1,7 @@
 package validator
 
 import (
+	"encoding/json"
 	"fmt"
 	"path"
 	"reflect"
@@ -14,6 +15,11 @@ import (
 // - Run "gocov test | gocov report" to report on test coverage by file
 // - Run "gocov test | gocov annotate -" to report on all code and functions; those marked with "MISS" were never called
 //
+// or
+//
+// -- may be a good idea to change the output path to somewhere like /tmp
+// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
+//
 //
 // go test -cpuprofile cpu.out
 // ./validator.test -test.bench=. -test.cpuprofile=cpu.prof
@@ -226,6 +232,1116 @@ func AssertMapFieldError(t *testing.T, s map[string]*FieldError, field string, e
 	EqualSkip(t, 2, val.Tag, expectedTag)
 }
 
+func TestExistsValidation(t *testing.T) {
+
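+	// the "exists" tag distinguishes a key missing from the JSON payload from one
+	// present with a zero value; the field must be a pointer for that distinction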
+	jsonText := "{ \"truthiness2\": true }"
+
+	type Thing struct {
+		Truthiness *bool `json:"truthiness" validate:"exists,required"`
+	}
+
+	var ting Thing
+
+	err := json.Unmarshal([]byte(jsonText), &ting)
+	Equal(t, err, nil)
+	NotEqual(t, ting, nil)
+	Equal(t, ting.Truthiness, nil)
+
+	errs := validate.Struct(ting)
+	NotEqual(t, errs, nil)
+	AssertFieldError(t, errs, "Truthiness", "exists")
+
+	jsonText = "{ \"truthiness\": true }"
+
+	err = json.Unmarshal([]byte(jsonText), &ting)
+	Equal(t, err, nil)
+	NotEqual(t, ting, nil)
+	Equal(t, ting.Truthiness, true)
+
+	errs = validate.Struct(ting)
+	Equal(t, errs, nil)
+}
+
+func TestSliceMapArrayChanFuncPtrInterfaceRequiredValidation(t *testing.T) {
+
+	var m map[string]string
+
+	errs := validate.Field(m, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	m = map[string]string{}
+	errs = validate.Field(m, "required")
+	Equal(t, errs, nil)
+
+	var arr [5]string
+	errs = validate.Field(arr, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	arr[0] = "ok"
+	errs = validate.Field(arr, "required")
+	Equal(t, errs, nil)
+
+	var s []string
+	errs = validate.Field(s, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	s = []string{}
+	errs = validate.Field(s, "required")
+	Equal(t, errs, nil)
+
+	var c chan string
+	errs = validate.Field(c, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	c = make(chan string)
+	errs = validate.Field(c, "required")
+	Equal(t, errs, nil)
+
+	var tst *int
+	errs = validate.Field(tst, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	one := 1
+	tst = &one
+	errs = validate.Field(tst, "required")
+	Equal(t, errs, nil)
+
+	var iface interface{}
+
+	errs = validate.Field(iface, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	errs = validate.Field(iface, "omitempty,required")
+	Equal(t, errs, nil)
+
+	errs = validate.Field(iface, "")
+	Equal(t, errs, nil)
+
+	errs = validate.Field(iface, "len=1")
+	NotEqual(t, errs, nil)
+
+	var f func(string)
+
+	errs = validate.Field(f, "required")
+	NotEqual(t, errs, nil)
+	// AssertError(t, errs, "", "", "required")
+
+	f = func(name string) {}
+
+	errs = validate.Field(f, "required")
+	Equal(t, errs, nil)
+}
+
+func TestBadKeyValidation(t *testing.T) {
+	type Test struct {
+		Name string `validate:"required, "`
+	}
+
+	tst := &Test{
+		Name: "test",
+	}
+
+	PanicMatches(t, func() { validate.Struct(tst) }, "Invalid validation tag on field Name")
+}
+
+func TestFlattenValidation(t *testing.T) {
+
+	type Inner struct {
+		Name string `validate:"required"`
+	}
+
+	type TestMultiDimensionalStructsPtr struct {
+		Errs [][]*Inner `validate:"gt=0,dive,dive,required"`
+	}
+
+	var errStructPtrArray [][]*Inner
+
+	errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, {"ok"}})
+
+	tmsp := &TestMultiDimensionalStructsPtr{
+		Errs: errStructPtrArray,
+	}
+
+	errs := validate.Struct(tmsp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+	// for full test coverage
+	fmt.Sprint(errs.Error())
+
+	fieldErr := errs.Errors["Errs"]
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, fieldErr.Field, "Errs")
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 1)
+
+	innerSlice1, ok := fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSlice1.IsPlaceholderErr, true)
+	Equal(t, innerSlice1.Field, "Errs[0]")
+
+	flatFieldErr, ok := fieldErr.Flatten()["[0][1].Inner.Name"]
+	Equal(t, ok, true)
+	Equal(t, flatFieldErr.Field, "Name")
+	Equal(t, flatFieldErr.Tag, "required")
+
+	structErrFlatten, ok := errs.Flatten()["Errs[0][1].Inner.Name"]
+	Equal(t, ok, true)
+	Equal(t, structErrFlatten.Field, "Name")
+	Equal(t, structErrFlatten.Tag, "required")
+
+	errStructPtrArray = [][]*Inner{}
+	errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, nil, {"ok"}})
+
+	tmsp = &TestMultiDimensionalStructsPtr{
+		Errs: errStructPtrArray,
+	}
+
+	errs = validate.Struct(tmsp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+	// for full test coverage
+	fmt.Sprint(errs.Error())
+
+	fieldErr = errs.Errors["Errs"]
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, fieldErr.Field, "Errs")
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 1)
+
+	innerSlice1, ok = fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSlice1.IsPlaceholderErr, true)
+	Equal(t, innerSlice1.Field, "Errs[0]")
+
+	flatFieldErr, ok = fieldErr.Flatten()["[0][1]"]
+	Equal(t, ok, true)
+	Equal(t, flatFieldErr.Field, "Errs[0][1]")
+	Equal(t, flatFieldErr.Tag, "required")
+
+	type TestMapStructPtr struct {
+		Errs map[int]*Inner `validate:"gt=0,dive,required"`
+	}
+
+	mip := map[int]*Inner{0: {"ok"}, 3: {""}, 4: {"ok"}}
+
+	msp := &TestMapStructPtr{
+		Errs: mip,
+	}
+
+	errs = validate.Struct(msp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldError := errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 1)
+
+	innerStructError, ok := fieldError.MapErrs[3].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, innerStructError.Struct, "Inner")
+	Equal(t, len(innerStructError.Errors), 1)
+
+	innerInnerFieldError, ok := innerStructError.Errors["Name"]
+	Equal(t, ok, true)
+	Equal(t, innerInnerFieldError.IsPlaceholderErr, false)
+	Equal(t, innerInnerFieldError.IsSliceOrArray, false)
+	Equal(t, innerInnerFieldError.Field, "Name")
+	Equal(t, innerInnerFieldError.Tag, "required")
+
+	flatErrs, ok := errs.Flatten()["Errs[3].Inner.Name"]
+	Equal(t, ok, true)
+	Equal(t, flatErrs.Field, "Name")
+	Equal(t, flatErrs.Tag, "required")
+
+	mip2 := map[int]*Inner{0: {"ok"}, 3: nil, 4: {"ok"}}
+
+	msp2 := &TestMapStructPtr{
+		Errs: mip2,
+	}
+
+	errs = validate.Struct(msp2)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldError = errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 1)
+
+	innerFieldError, ok := fieldError.MapErrs[3].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerFieldError.IsPlaceholderErr, false)
+	Equal(t, innerFieldError.IsSliceOrArray, false)
+	Equal(t, innerFieldError.Field, "Errs[3]")
+	Equal(t, innerFieldError.Tag, "required")
+
+	flatErrs, ok = errs.Flatten()["Errs[3]"]
+	Equal(t, ok, true)
+	Equal(t, flatErrs.Field, "Errs[3]")
+	Equal(t, flatErrs.Tag, "required")
+
+	type TestMapInnerArrayStruct struct {
+		Errs map[int][]string `validate:"gt=0,dive,dive,required"`
+	}
+
+	mias := map[int][]string{0: {"ok"}, 3: {"ok", ""}, 4: {"ok"}}
+
+	mia := &TestMapInnerArrayStruct{
+		Errs: mias,
+	}
+
+	errs = validate.Struct(mia)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	flatErrs, ok = errs.Flatten()["Errs[3][1]"]
+	Equal(t, ok, true)
+	Equal(t, flatErrs.Field, "Errs[3][1]")
+	Equal(t, flatErrs.Tag, "required")
+}
+
+func TestInterfaceErrValidation(t *testing.T) {
+
+	var v1 interface{}
+	var v2 interface{}
+
+	v2 = 1
+	v1 = v2
+
+	err := validate.Field(v1, "len=1")
+	Equal(t, err, nil)
+	err = validate.Field(v2, "len=1")
+	Equal(t, err, nil)
+
+	type ExternalCMD struct {
+		Userid string      `json:"userid"`
+		Action uint32      `json:"action"`
+		Data   interface{} `json:"data,omitempty" validate:"required"`
+	}
+
+	s := &ExternalCMD{
+		Userid: "123456",
+		Action: 10000,
+		// Data:   1,
+	}
+
+	errs := validate.Struct(s)
+	NotEqual(t, errs, nil)
+	Equal(t, errs.Errors["Data"].Field, "Data")
+	Equal(t, errs.Errors["Data"].Tag, "required")
+
+	type ExternalCMD2 struct {
+		Userid string      `json:"userid"`
+		Action uint32      `json:"action"`
+		Data   interface{} `json:"data,omitempty" validate:"len=1"`
+	}
+
+	s2 := &ExternalCMD2{
+		Userid: "123456",
+		Action: 10000,
+		// Data:   1,
+	}
+
+	errs = validate.Struct(s2)
+	NotEqual(t, errs, nil)
+	Equal(t, errs.Errors["Data"].Field, "Data")
+	Equal(t, errs.Errors["Data"].Tag, "len")
+	Equal(t, errs.Errors["Data"].Param, "1")
+
+	s3 := &ExternalCMD2{
+		Userid: "123456",
+		Action: 10000,
+		Data:   2,
+	}
+
+	errs = validate.Struct(s3)
+	NotEqual(t, errs, nil)
+	Equal(t, errs.Errors["Data"].Field, "Data")
+	Equal(t, errs.Errors["Data"].Tag, "len")
+	Equal(t, errs.Errors["Data"].Param, "1")
+
+	type Inner struct {
+		Name string `validate:"required"`
+	}
+
+	inner := &Inner{
+		Name: "",
+	}
+
+	s4 := &ExternalCMD{
+		Userid: "123456",
+		Action: 10000,
+		Data:   inner,
+	}
+
+	errs = validate.Struct(s4)
+	NotEqual(t, errs, nil)
+	Equal(t, errs.StructErrors["Data"].Struct, "Inner")
+	Equal(t, errs.StructErrors["Data"].Errors["Name"].Field, "Name")
+	Equal(t, errs.StructErrors["Data"].Errors["Name"].Tag, "required")
+
+	type TestMapStructPtr struct {
+		Errs map[int]interface{} `validate:"gt=0,dive,len=2"`
+	}
+
+	mip := map[int]interface{}{0: &Inner{"ok"}, 3: nil, 4: &Inner{"ok"}}
+
+	msp := &TestMapStructPtr{
+		Errs: mip,
+	}
+
+	errs = validate.Struct(msp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldError := errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 1)
+
+	innerFieldError, ok := fieldError.MapErrs[3].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerFieldError.IsPlaceholderErr, false)
+	Equal(t, innerFieldError.IsMap, false)
+	Equal(t, len(innerFieldError.MapErrs), 0)
+	Equal(t, innerFieldError.Field, "Errs[3]")
+	Equal(t, innerFieldError.Tag, "len")
+
+	type TestMultiDimensionalStructs struct {
+		Errs [][]interface{} `validate:"gt=0,dive,dive,len=2"`
+	}
+
+	var errStructArray [][]interface{}
+
+	errStructArray = append(errStructArray, []interface{}{&Inner{"ok"}, &Inner{""}, &Inner{""}})
+	errStructArray = append(errStructArray, []interface{}{&Inner{"ok"}, &Inner{""}, &Inner{""}})
+
+	tms := &TestMultiDimensionalStructs{
+		Errs: errStructArray,
+	}
+
+	errs = validate.Struct(tms)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok := errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 2)
+
+	sliceError1, ok := fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok := sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerInnersliceError1 := innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	type TestMultiDimensionalStructsPtr2 struct {
+		Errs [][]*Inner `validate:"gt=0,dive,dive,len=2"`
+	}
+
+	var errStructPtr2Array [][]*Inner
+
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, nil})
+
+	tmsp2 := &TestMultiDimensionalStructsPtr2{
+		Errs: errStructPtr2Array,
+	}
+
+	errs = validate.Struct(tmsp2)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 3)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok = sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerSliceStructError2, ok := sliceError1.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSliceStructError2.IsPlaceholderErr, false)
+	Equal(t, innerSliceStructError2.IsSliceOrArray, false)
+	Equal(t, len(innerSliceStructError2.SliceOrArrayErrs), 0)
+	Equal(t, innerSliceStructError2.Field, "Errs[2][2]")
+
+	innerInnersliceError1 = innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	m := map[int]interface{}{0: "ok", 3: "", 4: "ok"}
+
+	err = validate.Field(m, "len=3,dive,len=2")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, true)
+	Equal(t, err.IsMap, true)
+	Equal(t, len(err.MapErrs), 1)
+
+	err = validate.Field(m, "len=2,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, false)
+	Equal(t, err.IsMap, false)
+	Equal(t, len(err.MapErrs), 0)
+
+	arr := []interface{}{"ok", "", "ok"}
+
+	err = validate.Field(arr, "len=3,dive,len=2")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, true)
+	Equal(t, err.IsSliceOrArray, true)
+	Equal(t, len(err.SliceOrArrayErrs), 1)
+
+	err = validate.Field(arr, "len=2,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, false)
+	Equal(t, err.IsSliceOrArray, false)
+	Equal(t, len(err.SliceOrArrayErrs), 0)
+
+	type MyStruct struct {
+		A, B string
+		C    interface{}
+	}
+
+	var a MyStruct
+
+	a.A = "value"
+	a.C = "nu"
+
+	errs = validate.Struct(a)
+	Equal(t, errs, nil)
+}
+
+func TestMapDiveValidation(t *testing.T) {
+
+	n := map[int]interface{}{0: nil}
+	err := validate.Field(n, "omitempty,required")
+	Equal(t, err, nil)
+
+	m := map[int]string{0: "ok", 3: "", 4: "ok"}
+
+	err = validate.Field(m, "len=3,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, true)
+	Equal(t, err.IsMap, true)
+	Equal(t, len(err.MapErrs), 1)
+
+	err = validate.Field(m, "len=2,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, false)
+	Equal(t, err.IsMap, false)
+	Equal(t, len(err.MapErrs), 0)
+
+	type Inner struct {
+		Name string `validate:"required"`
+	}
+
+	type TestMapStruct struct {
+		Errs map[int]Inner `validate:"gt=0,dive"`
+	}
+
+	mi := map[int]Inner{0: {"ok"}, 3: {""}, 4: {"ok"}}
+
+	ms := &TestMapStruct{
+		Errs: mi,
+	}
+
+	errs := validate.Struct(ms)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+	// for full test coverage
+	fmt.Sprint(errs.Error())
+
+	fieldError := errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 1)
+
+	structErr, ok := fieldError.MapErrs[3].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(structErr.Errors), 1)
+
+	innerErr := structErr.Errors["Name"]
+	Equal(t, innerErr.IsPlaceholderErr, false)
+	Equal(t, innerErr.IsMap, false)
+	Equal(t, len(innerErr.MapErrs), 0)
+	Equal(t, innerErr.Field, "Name")
+	Equal(t, innerErr.Tag, "required")
+
+	type TestMapTimeStruct struct {
+		Errs map[int]*time.Time `validate:"gt=0,dive,required"`
+	}
+
+	t1 := time.Now().UTC()
+
+	mta := map[int]*time.Time{0: &t1, 3: nil, 4: nil}
+
+	mt := &TestMapTimeStruct{
+		Errs: mta,
+	}
+
+	errs = validate.Struct(mt)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldError = errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 2)
+
+	innerErr, ok = fieldError.MapErrs[3].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerErr.IsPlaceholderErr, false)
+	Equal(t, innerErr.IsMap, false)
+	Equal(t, len(innerErr.MapErrs), 0)
+	Equal(t, innerErr.Field, "Errs[3]")
+	Equal(t, innerErr.Tag, "required")
+
+	type TestMapStructPtr struct {
+		Errs map[int]*Inner `validate:"gt=0,dive,required"`
+	}
+
+	mip := map[int]*Inner{0: {"ok"}, 3: nil, 4: {"ok"}}
+
+	msp := &TestMapStructPtr{
+		Errs: mip,
+	}
+
+	errs = validate.Struct(msp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldError = errs.Errors["Errs"]
+	Equal(t, fieldError.IsPlaceholderErr, true)
+	Equal(t, fieldError.IsMap, true)
+	Equal(t, len(fieldError.MapErrs), 1)
+
+	innerFieldError, ok := fieldError.MapErrs[3].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerFieldError.IsPlaceholderErr, false)
+	Equal(t, innerFieldError.IsMap, false)
+	Equal(t, len(innerFieldError.MapErrs), 0)
+	Equal(t, innerFieldError.Field, "Errs[3]")
+	Equal(t, innerFieldError.Tag, "required")
+
+	type TestMapStructPtr2 struct {
+		Errs map[int]*Inner `validate:"gt=0,dive,omitempty,required"`
+	}
+
+	mip2 := map[int]*Inner{0: {"ok"}, 3: nil, 4: {"ok"}}
+
+	msp2 := &TestMapStructPtr2{
+		Errs: mip2,
+	}
+
+	errs = validate.Struct(msp2)
+	Equal(t, errs, nil)
+}
+
+func TestArrayDiveValidation(t *testing.T) {
+
+	arr := []string{"ok", "", "ok"}
+
+	err := validate.Field(arr, "len=3,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, true)
+	Equal(t, err.IsSliceOrArray, true)
+	Equal(t, len(err.SliceOrArrayErrs), 1)
+
+	// flat := err.Flatten()
+	// fe, ok := flat["[1]"]
+	// Equal(t, ok, true)
+	// Equal(t, fe.Tag, "required")
+
+	err = validate.Field(arr, "len=2,dive,required")
+	NotEqual(t, err, nil)
+	Equal(t, err.IsPlaceholderErr, false)
+	Equal(t, err.IsSliceOrArray, false)
+	Equal(t, len(err.SliceOrArrayErrs), 0)
+
+	type BadDive struct {
+		Name string `validate:"dive"`
+	}
+
+	bd := &BadDive{
+		Name: "TEST",
+	}
+
+	PanicMatches(t, func() { validate.Struct(bd) }, "dive error! can't dive on a non slice or map")
+
+	type Test struct {
+		Errs []string `validate:"gt=0,dive,required"`
+	}
+
+	test := &Test{
+		Errs: []string{"ok", "", "ok"},
+	}
+
+	errs := validate.Struct(test)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	// flat = errs.Flatten()
+	// me, ok := flat["Errs[1]"]
+	// Equal(t, ok, true)
+	// Equal(t, me.Field, "Errs[1]")
+	// Equal(t, me.Tag, "required")
+
+	fieldErr, ok := errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 1)
+
+	innerErr, ok := fieldErr.SliceOrArrayErrs[1].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerErr.Tag, required)
+	Equal(t, innerErr.IsPlaceholderErr, false)
+	Equal(t, innerErr.Field, "Errs[1]")
+
+	test = &Test{
+		Errs: []string{"ok", "ok", ""},
+	}
+
+	errs = validate.Struct(test)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 1)
+
+	innerErr, ok = fieldErr.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerErr.Tag, required)
+	Equal(t, innerErr.IsPlaceholderErr, false)
+	Equal(t, innerErr.Field, "Errs[2]")
+
+	type TestMultiDimensional struct {
+		Errs [][]string `validate:"gt=0,dive,dive,required"`
+	}
+
+	var errArray [][]string
+
+	errArray = append(errArray, []string{"ok", "", ""})
+	errArray = append(errArray, []string{"ok", "", ""})
+
+	tm := &TestMultiDimensional{
+		Errs: errArray,
+	}
+
+	errs = validate.Struct(tm)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 2)
+
+	sliceError1, ok := fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+	Equal(t, sliceError1.Field, "Errs[0]")
+
+	innerSliceError1, ok := sliceError1.SliceOrArrayErrs[1].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSliceError1.IsPlaceholderErr, false)
+	Equal(t, innerSliceError1.Tag, required)
+	Equal(t, innerSliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerSliceError1.SliceOrArrayErrs), 0)
+	Equal(t, innerSliceError1.Field, "Errs[0][1]")
+
+	type Inner struct {
+		Name string `validate:"required"`
+	}
+
+	type TestMultiDimensionalStructs struct {
+		Errs [][]Inner `validate:"gt=0,dive,dive"`
+	}
+
+	var errStructArray [][]Inner
+
+	errStructArray = append(errStructArray, []Inner{{"ok"}, {""}, {""}})
+	errStructArray = append(errStructArray, []Inner{{"ok"}, {""}, {""}})
+
+	tms := &TestMultiDimensionalStructs{
+		Errs: errStructArray,
+	}
+
+	errs = validate.Struct(tms)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 2)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok := sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerInnersliceError1 := innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	type TestMultiDimensionalStructsPtr struct {
+		Errs [][]*Inner `validate:"gt=0,dive,dive"`
+	}
+
+	var errStructPtrArray [][]*Inner
+
+	errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, nil})
+
+	tmsp := &TestMultiDimensionalStructsPtr{
+		Errs: errStructPtrArray,
+	}
+
+	errs = validate.Struct(tmsp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+	// for full test coverage
+	fmt.Sprint(errs.Error())
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 3)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok = sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerInnersliceError1 = innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	type TestMultiDimensionalStructsPtr2 struct {
+		Errs [][]*Inner `validate:"gt=0,dive,dive,required"`
+	}
+
+	var errStructPtr2Array [][]*Inner
+
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, nil})
+
+	tmsp2 := &TestMultiDimensionalStructsPtr2{
+		Errs: errStructPtr2Array,
+	}
+
+	errs = validate.Struct(tmsp2)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 3)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok = sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerSliceStructError2, ok := sliceError1.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSliceStructError2.IsPlaceholderErr, false)
+	Equal(t, innerSliceStructError2.IsSliceOrArray, false)
+	Equal(t, len(innerSliceStructError2.SliceOrArrayErrs), 0)
+	Equal(t, innerSliceStructError2.Field, "Errs[2][2]")
+
+	innerInnersliceError1 = innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	type TestMultiDimensionalStructsPtr3 struct {
+		Errs [][]*Inner `validate:"gt=0,dive,dive,omitempty"`
+	}
+
+	var errStructPtr3Array [][]*Inner
+
+	errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, {""}})
+	errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, nil})
+
+	tmsp3 := &TestMultiDimensionalStructsPtr3{
+		Errs: errStructPtr3Array,
+	}
+
+	errs = validate.Struct(tmsp3)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 3)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[0].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceStructError1, ok = sliceError1.SliceOrArrayErrs[1].(*StructErrors)
+	Equal(t, ok, true)
+	Equal(t, len(innerSliceStructError1.Errors), 1)
+
+	innerInnersliceError1 = innerSliceStructError1.Errors["Name"]
+	Equal(t, innerInnersliceError1.IsPlaceholderErr, false)
+	Equal(t, innerInnersliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerInnersliceError1.SliceOrArrayErrs), 0)
+
+	type TestMultiDimensionalTimeTime struct {
+		Errs [][]*time.Time `validate:"gt=0,dive,dive,required"`
+	}
+
+	var errTimePtr3Array [][]*time.Time
+
+	t1 := time.Now().UTC()
+	t2 := time.Now().UTC()
+	t3 := time.Now().UTC().Add(time.Hour * 24)
+
+	errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, &t2, &t3})
+	errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, &t2, nil})
+	errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, nil, nil})
+
+	tmtp3 := &TestMultiDimensionalTimeTime{
+		Errs: errTimePtr3Array,
+	}
+
+	errs = validate.Struct(tmtp3)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 2)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceError1, ok = sliceError1.SliceOrArrayErrs[1].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSliceError1.IsPlaceholderErr, false)
+	Equal(t, innerSliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerSliceError1.SliceOrArrayErrs), 0)
+	Equal(t, innerSliceError1.Field, "Errs[2][1]")
+	Equal(t, innerSliceError1.Tag, required)
+
+	type TestMultiDimensionalTimeTime2 struct {
+		Errs [][]*time.Time `validate:"gt=0,dive,dive,required"`
+	}
+
+	var errTimeArray [][]*time.Time
+
+	t1 = time.Now().UTC()
+	t2 = time.Now().UTC()
+	t3 = time.Now().UTC().Add(time.Hour * 24)
+
+	errTimeArray = append(errTimeArray, []*time.Time{&t1, &t2, &t3})
+	errTimeArray = append(errTimeArray, []*time.Time{&t1, &t2, nil})
+	errTimeArray = append(errTimeArray, []*time.Time{&t1, nil, nil})
+
+	tmtp := &TestMultiDimensionalTimeTime2{
+		Errs: errTimeArray,
+	}
+
+	errs = validate.Struct(tmtp)
+	NotEqual(t, errs, nil)
+	Equal(t, len(errs.Errors), 1)
+
+	fieldErr, ok = errs.Errors["Errs"]
+	Equal(t, ok, true)
+	Equal(t, fieldErr.IsPlaceholderErr, true)
+	Equal(t, fieldErr.IsSliceOrArray, true)
+	Equal(t, len(fieldErr.SliceOrArrayErrs), 2)
+
+	sliceError1, ok = fieldErr.SliceOrArrayErrs[2].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, sliceError1.IsPlaceholderErr, true)
+	Equal(t, sliceError1.IsSliceOrArray, true)
+	Equal(t, len(sliceError1.SliceOrArrayErrs), 2)
+
+	innerSliceError1, ok = sliceError1.SliceOrArrayErrs[1].(*FieldError)
+	Equal(t, ok, true)
+	Equal(t, innerSliceError1.IsPlaceholderErr, false)
+	Equal(t, innerSliceError1.IsSliceOrArray, false)
+	Equal(t, len(innerSliceError1.SliceOrArrayErrs), 0)
+	Equal(t, innerSliceError1.Field, "Errs[2][1]")
+	Equal(t, innerSliceError1.Tag, required)
+}
+
+func TestNilStructPointerValidation(t *testing.T) {
+	type Inner struct {
+		Data string
+	}
+
+	type Outer struct {
+		Inner *Inner `validate:"omitempty"`
+	}
+
+	inner := &Inner{
+		Data: "test",
+	}
+
+	outer := &Outer{
+		Inner: inner,
+	}
+
+	errs := validate.Struct(outer)
+	Equal(t, errs, nil)
+
+	outer = &Outer{
+		Inner: nil,
+	}
+
+	errs = validate.Struct(outer)
+	Equal(t, errs, nil)
+
+	type Inner2 struct {
+		Data string
+	}
+
+	type Outer2 struct {
+		Inner2 *Inner2 `validate:"required"`
+	}
+
+	inner2 := &Inner2{
+		Data: "test",
+	}
+
+	outer2 := &Outer2{
+		Inner2: inner2,
+	}
+
+	errs = validate.Struct(outer2)
+	Equal(t, errs, nil)
+
+	outer2 = &Outer2{
+		Inner2: nil,
+	}
+
+	errs = validate.Struct(outer2)
+	NotEqual(t, errs, nil)
+
+	type Inner3 struct {
+		Data string
+	}
+
+	type Outer3 struct {
+		Inner3 *Inner3
+	}
+
+	inner3 := &Inner3{
+		Data: "test",
+	}
+
+	outer3 := &Outer3{
+		Inner3: inner3,
+	}
+
+	errs = validate.Struct(outer3)
+	Equal(t, errs, nil)
+
+	type Inner4 struct {
+		Data string
+	}
+
+	type Outer4 struct {
+		Inner4 *Inner4 `validate:"-"`
+	}
+
+	inner4 := &Inner4{
+		Data: "test",
+	}
+
+	outer4 := &Outer4{
+		Inner4: inner4,
+	}
+
+	errs = validate.Struct(outer4)
+	Equal(t, errs, nil)
+}
+
 func TestSSNValidation(t *testing.T) {
 	tests := []struct {
 		param    string
@@ -1100,7 +2216,7 @@ func TestStructOnlyValidation(t *testing.T) {
 		InnerStruct: nil,
 	}
 
-	errs := validate.Struct(outer).Flatten()
+	errs := validate.Struct(outer)
 	NotEqual(t, errs, nil)
 
 	inner := &Inner{
@@ -1111,9 +2227,8 @@ func TestStructOnlyValidation(t *testing.T) {
 		InnerStruct: inner,
 	}
 
-	errs = validate.Struct(outer).Flatten()
-	NotEqual(t, errs, nil)
-	Equal(t, len(errs), 0)
+	errs = validate.Struct(outer)
+	Equal(t, errs, nil)
 }
 
 func TestGtField(t *testing.T) {
@@ -2727,14 +3842,14 @@ func TestStructSliceValidation(t *testing.T) {
 		Min:       []int{1, 2},
 		Max:       []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
 		MinMax:    []int{1, 2, 3, 4, 5},
-		OmitEmpty: []int{},
+		OmitEmpty: nil,
 	}
 
 	err := validate.Struct(tSuccess)
 	Equal(t, err, nil)
 
 	tFail := &TestSlice{
-		Required:  []int{},
+		Required:  nil,
 		Len:       []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1},
 		Min:       []int{},
 		Max:       []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1},
@@ -2772,7 +3887,7 @@ func TestInvalidField(t *testing.T) {
 		Test: "1",
 	}
 
-	PanicMatches(t, func() { validate.Field(s, "required") }, "Invalid field passed to ValidateFieldWithTag")
+	PanicMatches(t, func() { validate.Field(s, "required") }, "Invalid field passed to fieldWithNameAndValue")
 }
 
 func TestInvalidTagField(t *testing.T) {
@@ -2790,3 +3905,23 @@ func TestInvalidValidatorFunction(t *testing.T) {
 
 	PanicMatches(t, func() { validate.Field(s.Test, "zzxxBadFunction") }, fmt.Sprintf("Undefined validation function on field %s", ""))
 }
+
+func TestPoolObjectMaxSizeValidation(t *testing.T) {
+	// a max pool size of 0 ensures pooled objects are released
+	// as soon as the pool is saturated
+	validate.SetMaxStructPoolSize(0)
+
+	tSuccess := &TestSlice{
+		Required:  []int{1},
+		Len:       []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+		Min:       []int{1, 2},
+		Max:       []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+		MinMax:    []int{1, 2, 3, 4, 5},
+		OmitEmpty: nil,
+	}
+
+	for i := 0; i < 2; i++ {
+		err := validate.Struct(tSuccess)
+		Equal(t, err, nil)
+	}
+}
diff --git a/README.md b/README.md
index d2bbd08c1487db2146b45766b578b91e5c552a02..67606b92b2a26efb6a9809df4b16f57065e54106 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,11 @@ The recommended way of working with eris-db is through [eris-cli](https://github
 
 Make sure you have the proper [Go](https://golang.org/) distribution for your OS and architecture. The recommended version is `1.5.1`. Follow the instructions on how to set up GOROOT and GOPATH.
 
-You will also need the following libraries: `git, mercurial, libgmp3-dev`
+You will also need the following libraries: `git, libgmp3-dev`
 
-On Ubuntu: `sudo apt-get install git mercurial libgmp3-dev`
+On Ubuntu: `sudo apt-get install git libgmp3-dev`
+
+On Mac: `brew install git gmp`
 
 Next you pull in the code:
 
@@ -168,4 +170,4 @@ In root: `go test ./...`
 
 ### Benchmarking
 
-As of `0.11.0`, there are no benchmarks. We aim to have a framework built before `1.0`.
\ No newline at end of file
+As of `0.11.0`, there are no benchmarks. We aim to have a framework built before `1.0`.
diff --git a/circle.yml b/circle.yml
index ed4d76dae6d9e869d9797c3d907c2773b2dc4cbb..9700be15a7e015139a28c5696dfabaef1a942cf6 100644
--- a/circle.yml
+++ b/circle.yml
@@ -8,7 +8,7 @@ machine:
 
 dependencies:
   pre:
-    - sudo curl -L -o /usr/bin/docker http://s3-external-1.amazonaws.com/circle-downloads/docker-$DOCKER_VERSION-circleci; chmod 0755 /usr/bin/docker; true
+    - sudo curl -L -o /usr/bin/docker 'http://s3-external-1.amazonaws.com/circle-downloads/docker-1.8.2-circleci'
     - sudo service docker start
     - "sudo apt-get update && sudo apt-get install -y libgmp3-dev"
 
diff --git a/erisdb/erisdbss/server_manager.go b/erisdb/erisdbss/server_manager.go
index 701e807112bf6c8e1ec75da26104dbac8592d1c3..5d25ef4ac9072b370b1a3eec25c15bad836f1faa 100644
--- a/erisdb/erisdbss/server_manager.go
+++ b/erisdb/erisdbss/server_manager.go
@@ -63,7 +63,7 @@ func (this *CmdProcess) Start(doneChan chan<- error) {
 		log.Debug(text)
 		if strings.Index(text, this.token) != -1 {
 			log.Debug("Token found", "token", this.token)
-			go func(){
+			go func() {
 				for scanner.Scan() {
 					text := scanner.Text()
 					log.Debug(text)
diff --git a/erisdb/methods.go b/erisdb/methods.go
index 7a27628da3939ec2b4edb1ae0102017fa8c006b4..0b0a370eee5fdb224a8d6f52a686caef700c5ff4 100644
--- a/erisdb/methods.go
+++ b/erisdb/methods.go
@@ -42,6 +42,8 @@ const (
 	SIGN_TX                   = SERVICE_NAME + ".signTx"
 	TRANSACT                  = SERVICE_NAME + ".transact"
 	TRANSACT_AND_HOLD         = SERVICE_NAME + ".transactAndHold"
+	SEND                      = SERVICE_NAME + ".send"
+	SEND_AND_HOLD             = SERVICE_NAME + ".sendAndHold"
 	TRANSACT_NAMEREG          = SERVICE_NAME + ".transactNameReg"
 	EVENT_SUBSCRIBE           = SERVICE_NAME + ".eventSubscribe" // Events
 	EVENT_UNSUBSCRIBE         = SERVICE_NAME + ".eventUnsubscribe"
@@ -96,6 +98,8 @@ func (this *ErisDbMethods) getMethods() map[string]RequestHandlerFunc {
 	dhMap[SIGN_TX] = this.SignTx
 	dhMap[TRANSACT] = this.Transact
 	dhMap[TRANSACT_AND_HOLD] = this.TransactAndHold
+	dhMap[SEND] = this.Send
+	dhMap[SEND_AND_HOLD] = this.SendAndHold
 	dhMap[TRANSACT_NAMEREG] = this.TransactNameReg
 	// Namereg
 	dhMap[GET_NAMEREG_ENTRY] = this.NameRegEntry
@@ -415,6 +419,32 @@ func (this *ErisDbMethods) TransactAndHold(request *rpc.RPCRequest, requester in
 	return ce, 0, nil
 }
 
+func (this *ErisDbMethods) Send(request *rpc.RPCRequest, requester interface{}) (interface{}, int, error) {
+	param := &SendParam{}
+	err := this.codec.DecodeBytes(param, request.Params)
+	if err != nil {
+		return nil, rpc.INVALID_PARAMS, err
+	}
+	receipt, errC := this.pipe.Transactor().Send(param.PrivKey, param.ToAddress, param.Amount)
+	if errC != nil {
+		return nil, rpc.INTERNAL_ERROR, errC
+	}
+	return receipt, 0, nil
+}
+
+func (this *ErisDbMethods) SendAndHold(request *rpc.RPCRequest, requester interface{}) (interface{}, int, error) {
+	param := &SendParam{}
+	err := this.codec.DecodeBytes(param, request.Params)
+	if err != nil {
+		return nil, rpc.INVALID_PARAMS, err
+	}
+	rec, errC := this.pipe.Transactor().SendAndHold(param.PrivKey, param.ToAddress, param.Amount)
+	if errC != nil {
+		return nil, rpc.INTERNAL_ERROR, errC
+	}
+	return rec, 0, nil
+}
+
 func (this *ErisDbMethods) TransactNameReg(request *rpc.RPCRequest, requester interface{}) (interface{}, int, error) {
 	param := &TransactNameRegParam{}
 	err := this.codec.DecodeBytes(param, request.Params)
diff --git a/erisdb/params.go b/erisdb/params.go
index eb1db8d808a03015a8c9e68ba38283eff964c356..2046756577d4ff3a0527180be8569f168d905b5f 100644
--- a/erisdb/params.go
+++ b/erisdb/params.go
@@ -90,6 +90,13 @@ type (
 		GasLimit int64  `json:"gas_limit"`
 	}
 
+	// Used when sending a 'Send' transaction.
+	SendParam struct {
+		PrivKey   []byte `json:"priv_key"`
+		ToAddress []byte `json:"to_address"`
+		Amount    int64  `json:"amount"`
+	}
+
 	NameRegEntryParam struct {
 		Name string `json:"name"`
 	}
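To make the shape of the new payload concrete, here is a minimal client-side sketch of building a `SendParam` body. The field values are placeholders, the fully-qualified method name is assumed from the `SEND` constant above, and note that `encoding/json` renders byte slices as base64, whereas the server's codec may expect a different encoding:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Mirrors erisdb's SendParam; repeated here only to illustrate the wire format.
	type SendParam struct {
		PrivKey   []byte `json:"priv_key"`
		ToAddress []byte `json:"to_address"`
		Amount    int64  `json:"amount"`
	}

	func main() {
		// Placeholder key/address material -- real values are 64- and 20-byte slices.
		param := SendParam{
			PrivKey:   make([]byte, 64),
			ToAddress: make([]byte, 20),
			Amount:    100,
		}
		body, err := json.Marshal(param)
		if err != nil {
			panic(err)
		}
		// A JSON-RPC envelope would carry this object as its "params" member.
		fmt.Println(string(body))
	}
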
diff --git a/erisdb/pipe/blockchain.go b/erisdb/pipe/blockchain.go
index 2f564135289e1735ee2d73dbf70e9d333a0a6865..cb01eb4bce238fb7315707d5a7532c27262bbc99 100644
--- a/erisdb/pipe/blockchain.go
+++ b/erisdb/pipe/blockchain.go
@@ -219,7 +219,7 @@ func getHeightMinMax(fda []*FilterData, height int) (int, int, []*FilterData, er
 			}
 			switch fd.Op {
 			case "==":
-				if val > height && val < 0 {
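+				// an "==" filter can only match a height in [0, chain height]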
+				if val > height || val < 0 {
 					return 0, 0, nil, fmt.Errorf("No such block: %d (chain height: %d)", val, height)
 				}
 				min = val
diff --git a/erisdb/pipe/pipe.go b/erisdb/pipe/pipe.go
index 58603661ccfd87f5a3523900c9c8bc81e676ae9c..e01a6788315261f6832c883a040c520d5d3d7dd9 100644
--- a/erisdb/pipe/pipe.go
+++ b/erisdb/pipe/pipe.go
@@ -69,6 +69,8 @@ type (
 		Call(fromAddress, toAddress, data []byte) (*Call, error)
 		CallCode(fromAddress, code, data []byte) (*Call, error)
 		BroadcastTx(tx types.Tx) (*Receipt, error)
+		Send(privKey, toAddress []byte, amount int64) (*Receipt, error)
+		SendAndHold(privKey, toAddress []byte, amount int64) (*Receipt, error)
 		Transact(privKey, address, data []byte, gasLimit, fee int64) (*Receipt, error)
 		TransactAndHold(privKey, address, data []byte, gasLimit, fee int64) (*types.EventDataCall, error)
 		TransactNameReg(privKey []byte, name, data string, amount, fee int64) (*Receipt, error)
@@ -98,6 +100,7 @@ func NewPipe(tNode *node.Node) Pipe {
 	namereg := newNamereg(tNode.ConsensusState())
 	net := newNetwork(tNode.Switch())
 	txs := newTransactor(tNode.EventSwitch(), tNode.ConsensusState(), tNode.MempoolReactor(), events)
+
 	return &PipeImpl{
 		tNode,
 		accounts,
diff --git a/erisdb/pipe/transactor.go b/erisdb/pipe/transactor.go
index b8693485ee9ce8b05e8c163cc31df66b36ecec15..c406d9c66d775c77bcc54912dbac002f8f8d16cd 100644
--- a/erisdb/pipe/transactor.go
+++ b/erisdb/pipe/transactor.go
@@ -144,7 +144,6 @@ func (this *transactor) Transact(privKey, address, data []byte, gasLimit, fee in
 	} else {
 		sequence = acc.Sequence + 1
 	}
-	// fmt.Printf("Sequence %d\n", sequence)
 	txInput := &types.TxInput{
 		Address:  pa.Address,
 		Amount:   1,
@@ -208,6 +207,92 @@ func (this *transactor) TransactAndHold(privKey, address, data []byte, gasLimit,
 	return ret, rErr
 }
 
+func (this *transactor) Send(privKey, toAddress []byte, amount int64) (*Receipt, error) {
+	var toAddr []byte
+	if len(toAddress) == 0 {
+		toAddr = nil
+	} else if len(toAddress) != 20 {
+		return nil, fmt.Errorf("To-address is not of the right length: %d", len(toAddress))
+	} else {
+		toAddr = toAddress
+	}
+
+	if len(privKey) != 64 {
+		return nil, fmt.Errorf("Private key is not of the right length: %d", len(privKey))
+	}
+
+	this.txMtx.Lock()
+	defer this.txMtx.Unlock()
+	pa := account.GenPrivAccountFromPrivKeyBytes(privKey)
+	cache := this.mempoolReactor.Mempool.GetCache()
+	acc := cache.GetAccount(pa.Address)
+	var sequence int
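+	// the next sequence is one past the account's current; unknown accounts start at 1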
+	if acc == nil {
+		sequence = 1
+	} else {
+		sequence = acc.Sequence + 1
+	}
+
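+	// build a SendTx with a single input and a single output, both for the full amount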
+	tx := types.NewSendTx()
+
+	txInput := &types.TxInput{
+		Address:  pa.Address,
+		Amount:   amount,
+		Sequence: sequence,
+		PubKey:   pa.PubKey,
+	}
+
+	tx.Inputs = append(tx.Inputs, txInput)
+
+	txOutput := &types.TxOutput{toAddr, amount}
+
+	tx.Outputs = append(tx.Outputs, txOutput)
+
+	// Got ourselves a tx.
+	txS, errS := this.SignTx(tx, []*account.PrivAccount{pa})
+	if errS != nil {
+		return nil, errS
+	}
+	return this.BroadcastTx(txS)
+}
+
+func (this *transactor) SendAndHold(privKey, toAddress []byte, amount int64) (*Receipt, error) {
+	rec, tErr := this.Send(privKey, toAddress, amount)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	wc := make(chan *types.SendTx, 1) // buffered so a late event can't block the callback
+	subId := fmt.Sprintf("%X", rec.TxHash)
+
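+	// subscribe to 'account output' events on the destination address, keyed by
+	// the tx hash, so the committed SendTx can be matched below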
+	this.eventEmitter.Subscribe(subId, types.EventStringAccOutput(toAddress), func(evt types.EventData) {
+		event := evt.(types.EventDataTx)
+		tx := event.Tx.(*types.SendTx)
+		wc <- tx
+	})
+
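+	// give the transaction five minutes to be committed before timing out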
+	timer := time.NewTimer(300 * time.Second)
+	toChan := timer.C
+
+	var rErr error
+
+	pa := account.GenPrivAccountFromPrivKeyBytes(privKey)
+
+	select {
+	case <-toChan:
+		rErr = fmt.Errorf("Transaction timed out. Hash: " + subId)
+	case e := <-wc:
+		timer.Stop()
+		if bytes.Equal(e.Inputs[0].Address, pa.Address) && e.Inputs[0].Amount == amount {
+			this.eventEmitter.Unsubscribe(subId)
+			return rec, rErr
+		}
+		rErr = fmt.Errorf("Received an unexpected event for tx. Hash: " + subId)
+	}
+	this.eventEmitter.Unsubscribe(subId)
+	return nil, rErr
+}
+
 func (this *transactor) TransactNameReg(privKey []byte, name, data string, amount, fee int64) (*Receipt, error) {
 
 	if len(privKey) != 64 {
diff --git a/erisdb/restServer.go b/erisdb/restServer.go
index e9131b0ec3a5d73afba64d81e201851ecf62c6c4..5585d572559fc2e1c8caebad3889167939df25c7 100644
--- a/erisdb/restServer.go
+++ b/erisdb/restServer.go
@@ -424,9 +424,9 @@ func (this *RestServer) handleCallCode(c *gin.Context) {
 }
 
 func (this *RestServer) handleTransact(c *gin.Context) {
-	
+
 	_, hold := c.Get("hold")
-	
+
 	param := &TransactParam{}
 	errD := this.codec.Decode(param, c.Request.Body)
 	if errD != nil {
@@ -537,7 +537,7 @@ func parseTxModifier(c *gin.Context) {
 	hold := c.Query("hold")
 	if hold == "true" {
 		c.Set("hold", true)
-	} else if (hold != "") {
+	} else if hold != "" {
 		if hold != "false" {
 			c.Writer.WriteHeader(400)
 			c.Writer.Write([]byte("tx hold must be either 'true' or 'false', found: " + hold))
diff --git a/erisdb/serve.go b/erisdb/serve.go
index adf5c4d8e0696e5d6179ff415afd4dfd844fece5..f3b090247d6c056e79f0ed5c1d32067805a17e8e 100644
--- a/erisdb/serve.go
+++ b/erisdb/serve.go
@@ -23,9 +23,6 @@ import (
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/types"
 )
 
-const ERISDB_VERSION = "0.11.5"
-const TENDERMINT_VERSION = "0.5.0"
-
 var log = log15.New("module", "eris/erisdb_server")
 var tmConfig cfg.Config
 
@@ -62,7 +59,7 @@ func ServeErisDB(workDir string) (*server.ServeProcess, error) {
 
 	// Get tendermint configuration
 	tmConfig = tmcfg.GetConfig(workDir)
-	tmConfig.Set("version", TENDERMINT_VERSION)
+	tmConfig.Set("version", node.Version)
 	cfg.ApplyConfig(tmConfig) // Notify modules of new config
 
 	// load the priv validator
diff --git a/erisdb/wsService.go b/erisdb/wsService.go
index f146a50e9eefc9cffe67d15d57af2a18cf13741b..490d284d3e569289d534852a01f30ddc3bee6c8b 100644
--- a/erisdb/wsService.go
+++ b/erisdb/wsService.go
@@ -75,15 +75,15 @@ func (this *ErisDbWsService) writeError(msg, id string, code int, session *serve
 }
 
 // Convenience method for writing responses.
-func (this *ErisDbWsService) writeResponse(id string, result interface{}, session *server.WSSession) {
+func (this *ErisDbWsService) writeResponse(id string, result interface{}, session *server.WSSession) error {
 	response := rpc.NewRPCResponse(id, result)
 	bts, err := this.codec.EncodeBytes(response)
 	log.Debug("RESPONSE: %v\n", response)
 	if err != nil {
 		this.writeError("Internal error: "+err.Error(), id, rpc.INTERNAL_ERROR, session)
-		return
+		return err
 	}
-	session.Write(bts)
+	return session.Write(bts)
 }
 
 // *************************************** Events ************************************
@@ -104,7 +104,10 @@ func (this *ErisDbWsService) EventSubscribe(request *rpc.RPCRequest, requester i
 		return nil, rpc.INTERNAL_ERROR, errSID
 	}
 	callback := func(ret types.EventData) {
-		this.writeResponse(subId, ret, session)
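+		// if the write fails the session is gone; unsubscribe so events stop flowing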
+		writeErr := this.writeResponse(subId, ret, session)
+		if writeErr != nil {
+			this.pipe.Events().Unsubscribe(subId)
+		}
 	}
 	_, errC := this.pipe.Events().Subscribe(subId, eventId, callback)
 	if errC != nil {
diff --git a/license.md b/license.md
new file mode 100644
index 0000000000000000000000000000000000000000..8dada3edaf50dbc082c9a125058f25def75e625a
--- /dev/null
+++ b/license.md
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/server/websocket.go b/server/websocket.go
index 377de80ebe01471373abe35263580c1521f0eb1a..b5d4b4caae74308238f48a5b9ba9ea4f25d25e51 100644
--- a/server/websocket.go
+++ b/server/websocket.go
@@ -223,14 +223,14 @@ func (this *WSSession) readPump() {
 			this.writeCloseChan <- struct{}{}
 			return
 		}
-		
+
 		if msgType != websocket.TextMessage {
 			log.Info("Receiving non text-message from client, closing.")
 			this.writeCloseChan <- struct{}{}
 			return
 		}
-		
-		go func(){
+
+		go func() {
 			// Process the request.
 			this.service.Process(msg, this)
 		}()
@@ -274,16 +274,16 @@ func (this *WSSession) writePump() {
 			}
 		case <-this.writeCloseChan:
 			return
-		// Ticker run out. Time for another ping message.
-		/*
-		case <-ticker.C:
-			if err := this.write(websocket.PingMessage, []byte{}); err != nil {
-				log.Debug("Failed to write ping message to socket. Closing.")
-				return
-			}
+			// Ticker run out. Time for another ping message.
+			/*
+				case <-ticker.C:
+					if err := this.write(websocket.PingMessage, []byte{}); err != nil {
+						log.Debug("Failed to write ping message to socket. Closing.")
+						return
+					}
 			*/
 		}
-		
+
 	}
 }
 
diff --git a/test/mock/pipe.go b/test/mock/pipe.go
index 8408236bab4da78d199eadb66fc251c6961739d3..85aa484c7297a958110703bd8d47ad0ef092671b 100644
--- a/test/mock/pipe.go
+++ b/test/mock/pipe.go
@@ -244,6 +244,14 @@ func (this *transactor) TransactAndHold(privKey, address, data []byte, gasLimit,
 	return nil, nil
 }
 
+func (this *transactor) Send(privKey, toAddress []byte, amount int64) (*ep.Receipt, error) {
+	return nil, nil
+}
+
+func (this *transactor) SendAndHold(privKey, toAddress []byte, amount int64) (*ep.Receipt, error) {
+	return nil, nil
+}
+
 func (this *transactor) TransactNameReg(privKey []byte, name, data string, amount, fee int64) (*ep.Receipt, error) {
 	return this.testData.TransactNameReg.Output, nil
 }
diff --git a/test/server/ws_burst_test.go b/test/server/ws_burst_test.go
index 129ddb7d1f908a44021aa8caa4a2e001b111a791..2d62401310e52cd5b3b523eec4466b2336e8b458 100644
--- a/test/server/ws_burst_test.go
+++ b/test/server/ws_burst_test.go
@@ -102,7 +102,7 @@ func wsClient(doneChan chan bool, errChan chan error) {
 		i++
 	}
 	client.Close()
-	time.Sleep(100*time.Millisecond)
-	
+	time.Sleep(100 * time.Millisecond)
+
 	doneChan <- true
 }
diff --git a/test/testdata/testdata/testdata.go b/test/testdata/testdata/testdata.go
index 2b9ff63c0d9388b064de263fc2c48d6c847ec2e7..b51b6a8f7a67cbd057e62fc54815dd458e83915a 100644
--- a/test/testdata/testdata/testdata.go
+++ b/test/testdata/testdata/testdata.go
@@ -305,7 +305,7 @@ var testDataJson = `{
   },
   "GetNetworkInfo": {
     "output": {
-      "client_version": "0.5.0",
+      "client_version": "0.5.2",
       "moniker": "__MONIKER__",
       "listening": false,
       "listeners": [],
@@ -314,7 +314,7 @@ var testDataJson = `{
   },
   "GetClientVersion": {
     "output": {
-      "client_version": "0.5.0"
+      "client_version": "0.5.2"
     }
   },
   "GetMoniker": {
diff --git a/test/transacting/transacting_tes.go b/test/transacting/transacting_tes.go
index 4d5717fc0e1f4bb6d8ed7072b2ffb8884990b5db..e7fa397b82a8d4a623bd22f6ef1eab3237a7d3eb 100644
--- a/test/transacting/transacting_tes.go
+++ b/test/transacting/transacting_tes.go
@@ -5,10 +5,12 @@ import (
 	"bytes"
 	"fmt"
 	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/stretchr/testify/suite"
-	// "github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/tendermint/types"
+	// "github.com/tendermint/tendermint/types"
 	edb "github.com/eris-ltd/eris-db/erisdb"
 	ess "github.com/eris-ltd/eris-db/erisdb/erisdbss"
 	// ep "github.com/eris-ltd/eris-db/erisdb/pipe"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/gin-gonic/gin"
+	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/log15"
 	"github.com/eris-ltd/eris-db/rpc"
 	"github.com/eris-ltd/eris-db/server"
 	td "github.com/eris-ltd/eris-db/test/testdata/testdata"
@@ -16,10 +18,8 @@ import (
 	"net/http"
 	"os"
 	"path"
-	"testing"
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/gin-gonic/gin"
-	"github.com/eris-ltd/eris-db/Godeps/_workspace/src/github.com/tendermint/log15"
 	"runtime"
+	"testing"
 )
 
 func init() {
@@ -136,4 +136,4 @@ func (this *TxSuite) postJson(endpoint string, v interface{}) *http.Response {
 
 func TestQuerySuite(t *testing.T) {
 	suite.Run(t, &TxSuite{})
-}
\ No newline at end of file
+}
diff --git a/test/web_api/query_test.go b/test/web_api/query_test.go
index ae18061502a7608bb5d2fa419f3a6813bbeb76d5..3acb0a7ee4033d2c12ae1594a94ca9759af0761f 100644
--- a/test/web_api/query_test.go
+++ b/test/web_api/query_test.go
@@ -133,4 +133,4 @@ func generateQuery(fda []*ep.FilterData) string {
 
 func TestQuerySuite(t *testing.T) {
 	suite.Run(t, &QuerySuite{})
-}
\ No newline at end of file
+}
diff --git a/version/version.go b/version/version.go
index 2deaf3ad32b1d4de51baab5a0cbb7f9cb9ce9008..ed42d7045d61d567772c6bc9febc7869a14a1509 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,3 +1,3 @@
 package version
 
-const VERSION = "0.10.4"
+const Version = "0.11.0"