diff --git a/Gopkg.lock b/Gopkg.lock
index 30b89e8a527a4fc2e73f8f079782885b817ddfe0..ac4f2f285d14ed8b28b9331e02b41f7106c6ba3d 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -27,11 +27,11 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:8bd66090a4fc1d422f0cb065a3023bf98f07f44920c3f87da5c3107f990434e4"
+  digest = "1:ad87504ef74b1c36880f9287126dbc8dc4146d86acf902dd776ac6064cc75396"
   name = "github.com/btcsuite/btcd"
   packages = ["btcec"]
   pruneopts = "NUT"
-  revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"
+  revision = "f673a4b563b57b9a95832545c878669a7fa801d9"
 
 [[projects]]
   branch = "master"
@@ -66,7 +66,6 @@
   version = "v1.1.0"
 
 [[projects]]
-  branch = "master"
   digest = "1:294576320c5093015ca9130ed928c84b2fdc8b9db6d136505a513795f3a64e3e"
   name = "github.com/ebuchman/fail-test"
   packages = ["."]
@@ -74,12 +73,16 @@
   revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
 
 [[projects]]
-  digest = "1:5a6f43bb19bea1e7894542a80c6e95bab099c380dd90abebd76d3938e8c66567"
+  digest = "1:2c03593aec9fa7d6969dc7295c636b46369ec2dd45dcf4acfb298281d62528ff"
   name = "github.com/go-kit/kit"
   packages = [
     "log",
     "log/level",
     "log/term",
+    "metrics",
+    "metrics/discard",
+    "metrics/internal/lv",
+    "metrics/prometheus",
   ]
   pruneopts = "NUT"
   revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
@@ -113,8 +116,8 @@
     "types",
   ]
   pruneopts = "UT"
-  revision = "7d68e886eac4f7e34d0d82241a6273d6c304c5cf"
-  version = "v1.1.0"
+  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
+  version = "v1.1.1"
 
 [[projects]]
   digest = "1:713cc7628304d027a7e9edcb52da888a8912d6405250a8d9c8eff6f41dd54398"
@@ -228,20 +231,22 @@
   version = "v1.0.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:22deec4bab258f99bf1e4394c51d976e04ddf9a52f55b0e2912d9cdd503dff16"
+  digest = "1:76463f8d9f141bb3673ccece5fb0d1d0f9588395e94d01253667db733f865b18"
   name = "github.com/prometheus/client_golang"
-  packages = ["prometheus"]
+  packages = [
+    "prometheus",
+    "prometheus/promhttp",
+  ]
   pruneopts = "NUT"
-  revision = "d6a9817c4afc94d51115e4a30d449056a3fbf547"
+  revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
 
 [[projects]]
   branch = "master"
-  digest = "1:53a76eb11bdc815fcf0c757a9648fda0ab6887da13f07587181ff2223b67956c"
+  digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   pruneopts = "NUT"
-  revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
+  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
 
 [[projects]]
   branch = "master"
@@ -257,7 +262,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:b161534d0cce842f1e1eaf087e2c5f28191f0bac0c3070fc9a7e1b6269238d88"
+  digest = "1:37e418257b05a9e9fabbf836df2d8f3613313e80a909da6b9597b759ebca61cd"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -266,7 +271,7 @@
     "xfs",
   ]
   pruneopts = "NUT"
-  revision = "40f013a808ec4fa79def444a1a56de4d1727efcb"
+  revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
 
 [[projects]]
   branch = "master"
@@ -297,7 +302,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:bd62f27525a36697564991b8e6071ff56afa99d3235261924a0212db5ce780bd"
+  digest = "1:922191411ad8f61bcd8018ac127589bb489712c1d1a0ab2497aca4b16de417d2"
   name = "github.com/syndtr/goleveldb"
   packages = [
     "leveldb",
@@ -314,20 +319,7 @@
     "leveldb/util",
   ]
   pruneopts = "NUT"
-  revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697"
-
-[[projects]]
-  digest = "1:b8a05190f9aa37b0ab5b29b480797306b221709ffcb4a0ed9946cc7110849c07"
-  name = "github.com/tendermint/abci"
-  packages = [
-    "client",
-    "example/code",
-    "example/kvstore",
-    "types",
-  ]
-  pruneopts = "UT"
-  revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540"
-  version = "v0.12.0"
+  revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445"
 
 [[projects]]
   branch = "master"
@@ -342,42 +334,44 @@
   revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
 
 [[projects]]
-  digest = "1:0457834dfbe7dfd39c9e2d99147bde666094819e85728eb235a122943660256c"
+  digest = "1:0fbb744c7842baaec7f5b2e0eb9814668bf34c4eb3c4fe6fc9ac856a937a5eff"
   name = "github.com/tendermint/go-amino"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "ed62928576cfcaf887209dc96142cd79cdfff389"
-  version = "0.9.9"
-
-[[projects]]
-  digest = "1:9b0716c497677d6404499b2931cbfac90943d20302b90128b2979b5e1fcb09f5"
-  name = "github.com/tendermint/go-crypto"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
-  version = "v0.6.2"
+  revision = "2106ca61d91029c931fd54968c2bb02dc96b1412"
+  version = "0.10.1"
 
 [[projects]]
-  digest = "1:91de33917aa98c06cb0f32867908071dd6992fff80347c37fc592165051957a0"
+  digest = "1:5a2904670bdaf13e71cd3a726795b2d0ee65a0862d499a1440799bd62618efac"
   name = "github.com/tendermint/iavl"
-  packages = [
-    ".",
-    "sha256truncated",
-  ]
+  packages = ["."]
   pruneopts = "NUT"
-  revision = "c9206995e8f948e99927f5084a88a7e94ca256da"
-  version = "v0.8.0-rc0"
+  revision = "35f66e53d9b01e83b30de68b931f54b2477a94c9"
+  version = "v0.9.2"
 
 [[projects]]
-  digest = "1:0bde6563ae9ebe48770735120ee223a8761e8ec23fde4f36995bbbb4ea69d1ca"
+  digest = "1:46ac5207f9074a8ca246a3fce7aa576f5c5f43a30c8f3ef833e247777fdb8a4f"
   name = "github.com/tendermint/tendermint"
   packages = [
+    "abci/client",
+    "abci/example/code",
+    "abci/example/kvstore",
+    "abci/types",
     "blockchain",
     "config",
     "consensus",
     "consensus/types",
+    "crypto",
+    "crypto/merkle",
+    "crypto/tmhash",
     "evidence",
+    "libs/autofile",
+    "libs/clist",
+    "libs/common",
+    "libs/db",
     "libs/events",
+    "libs/flowrate",
+    "libs/log",
     "libs/pubsub",
     "libs/pubsub/query",
     "mempool",
@@ -401,25 +395,9 @@
     "types",
     "version",
   ]
-  pruneopts = "NUT"
-  revision = "46369a1ab76f274ab47179c4176221842b8207b4"
-  version = "v0.21.0"
-
-[[projects]]
-  digest = "1:1e17077c52b3c68e5ec1059ed078210e8b1030eb30ad23a69b924e7f30bd0132"
-  name = "github.com/tendermint/tmlibs"
-  packages = [
-    "autofile",
-    "clist",
-    "common",
-    "db",
-    "flowrate",
-    "log",
-    "merkle",
-  ]
   pruneopts = "UT"
-  revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38"
-  version = "v0.8.4"
+  revision = "c64a3c74c870d725ba1356f75b4afadf0928c297"
+  version = "v0.22.4"
 
 [[projects]]
   branch = "master"
@@ -458,11 +436,10 @@
     "ssh/terminal",
   ]
   pruneopts = "NUT"
-  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
+  revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9"
 
 [[projects]]
-  branch = "master"
-  digest = "1:cdc8cc9377bcb05ff97816bf398120c0a242f502b5eb2bb27d1d650b523e67e1"
+  digest = "1:15dbe437d38eb2103f6b55348758958a6f85a400ecc16fcb53b3f271d38cd8ea"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -471,14 +448,15 @@
     "http2/hpack",
     "idna",
     "internal/timeseries",
+    "netutil",
     "trace",
   ]
   pruneopts = "NUT"
-  revision = "ed29d75add3d7c4bf7ca65aac0c6df3d1420216f"
+  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
 
 [[projects]]
   branch = "master"
-  digest = "1:569e1719852f85b01939e6040b3eee417913c9e6d0368d3d1460d0af4eb702bc"
+  digest = "1:48419582c83b5715e244977ca617a3ff596dc6808368e3c1dcaf1b3ad2218e53"
   name = "golang.org/x/sys"
   packages = [
     "cpu",
@@ -486,7 +464,7 @@
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "151529c776cdc58ddbe7963ba9af779f3577b419"
+  revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4"
 
 [[projects]]
   digest = "1:a0f29009397dc27c9dc8440f0945d49e5cbb9b72d0b0fc745474d9bfdea2d9f8"
@@ -517,7 +495,7 @@
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "NUT"
-  revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"
+  revision = "fedd2861243fd1a8152376292b921b394c7bef7e"
 
 [[projects]]
   digest = "1:f778941d5c2e46da5e0f5d553d3e80bf70eb40d2e80bb4c649b625b9133f3d5f"
@@ -586,24 +564,26 @@
     "github.com/streadway/simpleuuid",
     "github.com/stretchr/testify/assert",
     "github.com/stretchr/testify/require",
-    "github.com/tendermint/abci/types",
     "github.com/tendermint/go-amino",
-    "github.com/tendermint/go-crypto",
     "github.com/tendermint/iavl",
+    "github.com/tendermint/tendermint/abci/types",
     "github.com/tendermint/tendermint/blockchain",
     "github.com/tendermint/tendermint/config",
     "github.com/tendermint/tendermint/consensus",
     "github.com/tendermint/tendermint/consensus/types",
+    "github.com/tendermint/tendermint/crypto",
+    "github.com/tendermint/tendermint/crypto/tmhash",
+    "github.com/tendermint/tendermint/libs/common",
+    "github.com/tendermint/tendermint/libs/db",
+    "github.com/tendermint/tendermint/libs/log",
     "github.com/tendermint/tendermint/libs/pubsub",
     "github.com/tendermint/tendermint/node",
     "github.com/tendermint/tendermint/p2p",
     "github.com/tendermint/tendermint/proxy",
+    "github.com/tendermint/tendermint/rpc/core",
     "github.com/tendermint/tendermint/rpc/core/types",
     "github.com/tendermint/tendermint/state",
     "github.com/tendermint/tendermint/types",
-    "github.com/tendermint/tmlibs/common",
-    "github.com/tendermint/tmlibs/db",
-    "github.com/tendermint/tmlibs/log",
     "github.com/tmthrgd/go-hex",
     "golang.org/x/crypto/ed25519",
     "golang.org/x/crypto/ripemd160",
diff --git a/Gopkg.toml b/Gopkg.toml
index d96822fe28ea347d92737bd9ea6ef0f241afdc76..4d10b7e138c4c50ffd1d69034f35c6a25401efe6 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -7,34 +7,21 @@
     name = "github.com/gogo/protobuf"
     non-go = false
   [[prune.project]]
-    name = "github.com/tendermint/abci"
+    name = "github.com/tendermint/tendermint"
     non-go = false
   [[prune.project]]
     name = "github.com/tendermint/tmlibs"
     non-go = false
 
-# From Tendermint
-[[constraint]]
+# overriding here because of IAVL
+[[override]]
   name = "github.com/tendermint/tendermint"
-  version = "=0.21.0"
-
-[[constraint]]
-  name = "github.com/tendermint/go-amino"
-  version = "=0.9.9"
+  version = "=0.22.4"
 
 [[constraint]]
   name = "github.com/tendermint/iavl"
-  version = "=0.8.0-rc0"
-
-[[constraint]]
-  name = "github.com/prometheus/client_golang"
-  branch = "master"
-
-[[override]]
-  name = "github.com/tendermint/tmlibs"
-  version = "~0.8.4"
+  version = "=0.9.2"
 
-# We don't care which version Tendermint wants
 [[override]]
   name = "github.com/gogo/protobuf"
   version = "~1.1.0"
diff --git a/README.md b/README.md
index 285449c1549ee1a31f38f50cac993dd5f9754728..2cd557c77833d30ac533c1f5120247f5f6c373da 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ Hyperledger Burrow is a permissioned Ethereum smart-contract blockchain node. It
 Hyperledger Burrow is a permissioned blockchain node that executes smart contract code following the Ethereum specification. Burrow is built for a multi-chain universe with application specific optimization in mind. Burrow as a node is constructed out of three main components: the consensus engine, the permissioned Ethereum virtual machine and the rpc gateway. More specifically Burrow consists of the following:
 
 - **Consensus Engine:** Transactions are ordered and finalised with the Byzantine fault-tolerant Tendermint protocol.  The Tendermint protocol provides high transaction throughput over a set of known validators and prevents the blockchain from forking.
-- **Application Blockchain Interface (ABCI):** The smart contract application interfaces with the consensus engine over the [ABCI](https://github.com/tendermint/abci). The ABCI allows for the consensus engine to remain agnostic from the smart contract application.
+- **Application Blockchain Interface (ABCI):** The smart contract application interfaces with the consensus engine over the [ABCI](https://github.com/tendermint/tendermint/tree/master/abci). The ABCI allows for the consensus engine to remain agnostic from the smart contract application.
 - **Smart Contract Application:** Transactions are validated and applied to the application state in the order that the consensus engine has finalised them. The application state consists of all accounts, the validator set and the name registry. Accounts in Burrow have permissions and either contain smart contract code or correspond to a public-private key pair. A transaction that calls on the smart contract code in a given account will activate the execution of that account’s code in a permissioned virtual machine.
 - **Permissioned Ethereum Virtual Machine:** This virtual machine is built to observe the Ethereum operation code specification and additionally asserts the correct permissions have been granted. Permissioning is enforced through secure native functions and underlies all smart contract code. An arbitrary but finite amount of gas is handed out for every execution to ensure a finite execution duration - “You don’t need money to play, when you have permission to play”.
 - **Application Binary Interface (ABI):** Transactions need to be formulated in a binary format that can be processed by the blockchain node. Current tooling provides functionality to compile, deploy and link solidity smart contracts and formulate transactions to call smart contracts on the chain.
diff --git a/acm/account.go b/acm/account.go
index 4c3b11700378538c78204cb8f4d9742f43befe8c..73d1a0a385c4ae1fd2eddddefa3824d7872de0ed 100644
--- a/acm/account.go
+++ b/acm/account.go
@@ -27,16 +27,9 @@ import (
 
 var GlobalPermissionsAddress = crypto.Address(binary.Zero160)
 
-type Addressable interface {
-	// Get the 20 byte EVM address of this account
-	Address() crypto.Address
-	// Public key from which the Address is derived
-	PublicKey() crypto.PublicKey
-}
-
 // The default immutable interface to an account
 type Account interface {
-	Addressable
+	crypto.Addressable
 	// The value held by this account in terms of the chain-native token
 	Balance() uint64
 	// The EVM byte code held by this account (or equivalently, this contract)
@@ -117,7 +110,7 @@ func AsConcreteAccount(account Account) *ConcreteAccount {
 }
 
 // Creates an otherwise zeroed Account from an Addressable and returns it as MutableAccount
-func FromAddressable(addressable Addressable) *MutableAccount {
+func FromAddressable(addressable crypto.Addressable) *MutableAccount {
 	ca := &ConcreteAccount{
 		Address:   addressable.Address(),
 		PublicKey: addressable.PublicKey(),
@@ -138,6 +131,8 @@ func AsMutableAccount(account Account) *MutableAccount {
 	return AsConcreteAccount(account).MutableAccount()
 }
 
+var _ Account = &MutableAccount{}
+
 func (acc ConcreteAccount) String() string {
 	return fmt.Sprintf("ConcreteAccount{Address: %s; Sequence: %v; PublicKey: %v Balance: %v; CodeLength: %v; Permissions: %s}",
 		acc.Address, acc.Sequence, acc.PublicKey, acc.Balance, len(acc.Code), acc.Permissions)
@@ -177,6 +172,11 @@ func (acc *MutableAccount) AddToBalance(amount uint64) error {
 	return nil
 }
 
+func (acc *MutableAccount) SetBalance(amount uint64) error {
+	acc.concreteAccount.Balance = amount
+	return nil
+}
+
 func (acc *MutableAccount) SetCode(code []byte) error {
 	acc.concreteAccount.Code = code
 	return nil
@@ -186,8 +186,11 @@ func (acc *MutableAccount) IncSequence() {
 	acc.concreteAccount.Sequence++
 }
 
-func (acc *MutableAccount) SetPermissions(permissions permission.AccountPermissions) error {
-	acc.concreteAccount.Permissions = permissions
+func (acc *MutableAccount) SetPermissions(accPerms permission.AccountPermissions) error {
+	if !accPerms.Base.Perms.IsValid() {
+		return fmt.Errorf("attempt to set invalid perm 0%b on account %v", accPerms.Base.Perms, acc)
+	}
+	acc.concreteAccount.Permissions = accPerms
 	return nil
 }
 
diff --git a/acm/balance/balance.go b/acm/balance/balance.go
new file mode 100644
index 0000000000000000000000000000000000000000..20b5cc85a45fd878afd958af0ee47acc9819c710
--- /dev/null
+++ b/acm/balance/balance.go
@@ -0,0 +1,118 @@
+package balance
+
+import "fmt"
+
+type Balances []Balance
+
+func (b Balance) String() string {
+	return fmt.Sprintf("{%v: %d}", b.Type, b.Amount)
+}
+
+func New() Balances {
+	return []Balance{}
+}
+
+func (bs Balances) Len() int {
+	return len(bs)
+}
+
+func (bs Balances) Less(i, j int) bool {
+	if bs[i].Type < bs[j].Type {
+		return true
+	}
+	return bs[i].Type == bs[j].Type && bs[i].Amount < bs[j].Amount
+}
+
+func (bs Balances) Swap(i, j int) {
+	bs[i], bs[j] = bs[j], bs[i]
+}
+
+func (bs Balances) Add(ty Type, amount uint64) Balances {
+	return append(bs, Balance{
+		Type:   ty,
+		Amount: amount,
+	})
+}
+
+func (bs Balances) Native(amount uint64) Balances {
+	return bs.Add(TypeNative, amount)
+}
+
+func (bs Balances) Power(amount uint64) Balances {
+	return bs.Add(TypePower, amount)
+}
+
+func (bs Balances) Sum(bss ...Balances) Balances {
+	return Sum(append(bss, bs)...)
+}
+
+func Sum(bss ...Balances) Balances {
+	sum := New()
+	sumMap := make(map[Type]uint64)
+	for _, bs := range bss {
+		for _, b := range bs {
+			sumMap[b.Type] += b.Amount
+		}
+	}
+	for k, v := range sumMap {
+		sum = sum.Add(k, v)
+	}
+	return sum
+}
+
+func Native(native uint64) Balance {
+	return Balance{
+		Type:   TypeNative,
+		Amount: native,
+	}
+}
+
+func Power(power uint64) Balance {
+	return Balance{
+		Type:   TypePower,
+		Amount: power,
+	}
+}
+
+func (bs Balances) Has(ty Type) bool {
+	for _, b := range bs {
+		if b.Type == ty {
+			return true
+		}
+	}
+	return false
+}
+
+func (bs Balances) Get(ty Type) *uint64 {
+	for _, b := range bs {
+		if b.Type == ty {
+			return &b.Amount
+		}
+	}
+	return nil
+}
+
+func (bs Balances) GetFallback(ty Type, fallback uint64) uint64 {
+	for _, b := range bs {
+		if b.Type == ty {
+			return b.Amount
+		}
+	}
+	return fallback
+}
+
+func (bs Balances) GetNative(fallback uint64) uint64 {
+	return bs.GetFallback(TypeNative, fallback)
+}
+
+func (bs Balances) GetPower(fallback uint64) uint64 {
+	return bs.GetFallback(TypePower, fallback)
+}
+
+func (bs Balances) HasNative() bool {
+	return bs.Has(TypeNative)
+}
+
+func (bs Balances) HasPower() bool {
+	return bs.Has(TypePower)
+}
diff --git a/acm/balance/balance.pb.go b/acm/balance/balance.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1e0bdbd29ff97f91072eeea680e2adad28f5b88
--- /dev/null
+++ b/acm/balance/balance.pb.go
@@ -0,0 +1,337 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: balance.proto
+
+/*
+	Package balance is a generated protocol buffer package.
+
+	It is generated from these files:
+		balance.proto
+
+	It has these top-level messages:
+		Balance
+*/
+package balance
+
+import proto "github.com/gogo/protobuf/proto"
+import golang_proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = golang_proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Balance struct {
+	Type   Type   `protobuf:"varint,1,opt,name=Type,proto3,casttype=Type" json:"Type,omitempty"`
+	Amount uint64 `protobuf:"varint,2,opt,name=Amount,proto3" json:"Amount,omitempty"`
+}
+
+func (m *Balance) Reset()                    { *m = Balance{} }
+func (*Balance) ProtoMessage()               {}
+func (*Balance) Descriptor() ([]byte, []int) { return fileDescriptorBalance, []int{0} }
+
+func (m *Balance) GetType() Type {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *Balance) GetAmount() uint64 {
+	if m != nil {
+		return m.Amount
+	}
+	return 0
+}
+
+func (*Balance) XXX_MessageName() string {
+	return "balance.Balance"
+}
+func init() {
+	proto.RegisterType((*Balance)(nil), "balance.Balance")
+	golang_proto.RegisterType((*Balance)(nil), "balance.Balance")
+}
+func (m *Balance) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Balance) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Type != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintBalance(dAtA, i, uint64(m.Type))
+	}
+	if m.Amount != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintBalance(dAtA, i, uint64(m.Amount))
+	}
+	return i, nil
+}
+
+func encodeVarintBalance(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Balance) Size() (n int) {
+	var l int
+	_ = l
+	if m.Type != 0 {
+		n += 1 + sovBalance(uint64(m.Type))
+	}
+	if m.Amount != 0 {
+		n += 1 + sovBalance(uint64(m.Amount))
+	}
+	return n
+}
+
+func sovBalance(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozBalance(x uint64) (n int) {
+	return sovBalance(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Balance) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowBalance
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Balance: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Balance: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowBalance
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= (Type(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
+			}
+			m.Amount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowBalance
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Amount |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipBalance(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthBalance
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipBalance(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowBalance
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowBalance
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowBalance
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthBalance
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowBalance
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipBalance(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthBalance = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowBalance   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("balance.proto", fileDescriptorBalance) }
+func init() { golang_proto.RegisterFile("balance.proto", fileDescriptorBalance) }
+
+var fileDescriptorBalance = []byte{
+	// 187 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4a, 0xcc, 0x49,
+	0xcc, 0x4b, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x74, 0xd3,
+	0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1,
+	0xf2, 0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xf4, 0x29, 0xb9, 0x72, 0xb1, 0x3b,
+	0x41, 0x74, 0x0a, 0xc9, 0x70, 0xb1, 0x84, 0x54, 0x16, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0,
+	0x3a, 0x71, 0xfc, 0xba, 0x27, 0x0f, 0xe6, 0x07, 0x81, 0x49, 0x21, 0x31, 0x2e, 0x36, 0xc7, 0xdc,
+	0xfc, 0xd2, 0xbc, 0x12, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x96, 0x20, 0x28, 0xcf, 0x8a, 0x65, 0xc6,
+	0x02, 0x79, 0x06, 0x27, 0xfb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48,
+	0x8e, 0xf1, 0xc0, 0x63, 0x39, 0xc6, 0x13, 0x8f, 0xe5, 0x18, 0xa3, 0x34, 0x91, 0xdc, 0x92, 0x51,
+	0x59, 0x90, 0x5a, 0x94, 0x93, 0x9a, 0x92, 0x9e, 0x5a, 0xa4, 0x9f, 0x54, 0x5a, 0x54, 0x94, 0x5f,
+	0xae, 0x9f, 0x98, 0x9c, 0xab, 0x0f, 0x75, 0x76, 0x12, 0x1b, 0xd8, 0x39, 0xc6, 0x80, 0x00, 0x00,
+	0x00, 0xff, 0xff, 0x1d, 0x22, 0xfc, 0x8b, 0xd7, 0x00, 0x00, 0x00,
+}
diff --git a/acm/balance/balance_test.go b/acm/balance/balance_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6710e09a07f5ef5450a56a0b997e4656a10c06f
--- /dev/null
+++ b/acm/balance/balance_test.go
@@ -0,0 +1,22 @@
+package balance
+
+import (
+	"sort"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSum(t *testing.T) {
+	one := New().Power(23223).Native(34).Native(1111)
+	two := New().Power(3).Native(22)
+	sum := one.Sum(two)
+	assert.Equal(t, New().Power(23226).Native(1167), sum)
+}
+
+func TestSort(t *testing.T) {
+	balances := New().Power(232).Native(2523543).Native(232).Power(2).Power(4).Native(1)
+	sortedBalances := New().Native(1).Native(232).Native(2523543).Power(2).Power(4).Power(232)
+	sort.Sort(balances)
+	assert.Equal(t, sortedBalances, balances)
+}
diff --git a/acm/balance/type.go b/acm/balance/type.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b8ece9a49cc0a766c2e96e5d3fbd829c4db3d2d
--- /dev/null
+++ b/acm/balance/type.go
@@ -0,0 +1,51 @@
+package balance
+
+type Type uint32
+
+const (
+	TypeNative Type = 1
+	TypePower  Type = 2
+)
+
+var nameFromType = map[Type]string{
+	TypeNative: "Native",
+	TypePower:  "Power",
+}
+
+var typeFromName = make(map[string]Type)
+
+func init() {
+	for t, n := range nameFromType {
+		typeFromName[n] = t
+	}
+}
+
+func TypeFromString(name string) Type {
+	return typeFromName[name]
+}
+
+func (typ Type) String() string {
+	name, ok := nameFromType[typ]
+	if ok {
+		return name
+	}
+	return "UnknownBalanceType"
+}
+
+func (typ Type) MarshalText() ([]byte, error) {
+	return []byte(typ.String()), nil
+}
+
+func (typ *Type) UnmarshalText(data []byte) error {
+	*typ = TypeFromString(string(data))
+	return nil
+}
+
+// Protobuf support
+func (typ Type) Marshal() ([]byte, error) {
+	return typ.MarshalText()
+}
+
+func (typ *Type) Unmarshal(data []byte) error {
+	return typ.UnmarshalText(data)
+}
diff --git a/acm/private_account.go b/acm/private_account.go
index f9cf705d52ff95e71e58ab20b032c5789b3e7676..f6f80ba33d7596242c5424e3fb29c59f44252cf9 100644
--- a/acm/private_account.go
+++ b/acm/private_account.go
@@ -23,7 +23,7 @@ import (
 )
 
 type AddressableSigner interface {
-	Addressable
+	crypto.Addressable
 	crypto.Signer
 }
 
diff --git a/acm/validator.go b/acm/validator.go
deleted file mode 100644
index 51027539dbed524aa202a4b5c33b65ab89a7db73..0000000000000000000000000000000000000000
--- a/acm/validator.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package acm
-
-import (
-	"encoding/json"
-
-	"github.com/hyperledger/burrow/crypto"
-)
-
-type Validator interface {
-	Addressable
-	// The validator's voting power
-	Power() uint64
-}
-
-// Neither abci_types or tm_types has quite the representation we want
-type ConcreteValidator struct {
-	Address   crypto.Address
-	PublicKey crypto.PublicKey
-	Power     uint64
-}
-
-type concreteValidatorWrapper struct {
-	*ConcreteValidator `json:"unwrap"`
-}
-
-var _ Validator = concreteValidatorWrapper{}
-
-func AsValidator(account Account) Validator {
-	return ConcreteValidator{
-		Address:   account.Address(),
-		PublicKey: account.PublicKey(),
-		Power:     account.Balance(),
-	}.Validator()
-}
-
-func AsConcreteValidator(validator Validator) *ConcreteValidator {
-	if validator == nil {
-		return nil
-	}
-	if ca, ok := validator.(concreteValidatorWrapper); ok {
-		return ca.ConcreteValidator
-	}
-	return &ConcreteValidator{
-		Address:   validator.Address(),
-		PublicKey: validator.PublicKey(),
-		Power:     validator.Power(),
-	}
-}
-
-func (cvw concreteValidatorWrapper) Address() crypto.Address {
-	return cvw.ConcreteValidator.Address
-}
-
-func (cvw concreteValidatorWrapper) PublicKey() crypto.PublicKey {
-	return cvw.ConcreteValidator.PublicKey
-}
-
-func (cvw concreteValidatorWrapper) Power() uint64 {
-	return cvw.ConcreteValidator.Power
-}
-
-func (cvw concreteValidatorWrapper) WithNewPower(power uint64) Validator {
-	cv := cvw.Copy()
-	cv.Power = power
-	return concreteValidatorWrapper{
-		ConcreteValidator: cv,
-	}
-}
-
-func (cv ConcreteValidator) Validator() Validator {
-	return concreteValidatorWrapper{
-		ConcreteValidator: &cv,
-	}
-}
-
-func (cv *ConcreteValidator) Copy() *ConcreteValidator {
-	cvCopy := *cv
-	return &cvCopy
-}
-
-func (cv *ConcreteValidator) String() string {
-	if cv == nil {
-		return "Nil Validator"
-	}
-
-	bs, err := json.Marshal(cv)
-	if err != nil {
-		return "error serialising Validator"
-	}
-
-	return string(bs)
-}
diff --git a/acm/validator/ring.go b/acm/validator/ring.go
new file mode 100644
index 0000000000000000000000000000000000000000..bbc6fa56e2b73840249be5f9bf4fbe7dc28280cb
--- /dev/null
+++ b/acm/validator/ring.go
@@ -0,0 +1,265 @@
+package validator
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/hyperledger/burrow/crypto"
+)
+
+type Ring struct {
+	// The validator power history stored in buckets as a ring buffer
+	// The changes committed at rotation i
+	delta []*Set
+	// The cumulative changes at rotation i - 1
+	cum []*Set
+	// Totals for each validator across all buckets
+	power *Set
+	// Current flow totals for each validator in the Head bucket
+	flow *Set
+	// Index of current head bucket
+	head int64
+	// Number of buckets
+	size int64
+}
+
+var big1 = big.NewInt(1)
+var big3 = big.NewInt(3)
+
+// Provides a sliding window over the last size buckets of validator power changes
+func NewRing(initialSet Iterable, windowSize int) *Ring {
+	if windowSize < 1 {
+		windowSize = 1
+	}
+	vw := &Ring{
+		delta: make([]*Set, windowSize),
+		cum:   make([]*Set, windowSize),
+		power: NewSet(),
+		flow:  NewSet(),
+		size:  int64(windowSize),
+	}
+	for i := 0; i < windowSize; i++ {
+		vw.delta[i] = NewSet()
+		// Important that this is a trim set for accurate count
+		vw.cum[i] = NewTrimSet()
+	}
+	vw.cum[0] = Copy(initialSet)
+
+	return vw
+}
+
+// Implement Reader
+// Get power at index from the delta bucket then falling through to the cumulative
+func (vc *Ring) PowerAt(index int64, id crypto.Addressable) *big.Int {
+	power := vc.Head().MaybePower(id)
+	if power != nil {
+		return power
+	}
+	return vc.Cum().Power(id)
+}
+
+func (vc *Ring) Power(id crypto.Addressable) *big.Int {
+	return vc.PowerAt(vc.head, id)
+}
+
+// Return the resultant set at index of current cum plus delta
+func (vc *Ring) Resultant(index int64) *Set {
+	i := vc.index(index)
+	cum := CopyTrim(vc.cum[i])
+	vc.delta[i].Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		cum.AlterPower(id, power)
+		return
+	})
+	return cum
+}
+
+func (vc *Ring) TotalPower() *big.Int {
+	return vc.Resultant(vc.head).totalPower
+}
+
+// Updates the current head bucket (accumulator) with some safety checks
+func (vc *Ring) AlterPower(id crypto.Addressable, power *big.Int) (*big.Int, error) {
+	if power.Sign() == -1 {
+		return nil, fmt.Errorf("cannot set negative validator power: %v", power)
+	}
+	if !power.IsInt64() {
+		return nil, fmt.Errorf("for tendermint compatibility validator power must fit within an Int64 bur %v "+
+			"does not", power)
+	}
+	// if flow > maxflow then we cannot alter the power
+	flow := vc.Flow(id, power)
+	maxFlow := vc.MaxFlow()
+	// Set flow for this id to update flow.totalPower (total flow) for comparison below, keep track of flow for each id
+	// so that we only count flow once for each id
+	vc.flow.ChangePower(id, flow)
+	// The totalPower of the Flow Set is the absolute value of all power changes made so far
+	if vc.flow.totalPower.Cmp(maxFlow) == 1 {
+		// Reset flow to previous value to undo update above
+		prevFlow := vc.Flow(id, vc.Head().Power(id))
+		vc.flow.ChangePower(id, prevFlow)
+		allowable := new(big.Int).Sub(maxFlow, vc.flow.totalPower)
+		return nil, fmt.Errorf("cannot change validator power of %v from %v to %v because that would result in a flow "+
+			"greater than or equal to 1/3 of total power for the next commit: flow induced by change: %v, "+
+			"current total flow: %v/%v (cumulative/max), remaining allowable flow: %v",
+			id.Address(), vc.Cum().Power(id), power, flow, vc.flow.totalPower, maxFlow, allowable)
+	}
+	// Add to total power
+	vc.Head().ChangePower(id, power)
+	return flow, nil
+}
+
+// Returns the flow that would be induced by a validator change by comparing the head accumulator with the current set
+func (vc *Ring) Flow(id crypto.Addressable, power *big.Int) *big.Int {
+	flow := new(big.Int)
+	return flow.Abs(flow.Sub(power, vc.Cum().Power(id)))
+}
+
+// To ensure that after the maximum allowed validator shift, the intersection of the last block's validators and this
+// block's validators still contains at least one unit of validator power held by a non-byzantine validator who can
+// tell you if you've been lied to about the validator set
+// So need at most ceiling((Total Power)/3) - 1, in integer division we have ceiling(X*p/q) = (p(X+1)-1)/q
+// For p = 1 just X/q
+// So we want (Total Power)/3 - 1
+func (vc *Ring) MaxFlow() *big.Int {
+	max := vc.Cum().TotalPower()
+	return max.Sub(max.Div(max, big3), big1)
+}
+
+// Advance the current head bucket to the next bucket and return the change in total power between the previous bucket
+// and the current head, and the total flow, which is the sum of the absolute values of all changes to each validator's
+// power; after rotation the next head is a copy of the current head
+func (vc *Ring) Rotate() (totalPowerChange *big.Int, totalFlow *big.Int, err error) {
+	// Subtract the tail bucket (if any) from the total
+	err = Subtract(vc.power, vc.Next())
+	if err != nil {
+		return
+	}
+	// Add head delta to total power
+	err = Add(vc.power, vc.Head())
+	if err != nil {
+		return
+	}
+	// Copy current cumulative bucket
+	cum := CopyTrim(vc.Cum())
+	// Copy delta into what will be the next cumulative bucket
+	err = Alter(cum, vc.Head())
+	if err != nil {
+		return
+	}
+	// Advance the ring buffer
+	vc.head = vc.index(1)
+	// Overwrite new head bucket (previous tail) with a fresh delta accumulator
+	vc.delta[vc.head] = NewSet()
+	// Set the next cum
+	vc.cum[vc.head] = cum
+	// Capture flow before we wipe it
+	totalFlow = vc.flow.totalPower
+	// New flow accumulator
+	vc.flow = NewSet()
+	// Subtract the previous bucket total power so we can add on the current buckets power after this
+	totalPowerChange = new(big.Int).Sub(vc.Cum().TotalPower(), vc.cum[vc.index(-1)].TotalPower())
+	return
+}
+
+func (vc *Ring) CurrentSet() *Set {
+	return vc.cum[vc.head]
+}
+
+func (vc *Ring) PreviousSet() *Set {
+	return vc.cum[vc.index(-1)]
+}
+
+func (vc *Ring) Cum() *Set {
+	return vc.cum[vc.head]
+}
+
+// Get the current accumulator bucket
+func (vc *Ring) Head() *Set {
+	return vc.delta[vc.head]
+}
+
+func (vc *Ring) Next() *Set {
+	return vc.delta[vc.index(1)]
+}
+
+func (vc *Ring) index(i int64) int64 {
+	idx := (vc.size + vc.head + i) % vc.size
+	return idx
+}
+
+// Get the number of buckets in the ring (use Current().Count() to get the current number of validators)
+func (vc *Ring) Size() int64 {
+	return vc.size
+}
+
+// Returns buckets in order head, previous, ...
+func (vc *Ring) OrderedBuckets() (delta, cum []*Set) {
+	delta = make([]*Set, len(vc.delta))
+	cum = make([]*Set, len(vc.cum))
+	for i := int64(0); i < vc.size; i++ {
+		index := vc.index(-i)
+		delta[i] = vc.delta[index]
+		cum[i] = vc.cum[index]
+	}
+	return
+}
+
+func (vc *Ring) String() string {
+	delta, _ := vc.OrderedBuckets()
+	return fmt.Sprintf("ValidatorsWindow{Total: %v; Delta: Head->%v<-Tail}", vc.power, delta)
+}
+
+func (vc *Ring) Equal(vwOther *Ring) bool {
+	if vc.size != vwOther.size || vc.head != vwOther.head || len(vc.delta) != len(vwOther.delta) ||
+		!vc.flow.Equal(vwOther.flow) || !vc.power.Equal(vwOther.power) {
+		return false
+	}
+	for i := 0; i < len(vc.delta); i++ {
+		if !vc.delta[i].Equal(vwOther.delta[i]) || !vc.cum[i].Equal(vwOther.cum[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+type PersistedRing struct {
+	Delta [][]*Validator
+	Cum   [][]*Validator
+	Power []*Validator
+	Flow  []*Validator
+	Head  int64
+}
+
+func (vc *Ring) Persistable() PersistedRing {
+	delta := make([][]*Validator, len(vc.delta))
+	cum := make([][]*Validator, len(vc.cum))
+	for i := 0; i < len(delta); i++ {
+		delta[i] = vc.delta[i].Validators()
+		cum[i] = vc.cum[i].Validators()
+
+	}
+	return PersistedRing{
+		Delta: delta,
+		Cum:   cum,
+		Power: vc.power.Validators(),
+		Flow:  vc.flow.Validators(),
+		Head:  vc.head,
+	}
+}
+
+func UnpersistRing(pc PersistedRing) *Ring {
+	delta := make([]*Set, len(pc.Delta))
+	cum := make([]*Set, len(pc.Cum))
+	for i := 0; i < len(delta); i++ {
+		delta[i] = UnpersistSet(pc.Delta[i])
+		cum[i] = UnpersistSet(pc.Cum[i])
+	}
+	return &Ring{
+		delta: delta,
+		cum:   cum,
+		head:  pc.Head,
+		power: UnpersistSet(pc.Power),
+		flow:  UnpersistSet(pc.Flow),
+		size:  int64(len(delta)),
+	}
+}
diff --git a/acm/validator/ring_test.go b/acm/validator/ring_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..66d00db9d50c2da940e0bf06798deb4175461403
--- /dev/null
+++ b/acm/validator/ring_test.go
@@ -0,0 +1,125 @@
+package validator
+
+import (
+	"fmt"
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var pubA = pubKey(1)
+var pubB = pubKey(2)
+var pubC = pubKey(3)
+
+func TestValidatorsWindow_AlterPower(t *testing.T) {
+	vsBase := NewSet()
+	powAInitial := int64(10000)
+	vsBase.ChangePower(pubA, big.NewInt(powAInitial))
+
+	vs := Copy(vsBase)
+	vw := NewRing(vs, 3)
+
+	// Just allowable validator tide
+	var powA, powB, powC int64 = 7000, 23, 309
+	powerChange, totalFlow, err := alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assert.Equal(t, big.NewInt(powA+powB+powC-powAInitial), powerChange)
+	assert.Equal(t, big.NewInt(powAInitial/3-1), totalFlow)
+
+	// This one is not
+	vs = Copy(vsBase)
+	vw = NewRing(vs, 5)
+	powA, powB, powC = 7000, 23, 310
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.Error(t, err)
+
+	powA, powB, powC = 7000, 23, 309
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assert.Equal(t, big.NewInt(powA+powB+powC-powAInitial), powerChange)
+	assert.Equal(t, big.NewInt(powAInitial/3-1), totalFlow)
+
+	powA, powB, powC = 7000, 23, 309
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assertZero(t, powerChange)
+	assertZero(t, totalFlow)
+
+	_, err = vw.AlterPower(pubA, big.NewInt(8000))
+	assert.NoError(t, err)
+
+	// Should fail - not enough flow left
+	_, err = vw.AlterPower(pubB, big.NewInt(2000))
+	assert.Error(t, err)
+
+	// Taking a bit off should work
+	_, err = vw.AlterPower(pubA, big.NewInt(7000))
+	assert.NoError(t, err)
+
+	_, err = vw.AlterPower(pubB, big.NewInt(2000))
+	assert.NoError(t, err)
+	vw.Rotate()
+
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assert.Equal(t, big.NewInt(-1977), powerChange)
+	assert.Equal(t, big.NewInt(1977), totalFlow)
+
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assertZero(t, powerChange)
+	assert.Equal(t, big0, totalFlow)
+
+	powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC)
+	require.NoError(t, err)
+	assertZero(t, powerChange)
+	assert.Equal(t, big0, totalFlow)
+}
+
+func TestValidatorsRing_Persistable(t *testing.T) {
+
+	vs := NewSet()
+	powAInitial := int64(10000)
+	vs.ChangePower(pubA, big.NewInt(powAInitial))
+	vw := NewRing(vs, 30)
+
+	for i := int64(0); i < 61; i++ {
+		_, _, err := alterPowers(t, vw, 10000, 200*i, 200*((i+1)%4))
+		require.NoError(t, err)
+	}
+
+	vwOut := UnpersistRing(vw.Persistable())
+	assert.True(t, vw.Equal(vwOut), "should re equal across persistence")
+}
+
+func alterPowers(t testing.TB, vw *Ring, powA, powB, powC int64) (powerChange, totalFlow *big.Int, err error) {
+	fmt.Println(vw)
+	_, err = vw.AlterPower(pubA, big.NewInt(powA))
+	if err != nil {
+		return nil, nil, err
+	}
+	_, err = vw.AlterPower(pubB, big.NewInt(powB))
+	if err != nil {
+		return nil, nil, err
+	}
+	_, err = vw.AlterPower(pubC, big.NewInt(powC))
+	if err != nil {
+		return nil, nil, err
+	}
+	maxFlow := vw.MaxFlow()
+	powerChange, totalFlow, err = vw.Rotate()
+	require.NoError(t, err)
+	// totalFlow > maxFlow
+	if totalFlow.Cmp(maxFlow) == 1 {
+		return powerChange, totalFlow, fmt.Errorf("totalFlow (%v) exceeds maxFlow (%v)", totalFlow, maxFlow)
+	}
+
+	return powerChange, totalFlow, nil
+}
+
+// Since we have -0 and 0 with big.Int due to its representation with a neg flag
+func assertZero(t testing.TB, i *big.Int) {
+	assert.True(t, big0.Cmp(i) == 0, "expected 0 but got %v", i)
+}
diff --git a/acm/validator/set.go b/acm/validator/set.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ce7d0101e9e94fa57ecdfda6016884611774231
--- /dev/null
+++ b/acm/validator/set.go
@@ -0,0 +1,163 @@
+package validator
+
+import (
+	"fmt"
+	"math/big"
+	"sort"
+	"strings"
+
+	"github.com/hyperledger/burrow/crypto"
+)
+
+var big0 = big.NewInt(0)
+
+// A Validator multiset - can be used to capture the global state of validators or as an accumulator each block
+type Set struct {
+	powers     map[crypto.Address]*big.Int
+	publicKeys map[crypto.Address]crypto.Addressable
+	totalPower *big.Int
+	trim       bool
+}
+
+func newSet() *Set {
+	return &Set{
+		totalPower: new(big.Int),
+		powers:     make(map[crypto.Address]*big.Int),
+		publicKeys: make(map[crypto.Address]crypto.Addressable),
+	}
+}
+
+// Create a new Set which can act as an accumulator for validator power changes
+func NewSet() *Set {
+	return newSet()
+}
+
+// Like Set but removes entries when power is set to 0; this makes Count() == CountNonZero() and prevents a set from
+// leaking, but does mean that a zero will not be iterated over when performing an update, which is necessary in Ring
+func NewTrimSet() *Set {
+	s := newSet()
+	s.trim = true
+	return s
+}
+
+// Implements Writer, but will never error
+func (vs *Set) AlterPower(id crypto.Addressable, power *big.Int) (flow *big.Int, err error) {
+	return vs.ChangePower(id, power), nil
+}
+
+// Set the power of a validator and return the flow into that validator
+func (vs *Set) ChangePower(id crypto.Addressable, power *big.Int) *big.Int {
+	address := id.Address()
+	// Calculate flow into this validator (positive means in, negative means out)
+	flow := new(big.Int).Sub(power, vs.Power(id))
+	vs.totalPower.Add(vs.totalPower, flow)
+
+	if vs.trim && power.Sign() == 0 {
+		delete(vs.publicKeys, address)
+		delete(vs.powers, address)
+	} else {
+		vs.publicKeys[address] = crypto.MemoizeAddressable(id)
+		vs.powers[address] = new(big.Int).Set(power)
+	}
+	return flow
+}
+
+func (vs *Set) TotalPower() *big.Int {
+	return new(big.Int).Set(vs.totalPower)
+}
+
+// Returns the power of id but only if it is set
+func (vs *Set) MaybePower(id crypto.Addressable) *big.Int {
+	if vs.powers[id.Address()] == nil {
+		return nil
+	}
+	return new(big.Int).Set(vs.powers[id.Address()])
+}
+
+func (vs *Set) Power(id crypto.Addressable) *big.Int {
+	if vs.powers[id.Address()] == nil {
+		return new(big.Int)
+	}
+	return new(big.Int).Set(vs.powers[id.Address()])
+}
+
+func (vs *Set) Equal(vsOther *Set) bool {
+	if vs.Count() != vsOther.Count() {
+		return false
+	}
+	// Stop iteration IFF we find a non-matching validator
+	return !vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		otherPower := vsOther.Power(id)
+		if otherPower.Cmp(power) != 0 {
+			return true
+		}
+		return false
+	})
+}
+
+// Iterates over validators sorted by address
+func (vs *Set) Iterate(iter func(id crypto.Addressable, power *big.Int) (stop bool)) (stopped bool) {
+	if vs == nil {
+		return
+	}
+	addresses := make(crypto.Addresses, 0, len(vs.powers))
+	for address := range vs.powers {
+		addresses = append(addresses, address)
+	}
+	sort.Sort(addresses)
+	for _, address := range addresses {
+		if iter(vs.publicKeys[address], new(big.Int).Set(vs.powers[address])) {
+			return true
+		}
+	}
+	return
+}
+
+func (vs *Set) CountNonZero() int {
+	var count int
+	vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		if power.Sign() != 0 {
+			count++
+		}
+		return
+	})
+	return count
+}
+
+func (vs *Set) Count() int {
+	return len(vs.publicKeys)
+}
+
+func (vs *Set) Validators() []*Validator {
+	if vs == nil {
+		return nil
+	}
+	pvs := make([]*Validator, 0, vs.Count())
+	vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		pvs = append(pvs, &Validator{PublicKey: id.PublicKey(), Power: power.Uint64()})
+		return
+	})
+	return pvs
+}
+
+func UnpersistSet(pvs []*Validator) *Set {
+	vs := NewSet()
+	for _, pv := range pvs {
+		vs.ChangePower(pv.PublicKey, new(big.Int).SetUint64(pv.Power))
+	}
+	return vs
+}
+
+func (vs *Set) String() string {
+	return fmt.Sprintf("Validators{TotalPower: %v; Count: %v; %v}", vs.TotalPower(), vs.Count(),
+		vs.Strings())
+}
+
+func (vs *Set) Strings() string {
+	strs := make([]string, 0, vs.Count())
+	vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		strs = append(strs, fmt.Sprintf("%v->%v", id.Address(), power))
+		return
+	})
+	return strings.Join(strs, ", ")
+}
diff --git a/acm/validator/set_test.go b/acm/validator/set_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea0eeb635a9d998893132b68ac1b0a4d0496584e
--- /dev/null
+++ b/acm/validator/set_test.go
@@ -0,0 +1,25 @@
+package validator
+
+import (
+	"fmt"
+	"math/big"
+	"testing"
+
+	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/crypto"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestValidators_AlterPower(t *testing.T) {
+	vs := NewSet()
+	pow1 := big.NewInt(2312312321)
+	pubA := pubKey(1)
+	vs.ChangePower(pubA, pow1)
+	assert.Equal(t, pow1, vs.TotalPower())
+	vs.ChangePower(pubA, big.NewInt(0))
+	assertZero(t, vs.TotalPower())
+}
+
+func pubKey(secret interface{}) crypto.PublicKey {
+	return acm.NewConcreteAccountFromSecret(fmt.Sprintf("%v", secret)).PublicKey
+}
diff --git a/acm/validator/validator.go b/acm/validator/validator.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2bdc174c6c4c237f6c55cd4bdd7247448162d8f
--- /dev/null
+++ b/acm/validator/validator.go
@@ -0,0 +1,27 @@
+package validator
+
+import (
+	"fmt"
+
+	"github.com/hyperledger/burrow/acm"
+)
+
+func (v Validator) String() string {
+	return fmt.Sprintf("Validator{Address: %v, PublicKey: %v, Power: %v}", v.Address, v.PublicKey, v.Power)
+}
+
+func (v Validator) FillAddress() {
+	if v.Address == nil {
+		address := v.PublicKey.Address()
+		v.Address = &address
+	}
+}
+
+func FromAccount(acc acm.Account, power uint64) Validator {
+	address := acc.Address()
+	return Validator{
+		Address:   &address,
+		PublicKey: acc.PublicKey(),
+		Power:     power,
+	}
+}
diff --git a/acm/validator/validator.pb.go b/acm/validator/validator.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..93c8b77d7b476877c6709de87971b03a0694bc96
--- /dev/null
+++ b/acm/validator/validator.pb.go
@@ -0,0 +1,405 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: validator.proto
+
+/*
+	Package validator is a generated protocol buffer package.
+
+	It is generated from these files:
+		validator.proto
+
+	It has these top-level messages:
+		Validator
+*/
+package validator
+
+import proto "github.com/gogo/protobuf/proto"
+import golang_proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/hyperledger/burrow/permission"
+import crypto "github.com/hyperledger/burrow/crypto"
+
+import github_com_hyperledger_burrow_crypto "github.com/hyperledger/burrow/crypto"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = golang_proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Validator struct {
+	Address   *github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,1,opt,name=Address,proto3,customtype=github.com/hyperledger/burrow/crypto.Address" json:"Address,omitempty"`
+	PublicKey crypto.PublicKey                              `protobuf:"bytes,2,opt,name=PublicKey" json:"PublicKey"`
+	Power     uint64                                        `protobuf:"varint,3,opt,name=Power,proto3" json:"Power,omitempty"`
+}
+
+func (m *Validator) Reset()                    { *m = Validator{} }
+func (*Validator) ProtoMessage()               {}
+func (*Validator) Descriptor() ([]byte, []int) { return fileDescriptorValidator, []int{0} }
+
+func (m *Validator) GetPublicKey() crypto.PublicKey {
+	if m != nil {
+		return m.PublicKey
+	}
+	return crypto.PublicKey{}
+}
+
+func (m *Validator) GetPower() uint64 {
+	if m != nil {
+		return m.Power
+	}
+	return 0
+}
+
+func (*Validator) XXX_MessageName() string {
+	return "validator.Validator"
+}
+func init() {
+	proto.RegisterType((*Validator)(nil), "validator.Validator")
+	golang_proto.RegisterType((*Validator)(nil), "validator.Validator")
+}
+func (m *Validator) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Validator) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Address != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintValidator(dAtA, i, uint64(m.Address.Size()))
+		n1, err := m.Address.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintValidator(dAtA, i, uint64(m.PublicKey.Size()))
+	n2, err := m.PublicKey.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	if m.Power != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintValidator(dAtA, i, uint64(m.Power))
+	}
+	return i, nil
+}
+
+func encodeVarintValidator(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Validator) Size() (n int) {
+	var l int
+	_ = l
+	if m.Address != nil {
+		l = m.Address.Size()
+		n += 1 + l + sovValidator(uint64(l))
+	}
+	l = m.PublicKey.Size()
+	n += 1 + l + sovValidator(uint64(l))
+	if m.Power != 0 {
+		n += 1 + sovValidator(uint64(m.Power))
+	}
+	return n
+}
+
+func sovValidator(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozValidator(x uint64) (n int) {
+	return sovValidator(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Validator) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowValidator
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Validator: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowValidator
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthValidator
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var v github_com_hyperledger_burrow_crypto.Address
+			m.Address = &v
+			if err := m.Address.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowValidator
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthValidator
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType)
+			}
+			m.Power = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowValidator
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Power |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipValidator(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthValidator
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipValidator(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowValidator
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowValidator
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowValidator
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthValidator
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowValidator
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipValidator(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthValidator = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowValidator   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("validator.proto", fileDescriptorValidator) }
+func init() { golang_proto.RegisterFile("validator.proto", fileDescriptorValidator) }
+
+var fileDescriptorValidator = []byte{
+	// 255 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0x4b, 0xcc, 0xc9,
+	0x4c, 0x49, 0x2c, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x48,
+	0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7,
+	0xeb, 0x83, 0x55, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0x29, 0x25, 0x50,
+	0x90, 0x5a, 0x94, 0x9b, 0x59, 0x5c, 0x9c, 0x99, 0x9f, 0x07, 0x15, 0xe1, 0x49, 0x2e, 0xaa, 0x2c,
+	0x28, 0x81, 0xca, 0x2b, 0xad, 0x62, 0xe4, 0xe2, 0x0c, 0x83, 0x19, 0x2e, 0xe4, 0xc5, 0xc5, 0xee,
+	0x98, 0x92, 0x52, 0x94, 0x5a, 0x5c, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe3, 0x64, 0x70, 0xeb,
+	0x9e, 0xbc, 0x0e, 0x92, 0x8d, 0x19, 0x95, 0x05, 0xa9, 0x45, 0x39, 0xa9, 0x29, 0xe9, 0xa9, 0x45,
+	0xfa, 0x49, 0xa5, 0x45, 0x45, 0xf9, 0xe5, 0xfa, 0x50, 0xe3, 0xa0, 0xfa, 0x82, 0x60, 0x06, 0x08,
+	0x99, 0x72, 0x71, 0x06, 0x94, 0x26, 0xe5, 0x64, 0x26, 0x7b, 0xa7, 0x56, 0x4a, 0x30, 0x29, 0x30,
+	0x6a, 0x70, 0x1b, 0x09, 0xea, 0x41, 0x15, 0xc3, 0x25, 0x9c, 0x58, 0x4e, 0xdc, 0x93, 0x67, 0x08,
+	0x42, 0xa8, 0x14, 0x12, 0xe1, 0x62, 0x0d, 0xc8, 0x2f, 0x4f, 0x2d, 0x92, 0x60, 0x56, 0x60, 0xd4,
+	0x60, 0x09, 0x82, 0x70, 0xac, 0x58, 0x66, 0x2c, 0x90, 0x67, 0x70, 0x72, 0x3c, 0xf1, 0x48, 0x8e,
+	0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x0f, 0x3c, 0x96, 0x63, 0x3c, 0xf1, 0x58,
+	0x8e, 0x31, 0x4a, 0x1b, 0xbf, 0xfb, 0x12, 0x93, 0x73, 0xf5, 0xe1, 0xc1, 0x97, 0xc4, 0x06, 0xf6,
+	0xb6, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xca, 0xd9, 0x6f, 0x63, 0x01, 0x00, 0x00,
+}
diff --git a/acm/validator/validators.go b/acm/validator/validators.go
new file mode 100644
index 0000000000000000000000000000000000000000..21313b7a7868ce8494278f02390d2419ba8666dd
--- /dev/null
+++ b/acm/validator/validators.go
@@ -0,0 +1,111 @@
+package validator
+
+import (
+	"math/big"
+
+	"sync"
+
+	"github.com/hyperledger/burrow/crypto"
+)
+
+type Writer interface {
+	AlterPower(id crypto.Addressable, power *big.Int) (flow *big.Int, err error)
+}
+
+type Reader interface {
+	Power(id crypto.Addressable) *big.Int
+}
+
+type Iterable interface {
+	Iterate(func(id crypto.Addressable, power *big.Int) (stop bool)) (stopped bool)
+}
+
+type IterableReader interface {
+	Reader
+	Iterable
+}
+
+type ReaderWriter interface {
+	Reader
+	Writer
+}
+
+type IterableReaderWriter interface {
+	ReaderWriter
+	Iterable
+}
+
+type WriterFunc func(id crypto.Addressable, power *big.Int) (flow *big.Int, err error)
+
+func SyncWriter(locker sync.Locker, writerFunc WriterFunc) WriterFunc {
+	return WriterFunc(func(id crypto.Addressable, power *big.Int) (flow *big.Int, err error) {
+		locker.Lock()
+		defer locker.Unlock()
+		return writerFunc(id, power)
+	})
+}
+
+func (wf WriterFunc) AlterPower(id crypto.Addressable, power *big.Int) (flow *big.Int, err error) {
+	return wf(id, power)
+}
+
+func AddPower(vs ReaderWriter, id crypto.Addressable, power *big.Int) error {
+	// Current power + power
+	_, err := vs.AlterPower(id, new(big.Int).Add(vs.Power(id), power))
+	return err
+}
+
+func SubtractPower(vs ReaderWriter, id crypto.Addressable, power *big.Int) error {
+	_, err := vs.AlterPower(id, new(big.Int).Sub(vs.Power(id), power))
+	return err
+}
+
+func Alter(vs Writer, vsOther Iterable) (err error) {
+	vsOther.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		_, err = vs.AlterPower(id, power)
+		if err != nil {
+			return true
+		}
+		return
+	})
+	return
+}
+
+// Adds vsOther to vs
+func Add(vs ReaderWriter, vsOther Iterable) (err error) {
+	vsOther.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		err = AddPower(vs, id, power)
+		if err != nil {
+			return true
+		}
+		return
+	})
+	return
+}
+
+// Subtracts vsOther from vs
+func Subtract(vs ReaderWriter, vsOther Iterable) (err error) {
+	vsOther.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		err = SubtractPower(vs, id, power)
+		if err != nil {
+			return true
+		}
+		return
+	})
+	return
+}
+
+func Copy(vs Iterable) *Set {
+	vsCopy := NewSet()
+	vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		vsCopy.ChangePower(id, power)
+		return
+	})
+	return vsCopy
+}
+
+func CopyTrim(vs Iterable) *Set {
+	s := Copy(vs)
+	s.trim = true
+	return s
+}
diff --git a/acm/validator_test.go b/acm/validator_test.go
deleted file mode 100644
index 5129908197baa3b9ea8193f8d279ac3d06c58d3d..0000000000000000000000000000000000000000
--- a/acm/validator_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package acm
diff --git a/bcm/blockchain.go b/bcm/blockchain.go
new file mode 100644
index 0000000000000000000000000000000000000000..d12afd6faee4313aa4e6994a401a4f52008a404c
--- /dev/null
+++ b/bcm/blockchain.go
@@ -0,0 +1,279 @@
+// Copyright 2017 Monax Industries Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bcm
+
+import (
+	"bytes"
+	"fmt"
+	"math/big"
+	"time"
+
+	"sync"
+
+	"github.com/hyperledger/burrow/acm/validator"
+	"github.com/hyperledger/burrow/genesis"
+	"github.com/hyperledger/burrow/logging"
+	"github.com/tendermint/go-amino"
+	dbm "github.com/tendermint/tendermint/libs/db"
+)
+
+// Blocks to average validator power over
+const DefaultValidatorsWindowSize = 10
+
+var stateKey = []byte("BlockchainState")
+
+type BlockchainInfo interface {
+	GenesisHash() []byte
+	GenesisDoc() genesis.GenesisDoc
+	ChainID() string
+	LastBlockHeight() uint64
+	LastBlockTime() time.Time
+	LastBlockHash() []byte
+	AppHashAfterLastBlock() []byte
+	Validators() validator.IterableReader
+	ValidatorsHistory() (currentSet *validator.Set, deltas []*validator.Set, height uint64)
+	NumValidators() int
+}
+
+type Blockchain struct {
+	sync.RWMutex
+	db                    dbm.DB
+	genesisHash           []byte
+	genesisDoc            genesis.GenesisDoc
+	chainID               string
+	lastBlockHeight       uint64
+	lastBlockTime         time.Time
+	lastBlockHash         []byte
+	appHashAfterLastBlock []byte
+	validatorCache        *validator.Ring
+	validatorCheckCache   *validator.Ring
+}
+
+var _ BlockchainInfo = &Blockchain{}
+
+type PersistedState struct {
+	AppHashAfterLastBlock []byte
+	LastBlockHeight       uint64
+	GenesisDoc            genesis.GenesisDoc
+	ValidatorSet          []validator.Validator
+	ValidatorCache        validator.PersistedRing
+}
+
+func LoadOrNewBlockchain(db dbm.DB, genesisDoc *genesis.GenesisDoc, logger *logging.Logger) (*Blockchain, error) {
+	logger = logger.WithScope("LoadOrNewBlockchain")
+	logger.InfoMsg("Trying to load blockchain state from database",
+		"database_key", stateKey)
+	bc, err := loadBlockchain(db)
+	if err != nil {
+		return nil, fmt.Errorf("error loading blockchain state from database: %v", err)
+	}
+	if bc != nil {
+		dbHash := bc.genesisDoc.Hash()
+		argHash := genesisDoc.Hash()
+		if !bytes.Equal(dbHash, argHash) {
+			return nil, fmt.Errorf("GenesisDoc passed to LoadOrNewBlockchain has hash: 0x%X, which does not "+
+				"match the one found in database: 0x%X, database genesis:\n%v\npassed genesis:\n%v\n",
+				argHash, dbHash, bc.genesisDoc.JSONString(), genesisDoc.JSONString())
+		}
+		return bc, nil
+	}
+
+	logger.InfoMsg("No existing blockchain state found in database, making new blockchain")
+	return newBlockchain(db, genesisDoc), nil
+}
+
+// Pointer to blockchain state initialised from genesis
+func newBlockchain(db dbm.DB, genesisDoc *genesis.GenesisDoc) *Blockchain {
+	vs := validator.NewTrimSet()
+	for _, gv := range genesisDoc.Validators {
+		vs.ChangePower(gv.PublicKey, new(big.Int).SetUint64(gv.Amount))
+	}
+	bc := &Blockchain{
+		db:                    db,
+		genesisHash:           genesisDoc.Hash(),
+		genesisDoc:            *genesisDoc,
+		chainID:               genesisDoc.ChainID(),
+		lastBlockTime:         genesisDoc.GenesisTime,
+		appHashAfterLastBlock: genesisDoc.Hash(),
+		validatorCache:        validator.NewRing(vs, DefaultValidatorsWindowSize),
+		validatorCheckCache:   validator.NewRing(vs, 1),
+	}
+	return bc
+}
+
+func loadBlockchain(db dbm.DB) (*Blockchain, error) {
+	buf := db.Get(stateKey)
+	if len(buf) == 0 {
+		return nil, nil
+	}
+	bc, err := DecodeBlockchain(buf)
+	if err != nil {
+		return nil, err
+	}
+	bc.db = db
+	return bc, nil
+}
+
+func (bc *Blockchain) ValidatorChecker() validator.Writer {
+	return validator.SyncWriter(bc, bc.validatorCheckCache.AlterPower)
+}
+
+func (bc *Blockchain) ValidatorWriter() validator.Writer {
+	return validator.SyncWriter(bc, bc.validatorCache.AlterPower)
+}
+
+func (bc *Blockchain) CommitBlock(blockTime time.Time,
+	blockHash, appHash []byte) (totalPowerChange, totalFlow *big.Int, err error) {
+	bc.Lock()
+	defer bc.Unlock()
+	// Checkpoint on the _previous_ block. If we die, this is where we will resume since we know it must have been
+	// committed since we are committing the next block. If we fall over we can resume a safe committed state and
+	// Tendermint will catch us up
+	err = bc.save()
+	if err != nil {
+		return
+	}
+	totalPowerChange, totalFlow, err = bc.validatorCache.Rotate()
+	if err != nil {
+		return
+	}
+	_, _, err = bc.validatorCheckCache.Rotate()
+	if err != nil {
+		return
+	}
+	bc.lastBlockHeight += 1
+	bc.lastBlockTime = blockTime
+	bc.lastBlockHash = blockHash
+	bc.appHashAfterLastBlock = appHash
+	return
+}
+
+func (bc *Blockchain) save() error {
+	if bc.db != nil {
+		encodedState, err := bc.Encode()
+		if err != nil {
+			return err
+		}
+		bc.db.SetSync(stateKey, encodedState)
+	}
+	return nil
+}
+
+var cdc = amino.NewCodec()
+
+func (bc *Blockchain) Encode() ([]byte, error) {
+	persistedState := &PersistedState{
+		GenesisDoc:            bc.genesisDoc,
+		AppHashAfterLastBlock: bc.appHashAfterLastBlock,
+		LastBlockHeight:       bc.lastBlockHeight,
+		ValidatorCache:        bc.validatorCache.Persistable(),
+	}
+	encodedState, err := cdc.MarshalBinary(persistedState)
+	if err != nil {
+		return nil, err
+	}
+	return encodedState, nil
+}
+
+func DecodeBlockchain(encodedState []byte) (*Blockchain, error) {
+	persistedState := new(PersistedState)
+	err := cdc.UnmarshalBinary(encodedState, persistedState)
+	if err != nil {
+		return nil, err
+	}
+	bc := newBlockchain(nil, &persistedState.GenesisDoc)
+	bc.lastBlockHeight = persistedState.LastBlockHeight
+	bc.appHashAfterLastBlock = persistedState.AppHashAfterLastBlock
+	bc.validatorCache = validator.UnpersistRing(persistedState.ValidatorCache)
+	bc.validatorCheckCache = validator.UnpersistRing(persistedState.ValidatorCache)
+	return bc, nil
+}
+
+func (bc *Blockchain) GenesisHash() []byte {
+	return bc.genesisHash
+}
+
+func (bc *Blockchain) GenesisDoc() genesis.GenesisDoc {
+	return bc.genesisDoc
+}
+
+func (bc *Blockchain) ChainID() string {
+	return bc.chainID
+}
+
+func (bc *Blockchain) LastBlockHeight() uint64 {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.lastBlockHeight
+}
+
+func (bc *Blockchain) LastBlockTime() time.Time {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.lastBlockTime
+}
+
+func (bc *Blockchain) LastBlockHash() []byte {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.lastBlockHash
+}
+
+func (bc *Blockchain) AppHashAfterLastBlock() []byte {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.appHashAfterLastBlock
+}
+
+func (bc *Blockchain) PendingValidators() validator.IterableReader {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.validatorCache.Head()
+}
+
+func (bc *Blockchain) Validators() validator.IterableReader {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.validatorCache.CurrentSet()
+}
+
+func (bc *Blockchain) ValidatorsHistory() (*validator.Set, []*validator.Set, uint64) {
+	bc.RLock()
+	defer bc.RUnlock()
+	delta, _ := bc.validatorCache.OrderedBuckets()
+	deltas := make([]*validator.Set, len(delta))
+	for i, d := range delta {
+		deltas[i] = validator.Copy(d)
+	}
+	return validator.CopyTrim(bc.validatorCache.CurrentSet()), deltas, bc.lastBlockHeight
+}
+
+func (bc *Blockchain) CurrentValidators() *validator.Set {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.validatorCache.CurrentSet()
+}
+
+func (bc *Blockchain) PreviousValidators() *validator.Set {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.validatorCache.PreviousSet()
+}
+
+func (bc *Blockchain) NumValidators() int {
+	bc.RLock()
+	defer bc.RUnlock()
+	return bc.validatorCache.CurrentSet().Count()
+}
diff --git a/bcm/blockchain_test.go b/bcm/blockchain_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5f5bc62761a1d4cbd06b1613849dad90682dc16
--- /dev/null
+++ b/bcm/blockchain_test.go
@@ -0,0 +1,53 @@
+package bcm
+
+import (
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/hyperledger/burrow/genesis"
+	"github.com/hyperledger/burrow/logging/logconfig"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/libs/db"
+)
+
+var big0 = big.NewInt(0)
+
+func TestBlockchain_Encode(t *testing.T) {
+	genesisDoc, _, validators := genesis.NewDeterministicGenesis(234).
+		GenesisDoc(5, true, 232, 3, true, 34)
+	bc := newBlockchain(db.NewMemDB(), genesisDoc)
+	bs, err := bc.Encode()
+	require.NoError(t, err)
+	bcOut, err := DecodeBlockchain(bs)
+	require.True(t, bc.validatorCache.Equal(bcOut.validatorCache))
+	require.Equal(t, bc.genesisDoc.GenesisTime, bcOut.genesisDoc.GenesisTime)
+	assert.Equal(t, logconfig.JSONString(bc.genesisDoc), logconfig.JSONString(bcOut.genesisDoc))
+	require.Equal(t, bc.genesisDoc.Hash(), bcOut.genesisDoc.Hash())
+	power := new(big.Int).SetUint64(genesisDoc.Validators[1].Amount)
+	id1 := validators[1].PublicKey()
+	var flow *big.Int
+	for i := 0; i < 100; i++ {
+		power := power.Div(power, big.NewInt(2))
+		flow, err = bc.ValidatorWriter().AlterPower(id1, power)
+		fmt.Println(flow)
+		require.NoError(t, err)
+		_, _, err = bc.CommitBlock(time.Now(), []byte("blockhash"), []byte("apphash"))
+		require.NoError(t, err)
+		bs, err = bc.Encode()
+		require.NoError(t, err)
+		bcOut, err = DecodeBlockchain(bs)
+		require.True(t, bc.validatorCache.Equal(bcOut.validatorCache))
+	}
+
+	// Should have exponentially decayed to 0
+	assertZero(t, flow)
+	assertZero(t, bc.validatorCache.Power(id1))
+}
+
+// Since we have -0 and 0 with big.Int due to its representation with a neg flag
+func assertZero(t testing.TB, i *big.Int) {
+	assert.True(t, big0.Cmp(i) == 0, "expected 0 but got %v", i)
+}
diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go
deleted file mode 100644
index 2d1f3553f7bd3568897f5b578622ec71e30cca32..0000000000000000000000000000000000000000
--- a/blockchain/blockchain.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2017 Monax Industries Limited
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package blockchain
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"time"
-
-	"sync"
-
-	"github.com/hyperledger/burrow/crypto"
-	"github.com/hyperledger/burrow/genesis"
-	"github.com/hyperledger/burrow/logging"
-	dbm "github.com/tendermint/tmlibs/db"
-)
-
-// Blocks to average validator power over
-const DefaultValidatorsWindowSize = 10
-
-var stateKey = []byte("BlockchainState")
-
-type TipInfo interface {
-	ChainID() string
-	LastBlockHeight() uint64
-	LastBlockTime() time.Time
-	LastBlockHash() []byte
-	AppHashAfterLastBlock() []byte
-	IterateValidators(iter func(publicKey crypto.PublicKey, power uint64) (stop bool)) (stopped bool)
-	NumValidators() int
-}
-
-type BlockchainInfo interface {
-	TipInfo
-	GenesisHash() []byte
-	GenesisDoc() genesis.GenesisDoc
-}
-
-type Root struct {
-	genesisHash []byte
-	genesisDoc  genesis.GenesisDoc
-}
-
-type Tip struct {
-	chainID               string
-	lastBlockHeight       uint64
-	lastBlockTime         time.Time
-	lastBlockHash         []byte
-	appHashAfterLastBlock []byte
-	validators            Validators
-	validatorsWindow      ValidatorsWindow
-}
-
-type Blockchain struct {
-	*Root
-	*Tip
-	sync.RWMutex
-	db dbm.DB
-}
-
-type PersistedState struct {
-	AppHashAfterLastBlock []byte
-	LastBlockHeight       uint64
-	GenesisDoc            genesis.GenesisDoc
-}
-
-func LoadOrNewBlockchain(db dbm.DB, genesisDoc *genesis.GenesisDoc,
-	logger *logging.Logger) (*Blockchain, error) {
-
-	logger = logger.WithScope("LoadOrNewBlockchain")
-	logger.InfoMsg("Trying to load blockchain state from database",
-		"database_key", stateKey)
-	bc, err := loadBlockchain(db)
-	if err != nil {
-		return nil, fmt.Errorf("error loading blockchain state from database: %v", err)
-	}
-	if bc != nil {
-		dbHash := bc.genesisDoc.Hash()
-		argHash := genesisDoc.Hash()
-		if !bytes.Equal(dbHash, argHash) {
-			return nil, fmt.Errorf("GenesisDoc passed to LoadOrNewBlockchain has hash: 0x%X, which does not "+
-				"match the one found in database: 0x%X", argHash, dbHash)
-		}
-		return bc, nil
-	}
-
-	logger.InfoMsg("No existing blockchain state found in database, making new blockchain")
-	return newBlockchain(db, genesisDoc), nil
-}
-
-// Pointer to blockchain state initialised from genesis
-func newBlockchain(db dbm.DB, genesisDoc *genesis.GenesisDoc) *Blockchain {
-	bc := &Blockchain{
-		db:   db,
-		Root: NewRoot(genesisDoc),
-		Tip:  NewTip(genesisDoc.ChainID(), NewRoot(genesisDoc).genesisDoc.GenesisTime, NewRoot(genesisDoc).genesisHash),
-	}
-	for _, gv := range genesisDoc.Validators {
-		bc.validators.AlterPower(gv.PublicKey, gv.Amount)
-	}
-	return bc
-}
-
-func loadBlockchain(db dbm.DB) (*Blockchain, error) {
-	buf := db.Get(stateKey)
-	if len(buf) == 0 {
-		return nil, nil
-	}
-	persistedState, err := Decode(buf)
-	if err != nil {
-		return nil, err
-	}
-	bc := newBlockchain(db, &persistedState.GenesisDoc)
-	bc.lastBlockHeight = persistedState.LastBlockHeight
-	bc.appHashAfterLastBlock = persistedState.AppHashAfterLastBlock
-	return bc, nil
-}
-
-func NewRoot(genesisDoc *genesis.GenesisDoc) *Root {
-	return &Root{
-		genesisHash: genesisDoc.Hash(),
-		genesisDoc:  *genesisDoc,
-	}
-}
-
-// Create genesis Tip
-func NewTip(chainID string, genesisTime time.Time, genesisHash []byte) *Tip {
-	return &Tip{
-		chainID:               chainID,
-		lastBlockTime:         genesisTime,
-		appHashAfterLastBlock: genesisHash,
-		validators:            NewValidators(),
-		validatorsWindow:      NewValidatorsWindow(DefaultValidatorsWindowSize),
-	}
-}
-
-func (bc *Blockchain) CommitBlock(blockTime time.Time, blockHash, appHash []byte) error {
-	bc.Lock()
-	defer bc.Unlock()
-	// Checkpoint on the _previous_ block. If we die, this is where we will resume since we know it must have been
-	// committed since we are committing the next block. If we fall over we can resume a safe committed state and
-	// Tendermint will catch us up
-	err := bc.save()
-	if err != nil {
-		return err
-	}
-	bc.lastBlockHeight += 1
-	bc.lastBlockTime = blockTime
-	bc.lastBlockHash = blockHash
-	bc.appHashAfterLastBlock = appHash
-	return nil
-}
-
-func (bc *Blockchain) save() error {
-	if bc.db != nil {
-		encodedState, err := bc.Encode()
-		if err != nil {
-			return err
-		}
-		bc.db.SetSync(stateKey, encodedState)
-	}
-	return nil
-}
-
-func (bc *Blockchain) Encode() ([]byte, error) {
-	persistedState := &PersistedState{
-		GenesisDoc:            bc.genesisDoc,
-		AppHashAfterLastBlock: bc.appHashAfterLastBlock,
-		LastBlockHeight:       bc.lastBlockHeight,
-	}
-	encodedState, err := json.Marshal(persistedState)
-	if err != nil {
-		return nil, err
-	}
-	return encodedState, nil
-}
-
-func Decode(encodedState []byte) (*PersistedState, error) {
-	persistedState := new(PersistedState)
-	err := json.Unmarshal(encodedState, persistedState)
-	if err != nil {
-		return nil, err
-	}
-	return persistedState, nil
-}
-
-func (r *Root) GenesisHash() []byte {
-	return r.genesisHash
-}
-
-func (r *Root) GenesisDoc() genesis.GenesisDoc {
-	return r.genesisDoc
-}
-
-func (t *Tip) ChainID() string {
-	return t.chainID
-}
-
-func (t *Tip) LastBlockHeight() uint64 {
-	return t.lastBlockHeight
-}
-
-func (t *Tip) LastBlockTime() time.Time {
-	return t.lastBlockTime
-}
-
-func (t *Tip) LastBlockHash() []byte {
-	return t.lastBlockHash
-}
-
-func (t *Tip) AppHashAfterLastBlock() []byte {
-	return t.appHashAfterLastBlock
-}
-
-func (t *Tip) IterateValidators(iter func(publicKey crypto.PublicKey, power uint64) (stop bool)) (stopped bool) {
-	return t.validators.Iterate(iter)
-}
-
-func (t *Tip) NumValidators() int {
-	return t.validators.Length()
-}
diff --git a/blockchain/validators.go b/blockchain/validators.go
deleted file mode 100644
index 0ba398af0cd0d2103664d84158223865134978bd..0000000000000000000000000000000000000000
--- a/blockchain/validators.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package blockchain
-
-import (
-	"fmt"
-
-	"sort"
-
-	"bytes"
-	"encoding/binary"
-
-	burrowBinary "github.com/hyperledger/burrow/binary"
-	"github.com/hyperledger/burrow/crypto"
-)
-
-// A Validator multiset
-type Validators struct {
-	power      map[crypto.Address]uint64
-	publicKey  map[crypto.Address]crypto.PublicKey
-	totalPower uint64
-}
-
-func NewValidators() Validators {
-	return Validators{
-		power:     make(map[crypto.Address]uint64),
-		publicKey: make(map[crypto.Address]crypto.PublicKey),
-	}
-}
-
-// Add the power of a validator
-func (vs *Validators) AlterPower(publicKey crypto.PublicKey, power uint64) error {
-	address := publicKey.Address()
-	// Remove existing power (possibly 0) from total
-	vs.totalPower -= vs.power[address]
-	if burrowBinary.IsUint64SumOverflow(vs.totalPower, power) {
-		// Undo removing existing power
-		vs.totalPower += vs.power[address]
-		return fmt.Errorf("could not increase total validator power by %v from %v since that would overflow "+
-			"uint64", power, vs.totalPower)
-	}
-	vs.publicKey[address] = publicKey
-	vs.power[address] = power
-	// Note we are adjusting by the difference in power (+/-) since we subtracted the previous amount above
-	vs.totalPower += power
-	return nil
-}
-
-func (vs *Validators) AddPower(publicKey crypto.PublicKey, power uint64) error {
-	currentPower := vs.power[publicKey.Address()]
-	if burrowBinary.IsUint64SumOverflow(currentPower, power) {
-		return fmt.Errorf("could add power %v to validator %v with power %v because that would overflow uint64",
-			power, publicKey.Address(), currentPower)
-	}
-	return vs.AlterPower(publicKey, vs.power[publicKey.Address()]+power)
-}
-
-func (vs *Validators) SubtractPower(publicKey crypto.PublicKey, power uint64) error {
-	currentPower := vs.power[publicKey.Address()]
-	if currentPower < power {
-		return fmt.Errorf("could subtract power %v from validator %v with power %v because that would "+
-			"underflow uint64", power, publicKey.Address(), currentPower)
-	}
-	return vs.AlterPower(publicKey, vs.power[publicKey.Address()]-power)
-}
-
-// Iterates over validators sorted by address
-func (vs *Validators) Iterate(iter func(publicKey crypto.PublicKey, power uint64) (stop bool)) (stopped bool) {
-	addresses := make(crypto.Addresses, 0, len(vs.power))
-	for address := range vs.power {
-		addresses = append(addresses, address)
-	}
-	sort.Sort(addresses)
-	for _, address := range addresses {
-		if iter(vs.publicKey[address], vs.power[address]) {
-			return true
-		}
-	}
-	return false
-}
-
-func (vs *Validators) Length() int {
-	return len(vs.power)
-}
-
-func (vs *Validators) TotalPower() uint64 {
-	return vs.totalPower
-}
-
-// Uses the fixed width public key encoding to
-func (vs *Validators) Encode() []byte {
-	buffer := new(bytes.Buffer)
-	// varint buffer
-	buf := make([]byte, 8)
-	vs.Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		buffer.Write(publicKey.Encode())
-		buffer.Write(buf[:binary.PutUvarint(buf, power)])
-		return
-	})
-	return buffer.Bytes()
-}
-
-// Decodes validators encoded with Encode - expects the exact encoded size with no trailing bytes
-func DecodeValidators(encoded []byte, validators *Validators) error {
-	publicKey := new(crypto.PublicKey)
-	i := 0
-	for i < len(encoded) {
-		n, err := crypto.DecodePublicKeyFixedWidth(encoded[i:], publicKey)
-		if err != nil {
-			return err
-		}
-		i += n
-		power, n := binary.Uvarint(encoded[i:])
-		if n <= 0 {
-			return fmt.Errorf("error decoding uint64 from validators binary encoding")
-		}
-		i += n
-		err = validators.AlterPower(*publicKey, power)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/blockchain/validators_test.go b/blockchain/validators_test.go
deleted file mode 100644
index 433db89eb006de0cd176a6851f9e7460e6435168..0000000000000000000000000000000000000000
--- a/blockchain/validators_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package blockchain
-
-import (
-	"testing"
-
-	"fmt"
-
-	"math/rand"
-
-	"github.com/hyperledger/burrow/acm"
-	"github.com/hyperledger/burrow/crypto"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestValidators_AlterPower(t *testing.T) {
-	vs := NewValidators()
-	pow1 := uint64(2312312321)
-	assert.NoError(t, vs.AlterPower(pubKey(1), pow1))
-	assert.Equal(t, pow1, vs.TotalPower())
-}
-
-func TestValidators_Encode(t *testing.T) {
-	vs := NewValidators()
-	rnd := rand.New(rand.NewSource(43534543))
-	for i := 0; i < 100; i++ {
-		power := uint64(rnd.Intn(10))
-		require.NoError(t, vs.AlterPower(pubKey(rnd.Int63()), power))
-	}
-	encoded := vs.Encode()
-	vsOut := NewValidators()
-	require.NoError(t, DecodeValidators(encoded, &vsOut))
-	// Check decoded matches encoded
-	var publicKeyPower []interface{}
-	vs.Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		publicKeyPower = append(publicKeyPower, publicKey, power)
-		return
-	})
-	vsOut.Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		assert.Equal(t, publicKeyPower[0], publicKey)
-		assert.Equal(t, publicKeyPower[1], power)
-		publicKeyPower = publicKeyPower[2:]
-		return
-	})
-	assert.Len(t, publicKeyPower, 0, "should exhaust all validators in decoded multiset")
-}
-
-func pubKey(secret interface{}) crypto.PublicKey {
-	return acm.NewConcreteAccountFromSecret(fmt.Sprintf("%v", secret)).PublicKey
-}
diff --git a/blockchain/validators_window.go b/blockchain/validators_window.go
deleted file mode 100644
index 11ebf209b36e441b3826785725abdd7975e485fa..0000000000000000000000000000000000000000
--- a/blockchain/validators_window.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package blockchain
-
-import (
-	"github.com/hyperledger/burrow/crypto"
-)
-
-type ValidatorsWindow struct {
-	Buckets []Validators
-	Total   Validators
-	head    int
-}
-
-// Provides a sliding window over the last size buckets of validator power changes
-func NewValidatorsWindow(size int) ValidatorsWindow {
-	if size < 1 {
-		size = 1
-	}
-	vw := ValidatorsWindow{
-		Buckets: make([]Validators, size),
-		Total:   NewValidators(),
-	}
-	vw.Buckets[vw.head] = NewValidators()
-	return vw
-}
-
-// Updates the current head bucket (accumulator)
-func (vw *ValidatorsWindow) AlterPower(publicKey crypto.PublicKey, power uint64) error {
-	return vw.Buckets[vw.head].AlterPower(publicKey, power)
-}
-
-func (vw *ValidatorsWindow) CommitInto(validatorsToUpdate *Validators) error {
-	var err error
-	if vw.Buckets[vw.head].Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		// Update the sink validators
-		err = validatorsToUpdate.AlterPower(publicKey, power)
-		if err != nil {
-			return true
-		}
-		// Add to total power
-		err = vw.Total.AddPower(publicKey, power)
-		if err != nil {
-			return true
-		}
-		return false
-	}) {
-		// If iteration stopped there was an error
-		return err
-	}
-	// move the ring buffer on
-	vw.head = (vw.head + 1) % len(vw.Buckets)
-	// Subtract the tail bucket (if any) from the total
-	if vw.Buckets[vw.head].Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		err = vw.Total.SubtractPower(publicKey, power)
-		if err != nil {
-			return false
-		}
-		return true
-	}) {
-		return err
-	}
-	// Clear new head bucket (and possibly previous tail)
-	vw.Buckets[vw.head] = NewValidators()
-	return nil
-}
diff --git a/cmd/burrow/commands/configure.go b/cmd/burrow/commands/configure.go
index c64d4c72a2cb953bf901e489241060d803f1250b..d0c149b26e28b5262e1eccf97a296316ebc24e11 100644
--- a/cmd/burrow/commands/configure.go
+++ b/cmd/burrow/commands/configure.go
@@ -15,11 +15,11 @@ import (
 	"github.com/hyperledger/burrow/genesis/spec"
 	"github.com/hyperledger/burrow/keys"
 	"github.com/hyperledger/burrow/logging"
-	logging_config "github.com/hyperledger/burrow/logging/config"
-	"github.com/hyperledger/burrow/logging/config/presets"
+	logging_config "github.com/hyperledger/burrow/logging/logconfig"
+	"github.com/hyperledger/burrow/logging/logconfig/presets"
 	cli "github.com/jawher/mow.cli"
 	amino "github.com/tendermint/go-amino"
-	tm_crypto "github.com/tendermint/go-crypto"
+	tm_crypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
 )
 
@@ -155,7 +155,7 @@ func Configure(output Output) func(cmd *cli.Cmd) {
 
 						if nodeKey {
 							privKey := tm_crypto.GenPrivKeyEd25519()
-							copy(privKey[:], key.PrivateKey.PrivateKey)
+							copy(privKey[:], key.PrivateKey.Key)
 							nodeKey := &p2p.NodeKey{
 								PrivKey: privKey,
 							}
diff --git a/cmd/burrow/commands/dump.go b/cmd/burrow/commands/dump.go
index 1e4ef041254bd5a09095ca017a5c1a8fc7c7f898..46a4aaf83c5d4aabe5c033bf631b3e8a980c24f6 100644
--- a/cmd/burrow/commands/dump.go
+++ b/cmd/burrow/commands/dump.go
@@ -6,7 +6,7 @@ import (
 	"github.com/hyperledger/burrow/forensics"
 	"github.com/hyperledger/burrow/txs"
 	cli "github.com/jawher/mow.cli"
-	"github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/libs/db"
 )
 
 func Dump(output Output) func(cmd *cli.Cmd) {
diff --git a/cmd/burrow/commands/helpers.go b/cmd/burrow/commands/helpers.go
index ee8807e085a957b469568307b7a239ef44fe754d..2ac4c7415a441bbea5103fc974dacf5912ad6e07 100644
--- a/cmd/burrow/commands/helpers.go
+++ b/cmd/burrow/commands/helpers.go
@@ -9,7 +9,7 @@ import (
 	"github.com/hyperledger/burrow/config"
 	"github.com/hyperledger/burrow/config/source"
 	"github.com/hyperledger/burrow/genesis"
-	logging_config "github.com/hyperledger/burrow/logging/config"
+	logging_config "github.com/hyperledger/burrow/logging/logconfig"
 )
 
 type Output interface {
diff --git a/cmd/burrow/commands/tx.go b/cmd/burrow/commands/tx.go
new file mode 100644
index 0000000000000000000000000000000000000000..cdff10da75a9b02f8657b3b60631599137203efe
--- /dev/null
+++ b/cmd/burrow/commands/tx.go
@@ -0,0 +1 @@
+package commands
diff --git a/config/config.go b/config/config.go
index c22ddbb1194d883ff262431a62c7fa4663e1620e..8e3f7b8e71ef5c0630809ca72a7312b9e607bc11 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1,20 +1,18 @@
 package config
 
 import (
-	"fmt"
-
 	"context"
+	"fmt"
 
 	"github.com/hyperledger/burrow/config/source"
 	"github.com/hyperledger/burrow/consensus/tendermint"
-	"github.com/hyperledger/burrow/consensus/tendermint/validator"
 	"github.com/hyperledger/burrow/core"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/execution"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/keys"
-	logging_config "github.com/hyperledger/burrow/logging/config"
 	"github.com/hyperledger/burrow/logging/lifecycle"
+	logging_config "github.com/hyperledger/burrow/logging/logconfig"
 	"github.com/hyperledger/burrow/rpc"
 )
 
@@ -76,7 +74,7 @@ func (conf *BurrowConfig) Kernel(ctx context.Context) (*core.Kernel, error) {
 	if err != nil {
 		return nil, err
 	}
-	privValidator := validator.NewPrivValidatorMemory(val, signer)
+	privValidator := tendermint.NewPrivValidatorMemory(val, signer)
 
 	var exeOptions []execution.ExecutionOption
 	if conf.Execution != nil {
@@ -86,8 +84,8 @@ func (conf *BurrowConfig) Kernel(ctx context.Context) (*core.Kernel, error) {
 		}
 	}
 
-	return core.NewKernel(ctx, keyClient, privValidator, conf.GenesisDoc, conf.Tendermint.TendermintConfig(), conf.RPC, conf.Keys,
-		keyStore, exeOptions, logger)
+	return core.NewKernel(ctx, keyClient, privValidator, conf.GenesisDoc, conf.Tendermint.TendermintConfig(), conf.RPC,
+		conf.Keys, keyStore, exeOptions, logger)
 }
 
 func (conf *BurrowConfig) JSONString() string {
diff --git a/consensus/tendermint/abci/app.go b/consensus/tendermint/abci/app.go
index 6c5247481b86a1e59fd3a7d71ee61efb66d0a9dd..cbacbc94ed70f77d13623b312376535d7173b201 100644
--- a/consensus/tendermint/abci/app.go
+++ b/consensus/tendermint/abci/app.go
@@ -2,26 +2,29 @@ package abci
 
 import (
 	"fmt"
+	"math/big"
 	"sync"
 	"time"
 
 	"runtime/debug"
 
-	bcm "github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/acm/validator"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/consensus/tendermint/codes"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/execution"
+	errors2 "github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
 	"github.com/hyperledger/burrow/project"
 	"github.com/hyperledger/burrow/txs"
 	"github.com/pkg/errors"
-	abciTypes "github.com/tendermint/abci/types"
+	abciTypes "github.com/tendermint/tendermint/abci/types"
 )
 
-const responseInfoName = "Burrow"
-
 type App struct {
+	// Node information to return in Info
+	nodeInfo string
 	// State
 	blockchain    *bcm.Blockchain
 	checker       execution.BatchExecutor
@@ -39,16 +42,18 @@ type App struct {
 
 var _ abciTypes.Application = &App{}
 
-func NewApp(blockchain *bcm.Blockchain, checker execution.BatchExecutor, committer execution.BatchCommitter,
+func NewApp(nodeInfo string, blockchain *bcm.Blockchain, checker execution.BatchExecutor, committer execution.BatchCommitter,
 	txDecoder txs.Decoder, panicFunc func(error), logger *logging.Logger) *App {
 	return &App{
+		nodeInfo:   nodeInfo,
 		blockchain: blockchain,
 		checker:    checker,
 		committer:  committer,
 		checkTx:    txExecutor(checker, txDecoder, logger.WithScope("CheckTx")),
 		deliverTx:  txExecutor(committer, txDecoder, logger.WithScope("DeliverTx")),
 		panicFunc:  panicFunc,
-		logger:     logger.WithScope("abci.NewApp").With(structure.ComponentKey, "ABCI_App"),
+		logger: logger.WithScope("abci.NewApp").With(structure.ComponentKey, "ABCI_App",
+			"node_info", nodeInfo),
 	}
 }
 
@@ -60,12 +65,11 @@ func (app *App) SetMempoolLocker(mempoolLocker sync.Locker) {
 }
 
 func (app *App) Info(info abciTypes.RequestInfo) abciTypes.ResponseInfo {
-	tip := app.blockchain.Tip
 	return abciTypes.ResponseInfo{
-		Data:             responseInfoName,
+		Data:             app.nodeInfo,
 		Version:          project.History.CurrentVersion().String(),
-		LastBlockHeight:  int64(tip.LastBlockHeight()),
-		LastBlockAppHash: tip.AppHashAfterLastBlock(),
+		LastBlockHeight:  int64(app.blockchain.LastBlockHeight()),
+		LastBlockAppHash: app.blockchain.AppHashAfterLastBlock(),
 	}
 }
 
@@ -82,18 +86,68 @@ func (app *App) Query(reqQuery abciTypes.RequestQuery) (respQuery abciTypes.Resp
 }
 
 func (app *App) InitChain(chain abciTypes.RequestInitChain) (respInitChain abciTypes.ResponseInitChain) {
-	// Could verify agreement on initial validator set here
+	defer func() {
+		if r := recover(); r != nil {
+			app.panicFunc(fmt.Errorf("panic occurred in abci.App/InitChain: %v\n%s", r, debug.Stack()))
+		}
+	}()
+	if len(chain.Validators) != app.blockchain.NumValidators() {
+		panic(fmt.Errorf("Tendermint passes %d validators to InitChain but Burrow's Blockchain has %d",
+			len(chain.Validators), app.blockchain.NumValidators()))
+	}
+	for _, v := range chain.Validators {
+		err := app.checkValidatorMatches(app.blockchain.Validators(), v)
+		if err != nil {
+			panic(err)
+		}
+	}
+	app.logger.InfoMsg("Initial validator set matches")
 	return
 }
 
 func (app *App) BeginBlock(block abciTypes.RequestBeginBlock) (respBeginBlock abciTypes.ResponseBeginBlock) {
 	app.block = &block
+	defer func() {
+		if r := recover(); r != nil {
+			app.panicFunc(fmt.Errorf("panic occurred in abci.App/BeginBlock: %v\n%s", r, debug.Stack()))
+		}
+	}()
+	if block.Header.Height > 1 {
+		var err error
+		// Tendermint runs a block behind with the validators passed in here
+		previousValidators := app.blockchain.PreviousValidators()
+		if len(block.Validators) != previousValidators.Count() {
+			err = fmt.Errorf("Tendermint passes %d validators to BeginBlock but Burrow's Blockchain has %d",
+				len(block.Validators), previousValidators.Count())
+			panic(err)
+		}
+		for _, v := range block.Validators {
+			err = app.checkValidatorMatches(previousValidators, v.Validator)
+			if err != nil {
+				panic(err)
+			}
+		}
+	}
 	return
 }
 
+func (app *App) checkValidatorMatches(ours validator.Reader, v abciTypes.Validator) error {
+	publicKey, err := crypto.PublicKeyFromABCIPubKey(v.PubKey)
+	if err != nil {
+		return err
+	}
+	power := ours.Power(publicKey)
+	if power.Cmp(big.NewInt(v.Power)) != 0 {
+		return fmt.Errorf("validator %v has power %d from Tendermint but power %d from Burrow",
+			publicKey.Address(), v.Power, power)
+	}
+	return nil
+}
+
 func (app *App) CheckTx(txBytes []byte) abciTypes.ResponseCheckTx {
 	defer func() {
 		if r := recover(); r != nil {
+			fmt.Println("BeginBlock")
 			app.panicFunc(fmt.Errorf("panic occurred in abci.App/CheckTx: %v\n%s", r, debug.Stack()))
 		}
 	}()
@@ -134,19 +188,20 @@ func txExecutor(executor execution.BatchExecutor, txDecoder txs.Decoder, logger
 
 		txe, err := executor.Execute(txEnv)
 		if err != nil {
+			ex := errors2.AsException(err)
 			logger.TraceMsg("Execution error",
 				structure.ErrorKey, err,
 				"tx_hash", txEnv.Tx.Hash())
 			return abciTypes.ResponseCheckTx{
-				Code: codes.EncodingErrorCode,
-				Log:  fmt.Sprintf("Could not execute transaction: %s, error: %v", txEnv, err),
+				Code: codes.TxExecutionErrorCode,
+				Log:  fmt.Sprintf("Could not execute transaction: %s, error: %v", txEnv, ex.Exception),
 			}
 		}
 
 		bs, err := txe.Receipt.Encode()
 		if err != nil {
 			return abciTypes.ResponseCheckTx{
-				Code: codes.TxExecutionErrorCode,
+				Code: codes.EncodingErrorCode,
 				Log:  fmt.Sprintf("Could not serialise receipt: %s", err),
 			}
 		}
@@ -163,13 +218,15 @@ func txExecutor(executor execution.BatchExecutor, txDecoder txs.Decoder, logger
 }
 
 func (app *App) EndBlock(reqEndBlock abciTypes.RequestEndBlock) abciTypes.ResponseEndBlock {
-	// Validator mutation goes here
 	var validatorUpdates abciTypes.Validators
-	app.blockchain.IterateValidators(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
+	app.blockchain.PendingValidators().Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		app.logger.InfoMsg("Updating validator power", "validator_address", id.Address(),
+			"new_power", power)
 		validatorUpdates = append(validatorUpdates, abciTypes.Validator{
-			Address: publicKey.Address().Bytes(),
-			PubKey:  publicKey.ABCIPubKey(),
-			Power:   int64(power),
+			Address: id.Address().Bytes(),
+			PubKey:  id.PublicKey().ABCIPubKey(),
+			// Must ensure power fits in an int64 during execution
+			Power: power.Int64(),
 		})
 		return
 	})
@@ -184,15 +241,16 @@ func (app *App) Commit() abciTypes.ResponseCommit {
 			app.panicFunc(fmt.Errorf("panic occurred in abci.App/Commit: %v\n%s", r, debug.Stack()))
 		}
 	}()
+	blockTime := time.Unix(app.block.Header.Time, 0)
 	app.logger.InfoMsg("Committing block",
 		"tag", "Commit",
 		structure.ScopeKey, "Commit()",
 		"height", app.block.Header.Height,
 		"hash", app.block.Hash,
 		"txs", app.block.Header.NumTxs,
-		"block_time", app.block.Header.Time, // [CSK] this sends a fairly non-sensical number; should be human readable
-		"last_block_time", app.blockchain.Tip.LastBlockTime(),
-		"last_block_hash", app.blockchain.Tip.LastBlockHash())
+		"block_time", blockTime,
+		"last_block_time", app.blockchain.LastBlockTime(),
+		"last_block_hash", app.blockchain.LastBlockHash())
 
 	// Lock the checker while we reset it and possibly while recheckTxs replays transactions
 	app.checker.Lock()
@@ -219,37 +277,16 @@ func (app *App) Commit() abciTypes.ResponseCommit {
 		}
 	}()
 
-	// First commit the app start, this app hash will not get checkpointed until the next block when we are sure
-	// that nothing in the downstream commit process could have failed. At worst we go back one block.
-	blockHeader := app.block.Header
-	appHash, err := app.committer.Commit(&blockHeader)
+	appHash, err := app.committer.Commit(app.block.Hash, blockTime, &app.block.Header)
 	if err != nil {
 		panic(errors.Wrap(err, "Could not commit transactions in block to execution state"))
 	}
 
-	// Commit to our blockchain state which will checkpoint the previous app hash by saving it to the database
-	// (we know the previous app hash is safely committed because we are about to commit the next)
-	err = app.blockchain.CommitBlock(time.Unix(int64(app.block.Header.Time), 0), app.block.Hash, appHash)
-	if err != nil {
-		panic(errors.Wrap(err, "could not commit block to blockchain state"))
-	}
-
 	err = app.checker.Reset()
 	if err != nil {
 		panic(errors.Wrap(err, "could not reset check cache during commit"))
 	}
 
-	// Perform a sanity check our block height
-	if app.blockchain.LastBlockHeight() != uint64(app.block.Header.Height) {
-		app.logger.InfoMsg("Burrow block height disagrees with Tendermint block height",
-			structure.ScopeKey, "Commit()",
-			"burrow_height", app.blockchain.LastBlockHeight(),
-			"tendermint_height", app.block.Header.Height)
-
-		panic(fmt.Errorf("burrow has recorded a block height of %v, "+
-			"but Tendermint reports a block height of %v, and the two should agree",
-			app.blockchain.LastBlockHeight(), app.block.Header.Height))
-	}
 	return abciTypes.ResponseCommit{
 		Data: appHash,
 	}
diff --git a/consensus/tendermint/codes/codes.go b/consensus/tendermint/codes/codes.go
index 9fe76ea60fa878436971ea3f993708cb5074fcc7..3e882570e81766b18609b3d02255fe95b7d7233a 100644
--- a/consensus/tendermint/codes/codes.go
+++ b/consensus/tendermint/codes/codes.go
@@ -1,7 +1,7 @@
 package codes
 
 import (
-	abci_types "github.com/tendermint/abci/types"
+	abci_types "github.com/tendermint/tendermint/abci/types"
 )
 
 const (
diff --git a/consensus/tendermint/config.go b/consensus/tendermint/config.go
index 4f9db3c7a3737c62e981cdec475bc5b58713fa52..92acbf9c802881f413254796798c0f636847e467 100644
--- a/consensus/tendermint/config.go
+++ b/consensus/tendermint/config.go
@@ -13,6 +13,8 @@ type BurrowTendermintConfig struct {
 	// Peers to which we automatically connect
 	PersistentPeers string
 	ListenAddress   string
+	// Optional external address that nodes may provide with their NodeInfo
+	ExternalAddress string
 	Moniker         string
 	TendermintRoot  string
 }
@@ -20,8 +22,9 @@ type BurrowTendermintConfig struct {
 func DefaultBurrowTendermintConfig() *BurrowTendermintConfig {
 	tmDefaultConfig := tm_config.DefaultConfig()
 	return &BurrowTendermintConfig{
-		ListenAddress:  tmDefaultConfig.P2P.ListenAddress,
-		TendermintRoot: ".burrow",
+		ListenAddress:   tmDefaultConfig.P2P.ListenAddress,
+		ExternalAddress: tmDefaultConfig.P2P.ExternalAddress,
+		TendermintRoot:  ".burrow",
 	}
 }
 
@@ -37,7 +40,10 @@ func (btc *BurrowTendermintConfig) TendermintConfig() *tm_config.Config {
 		conf.P2P.Seeds = btc.Seeds
 		conf.P2P.PersistentPeers = btc.PersistentPeers
 		conf.P2P.ListenAddress = btc.ListenAddress
+		conf.P2P.ExternalAddress = btc.ExternalAddress
 		conf.Moniker = btc.Moniker
+		// Unfortunately this stops metrics from being used at all
+		conf.Instrumentation.Prometheus = false
 	}
 	// Disable Tendermint RPC
 	conf.RPC.ListenAddress = ""
diff --git a/consensus/tendermint/logger.go b/consensus/tendermint/logger.go
index 2a73c22fb234525dd95d3abd0ae5602e91ce3443..1d174e90efe26ba324eb836a218e39b5458ee662 100644
--- a/consensus/tendermint/logger.go
+++ b/consensus/tendermint/logger.go
@@ -2,7 +2,7 @@ package tendermint
 
 import (
 	"github.com/hyperledger/burrow/logging"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 type tendermintLogger struct {
diff --git a/consensus/tendermint/query/node_view.go b/consensus/tendermint/node_view.go
similarity index 87%
rename from consensus/tendermint/query/node_view.go
rename to consensus/tendermint/node_view.go
index d998846756897d8da2ad2ed7969845b2e97a614d..62a50085d82dd9d3b80eed78c5c0f90cb1c845c4 100644
--- a/consensus/tendermint/query/node_view.go
+++ b/consensus/tendermint/node_view.go
@@ -1,26 +1,24 @@
-package query
+package tendermint
 
 import (
 	"fmt"
 
-	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/crypto"
-	tm_crypto "github.com/tendermint/go-crypto"
-
 	"github.com/hyperledger/burrow/txs"
 	"github.com/tendermint/tendermint/consensus"
 	ctypes "github.com/tendermint/tendermint/consensus/types"
+	tmCrypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )
 
 type NodeView struct {
-	tmNode    *tendermint.Node
+	tmNode    *Node
 	txDecoder txs.Decoder
 }
 
-func NewNodeView(tmNode *tendermint.Node, txDecoder txs.Decoder) *NodeView {
+func NewNodeView(tmNode *Node, txDecoder txs.Decoder) *NodeView {
 	return &NodeView{
 		tmNode:    tmNode,
 		txDecoder: txDecoder,
@@ -28,7 +26,7 @@ func NewNodeView(tmNode *tendermint.Node, txDecoder txs.Decoder) *NodeView {
 }
 
 func (nv *NodeView) PrivValidatorPublicKey() (crypto.PublicKey, error) {
-	pub := nv.tmNode.PrivValidator().GetPubKey().(tm_crypto.PubKeyEd25519)
+	pub := nv.tmNode.PrivValidator().GetPubKey().(tmCrypto.PubKeyEd25519)
 
 	return crypto.PublicKeyFromBytes(pub[:], crypto.CurveTypeEd25519)
 }
diff --git a/consensus/tendermint/validator/priv_validator_memory.go b/consensus/tendermint/priv_validator_memory.go
similarity index 88%
rename from consensus/tendermint/validator/priv_validator_memory.go
rename to consensus/tendermint/priv_validator_memory.go
index 05455cba2ab81ec8ef4a256d33d5b46836a65136..fb63b76ae4b4980dd68b6ab81a161f0e427b4b46 100644
--- a/consensus/tendermint/validator/priv_validator_memory.go
+++ b/consensus/tendermint/priv_validator_memory.go
@@ -1,14 +1,13 @@
-package validator
+package tendermint
 
 import (
-	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/crypto"
-	tm_crypto "github.com/tendermint/go-crypto"
+	tm_crypto "github.com/tendermint/tendermint/crypto"
 	tm_types "github.com/tendermint/tendermint/types"
 )
 
 type privValidatorMemory struct {
-	acm.Addressable
+	crypto.Addressable
 	signer         goCryptoSigner
 	lastSignedInfo *LastSignedInfo
 }
@@ -17,7 +16,7 @@ var _ tm_types.PrivValidator = &privValidatorMemory{}
 
 // Create a PrivValidator with in-memory state that takes an addressable representing the validator identity
 // and a signer providing private signing for that identity.
-func NewPrivValidatorMemory(addressable acm.Addressable, signer crypto.Signer) *privValidatorMemory {
+func NewPrivValidatorMemory(addressable crypto.Addressable, signer crypto.Signer) *privValidatorMemory {
 	return &privValidatorMemory{
 		Addressable:    addressable,
 		signer:         asTendermintSigner(signer),
diff --git a/consensus/tendermint/validator/sign_info.go b/consensus/tendermint/sign_info.go
similarity index 96%
rename from consensus/tendermint/validator/sign_info.go
rename to consensus/tendermint/sign_info.go
index 975538656cad456bfd55c07df5419e340d37ac11..1a11cbf75f025b6be25e8d9acec915be58efb385 100644
--- a/consensus/tendermint/validator/sign_info.go
+++ b/consensus/tendermint/sign_info.go
@@ -1,4 +1,4 @@
-package validator
+package tendermint
 
 import (
 	"bytes"
@@ -7,9 +7,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/hyperledger/burrow/binary"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // TODO: type ?
@@ -27,7 +27,7 @@ func voteToStep(vote *types.Vote) int8 {
 	case types.VoteTypePrecommit:
 		return stepPrecommit
 	default:
-		cmn.PanicSanity("Unknown vote type")
+		panic("Unknown vote type")
 		return 0
 	}
 }
@@ -40,7 +40,7 @@ type LastSignedInfo struct {
 	Round     int              `json:"round"`
 	Step      int8             `json:"step"`
 	Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
-	SignBytes cmn.HexBytes     `json:"signbytes,omitempty"` // so we dont lose signatures
+	SignBytes binary.HexBytes  `json:"signbytes,omitempty"` // so we dont lose signatures
 }
 
 func NewLastSignedInfo() *LastSignedInfo {
@@ -57,7 +57,7 @@ func (lsi *LastSignedInfo) SignVote(sign goCryptoSigner, chainID string, vote *t
 	lsi.Lock()
 	defer lsi.Unlock()
 	if err := lsi.signVote(sign, chainID, vote); err != nil {
-		return errors.New(cmn.Fmt("Error signing vote: %v", err))
+		return fmt.Errorf("error signing vote: %v", err)
 	}
 	return nil
 }
@@ -68,7 +68,7 @@ func (lsi *LastSignedInfo) SignProposal(sign goCryptoSigner, chainID string, pro
 	lsi.Lock()
 	defer lsi.Unlock()
 	if err := lsi.signProposal(sign, chainID, proposal); err != nil {
-		return fmt.Errorf("Error signing proposal: %v", err)
+		return fmt.Errorf("error signing proposal: %v", err)
 	}
 	return nil
 }
diff --git a/consensus/tendermint/tendermint.go b/consensus/tendermint/tendermint.go
index 3ada43cf2c49f6fd915e3de4b6342593cbf854e0..0122f580eea7f9b3d70ab7c5f0cb355abcdcca08 100644
--- a/consensus/tendermint/tendermint.go
+++ b/consensus/tendermint/tendermint.go
@@ -4,20 +4,16 @@ import (
 	"os"
 	"path"
 
-	bcm "github.com/hyperledger/burrow/blockchain"
 	"github.com/hyperledger/burrow/consensus/tendermint/abci"
-	"github.com/hyperledger/burrow/event"
-	"github.com/hyperledger/burrow/execution"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
-	"github.com/hyperledger/burrow/txs"
-	tmCrypto "github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/config"
+	tmCrypto "github.com/tendermint/tendermint/crypto"
+	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/node"
 	"github.com/tendermint/tendermint/proxy"
 	tmTypes "github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
 )
 
 // Serves as a wrapper around the Tendermint node's closeable resources (database connections)
@@ -28,8 +24,6 @@ type Node struct {
 	}
 }
 
-var NewBlockQuery = event.QueryForEventID(tmTypes.EventNewBlock)
-
 func DBProvider(ID string, backendType dbm.DBBackendType, dbDir string) dbm.DB {
 	return dbm.NewDB(ID, backendType, dbDir)
 }
@@ -48,8 +42,7 @@ func (n *Node) Close() {
 }
 
 func NewNode(conf *config.Config, privValidator tmTypes.PrivValidator, genesisDoc *tmTypes.GenesisDoc,
-	blockchain *bcm.Blockchain, checker execution.BatchExecutor, committer execution.BatchCommitter,
-	txDecoder txs.Decoder, panicFunc func(error), logger *logging.Logger) (*Node, error) {
+	app *abci.App, metricsProvider node.MetricsProvider, logger *logging.Logger) (*Node, error) {
 
 	var err error
 	// disable Tendermint's RPC
@@ -61,14 +54,13 @@ func NewNode(conf *config.Config, privValidator tmTypes.PrivValidator, genesisDo
 	}
 
 	nde := &Node{}
-	app := abci.NewApp(blockchain, checker, committer, txDecoder, panicFunc, logger)
-	conf.NodeKeyFile()
 	nde.Node, err = node.NewNode(conf, privValidator,
 		proxy.NewLocalClientCreator(app),
 		func() (*tmTypes.GenesisDoc, error) {
 			return genesisDoc, nil
 		},
 		nde.DBProvider,
+		metricsProvider,
 		NewLogger(logger.WithPrefix(structure.ComponentKey, "Tendermint").
 			With(structure.ScopeKey, "tendermint.NewNode")))
 	if err != nil {
diff --git a/consensus/tendermint/validator/wire.go b/consensus/tendermint/validator/wire.go
deleted file mode 100644
index 7f063ca389e87af133f79a100e2c81c424d21960..0000000000000000000000000000000000000000
--- a/consensus/tendermint/validator/wire.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package validator
-
-import (
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	crypto.RegisterAmino(cdc)
-}
diff --git a/consensus/tendermint/wire.go b/consensus/tendermint/wire.go
new file mode 100644
index 0000000000000000000000000000000000000000..24f0bc111f87bf6385e84cd1fc9ef2c781ed6520
--- /dev/null
+++ b/consensus/tendermint/wire.go
@@ -0,0 +1,12 @@
+package tendermint
+
+import (
+	amino "github.com/tendermint/go-amino"
+	"github.com/tendermint/tendermint/crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+}
diff --git a/core/kernel.go b/core/kernel.go
index fee805da88c4770fe9bfd719330f711607627428..46854844acfc90d1d9214deccb44c5cc8444abcb 100644
--- a/core/kernel.go
+++ b/core/kernel.go
@@ -27,9 +27,9 @@ import (
 	"time"
 
 	kitlog "github.com/go-kit/kit/log"
-	bcm "github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/consensus/tendermint"
-	"github.com/hyperledger/burrow/consensus/tendermint/query"
+	"github.com/hyperledger/burrow/consensus/tendermint/abci"
 	"github.com/hyperledger/burrow/event"
 	"github.com/hyperledger/burrow/execution"
 	"github.com/hyperledger/burrow/genesis"
@@ -45,15 +45,17 @@ import (
 	"github.com/hyperledger/burrow/rpc/tm"
 	"github.com/hyperledger/burrow/txs"
 	tmConfig "github.com/tendermint/tendermint/config"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/node"
 	tmTypes "github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
 )
 
 const (
-	CooldownMilliseconds              = 1000
-	ServerShutdownTimeoutMilliseconds = 1000
-	LoggingCallerDepth                = 5
-	AccountsRingMutexCount            = 100
+	FirstBlockTimeout      = 3 * time.Second
+	CooldownTime           = 1000 * time.Millisecond
+	ServerShutdownTimeout  = 1000 * time.Millisecond
+	LoggingCallerDepth     = 5
+	AccountsRingMutexCount = 100
 )
 
 // Kernel is the root structure of Burrow
@@ -63,8 +65,10 @@ type Kernel struct {
 	Service        *rpc.Service
 	Launchers      []process.Launcher
 	State          *execution.State
-	Blockchain     bcm.BlockchainInfo
+	Blockchain     *bcm.Blockchain
+	Node           *tendermint.Node
 	Logger         *logging.Logger
+	nodeInfo       string
 	processes      map[string]process.Process
 	shutdownNotify chan struct{}
 	shutdownOnce   sync.Once
@@ -74,53 +78,58 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 	genesisDoc *genesis.GenesisDoc, tmConf *tmConfig.Config, rpcConfig *rpc.RPCConfig, keyConfig *keys.KeysConfig,
 	keyStore *keys.KeyStore, exeOptions []execution.ExecutionOption, logger *logging.Logger) (*Kernel, error) {
 
+	var err error
 	kern := &Kernel{
 		processes:      make(map[string]process.Process),
 		shutdownNotify: make(chan struct{}),
 	}
 	logger = logger.WithScope("NewKernel()").With(structure.TimeKey, kitlog.DefaultTimestampUTC)
 	tmLogger := logger.With(structure.CallerKey, kitlog.Caller(LoggingCallerDepth+1))
-	logger = logger.WithInfo(structure.CallerKey, kitlog.Caller(LoggingCallerDepth))
+	kern.Logger = logger.WithInfo(structure.CallerKey, kitlog.Caller(LoggingCallerDepth))
 	stateDB := dbm.NewDB("burrow_state", dbm.GoLevelDBBackend, tmConf.DBDir())
 
-	blockchain, err := bcm.LoadOrNewBlockchain(stateDB, genesisDoc, logger)
+	kern.Blockchain, err = bcm.LoadOrNewBlockchain(stateDB, genesisDoc, kern.Logger)
 	if err != nil {
 		return nil, fmt.Errorf("error creating or loading blockchain state: %v", err)
 	}
 
-	var state *execution.State
 	// These should be in sync unless we are at the genesis block
-	if blockchain.LastBlockHeight() > 0 {
-		state, err = execution.LoadState(stateDB, blockchain.AppHashAfterLastBlock())
+	if kern.Blockchain.LastBlockHeight() > 0 {
+		kern.State, err = execution.LoadState(stateDB, kern.Blockchain.AppHashAfterLastBlock())
 		if err != nil {
 			return nil, fmt.Errorf("could not load persisted execution state at hash 0x%X: %v",
-				blockchain.AppHashAfterLastBlock(), err)
+				kern.Blockchain.AppHashAfterLastBlock(), err)
 		}
 	} else {
-		state, err = execution.MakeGenesisState(stateDB, genesisDoc)
+		kern.State, err = execution.MakeGenesisState(stateDB, genesisDoc)
 	}
 
 	txCodec := txs.NewAminoCodec()
 	tmGenesisDoc := tendermint.DeriveGenesisDoc(genesisDoc)
-	checker := execution.NewBatchChecker(state, blockchain.Tip, logger)
+	checker := execution.NewBatchChecker(kern.State, kern.Blockchain, kern.Logger)
 
-	emitter := event.NewEmitter(logger)
-	committer := execution.NewBatchCommitter(state, blockchain.Tip, emitter, logger, exeOptions...)
-	tmNode, err := tendermint.NewNode(tmConf, privValidator, tmGenesisDoc, blockchain, checker, committer, txCodec,
-		kern.Panic, tmLogger)
+	kern.Emitter = event.NewEmitter(kern.Logger)
+	committer := execution.NewBatchCommitter(kern.State, kern.Blockchain, kern.Emitter, kern.Logger, exeOptions...)
+
+	kern.nodeInfo = fmt.Sprintf("Burrow_%s_%X", genesisDoc.ChainID(), privValidator.GetAddress())
+	app := abci.NewApp(kern.nodeInfo, kern.Blockchain, checker, committer, txCodec, kern.Panic, logger)
+	// We could use this to provide/register our own metrics (though this will register them with us). Unfortunately
+	// Tendermint currently ignores the metrics passed unless its own server is turned on.
+	metricsProvider := node.DefaultMetricsProvider
+	kern.Node, err = tendermint.NewNode(tmConf, privValidator, tmGenesisDoc, app, metricsProvider, tmLogger)
 	if err != nil {
 		return nil, err
 	}
 
-	transactor := execution.NewTransactor(blockchain.Tip, emitter, execution.NewAccounts(checker, keyClient, AccountsRingMutexCount),
-		tmNode.MempoolReactor().BroadcastTx, txCodec, logger)
+	transactor := execution.NewTransactor(kern.Blockchain, kern.Emitter, execution.NewAccounts(checker, keyClient, AccountsRingMutexCount),
+		kern.Node.MempoolReactor().BroadcastTx, txCodec, kern.Logger)
 
-	nameRegState := state
-	accountState := state
-	service := rpc.NewService(accountState, nameRegState, blockchain, transactor,
-		query.NewNodeView(tmNode, txCodec), logger)
+	nameRegState := kern.State
+	accountState := kern.State
+	nodeView := tendermint.NewNodeView(kern.Node, txCodec)
+	kern.Service = rpc.NewService(accountState, nameRegState, kern.Blockchain, nodeView, kern.Logger)
 
-	launchers := []process.Launcher{
+	kern.Launchers = []process.Launcher{
 		{
 			Name:    "Profiling Server",
 			Enabled: rpcConfig.Profiler.Enabled,
@@ -131,7 +140,7 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 				go func() {
 					err := debugServer.ListenAndServe()
 					if err != nil {
-						logger.InfoMsg("Error from pprof debug server", structure.ErrorKey, err)
+						kern.Logger.InfoMsg("Error from pprof debug server", structure.ErrorKey, err)
 					}
 				}()
 				return debugServer, nil
@@ -152,22 +161,22 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 			Name:    "Tendermint",
 			Enabled: true,
 			Launch: func() (process.Process, error) {
-				err := tmNode.Start()
+				err := kern.Node.Start()
 				if err != nil {
 					return nil, fmt.Errorf("error starting Tendermint node: %v", err)
 				}
 				return process.ShutdownFunc(func(ctx context.Context) error {
-					err := tmNode.Stop()
+					err := kern.Node.Stop()
 					// Close tendermint database connections using our wrapper
-					defer tmNode.Close()
+					defer kern.Node.Close()
 					if err != nil {
 						return err
 					}
 					select {
 					case <-ctx.Done():
 						return ctx.Err()
-					case <-tmNode.Quit():
-						logger.InfoMsg("Tendermint Node has quit, closing DB connections...")
+					case <-kern.Node.Quit():
+						kern.Logger.InfoMsg("Tendermint Node has quit, closing DB connections...")
 						return nil
 					}
 					return err
@@ -178,7 +187,7 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 			Name:    "RPC/tm",
 			Enabled: rpcConfig.TM.Enabled,
 			Launch: func() (process.Process, error) {
-				server, err := tm.StartServer(service, "/websocket", rpcConfig.TM.ListenAddress, logger)
+				server, err := tm.StartServer(kern.Service, "/websocket", rpcConfig.TM.ListenAddress, kern.Logger)
 				if err != nil {
 					return nil, err
 				}
@@ -189,7 +198,8 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 			Name:    "RPC/metrics",
 			Enabled: rpcConfig.Metrics.Enabled,
 			Launch: func() (process.Process, error) {
-				server, err := metrics.StartServer(service, rpcConfig.Metrics.MetricsPath, rpcConfig.Metrics.ListenAddress, rpcConfig.Metrics.BlockSampleSize, logger)
+				server, err := metrics.StartServer(kern.Service, rpcConfig.Metrics.MetricsPath,
+					rpcConfig.Metrics.ListenAddress, rpcConfig.Metrics.BlockSampleSize, kern.Logger)
 				if err != nil {
 					return nil, err
 				}
@@ -204,9 +214,8 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 				if err != nil {
 					return nil, err
 				}
-				listen.Addr()
 
-				grpcServer := rpc.NewGRPCServer(logger)
+				grpcServer := rpc.NewGRPCServer(kern.Logger)
 				var ks *keys.KeyStore
 				if keyStore != nil {
 					ks = keyStore
@@ -214,17 +223,18 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 
 				if keyConfig.GRPCServiceEnabled {
 					if keyStore == nil {
-						ks = keys.NewKeyStore(keyConfig.KeysDirectory, keyConfig.AllowBadFilePermissions, logger)
+						ks = keys.NewKeyStore(keyConfig.KeysDirectory, keyConfig.AllowBadFilePermissions, kern.Logger)
 					}
 					keys.RegisterKeysServer(grpcServer, ks)
 				}
 
-				rpcquery.RegisterQueryServer(grpcServer, rpcquery.NewQueryServer(state, nameRegState))
+				rpcquery.RegisterQueryServer(grpcServer, rpcquery.NewQueryServer(kern.State, nameRegState,
+					kern.Blockchain, nodeView, kern.Logger))
 
 				rpctransact.RegisterTransactServer(grpcServer, rpctransact.NewTransactServer(transactor, txCodec))
 
-				rpcevents.RegisterExecutionEventsServer(grpcServer, rpcevents.NewExecutionEventsServer(state, emitter,
-					blockchain.Tip, logger))
+				rpcevents.RegisterExecutionEventsServer(grpcServer, rpcevents.NewExecutionEventsServer(kern.State,
+					kern.Emitter, kern.Blockchain, kern.Logger))
 
 				// Provides metadata about services registered
 				//reflection.Register(grpcServer)
@@ -240,12 +250,6 @@ func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTy
 		},
 	}
 
-	kern.Emitter = emitter
-	kern.Service = service
-	kern.Launchers = launchers
-	kern.State = state
-	kern.Blockchain = blockchain
-	kern.Logger = logger
 	return kern, nil
 }
 
@@ -266,7 +270,7 @@ func (kern *Kernel) Boot() error {
 }
 
 func (kern *Kernel) Panic(err error) {
-	fmt.Fprintf(os.Stderr, "Kernel shutting down due to panic: %v", err)
+	fmt.Fprintf(os.Stderr, "%s: Kernel shutting down due to panic: %v", kern.nodeInfo, err)
 	kern.Shutdown(context.Background())
 	os.Exit(1)
 }
@@ -279,7 +283,5 @@ func (kern *Kernel) WaitForShutdown() {
 
 // Supervise kernel once booted
 func (kern *Kernel) supervise() {
-	// TODO: Consider capturing kernel panics from boot and sending them here via a channel where we could
-	// perform disaster restarts of the kernel; rejoining the network as if we were a new node.
 	signals := make(chan os.Signal, 1)
 	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
@@ -295,7 +298,7 @@ func (kern *Kernel) Shutdown(ctx context.Context) (err error) {
 		logger := kern.Logger.WithScope("Shutdown")
 		logger.InfoMsg("Attempting graceful shutdown...")
 		logger.InfoMsg("Shutting down servers")
-		ctx, cancel := context.WithTimeout(ctx, ServerShutdownTimeoutMilliseconds*time.Millisecond)
+		ctx, cancel := context.WithTimeout(ctx, ServerShutdownTimeout)
 		defer cancel()
 		// Shutdown servers in reverse order to boot
 		for i := len(kern.Launchers) - 1; i >= 0; i-- {
@@ -319,7 +322,7 @@ func (kern *Kernel) Shutdown(ctx context.Context) (err error) {
 		structure.Sync(kern.Logger.Trace)
 		// We don't want to wait for them, but yielding for a cooldown Let other goroutines flush
 		// potentially interesting final output (e.g. log messages)
-		time.Sleep(time.Millisecond * CooldownMilliseconds)
+		time.Sleep(CooldownTime)
 		close(kern.shutdownNotify)
 	})
 	return
diff --git a/crypto/address.go b/crypto/address.go
index 9dab201555b8262b792bcfc3328ba81100c7ae3b..0a1a705798ea796105c809ef73fa36115bda08a6 100644
--- a/crypto/address.go
+++ b/crypto/address.go
@@ -10,6 +10,40 @@ import (
 	"golang.org/x/crypto/ripemd160"
 )
 
+type Addressable interface {
+	// Get the 20 byte EVM address of this account
+	Address() Address
+	// Public key from which the Address is derived
+	PublicKey() PublicKey
+}
+
+func NewAddressable(address Address, publicKey PublicKey) Addressable {
+	return &memoizedAddressable{
+		address:   address,
+		publicKey: publicKey,
+	}
+}
+
+type memoizedAddressable struct {
+	publicKey PublicKey
+	address   Address
+}
+
+func MemoizeAddressable(addressable Addressable) Addressable {
+	if a, ok := addressable.(*memoizedAddressable); ok {
+		return a
+	}
+	return NewAddressable(addressable.Address(), addressable.PublicKey())
+}
+
+func (a *memoizedAddressable) PublicKey() PublicKey {
+	return a.publicKey
+}
+
+func (a *memoizedAddressable) Address() Address {
+	return a.address
+}
+
 type Address binary.Word160
 
 type Addresses []Address
@@ -25,7 +59,8 @@ func (as Addresses) Swap(i, j int) {
 	as[i], as[j] = as[j], as[i]
 }
 
-const AddressHexLength = 2 * binary.Word160Length
+const AddressLength = binary.Word160Length
+const AddressHexLength = 2 * AddressLength
 
 var ZeroAddress = Address{}
 
diff --git a/crypto/crypto.go b/crypto/crypto.go
index e5f62db1024847f61d3e239ce7322be26cc6ec75..eab93deac88c8c5ce76c0d518a0953fdd0e92f37 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -21,6 +21,16 @@ func (k CurveType) String() string {
 		return "unknown"
 	}
 }
+func (k CurveType) ABCIType() string {
+	switch k {
+	case CurveTypeSecp256k1:
+		return "secp256k1"
+	case CurveTypeEd25519:
+		return "ed25519"
+	default:
+		return "unknown"
+	}
+}
 
 // Get this CurveType's 8 bit identifier as a byte
 func (k CurveType) Byte() byte {
diff --git a/crypto/crypto.pb.go b/crypto/crypto.pb.go
index 230f0f5e5efe55c40feffe0ac493737355a061e3..2a8e152bb0fb066fee08c72e896c05d82eeb4d2c 100644
--- a/crypto/crypto.pb.go
+++ b/crypto/crypto.pb.go
@@ -36,7 +36,7 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 // PublicKey
 type PublicKey struct {
 	CurveType CurveType `protobuf:"varint,1,opt,name=CurveType,proto3,casttype=CurveType" json:"CurveType,omitempty"`
-	PublicKey []byte    `protobuf:"bytes,2,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"`
+	Key       []byte    `protobuf:"bytes,2,opt,name=Key,proto3" json:"Key,omitempty"`
 }
 
 func (m *PublicKey) Reset()                    { *m = PublicKey{} }
@@ -50,9 +50,9 @@ func (m *PublicKey) GetCurveType() CurveType {
 	return 0
 }
 
-func (m *PublicKey) GetPublicKey() []byte {
+func (m *PublicKey) GetKey() []byte {
 	if m != nil {
-		return m.PublicKey
+		return m.Key
 	}
 	return nil
 }
@@ -64,8 +64,8 @@ func (*PublicKey) XXX_MessageName() string {
 type PrivateKey struct {
 	CurveType CurveType `protobuf:"varint,1,opt,name=CurveType,proto3,casttype=CurveType" json:"CurveType,omitempty"`
 	// Note may need initialisation
-	PublicKey  []byte `protobuf:"bytes,2,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"`
-	PrivateKey []byte `protobuf:"bytes,3,opt,name=PrivateKey,proto3" json:"PrivateKey,omitempty"`
+	PublicKey []byte `protobuf:"bytes,2,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"`
+	Key       []byte `protobuf:"bytes,3,opt,name=Key,proto3" json:"Key,omitempty"`
 }
 
 func (m *PrivateKey) Reset()                    { *m = PrivateKey{} }
@@ -101,11 +101,11 @@ func (m *PublicKey) MarshalTo(dAtA []byte) (int, error) {
 		i++
 		i = encodeVarintCrypto(dAtA, i, uint64(m.CurveType))
 	}
-	if len(m.PublicKey) > 0 {
+	if len(m.Key) > 0 {
 		dAtA[i] = 0x12
 		i++
-		i = encodeVarintCrypto(dAtA, i, uint64(len(m.PublicKey)))
-		i += copy(dAtA[i:], m.PublicKey)
+		i = encodeVarintCrypto(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	return i, nil
 }
@@ -136,11 +136,11 @@ func (m *PrivateKey) MarshalTo(dAtA []byte) (int, error) {
 		i = encodeVarintCrypto(dAtA, i, uint64(len(m.PublicKey)))
 		i += copy(dAtA[i:], m.PublicKey)
 	}
-	if len(m.PrivateKey) > 0 {
+	if len(m.Key) > 0 {
 		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintCrypto(dAtA, i, uint64(len(m.PrivateKey)))
-		i += copy(dAtA[i:], m.PrivateKey)
+		i = encodeVarintCrypto(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	return i, nil
 }
@@ -160,7 +160,7 @@ func (m *PublicKey) Size() (n int) {
 	if m.CurveType != 0 {
 		n += 1 + sovCrypto(uint64(m.CurveType))
 	}
-	l = len(m.PublicKey)
+	l = len(m.Key)
 	if l > 0 {
 		n += 1 + l + sovCrypto(uint64(l))
 	}
@@ -177,7 +177,7 @@ func (m *PrivateKey) Size() (n int) {
 	if l > 0 {
 		n += 1 + l + sovCrypto(uint64(l))
 	}
-	l = len(m.PrivateKey)
+	l = len(m.Key)
 	if l > 0 {
 		n += 1 + l + sovCrypto(uint64(l))
 	}
@@ -247,7 +247,7 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error {
 			}
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
 			}
 			var byteLen int
 			for shift := uint(0); ; shift += 7 {
@@ -271,9 +271,9 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...)
-			if m.PublicKey == nil {
-				m.PublicKey = []byte{}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
 			}
 			iNdEx = postIndex
 		default:
@@ -378,7 +378,7 @@ func (m *PrivateKey) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrivateKey", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
 			}
 			var byteLen int
 			for shift := uint(0); ; shift += 7 {
@@ -402,9 +402,9 @@ func (m *PrivateKey) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.PrivateKey = append(m.PrivateKey[:0], dAtA[iNdEx:postIndex]...)
-			if m.PrivateKey == nil {
-				m.PrivateKey = []byte{}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
 			}
 			iNdEx = postIndex
 		default:
@@ -537,19 +537,19 @@ func init() { proto.RegisterFile("crypto.proto", fileDescriptorCrypto) }
 func init() { golang_proto.RegisterFile("crypto.proto", fileDescriptorCrypto) }
 
 var fileDescriptorCrypto = []byte{
-	// 221 bytes of a gzipped FileDescriptorProto
+	// 224 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2e, 0xaa, 0x2c,
 	0x28, 0xc9, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0, 0xa4, 0x74, 0xd3, 0x33,
 	0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xd2,
-	0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xb4, 0x29, 0xc5, 0x70, 0x71, 0x06, 0x94,
+	0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xb4, 0x29, 0x05, 0x70, 0x71, 0x06, 0x94,
 	0x26, 0xe5, 0x64, 0x26, 0x7b, 0xa7, 0x56, 0x0a, 0x69, 0x73, 0x71, 0x3a, 0x97, 0x16, 0x95, 0xa5,
 	0x86, 0x54, 0x16, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x3a, 0xf1, 0xfe, 0xba, 0x27, 0x8f,
-	0x10, 0x0c, 0x42, 0x30, 0x85, 0x64, 0x90, 0x74, 0x4a, 0x30, 0x29, 0x30, 0x6a, 0xf0, 0x04, 0x21,
-	0x04, 0xac, 0x58, 0x66, 0x2c, 0x90, 0x67, 0x50, 0x6a, 0x64, 0xe4, 0xe2, 0x0a, 0x28, 0xca, 0x2c,
-	0x4b, 0x2c, 0x49, 0xa5, 0xae, 0xf9, 0x42, 0x72, 0xc8, 0x06, 0x4b, 0x30, 0x83, 0xa5, 0x91, 0x44,
-	0xac, 0x38, 0x3a, 0x16, 0xc8, 0x33, 0x80, 0xdc, 0xe0, 0x64, 0x75, 0xe2, 0x91, 0x1c, 0xe3, 0x85,
-	0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x1e, 0x78, 0x2c, 0xc7, 0x78, 0xe2, 0xb1, 0x1c, 0x63,
-	0x94, 0x0a, 0x52, 0x30, 0x65, 0x54, 0x16, 0xa4, 0x16, 0xe5, 0xa4, 0xa6, 0xa4, 0xa7, 0x16, 0xe9,
-	0x27, 0x95, 0x16, 0x15, 0xe5, 0x97, 0xeb, 0x43, 0x02, 0x33, 0x89, 0x0d, 0x1c, 0x48, 0xc6, 0x80,
-	0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x49, 0x55, 0xc8, 0x6b, 0x01, 0x00, 0x00,
+	0x10, 0x0c, 0x42, 0x30, 0x85, 0x04, 0xb8, 0x98, 0xbd, 0x53, 0x2b, 0x25, 0x98, 0x14, 0x18, 0x35,
+	0x78, 0x82, 0x40, 0x4c, 0x2b, 0x96, 0x19, 0x0b, 0xe4, 0x19, 0x94, 0x8a, 0xb9, 0xb8, 0x02, 0x8a,
+	0x32, 0xcb, 0x12, 0x4b, 0x52, 0x49, 0x36, 0x52, 0x06, 0xc9, 0x31, 0x50, 0x83, 0x91, 0x5c, 0x07,
+	0xb5, 0x90, 0x19, 0x61, 0x21, 0x47, 0xc7, 0x02, 0x79, 0x06, 0x90, 0xa5, 0x4e, 0x56, 0x27, 0x1e,
+	0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x81, 0xc7, 0x72, 0x8c, 0x27,
+	0x1e, 0xcb, 0x31, 0x46, 0xa9, 0x20, 0x85, 0x45, 0x46, 0x65, 0x41, 0x6a, 0x51, 0x4e, 0x6a, 0x4a,
+	0x7a, 0x6a, 0x91, 0x7e, 0x52, 0x69, 0x51, 0x51, 0x7e, 0xb9, 0x3e, 0x24, 0xc4, 0x92, 0xd8, 0xc0,
+	0x21, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x6f, 0x9c, 0xc7, 0x50, 0x01, 0x00, 0x00,
 }
diff --git a/crypto/private_key.go b/crypto/private_key.go
index 91252e09f1e1543921e108962db1291d7cd820af..fddafda0b0eea2ab8247c411e04d8852b05df3d0 100644
--- a/crypto/private_key.go
+++ b/crypto/private_key.go
@@ -29,28 +29,28 @@ func PublicKeyFromBytes(bs []byte, curveType CurveType) (PublicKey, error) {
 		return PublicKey{}, ErrInvalidCurve(curveType)
 	}
 
-	return PublicKey{PublicKey: bs, CurveType: curveType}, nil
+	return PublicKey{Key: bs, CurveType: curveType}, nil
 }
 
 func (p PrivateKey) RawBytes() []byte {
-	return p.PrivateKey
+	return p.Key
 }
 
 func (p PrivateKey) Sign(msg []byte) (Signature, error) {
 	switch p.CurveType {
 	case CurveTypeEd25519:
-		if len(p.PrivateKey) != ed25519.PrivateKeySize {
+		if len(p.Key) != ed25519.PrivateKeySize {
 			return nil, fmt.Errorf("bytes passed have length %v but ed25519 private keys have %v bytes",
-				len(p.PrivateKey), ed25519.PrivateKeySize)
+				len(p.Key), ed25519.PrivateKeySize)
 		}
-		privKey := ed25519.PrivateKey(p.PrivateKey)
+		privKey := ed25519.PrivateKey(p.Key)
 		return ed25519.Sign(privKey, msg), nil
 	case CurveTypeSecp256k1:
-		if len(p.PrivateKey) != btcec.PrivKeyBytesLen {
+		if len(p.Key) != btcec.PrivKeyBytesLen {
 			return nil, fmt.Errorf("bytes passed have length %v but secp256k1 private keys have %v bytes",
-				len(p.PrivateKey), btcec.PrivKeyBytesLen)
+				len(p.Key), btcec.PrivKeyBytesLen)
 		}
-		privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), p.PrivateKey)
+		privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), p.Key)
 
 		sig, err := privKey.Sign(msg)
 		if err != nil {
@@ -63,7 +63,7 @@ func (p PrivateKey) Sign(msg []byte) (Signature, error) {
 }
 
 func (p PrivateKey) GetPublicKey() PublicKey {
-	return PublicKey{CurveType: p.CurveType, PublicKey: p.PublicKey}
+	return PublicKey{CurveType: p.CurveType, Key: p.PublicKey}
 }
 
 // Reinitialise after serialisation
@@ -87,7 +87,7 @@ func PrivateKeyFromRawBytes(privKeyBytes []byte, curveType CurveType) (PrivateKe
 			return PrivateKey{}, fmt.Errorf("bytes passed have length %v but ed25519 private keys have %v bytes",
 				len(privKeyBytes), ed25519.PrivateKeySize)
 		}
-		return PrivateKey{PrivateKey: privKeyBytes, PublicKey: privKeyBytes[32:], CurveType: CurveTypeEd25519}, nil
+		return PrivateKey{Key: privKeyBytes, PublicKey: privKeyBytes[32:], CurveType: CurveTypeEd25519}, nil
 	case CurveTypeSecp256k1:
 		if len(privKeyBytes) != btcec.PrivKeyBytesLen {
 			return PrivateKey{}, fmt.Errorf("bytes passed have length %v but secp256k1 private keys have %v bytes",
@@ -97,7 +97,7 @@ func PrivateKeyFromRawBytes(privKeyBytes []byte, curveType CurveType) (PrivateKe
 		if !bytes.Equal(privKey.Serialize(), privKeyBytes) {
 			return PrivateKey{}, fmt.Errorf("serialisation of Secp256k1 private key bytes does not equal")
 		}
-		return PrivateKey{PrivateKey: privKeyBytes, PublicKey: pubKey.SerializeCompressed(), CurveType: CurveTypeSecp256k1}, nil
+		return PrivateKey{Key: privKeyBytes, PublicKey: pubKey.SerializeCompressed(), CurveType: CurveTypeSecp256k1}, nil
 	default:
 		return PrivateKey{}, ErrInvalidCurve(curveType)
 	}
diff --git a/crypto/public_key.go b/crypto/public_key.go
index f837b38d1a27a201b4c5affe1101b9dc0bf78b0b..27d5920295ec27fdfd0d69bb626d85d7351f54cc 100644
--- a/crypto/public_key.go
+++ b/crypto/public_key.go
@@ -6,8 +6,9 @@ import (
 	"fmt"
 
 	"github.com/btcsuite/btcd/btcec"
-	abci "github.com/tendermint/abci/types"
-	tmCrypto "github.com/tendermint/go-crypto"
+	abci "github.com/tendermint/tendermint/abci/types"
+	tmCrypto "github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/tmhash"
 	"github.com/tmthrgd/go-hex"
 	"golang.org/x/crypto/ed25519"
 	"golang.org/x/crypto/ripemd160"
@@ -34,7 +35,7 @@ func PublicKeyLength(curveType CurveType) int {
 func (p PublicKey) MarshalJSON() ([]byte, error) {
 	jStruct := PublicKeyJSON{
 		CurveType: p.CurveType.String(),
-		PublicKey: hex.EncodeUpperToString(p.PublicKey),
+		PublicKey: hex.EncodeUpperToString(p.Key),
 	}
 	txt, err := json.Marshal(jStruct)
 	return txt, err
@@ -59,7 +60,7 @@ func (p *PublicKey) UnmarshalJSON(text []byte) error {
 		return err
 	}
 	p.CurveType = CurveType
-	p.PublicKey = bs
+	p.Key = bs
 	return nil
 }
 
@@ -69,15 +70,15 @@ func (p *PublicKey) UnmarshalText(text []byte) error {
 
 func (p PublicKey) IsValid() bool {
 	publicKeyLength := PublicKeyLength(p.CurveType)
-	return publicKeyLength != 0 && publicKeyLength == len(p.PublicKey)
+	return publicKeyLength != 0 && publicKeyLength == len(p.Key)
 }
 
 func (p PublicKey) Verify(msg []byte, signature Signature) bool {
 	switch p.CurveType {
 	case CurveTypeEd25519:
-		return ed25519.Verify(p.PublicKey, msg, signature)
+		return ed25519.Verify(p.Key, msg, signature)
 	case CurveTypeSecp256k1:
-		pub, err := btcec.ParsePubKey(p.PublicKey, btcec.S256())
+		pub, err := btcec.ParsePubKey(p.Key, btcec.S256())
 		if err != nil {
 			return false
 		}
@@ -91,17 +92,18 @@ func (p PublicKey) Verify(msg []byte, signature Signature) bool {
 	}
 }
 
+func (p PublicKey) PublicKey() PublicKey {
+	return p
+}
+
 func (p PublicKey) Address() Address {
 	switch p.CurveType {
 	case CurveTypeEd25519:
-		// FIMXE: tendermint go-crypto-0.5.0 uses weird scheme, this is fixed in 0.6.0
-		tmPubKey := new(tmCrypto.PubKeyEd25519)
-		copy(tmPubKey[:], p.PublicKey)
-		addr, _ := AddressFromBytes(tmPubKey.Address())
+		addr, _ := AddressFromBytes(tmhash.Sum(p.Key))
 		return addr
 	case CurveTypeSecp256k1:
 		sha := sha256.New()
-		sha.Write(p.PublicKey[:])
+		sha.Write(p.Key[:])
 
 		hash := ripemd160.New()
 		hash.Write(sha.Sum(nil))
@@ -124,29 +126,46 @@ func (p PublicKey) AddressHashType() string {
 }
 
 func (p PublicKey) RawBytes() []byte {
-	return p.PublicKey[:]
+	return p.Key[:]
 }
 
 // Return the ABCI PubKey. See Tendermint protobuf.go for the go-crypto conversion this is based on
 func (p PublicKey) ABCIPubKey() abci.PubKey {
-	switch p.CurveType {
-	case CurveTypeEd25519:
-		return abci.PubKey{
-			Type: "ed25519",
-			Data: p.RawBytes(),
-		}
-	case CurveTypeSecp256k1:
-		return abci.PubKey{
-			Type: "secp256k1",
-			Data: p.RawBytes(),
-		}
+	return abci.PubKey{
+		Type: p.CurveType.ABCIType(),
+		Data: p.RawBytes(),
+	}
+}
+
+func PublicKeyFromTendermintPubKey(pubKey tmCrypto.PubKey) (PublicKey, error) {
+	switch pk := pubKey.(type) {
+	case tmCrypto.PubKeyEd25519:
+		return PublicKeyFromBytes(pk[:], CurveTypeEd25519)
+	case tmCrypto.PubKeySecp256k1:
+		return PublicKeyFromBytes(pk[:], CurveTypeSecp256k1)
 	default:
-		return abci.PubKey{}
+		return PublicKey{}, fmt.Errorf("unrecognised tendermint public key type: %v", pk)
 	}
+
+}
+func PublicKeyFromABCIPubKey(pubKey abci.PubKey) (PublicKey, error) {
+	switch pubKey.Type {
+	case CurveTypeEd25519.ABCIType():
+		return PublicKey{
+			CurveType: CurveTypeEd25519,
+			Key:       pubKey.Data,
+		}, nil
+	case CurveTypeSecp256k1.ABCIType():
+		return PublicKey{
+			CurveType: CurveTypeSecp256k1,
+			Key:       pubKey.Data,
+		}, nil
+	}
+	return PublicKey{}, fmt.Errorf("did not recognise ABCI PubKey type: %s", pubKey.Type)
 }
 
 func (p PublicKey) String() string {
-	return hex.EncodeUpperToString(p.PublicKey)
+	return hex.EncodeUpperToString(p.Key)
 }
 
 // Produces a binary encoding of the CurveType byte plus
@@ -154,29 +173,6 @@ func (p PublicKey) String() string {
 func (p PublicKey) Encode() []byte {
 	encoded := make([]byte, PublicKeyLength(p.CurveType)+1)
 	encoded[0] = p.CurveType.Byte()
-	copy(encoded[1:], p.PublicKey)
+	copy(encoded[1:], p.Key)
 	return encoded
 }
-
-// Decodes an encoded public key returning the number of bytes consumed
-func DecodePublicKeyFixedWidth(buf []byte, publicKey *PublicKey) (int, error) {
-	if len(buf) < 1 {
-		return 0, fmt.Errorf("encoded bytes buffer must not be empty")
-	}
-	curveType := CurveType(buf[0])
-	publicKeyEnd := PublicKeyLength(curveType) + 1
-	if publicKeyEnd <= 0 {
-		return 0, fmt.Errorf("CurveType with identifier %v is unknown", curveType.Byte())
-	}
-	if len(buf) < publicKeyEnd {
-		return 0, fmt.Errorf("encoded bytes buffer has length %v but public key encoding for %v needs %v bytes",
-			len(buf), curveType, publicKeyEnd)
-	}
-
-	publicKey.CurveType = curveType
-	publicKey.PublicKey = buf[1:publicKeyEnd]
-	if !publicKey.IsValid() {
-		return publicKeyEnd, fmt.Errorf("decoded public key %v is not valid", publicKey)
-	}
-	return publicKeyEnd, nil
-}
diff --git a/crypto/public_key_test.go b/crypto/public_key_test.go
index f6c211a35d2c64de9050ab75a689fb85881bdf33..9d49e884bbc36d5fd1b59cd487e06e944dbff02b 100644
--- a/crypto/public_key_test.go
+++ b/crypto/public_key_test.go
@@ -13,8 +13,8 @@ func TestPublicKeySerialisation(t *testing.T) {
 	priv := PrivateKeyFromSecret("foo", CurveTypeEd25519)
 	pub := priv.GetPublicKey()
 	expectedAddress := Address{
-		0x83, 0x20, 0x78, 0x17, 0xdc, 0x38, 0x14, 0xb9, 0x6f, 0x57,
-		0xef, 0xf9, 0x25, 0xf4, 0x67, 0xe0, 0x7c, 0xaa, 0x91, 0x38,
+		0x4, 0x5f, 0x56, 0x0, 0x65, 0x41, 0x82, 0xcf, 0xea, 0xcc,
+		0xfe, 0x6c, 0xb1, 0x9f, 0x6, 0x42, 0xe8, 0xa5, 0x98, 0x98,
 	}
 	assert.Equal(t, expectedAddress, pub.Address())
 	bs, err := proto.Marshal(&pub)
diff --git a/event/emitter.go b/event/emitter.go
index c4d1fd413aba16c811aa7be9fdb0fa37d269c627..8cc254af764032722e065ebd67ee882dd470b8df 100644
--- a/event/emitter.go
+++ b/event/emitter.go
@@ -23,7 +23,7 @@ import (
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
 	"github.com/hyperledger/burrow/process"
-	"github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/libs/common"
 	"github.com/tmthrgd/go-hex"
 )
 
diff --git a/event/pubsub/pubsub.go b/event/pubsub/pubsub.go
index 8d710773036e6cc828b870c76c83e9be5b69a5a6..41904fed71116e7f627d7f51050bb9ce6157c3d0 100644
--- a/event/pubsub/pubsub.go
+++ b/event/pubsub/pubsub.go
@@ -19,7 +19,7 @@ import (
 	"sync"
 
 	"github.com/hyperledger/burrow/event/query"
-	"github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/libs/common"
 )
 
 type operation int
diff --git a/execution/executors/call_context.go b/execution/contexts/call_context.go
similarity index 98%
rename from execution/executors/call_context.go
rename to execution/contexts/call_context.go
index 0f343819c67bdc7e4167fcdb5a6c0a936f0b8472..fcf7e146c0dd5269311ef64497d3b9c4b038633f 100644
--- a/execution/executors/call_context.go
+++ b/execution/contexts/call_context.go
@@ -1,12 +1,12 @@
-package executors
+package contexts
 
 import (
 	"fmt"
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/binary"
-	"github.com/hyperledger/burrow/blockchain"
 	"github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/execution/evm"
 	"github.com/hyperledger/burrow/execution/exec"
@@ -19,7 +19,7 @@ import (
 const GasLimit = uint64(1000000)
 
 type CallContext struct {
-	Tip         blockchain.TipInfo
+	Tip         bcm.BlockchainInfo
 	StateWriter state.ReaderWriter
 	RunCall     bool
 	VMOptions   []func(*evm.VM)
@@ -101,7 +101,7 @@ func (ctx *CallContext) Precheck() (*acm.MutableAccount, acm.Account, error) {
 			return nil, nil, fmt.Errorf("attempt to call a native contract at %s, "+
 				"but native contracts cannot be called using CallTx. Use a "+
 				"contract that calls the native contract or the appropriate tx "+
-				"type (eg. PermissionsTx, NameTx)", ctx.tx.Address)
+				"type (eg. PermsTx, NameTx)", ctx.tx.Address)
 		}
 
 		// Output account may be nil if we are still in mempool and contract was created in same block as this tx
diff --git a/execution/contexts/governance_context.go b/execution/contexts/governance_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..39457b827289b5677a424636bc6d567875294464
--- /dev/null
+++ b/execution/contexts/governance_context.go
@@ -0,0 +1,128 @@
+package contexts
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/acm/validator"
+	"github.com/hyperledger/burrow/execution/errors"
+	"github.com/hyperledger/burrow/execution/exec"
+	"github.com/hyperledger/burrow/genesis/spec"
+	"github.com/hyperledger/burrow/logging"
+	"github.com/hyperledger/burrow/permission"
+	"github.com/hyperledger/burrow/txs/payload"
+)
+
+type GovernanceContext struct {
+	StateWriter  state.ReaderWriter
+	ValidatorSet validator.Writer
+	Logger       *logging.Logger
+	tx           *payload.GovTx
+	txe          *exec.TxExecution
+}
+
+// GovTx provides a set of TemplateAccounts and GovernanceContext tries to alter the chain state to match the
+// specification given
+func (ctx *GovernanceContext) Execute(txe *exec.TxExecution) error {
+	var ok bool
+	ctx.txe = txe
+	ctx.tx, ok = txe.Envelope.Tx.Payload.(*payload.GovTx)
+	if !ok {
+		return fmt.Errorf("payload must be GovTx, but is: %v", txe.Envelope.Tx.Payload)
+	}
+	accounts, err := getInputs(ctx.StateWriter, ctx.tx.Inputs)
+	if err != nil {
+		return err
+	}
+
+	// ensure all inputs have root permissions
+	err = allHavePermission(ctx.StateWriter, permission.Root, accounts, ctx.Logger)
+	if err != nil {
+		return errors.Wrap(err, "at least one input lacks permission for GovTx")
+	}
+
+	for _, i := range ctx.tx.Inputs {
+		txe.Input(i.Address, nil)
+	}
+
+	for _, update := range ctx.tx.AccountUpdates {
+		if update.Address == nil && update.PublicKey == nil {
+			// We do not want to generate a key
+			return fmt.Errorf("could not execute GovTx since account template %v contains neither "+
+				"address nor public key", update)
+		}
+		if update.PublicKey != nil {
+			address := update.PublicKey.Address()
+			if update.Address != nil && address != *update.Address {
+				return fmt.Errorf("supplied public key %v whose address %v does not match %v provided by "+
+					"GovTx", update.PublicKey, address, update.Address)
+			}
+			update.Address = &address
+		}
+		if update.PublicKey == nil && update.Balances().HasPower() {
+			// If we are updating power we will need the key
+			return fmt.Errorf("GovTx must be provided with public key when updating validator power")
+		}
+		account, err := getOrMakeOutput(ctx.StateWriter, accounts, *update.Address, ctx.Logger)
+		if err != nil {
+			return err
+		}
+		governAccountEvent, err := ctx.updateAccount(account, update)
+		if err != nil {
+			txe.GovernAccount(governAccountEvent, errors.AsException(err))
+			return err
+		}
+		txe.GovernAccount(governAccountEvent, nil)
+	}
+	return nil
+}
+
+func (ctx *GovernanceContext) updateAccount(account *acm.MutableAccount, update *spec.TemplateAccount) (ev *exec.GovernAccountEvent, err error) {
+	ev = &exec.GovernAccountEvent{
+		AccountUpdate: update,
+	}
+	if update.Balances().HasNative() {
+		err = account.SetBalance(update.Balances().GetNative(0))
+		if err != nil {
+			return
+		}
+	}
+	if update.NodeAddress != nil {
+		// TODO: can we do something useful if provided with a NodeAddress for an account about to become a validator
+		// like add it to persistent peers or pre gossip so it gets inbound connections? If so under which circumstances?
+	}
+	if update.Balances().HasPower() {
+		if update.PublicKey == nil {
+			err = fmt.Errorf("updateAccount should have PublicKey by this point but appears not to for "+
+				"template account: %v", update)
+			return
+		}
+		power := new(big.Int).SetUint64(update.Balances().GetPower(0))
+		if !power.IsInt64() {
+			return ev, fmt.Errorf("power supplied in update to validator power for %v does not fit into int64 and "+
+				"so is not supported by Tendermint", update.Address)
+		}
+		_, err := ctx.ValidatorSet.AlterPower(*update.PublicKey, power)
+		if err != nil {
+			return ev, err
+		}
+	}
+	perms := account.Permissions()
+	if len(update.Permissions) > 0 {
+		perms.Base, err = permission.BasePermissionsFromStringList(update.Permissions)
+		if err != nil {
+			return
+		}
+	}
+	if len(update.Roles) > 0 {
+		perms.Roles = update.Roles
+	}
+	err = account.SetPermissions(perms)
+	if err != nil {
+		return
+	}
+	err = ctx.StateWriter.UpdateAccount(account)
+	return
+}
diff --git a/execution/executors/name_context.go b/execution/contexts/name_context.go
similarity index 98%
rename from execution/executors/name_context.go
rename to execution/contexts/name_context.go
index 3be220f9f4d5d23ce0eab129b324495c77a6568e..10c37a15bf50e408e73039e547d430eba203a5ac 100644
--- a/execution/executors/name_context.go
+++ b/execution/contexts/name_context.go
@@ -1,4 +1,4 @@
-package executors
+package contexts
 
 import (
 	"fmt"
@@ -6,7 +6,7 @@ import (
 	"regexp"
 
 	"github.com/hyperledger/burrow/acm/state"
-	"github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/execution/exec"
 	"github.com/hyperledger/burrow/execution/names"
@@ -21,7 +21,7 @@ var regexpAlphaNum = regexp.MustCompile("^[a-zA-Z0-9._/-@]*$")
 var regexpJSON = regexp.MustCompile(`^[a-zA-Z0-9_/ \-+"':,\n\t.{}()\[\]]*$`)
 
 type NameContext struct {
-	Tip         blockchain.TipInfo
+	Tip         bcm.BlockchainInfo
 	StateWriter state.ReaderWriter
 	NameReg     names.ReaderWriter
 	Logger      *logging.Logger
diff --git a/execution/executors/permissions_context.go b/execution/contexts/permissions_context.go
similarity index 90%
rename from execution/executors/permissions_context.go
rename to execution/contexts/permissions_context.go
index 11730e1376886a0dfebaf22b39bd00adc45e8491..00cbd1a5589823ba72a7d59f55e361a59238c197 100644
--- a/execution/executors/permissions_context.go
+++ b/execution/contexts/permissions_context.go
@@ -1,11 +1,11 @@
-package executors
+package contexts
 
 import (
 	"fmt"
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
-	"github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/execution/exec"
@@ -16,17 +16,17 @@ import (
 )
 
 type PermissionsContext struct {
-	Tip         blockchain.TipInfo
+	Tip         bcm.BlockchainInfo
 	StateWriter state.ReaderWriter
 	Logger      *logging.Logger
-	tx          *payload.PermissionsTx
+	tx          *payload.PermsTx
 }
 
 func (ctx *PermissionsContext) Execute(txe *exec.TxExecution) error {
 	var ok bool
-	ctx.tx, ok = txe.Envelope.Tx.Payload.(*payload.PermissionsTx)
+	ctx.tx, ok = txe.Envelope.Tx.Payload.(*payload.PermsTx)
 	if !ok {
-		return fmt.Errorf("payload must be PermissionsTx, but is: %v", txe.Envelope.Tx.Payload)
+		return fmt.Errorf("payload must be PermsTx, but is: %v", txe.Envelope.Tx.Payload)
 	}
 	// Validate input
 	inAcc, err := state.GetMutableAccount(ctx.StateWriter, ctx.tx.Input.Address)
@@ -41,7 +41,7 @@ func (ctx *PermissionsContext) Execute(txe *exec.TxExecution) error {
 
 	err = ctx.tx.PermArgs.EnsureValid()
 	if err != nil {
-		return fmt.Errorf("PermissionsTx received containing invalid PermArgs: %v", err)
+		return fmt.Errorf("PermsTx received containing invalid PermArgs: %v", err)
 	}
 
 	permFlag := ctx.tx.PermArgs.PermFlag
@@ -61,7 +61,7 @@ func (ctx *PermissionsContext) Execute(txe *exec.TxExecution) error {
 
 	value := ctx.tx.Input.Amount
 
-	ctx.Logger.TraceMsg("New PermissionsTx",
+	ctx.Logger.TraceMsg("New PermsTx",
 		"perm_args", ctx.tx.PermArgs.String())
 
 	var permAcc acm.Account
@@ -114,7 +114,7 @@ func (ctx *PermissionsContext) Execute(txe *exec.TxExecution) error {
 	}
 
 	// Good!
-	ctx.Logger.TraceMsg("Incrementing sequence number for PermissionsTx",
+	ctx.Logger.TraceMsg("Incrementing sequence number for PermsTx",
 		"tag", "sequence",
 		"account", inAcc.Address(),
 		"old_sequence", inAcc.Sequence(),
diff --git a/execution/executors/send_context.go b/execution/contexts/send_context.go
similarity index 85%
rename from execution/executors/send_context.go
rename to execution/contexts/send_context.go
index 813a433bf6b093550331ab101a14c0a8b0f04db9..a50c0b8839c6733f7b5705121a43106b7faccd3b 100644
--- a/execution/executors/send_context.go
+++ b/execution/contexts/send_context.go
@@ -1,18 +1,19 @@
-package executors
+package contexts
 
 import (
 	"fmt"
 
 	"github.com/hyperledger/burrow/acm/state"
-	"github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/execution/exec"
 	"github.com/hyperledger/burrow/logging"
+	"github.com/hyperledger/burrow/permission"
 	"github.com/hyperledger/burrow/txs/payload"
 )
 
 type SendContext struct {
-	Tip         blockchain.TipInfo
+	Tip         bcm.BlockchainInfo
 	StateWriter state.ReaderWriter
 	Logger      *logging.Logger
 	tx          *payload.SendTx
@@ -30,8 +31,9 @@ func (ctx *SendContext) Execute(txe *exec.TxExecution) error {
 	}
 
 	// ensure all inputs have send permissions
-	if !hasSendPermission(ctx.StateWriter, accounts, ctx.Logger) {
-		return fmt.Errorf("at least one input lacks permission for SendTx")
+	err = allHavePermission(ctx.StateWriter, permission.Send, accounts, ctx.Logger)
+	if err != nil {
+		return errors.Wrap(err, "at least one input lacks permission for SendTx")
 	}
 
 	// add outputs to accounts map
diff --git a/execution/executors/shared.go b/execution/contexts/shared.go
similarity index 82%
rename from execution/executors/shared.go
rename to execution/contexts/shared.go
index 33ad90a0de8bfe602cdf8d60c5d169c4d958da80..23f645f6b782d4319955237379b088c672538359 100644
--- a/execution/executors/shared.go
+++ b/execution/contexts/shared.go
@@ -1,4 +1,4 @@
-package executors
+package contexts
 
 import (
 	"fmt"
@@ -43,38 +43,45 @@ func getOrMakeOutputs(accountGetter state.AccountGetter, accs map[crypto.Address
 	if accs == nil {
 		accs = make(map[crypto.Address]*acm.MutableAccount)
 	}
-
 	// we should err if an account is being created but the inputs don't have permission
-	var checkedCreatePerms bool
+	var err error
 	for _, out := range outs {
-		// Account shouldn't be duplicated
-		if _, ok := accs[out.Address]; ok {
-			return nil, errors.ErrorCodeDuplicateAddress
-		}
-		acc, err := state.GetMutableAccount(accountGetter, out.Address)
+		accs[out.Address], err = getOrMakeOutput(accountGetter, accs, out.Address, logger)
 		if err != nil {
 			return nil, err
 		}
-		// output account may be nil (new)
-		if acc == nil {
-			if !checkedCreatePerms {
-				if !hasCreateAccountPermission(accountGetter, accs, logger) {
-					return nil, fmt.Errorf("at least one input does not have permission to create accounts")
-				}
-				checkedCreatePerms = true
-			}
-			acc = acm.ConcreteAccount{
-				Address:     out.Address,
-				Sequence:    0,
-				Balance:     0,
-				Permissions: permission.ZeroAccountPermissions,
-			}.MutableAccount()
-		}
-		accs[out.Address] = acc
 	}
 	return accs, nil
 }
 
+func getOrMakeOutput(accountGetter state.AccountGetter, accs map[crypto.Address]*acm.MutableAccount,
+	outputAddress crypto.Address, logger *logging.Logger) (*acm.MutableAccount, error) {
+
+	// Account shouldn't be duplicated
+	if _, ok := accs[outputAddress]; ok {
+		return nil, errors.ErrorCodeDuplicateAddress
+	}
+	acc, err := state.GetMutableAccount(accountGetter, outputAddress)
+	if err != nil {
+		return nil, err
+	}
+	// output account may be nil (new)
+	if acc == nil {
+		if !hasCreateAccountPermission(accountGetter, accs, logger) {
+			return nil, fmt.Errorf("at least one input does not have permission to create accounts")
+		}
+		logger.InfoMsg("Account not found so attempting to create it", "address", outputAddress)
+		acc = acm.ConcreteAccount{
+			Address:     outputAddress,
+			Sequence:    0,
+			Balance:     0,
+			Permissions: permission.ZeroAccountPermissions,
+		}.MutableAccount()
+	}
+
+	return acc, nil
+}
+
 func validateInputs(accs map[crypto.Address]*acm.MutableAccount, ins []*payload.TxInput) (uint64, error) {
 	total := uint64(0)
 	for _, in := range ins {
@@ -188,15 +195,17 @@ func HasPermission(accountGetter state.AccountGetter, acc acm.Account, perm perm
 	return v
 }
 
-// TODO: for debug log the failed accounts
-func hasSendPermission(accountGetter state.AccountGetter, accs map[crypto.Address]*acm.MutableAccount,
-	logger *logging.Logger) bool {
+func allHavePermission(accountGetter state.AccountGetter, perm permission.PermFlag,
+	accs map[crypto.Address]*acm.MutableAccount, logger *logging.Logger) error {
 	for _, acc := range accs {
-		if !HasPermission(accountGetter, acc, permission.Send, logger) {
-			return false
+		if !HasPermission(accountGetter, acc, perm, logger) {
+			return errors.PermissionDenied{
+				Address: acc.Address(),
+				Perm:    perm,
+			}
 		}
 	}
-	return true
+	return nil
 }
 
 func hasNamePermission(accountGetter state.AccountGetter, acc acm.Account,
diff --git a/execution/errors/errors.go b/execution/errors/errors.go
index b3bc554166d19ec2900db0c186d47069a8d06edc..00e49c47bcb3074ce038ed7ae3dc85b1322679fe 100644
--- a/execution/errors/errors.go
+++ b/execution/errors/errors.go
@@ -44,6 +44,10 @@ func (c Code) ErrorCode() Code {
 	return c
 }
 
+func (c Code) Uint32() uint32 {
+	return uint32(c)
+}
+
 func (c Code) Error() string {
 	return fmt.Sprintf("Error %d: %s", c, c.String())
 }
@@ -76,6 +80,8 @@ func (c Code) String() string {
 		return "Data stack underflow"
 	case ErrorCodeInvalidContract:
 		return "Invalid contract"
+	case ErrorCodePermissionDenied:
+		return "Permission denied"
 	case ErrorCodeNativeContractCodeCopy:
 		return "Tried to copy native contract code"
 	case ErrorCodeExecutionAborted:
@@ -134,8 +140,9 @@ func AsException(err error) *Exception {
 	}
 }
 
-func Wrap(err CodedError, message string) *Exception {
-	return NewCodedError(err.ErrorCode(), message+": "+err.Error())
+func Wrap(err error, message string) *Exception {
+	ex := AsException(err)
+	return NewCodedError(ex.ErrorCode(), message+": "+ex.Error())
 }
 
 func Errorf(format string, a ...interface{}) *Exception {
diff --git a/execution/errors/vm.go b/execution/errors/vm.go
index 7e97d37504205e5d509f7f4262932f3a48c98046..95b36ccaaab66399927942423884cfd1c7e38a0e 100644
--- a/execution/errors/vm.go
+++ b/execution/errors/vm.go
@@ -9,7 +9,8 @@ import (
 )
 
 type PermissionDenied struct {
-	Perm permission.PermFlag
+	Address crypto.Address
+	Perm    permission.PermFlag
 }
 
 func (err PermissionDenied) ErrorCode() Code {
@@ -17,7 +18,7 @@ func (err PermissionDenied) ErrorCode() Code {
 }
 
 func (err PermissionDenied) Error() string {
-	return fmt.Sprintf("Contract does not have permission to %v", err.Perm)
+	return fmt.Sprintf("Account/contract %v does not have permission %v", err.Address, err.Perm)
 }
 
 type NestedCall struct {
diff --git a/execution/evm/snative.go b/execution/evm/snative.go
index 06254915bdccdf5a4ee94fa7ff814cf24ae487ce..d10b851dfa4748bd893047d986f8bbf298902d3a 100644
--- a/execution/evm/snative.go
+++ b/execution/evm/snative.go
@@ -356,7 +356,7 @@ func hasBase(state state.ReaderWriter, caller acm.Account, args []byte, gas *uin
 		return nil, fmt.Errorf("unknown account %s", address)
 	}
 	permN := permission.PermFlag(Uint64FromWord256(permNum)) // already shifted
-	if !ValidPermN(permN) {
+	if !permN.IsValid() {
 		return nil, permission.ErrInvalidPermission(permN)
 	}
 	hasPermission := HasPermission(state, acc, permN)
@@ -382,7 +382,7 @@ func setBase(stateWriter state.ReaderWriter, caller acm.Account, args []byte, ga
 		return nil, fmt.Errorf("unknown account %s", address)
 	}
 	permN := permission.PermFlag(Uint64FromWord256(permNum))
-	if !ValidPermN(permN) {
+	if !permN.IsValid() {
 		return nil, permission.ErrInvalidPermission(permN)
 	}
 	permV := !permVal.IsZero()
@@ -409,7 +409,7 @@ func unsetBase(stateWriter state.ReaderWriter, caller acm.Account, args []byte,
 		return nil, fmt.Errorf("unknown account %s", address)
 	}
 	permN := permission.PermFlag(Uint64FromWord256(permNum))
-	if !ValidPermN(permN) {
+	if !permN.IsValid() {
 		return nil, permission.ErrInvalidPermission(permN)
 	}
 	if err = acc.MutablePermissions().Base.Unset(permN); err != nil {
@@ -435,7 +435,7 @@ func setGlobal(stateWriter state.ReaderWriter, caller acm.Account, args []byte,
 		panic("cant find the global permissions account")
 	}
 	permN := permission.PermFlag(Uint64FromWord256(permNum))
-	if !ValidPermN(permN) {
+	if !permN.IsValid() {
 		return nil, permission.ErrInvalidPermission(permN)
 	}
 	permV := !permVal.IsZero()
@@ -517,11 +517,6 @@ func removeRole(stateWriter state.ReaderWriter, caller acm.Account, args []byte,
 //------------------------------------------------------------------------------------------------
 // Errors and utility funcs
 
-// Checks if a permission flag is valid (a known base chain or snative permission)
-func ValidPermN(n permission.PermFlag) bool {
-	return n <= permission.AllPermFlags
-}
-
 // Get the global BasePermissions
 func globalPerms(stateWriter state.ReaderWriter) permission.BasePermissions {
 	return state.GlobalAccountPermissions(stateWriter).Base
diff --git a/execution/evm/vm.go b/execution/evm/vm.go
index 53d9b1ebc68667bd5955e2af2aa49dea279dae64..0182a3f5ea62b5afe37549526e9685b4ac9a8ba4 100644
--- a/execution/evm/vm.go
+++ b/execution/evm/vm.go
@@ -855,7 +855,10 @@ func (vm *VM) call(callState *state.Cache, caller acm.Account, callee *acm.Mutab
 			vm.returnData = nil
 
 			if !HasPermission(callState, callee, permission.CreateContract) {
-				return nil, errors.PermissionDenied{Perm: permission.CreateContract}
+				return nil, errors.PermissionDenied{
+					Address: callee.Address(),
+					Perm:    permission.CreateContract,
+				}
 			}
 			contractValue, popErr := stack.PopU64()
 			if popErr != nil {
@@ -902,7 +905,10 @@ func (vm *VM) call(callState *state.Cache, caller acm.Account, callee *acm.Mutab
 			vm.returnData = nil
 
 			if !HasPermission(callState, callee, permission.Call) {
-				return nil, errors.PermissionDenied{Perm: permission.Call}
+				return nil, errors.PermissionDenied{
+					Address: callee.Address(),
+					Perm:    permission.Call,
+				}
 			}
 			gasLimit, popErr := stack.PopU64()
 			if popErr != nil {
@@ -982,7 +988,10 @@ func (vm *VM) call(callState *state.Cache, caller acm.Account, callee *acm.Mutab
 					// nil account means we're sending funds to a new account
 					if acc == nil {
 						if !HasPermission(callState, caller, permission.CreateAccount) {
-							return nil, errors.PermissionDenied{Perm: permission.CreateAccount}
+							return nil, errors.PermissionDenied{
+								Address: callee.Address(),
+								Perm:    permission.CreateAccount,
+							}
 						}
 						acc = acm.ConcreteAccount{Address: crypto.AddressFromWord256(addr)}.MutableAccount()
 					}
@@ -1077,7 +1086,10 @@ func (vm *VM) call(callState *state.Cache, caller acm.Account, callee *acm.Mutab
 					return nil, firstErr(err, gasErr)
 				}
 				if !HasPermission(callState, callee, permission.CreateContract) {
-					return nil, firstErr(err, errors.PermissionDenied{Perm: permission.CreateContract})
+					return nil, firstErr(err, errors.PermissionDenied{
+						Address: callee.Address(),
+						Perm:    permission.CreateContract,
+					})
 				}
 				var createErr errors.CodedError
 				receiver, createErr = vm.createAccount(callState, callee, logger)
diff --git a/execution/exec/block_execution.go b/execution/exec/block_execution.go
index 3fc8a905de6d74e5177928981e1ddeb89d6f4b2a..15b35d8b016181f51465495641d1be25ed0078be 100644
--- a/execution/exec/block_execution.go
+++ b/execution/exec/block_execution.go
@@ -9,7 +9,7 @@ import (
 	"github.com/hyperledger/burrow/event"
 	"github.com/hyperledger/burrow/event/query"
 	"github.com/hyperledger/burrow/txs"
-	abciTypes "github.com/tendermint/abci/types"
+	abciTypes "github.com/tendermint/tendermint/abci/types"
 )
 
 func EventStringBlockExecution(height uint64) string { return fmt.Sprintf("Execution/Block/%v", height) }
diff --git a/execution/exec/event.go b/execution/exec/event.go
index b629a265ce93a9cee8f22ce9037bfe9cdb30260c..eb320f07a94889b283e33e225b749bbc251ccaf2 100644
--- a/execution/exec/event.go
+++ b/execution/exec/event.go
@@ -21,6 +21,7 @@ const (
 	TypeAccountOutput  = EventType(0x03)
 	TypeTxExecution    = EventType(0x04)
 	TypeBlockExecution = EventType(0x05)
+	TypeGovernAccount  = EventType(0x06)
 )
 
 var nameFromType = map[EventType]string{
@@ -30,6 +31,7 @@ var nameFromType = map[EventType]string{
 	TypeAccountOutput:  "AccountOutputEvent",
 	TypeTxExecution:    "TxExecutionEvent",
 	TypeBlockExecution: "BlockExecutionEvent",
+	TypeGovernAccount:  "GovernAccountEvent",
 }
 
 var typeFromName = make(map[string]EventType)
diff --git a/execution/exec/exec.pb.go b/execution/exec/exec.pb.go
index 6c7cfe26c6fb967a2b217cc76ed5d08fcf0d45e0..df1d5e80537e109d07ea0c0c907407967b3b07d0 100644
--- a/execution/exec/exec.pb.go
+++ b/execution/exec/exec.pb.go
@@ -16,6 +16,7 @@
 		Result
 		LogEvent
 		CallEvent
+		GovernAccountEvent
 		InputEvent
 		OutputEvent
 		CallData
@@ -27,11 +28,11 @@ import golang_proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/tendermint/abci/types"
 import errors "github.com/hyperledger/burrow/execution/errors"
 import names "github.com/hyperledger/burrow/execution/names"
 import txs "github.com/hyperledger/burrow/txs"
 import permission "github.com/hyperledger/burrow/permission"
+import spec "github.com/hyperledger/burrow/genesis/spec"
 
 import github_com_hyperledger_burrow_txs_payload "github.com/hyperledger/burrow/txs/payload"
 import github_com_hyperledger_burrow_binary "github.com/hyperledger/burrow/binary"
@@ -266,11 +267,12 @@ func (*Header) XXX_MessageName() string {
 }
 
 type Event struct {
-	Header *Header      `protobuf:"bytes,1,opt,name=Header" json:"Header,omitempty"`
-	Input  *InputEvent  `protobuf:"bytes,2,opt,name=Input" json:"Input,omitempty"`
-	Output *OutputEvent `protobuf:"bytes,3,opt,name=Output" json:"Output,omitempty"`
-	Call   *CallEvent   `protobuf:"bytes,4,opt,name=Call" json:"Call,omitempty"`
-	Log    *LogEvent    `protobuf:"bytes,5,opt,name=Log" json:"Log,omitempty"`
+	Header        *Header             `protobuf:"bytes,1,opt,name=Header" json:"Header,omitempty"`
+	Input         *InputEvent         `protobuf:"bytes,2,opt,name=Input" json:"Input,omitempty"`
+	Output        *OutputEvent        `protobuf:"bytes,3,opt,name=Output" json:"Output,omitempty"`
+	Call          *CallEvent          `protobuf:"bytes,4,opt,name=Call" json:"Call,omitempty"`
+	Log           *LogEvent           `protobuf:"bytes,5,opt,name=Log" json:"Log,omitempty"`
+	GovernAccount *GovernAccountEvent `protobuf:"bytes,6,opt,name=GovernAccount" json:"GovernAccount,omitempty"`
 }
 
 func (m *Event) Reset()                    { *m = Event{} }
@@ -312,6 +314,13 @@ func (m *Event) GetLog() *LogEvent {
 	return nil
 }
 
+func (m *Event) GetGovernAccount() *GovernAccountEvent {
+	if m != nil {
+		return m.GovernAccount
+	}
+	return nil
+}
+
 func (*Event) XXX_MessageName() string {
 	return "exec.Event"
 }
@@ -410,6 +419,26 @@ func (*CallEvent) XXX_MessageName() string {
 	return "exec.CallEvent"
 }
 
+type GovernAccountEvent struct {
+	AccountUpdate *spec.TemplateAccount `protobuf:"bytes,1,opt,name=AccountUpdate" json:"AccountUpdate,omitempty"`
+}
+
+func (m *GovernAccountEvent) Reset()                    { *m = GovernAccountEvent{} }
+func (m *GovernAccountEvent) String() string            { return proto.CompactTextString(m) }
+func (*GovernAccountEvent) ProtoMessage()               {}
+func (*GovernAccountEvent) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{8} }
+
+func (m *GovernAccountEvent) GetAccountUpdate() *spec.TemplateAccount {
+	if m != nil {
+		return m.AccountUpdate
+	}
+	return nil
+}
+
+func (*GovernAccountEvent) XXX_MessageName() string {
+	return "exec.GovernAccountEvent"
+}
+
 type InputEvent struct {
 	Address github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,1,opt,name=Address,proto3,customtype=github.com/hyperledger/burrow/crypto.Address" json:"Address"`
 }
@@ -417,7 +446,7 @@ type InputEvent struct {
 func (m *InputEvent) Reset()                    { *m = InputEvent{} }
 func (m *InputEvent) String() string            { return proto.CompactTextString(m) }
 func (*InputEvent) ProtoMessage()               {}
-func (*InputEvent) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{8} }
+func (*InputEvent) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{9} }
 
 func (*InputEvent) XXX_MessageName() string {
 	return "exec.InputEvent"
@@ -430,7 +459,7 @@ type OutputEvent struct {
 func (m *OutputEvent) Reset()                    { *m = OutputEvent{} }
 func (m *OutputEvent) String() string            { return proto.CompactTextString(m) }
 func (*OutputEvent) ProtoMessage()               {}
-func (*OutputEvent) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{9} }
+func (*OutputEvent) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{10} }
 
 func (*OutputEvent) XXX_MessageName() string {
 	return "exec.OutputEvent"
@@ -447,7 +476,7 @@ type CallData struct {
 func (m *CallData) Reset()                    { *m = CallData{} }
 func (m *CallData) String() string            { return proto.CompactTextString(m) }
 func (*CallData) ProtoMessage()               {}
-func (*CallData) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{10} }
+func (*CallData) Descriptor() ([]byte, []int) { return fileDescriptorExec, []int{11} }
 
 func (m *CallData) GetValue() uint64 {
 	if m != nil {
@@ -483,6 +512,8 @@ func init() {
 	golang_proto.RegisterType((*LogEvent)(nil), "exec.LogEvent")
 	proto.RegisterType((*CallEvent)(nil), "exec.CallEvent")
 	golang_proto.RegisterType((*CallEvent)(nil), "exec.CallEvent")
+	proto.RegisterType((*GovernAccountEvent)(nil), "exec.GovernAccountEvent")
+	golang_proto.RegisterType((*GovernAccountEvent)(nil), "exec.GovernAccountEvent")
 	proto.RegisterType((*InputEvent)(nil), "exec.InputEvent")
 	golang_proto.RegisterType((*InputEvent)(nil), "exec.InputEvent")
 	proto.RegisterType((*OutputEvent)(nil), "exec.OutputEvent")
@@ -784,6 +815,16 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n13
 	}
+	if m.GovernAccount != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintExec(dAtA, i, uint64(m.GovernAccount.Size()))
+		n14, err := m.GovernAccount.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n14
+	}
 	return i, nil
 }
 
@@ -817,21 +858,21 @@ func (m *Result) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x1a
 		i++
 		i = encodeVarintExec(dAtA, i, uint64(m.NameEntry.Size()))
-		n14, err := m.NameEntry.MarshalTo(dAtA[i:])
+		n15, err := m.NameEntry.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n14
+		i += n15
 	}
 	if m.PermArgs != nil {
 		dAtA[i] = 0x22
 		i++
 		i = encodeVarintExec(dAtA, i, uint64(m.PermArgs.Size()))
-		n15, err := m.PermArgs.MarshalTo(dAtA[i:])
+		n16, err := m.PermArgs.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n15
+		i += n16
 	}
 	return i, nil
 }
@@ -854,19 +895,19 @@ func (m *LogEvent) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Address.Size()))
-	n16, err := m.Address.MarshalTo(dAtA[i:])
+	n17, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n16
+	i += n17
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Data.Size()))
-	n17, err := m.Data.MarshalTo(dAtA[i:])
+	n18, err := m.Data.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n17
+	i += n18
 	if len(m.Topics) > 0 {
 		for _, msg := range m.Topics {
 			dAtA[i] = 0x1a
@@ -901,20 +942,20 @@ func (m *CallEvent) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintExec(dAtA, i, uint64(m.CallData.Size()))
-		n18, err := m.CallData.MarshalTo(dAtA[i:])
+		n19, err := m.CallData.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n18
+		i += n19
 	}
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Origin.Size()))
-	n19, err := m.Origin.MarshalTo(dAtA[i:])
+	n20, err := m.Origin.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n19
+	i += n20
 	if m.StackDepth != 0 {
 		dAtA[i] = 0x18
 		i++
@@ -923,11 +964,39 @@ func (m *CallEvent) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0x22
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Return.Size()))
-	n20, err := m.Return.MarshalTo(dAtA[i:])
+	n21, err := m.Return.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n20
+	i += n21
+	return i, nil
+}
+
+func (m *GovernAccountEvent) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GovernAccountEvent) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.AccountUpdate != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintExec(dAtA, i, uint64(m.AccountUpdate.Size()))
+		n22, err := m.AccountUpdate.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n22
+	}
 	return i, nil
 }
 
@@ -949,11 +1018,11 @@ func (m *InputEvent) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Address.Size()))
-	n21, err := m.Address.MarshalTo(dAtA[i:])
+	n23, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n21
+	i += n23
 	return i, nil
 }
 
@@ -975,11 +1044,11 @@ func (m *OutputEvent) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Address.Size()))
-	n22, err := m.Address.MarshalTo(dAtA[i:])
+	n24, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n22
+	i += n24
 	return i, nil
 }
 
@@ -1001,27 +1070,27 @@ func (m *CallData) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Caller.Size()))
-	n23, err := m.Caller.MarshalTo(dAtA[i:])
+	n25, err := m.Caller.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n23
+	i += n25
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Callee.Size()))
-	n24, err := m.Callee.MarshalTo(dAtA[i:])
+	n26, err := m.Callee.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n24
+	i += n26
 	dAtA[i] = 0x1a
 	i++
 	i = encodeVarintExec(dAtA, i, uint64(m.Data.Size()))
-	n25, err := m.Data.MarshalTo(dAtA[i:])
+	n27, err := m.Data.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n25
+	i += n27
 	if m.Value != 0 {
 		dAtA[i] = 0x20
 		i++
@@ -1166,6 +1235,10 @@ func (m *Event) Size() (n int) {
 		l = m.Log.Size()
 		n += 1 + l + sovExec(uint64(l))
 	}
+	if m.GovernAccount != nil {
+		l = m.GovernAccount.Size()
+		n += 1 + l + sovExec(uint64(l))
+	}
 	return n
 }
 
@@ -1223,6 +1296,16 @@ func (m *CallEvent) Size() (n int) {
 	return n
 }
 
+func (m *GovernAccountEvent) Size() (n int) {
+	var l int
+	_ = l
+	if m.AccountUpdate != nil {
+		l = m.AccountUpdate.Size()
+		n += 1 + l + sovExec(uint64(l))
+	}
+	return n
+}
+
 func (m *InputEvent) Size() (n int) {
 	var l int
 	_ = l
@@ -2213,6 +2296,39 @@ func (m *Event) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GovernAccount", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowExec
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthExec
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GovernAccount == nil {
+				m.GovernAccount = &GovernAccountEvent{}
+			}
+			if err := m.GovernAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipExec(dAtA[iNdEx:])
@@ -2704,6 +2820,89 @@ func (m *CallEvent) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *GovernAccountEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowExec
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GovernAccountEvent: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GovernAccountEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccountUpdate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowExec
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthExec
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AccountUpdate == nil {
+				m.AccountUpdate = &spec.TemplateAccount{}
+			}
+			if err := m.AccountUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipExec(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthExec
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *InputEvent) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -3151,66 +3350,67 @@ func init() { proto.RegisterFile("exec.proto", fileDescriptorExec) }
 func init() { golang_proto.RegisterFile("exec.proto", fileDescriptorExec) }
 
 var fileDescriptorExec = []byte{
-	// 963 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6f, 0x1b, 0x45,
-	0x14, 0xef, 0xf8, 0x63, 0x6d, 0x3f, 0x3b, 0x25, 0x8c, 0x22, 0xb4, 0xea, 0xc1, 0x6b, 0xa5, 0xa8,
-	0x0a, 0xa1, 0x5d, 0x23, 0x97, 0x20, 0xc1, 0xad, 0x26, 0x56, 0x93, 0x2a, 0x24, 0x68, 0x6a, 0x40,
-	0x70, 0x5b, 0xef, 0x0e, 0x9b, 0x25, 0xf6, 0xee, 0x6a, 0x76, 0xb6, 0xac, 0x8f, 0x70, 0xe2, 0xc8,
-	0x81, 0x03, 0xc7, 0xe6, 0x02, 0xff, 0x06, 0xc7, 0x1c, 0xb9, 0x52, 0x24, 0x0b, 0x25, 0xff, 0x45,
-	0x4f, 0x68, 0x3e, 0x76, 0xbd, 0x8e, 0xaa, 0xb6, 0x6a, 0xd2, 0x4b, 0x34, 0xef, 0xbd, 0xdf, 0xfe,
-	0xe6, 0x7d, 0xfc, 0xe6, 0xc5, 0x00, 0x34, 0xa3, 0xae, 0x1d, 0xb3, 0x88, 0x47, 0xb8, 0x26, 0xce,
-	0xb7, 0xee, 0xf9, 0x01, 0x3f, 0x4e, 0x27, 0xb6, 0x1b, 0xcd, 0xfa, 0x7e, 0xe4, 0x47, 0x7d, 0x19,
-	0x9c, 0xa4, 0xdf, 0x4b, 0x4b, 0x1a, 0xf2, 0xa4, 0x3e, 0xba, 0x75, 0xb7, 0x04, 0xe7, 0x34, 0xf4,
-	0x28, 0x9b, 0x05, 0x21, 0xef, 0x3b, 0x13, 0x37, 0xe8, 0xf3, 0x79, 0x4c, 0x13, 0xf5, 0x57, 0xa3,
-	0x3b, 0x94, 0xb1, 0x88, 0xe5, 0x56, 0x3b, 0x74, 0x66, 0x45, 0xa8, 0xc5, 0xb3, 0xfc, 0xb8, 0x1e,
-	0x0b, 0x96, 0x24, 0x09, 0xa2, 0x50, 0x79, 0x36, 0x7f, 0x43, 0x70, 0x73, 0x38, 0x8d, 0xdc, 0x93,
-	0x51, 0x46, 0xdd, 0x94, 0x07, 0x51, 0x88, 0xdf, 0x03, 0x63, 0x8f, 0x06, 0xfe, 0x31, 0x37, 0x51,
-	0x0f, 0x6d, 0xd5, 0x88, 0xb6, 0xf0, 0x7d, 0x68, 0x4b, 0xe4, 0x1e, 0x75, 0x3c, 0xca, 0xcc, 0x4a,
-	0x0f, 0x6d, 0xb5, 0x07, 0xef, 0xda, 0xb2, 0xce, 0x52, 0x80, 0x94, 0x51, 0x78, 0x07, 0x3a, 0xe3,
-	0xac, 0xe0, 0x4e, 0xcc, 0x6a, 0xaf, 0xba, 0xfc, 0xaa, 0x14, 0x21, 0x2b, 0xb0, 0xcd, 0x4f, 0x57,
-	0xee, 0xc2, 0x18, 0x6a, 0x8f, 0x1e, 0x1f, 0x1d, 0xca, 0x84, 0x5a, 0x44, 0x9e, 0x45, 0x9a, 0x87,
-	0xe9, 0x6c, 0x9c, 0x25, 0x32, 0x93, 0x3a, 0xd1, 0xd6, 0xe6, 0x45, 0x15, 0xda, 0x25, 0x2e, 0xfc,
-	0x08, 0x8c, 0x71, 0x36, 0x9e, 0xc7, 0x54, 0xe2, 0xd6, 0x86, 0x83, 0xe7, 0x0b, 0xcb, 0x2e, 0xf5,
-	0xf6, 0x78, 0x1e, 0x53, 0x36, 0xa5, 0x9e, 0x4f, 0x59, 0x7f, 0x92, 0x32, 0x16, 0xfd, 0xd8, 0xe7,
-	0x59, 0xd2, 0x8f, 0x9d, 0xf9, 0x34, 0x72, 0x3c, 0x5b, 0x7c, 0x49, 0x34, 0x03, 0xfe, 0x42, 0x70,
-	0xed, 0x39, 0xc9, 0xb1, 0x59, 0xed, 0xa1, 0xad, 0xce, 0x70, 0xe7, 0x6c, 0x61, 0xdd, 0x78, 0xb6,
-	0xb0, 0xee, 0xbd, 0x9c, 0x6f, 0x12, 0x84, 0x0e, 0x9b, 0xdb, 0x7b, 0x34, 0x1b, 0xce, 0x39, 0x4d,
-	0x88, 0x26, 0x29, 0x75, 0xba, 0xb6, 0xd2, 0xe9, 0x0d, 0xa8, 0xef, 0x87, 0x1e, 0xcd, 0xcc, 0xba,
-	0x74, 0x2b, 0x03, 0x7f, 0x0b, 0xcd, 0x51, 0xf8, 0x84, 0x4e, 0xa3, 0x98, 0x9a, 0x86, 0x6c, 0xfe,
-	0x9a, 0x2d, 0x46, 0x9b, 0x3b, 0x87, 0xf6, 0xb3, 0x85, 0xb5, 0xfd, 0xca, 0xca, 0x0a, 0x3c, 0x29,
-	0xe8, 0xf0, 0x6d, 0x30, 0x46, 0x4f, 0x68, 0xc8, 0x13, 0xb3, 0x21, 0xe7, 0xd3, 0x56, 0xf3, 0x91,
-	0x3e, 0xa2, 0x43, 0xf8, 0x7d, 0x30, 0x08, 0x4d, 0xd2, 0x29, 0x37, 0x9b, 0xf2, 0xf6, 0x8e, 0x02,
-	0x29, 0x1f, 0xd1, 0x31, 0x7c, 0x07, 0x1a, 0x84, 0xba, 0x34, 0x88, 0xb9, 0xd9, 0xd2, 0x30, 0x71,
-	0xa9, 0xf6, 0x91, 0x3c, 0x88, 0xfb, 0xd0, 0x1a, 0x65, 0x2e, 0x8d, 0xc5, 0x8c, 0x4c, 0xc8, 0xb5,
-	0xa4, 0x44, 0x5c, 0x04, 0xc8, 0x12, 0xf3, 0x59, 0xe7, 0x97, 0x53, 0x0b, 0xfd, 0x7a, 0x6a, 0xa1,
-	0xa7, 0xa7, 0x16, 0xda, 0xfc, 0xb7, 0x22, 0x7a, 0x27, 0xc5, 0xb1, 0x1c, 0x30, 0xba, 0xc6, 0x01,
-	0x57, 0xae, 0x63, 0xc0, 0x1f, 0x42, 0x4b, 0x36, 0x4f, 0x66, 0x57, 0x95, 0xd9, 0xad, 0x3d, 0x5f,
-	0x58, 0x4b, 0x27, 0x59, 0x1e, 0xb1, 0x09, 0x0d, 0x69, 0xec, 0xef, 0x4a, 0x39, 0xb4, 0x48, 0x6e,
-	0x96, 0x74, 0x52, 0x7f, 0xb1, 0x4e, 0x8c, 0xb2, 0x4e, 0x56, 0x3a, 0xdb, 0x78, 0x8d, 0xce, 0xae,
-	0xff, 0xfe, 0xd4, 0xba, 0xb1, 0xd2, 0xdd, 0x7f, 0x10, 0xd4, 0xe5, 0xe5, 0x62, 0xe8, 0xfa, 0xbd,
-	0xa3, 0xf2, 0xd0, 0xf5, 0x53, 0xcf, 0x47, 0x70, 0x47, 0x24, 0x12, 0xa7, 0x5c, 0x2f, 0x85, 0x75,
-	0x05, 0x92, 0x2e, 0xa5, 0x21, 0x15, 0xc6, 0x1f, 0x80, 0x71, 0x94, 0x72, 0x01, 0xac, 0x96, 0xb7,
-	0x87, 0xf2, 0x69, 0xb5, 0x29, 0x03, 0xdf, 0x86, 0xda, 0xe7, 0xce, 0x74, 0x2a, 0x5b, 0xd1, 0x1e,
-	0xbc, 0xa3, 0x80, 0xc2, 0xa3, 0x60, 0x32, 0x88, 0x7b, 0x50, 0x3d, 0x88, 0x7c, 0xd9, 0x95, 0xf6,
-	0xe0, 0xa6, 0xc2, 0x1c, 0x44, 0xbe, 0x82, 0x88, 0xd0, 0x0b, 0x6a, 0xfb, 0x03, 0xe5, 0x3a, 0x16,
-	0x7d, 0x25, 0x94, 0xa7, 0x2c, 0x94, 0xc5, 0x75, 0x88, 0xb6, 0xc4, 0x24, 0x1e, 0x3a, 0xc9, 0x57,
-	0x09, 0xf5, 0x64, 0x41, 0x35, 0x92, 0x9b, 0x78, 0x1b, 0x5a, 0x87, 0xce, 0x8c, 0x8e, 0x42, 0xce,
-	0xe6, 0xba, 0x86, 0x8e, 0xad, 0x96, 0xad, 0xf4, 0x91, 0x65, 0x18, 0x7f, 0x04, 0xcd, 0x2f, 0x29,
-	0x9b, 0x3d, 0x60, 0x7e, 0xa2, 0xab, 0xd8, 0xb0, 0x4b, 0xfb, 0x37, 0x8f, 0x91, 0x02, 0x75, 0x49,
-	0xe2, 0x3f, 0x55, 0xa0, 0x99, 0x17, 0x83, 0x0f, 0xa1, 0xf1, 0xc0, 0xf3, 0x18, 0x4d, 0x12, 0x95,
-	0xeb, 0xf0, 0x63, 0xad, 0xcc, 0xbb, 0x2f, 0x57, 0xa6, 0xcb, 0xe6, 0x31, 0x8f, 0x6c, 0xfd, 0x2d,
-	0xc9, 0x49, 0xf0, 0x3e, 0xd4, 0x76, 0x1d, 0xee, 0x5c, 0x4d, 0xe6, 0x92, 0x02, 0x1f, 0x80, 0x31,
-	0x8e, 0xe2, 0xc0, 0x55, 0xcb, 0xfd, 0xb5, 0x33, 0xd3, 0x64, 0xdf, 0x44, 0xcc, 0x1b, 0xec, 0x7c,
-	0x42, 0x34, 0xc7, 0xa5, 0x1e, 0xfc, 0x5c, 0x81, 0x56, 0x31, 0x74, 0xbc, 0x0d, 0x4d, 0x61, 0xc8,
-	0xc4, 0x51, 0x79, 0xe6, 0xb9, 0x97, 0x14, 0x71, 0x91, 0xd5, 0x11, 0x0b, 0xfc, 0x20, 0xd4, 0x25,
-	0xbe, 0x59, 0xbf, 0x34, 0x07, 0xee, 0x02, 0x3c, 0xe6, 0x8e, 0x7b, 0xb2, 0x4b, 0x63, 0xae, 0x96,
-	0x7f, 0x8d, 0x94, 0x3c, 0x62, 0x6f, 0x68, 0x25, 0xd5, 0xae, 0xb4, 0x37, 0x14, 0xc9, 0xa5, 0x26,
-	0xfc, 0x00, 0xb0, 0x7c, 0x4a, 0xd7, 0xad, 0x84, 0x4b, 0x77, 0x9d, 0x40, 0xbb, 0xf4, 0x1a, 0xdf,
-	0xf2, 0x65, 0x7f, 0x56, 0x60, 0x65, 0x60, 0xe2, 0xac, 0x37, 0xcd, 0x1b, 0x0f, 0x4c, 0x71, 0x14,
-	0x6c, 0xf4, 0x6a, 0xe3, 0x57, 0x1c, 0xc5, 0x6b, 0xa9, 0x5e, 0xfd, 0xb5, 0x6c, 0x40, 0xfd, 0x6b,
-	0x67, 0x9a, 0x52, 0xfd, 0x2f, 0x5f, 0x19, 0x78, 0x1d, 0xaa, 0x0f, 0x9d, 0x44, 0xaf, 0x77, 0x71,
-	0x5c, 0xed, 0xd4, 0x70, 0x78, 0x76, 0xde, 0x45, 0x7f, 0x9f, 0x77, 0xd1, 0x7f, 0xe7, 0x5d, 0xf4,
-	0xd7, 0x45, 0x17, 0x9d, 0x5d, 0x74, 0xd1, 0x77, 0xaf, 0x28, 0x86, 0xe6, 0xbf, 0x81, 0xe4, 0x69,
-	0x62, 0xc8, 0x5f, 0x7c, 0xf7, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xf0, 0xd7, 0x87, 0x9a,
+	// 979 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
+	0x14, 0x5f, 0x37, 0xce, 0xbf, 0x97, 0x64, 0x29, 0xa3, 0x82, 0xac, 0x3d, 0x24, 0x91, 0x17, 0xad,
+	0x4a, 0x61, 0x1d, 0x94, 0xa5, 0x48, 0x80, 0x84, 0xd4, 0xd0, 0xa8, 0xed, 0xaa, 0xb4, 0x30, 0x9b,
+	0x05, 0x81, 0xe0, 0xe0, 0x38, 0x83, 0x6b, 0xad, 0xe3, 0xb1, 0xc6, 0xe3, 0xe2, 0x7c, 0x08, 0x6e,
+	0x1c, 0x96, 0x1b, 0x1f, 0x85, 0x63, 0x6f, 0x70, 0xe1, 0xb2, 0x87, 0x08, 0x75, 0x3f, 0x02, 0x9c,
+	0xf6, 0x84, 0xe6, 0x8f, 0x1d, 0x47, 0xa0, 0x2d, 0xda, 0xf6, 0x36, 0xef, 0xbd, 0xdf, 0xfc, 0xe6,
+	0xcd, 0x7b, 0xbf, 0x37, 0x36, 0x00, 0xc9, 0x88, 0xe7, 0xc4, 0x8c, 0x72, 0x8a, 0x4c, 0xb1, 0xbe,
+	0x73, 0xdf, 0x0f, 0xf8, 0x59, 0x3a, 0x75, 0x3c, 0x3a, 0x1f, 0xf8, 0xd4, 0xa7, 0x03, 0x19, 0x9c,
+	0xa6, 0xdf, 0x4b, 0x4b, 0x1a, 0x72, 0xa5, 0x36, 0xdd, 0x69, 0x13, 0xc6, 0x28, 0x4b, 0xb4, 0xd5,
+	0x8a, 0xdc, 0x39, 0xc9, 0x8d, 0x26, 0xcf, 0xf2, 0xe5, 0x66, 0x4c, 0xd8, 0x3c, 0x48, 0x92, 0x80,
+	0x46, 0xda, 0x03, 0x49, 0x9c, 0x1f, 0x6c, 0xff, 0x64, 0xc0, 0xed, 0x51, 0x48, 0xbd, 0x27, 0xe3,
+	0x8c, 0x78, 0x29, 0x0f, 0x68, 0x84, 0xde, 0x84, 0xda, 0x21, 0x09, 0xfc, 0x33, 0x6e, 0x19, 0x7d,
+	0x63, 0xdb, 0xc4, 0xda, 0x42, 0x0f, 0xa0, 0x25, 0x91, 0x87, 0xc4, 0x9d, 0x11, 0x66, 0x6d, 0xf4,
+	0x8d, 0xed, 0xd6, 0xf0, 0x75, 0x47, 0xde, 0xa2, 0x14, 0xc0, 0x65, 0x14, 0xda, 0x85, 0xf6, 0x24,
+	0x2b, 0xb8, 0x13, 0xab, 0xd2, 0xaf, 0xac, 0x76, 0x95, 0x22, 0x78, 0x0d, 0x66, 0x7f, 0xb8, 0x76,
+	0x16, 0x42, 0x60, 0x3e, 0x7c, 0x74, 0x7a, 0x22, 0x13, 0x6a, 0x62, 0xb9, 0x16, 0x69, 0x9e, 0xa4,
+	0xf3, 0x49, 0x96, 0xc8, 0x4c, 0xaa, 0x58, 0x5b, 0xf6, 0x1f, 0x15, 0x68, 0x95, 0xb8, 0xd0, 0x43,
+	0xa8, 0x4d, 0xb2, 0xc9, 0x22, 0x26, 0x12, 0xd7, 0x19, 0x0d, 0x5f, 0x2c, 0x7b, 0x4e, 0xa9, 0xd0,
+	0x67, 0x8b, 0x98, 0xb0, 0x90, 0xcc, 0x7c, 0xc2, 0x06, 0xd3, 0x94, 0x31, 0xfa, 0xc3, 0x80, 0x67,
+	0xc9, 0x20, 0x76, 0x17, 0x21, 0x75, 0x67, 0x8e, 0xd8, 0x89, 0x35, 0x03, 0xfa, 0x4c, 0x70, 0x1d,
+	0xba, 0xc9, 0x99, 0x55, 0xe9, 0x1b, 0xdb, 0xed, 0xd1, 0xee, 0xc5, 0xb2, 0x77, 0xeb, 0xd9, 0xb2,
+	0x77, 0xff, 0xe5, 0x7c, 0xd3, 0x20, 0x72, 0xd9, 0xc2, 0x39, 0x24, 0xd9, 0x68, 0xc1, 0x49, 0x82,
+	0x35, 0x49, 0xa9, 0xd2, 0xe6, 0x5a, 0xa5, 0xb7, 0xa0, 0x7a, 0x14, 0xcd, 0x48, 0x66, 0x55, 0xa5,
+	0x5b, 0x19, 0xe8, 0x6b, 0x68, 0x8c, 0xa3, 0x73, 0x12, 0xd2, 0x98, 0x58, 0x35, 0x59, 0xfc, 0x8e,
+	0x23, 0xda, 0x9c, 0x3b, 0x47, 0xce, 0xb3, 0x65, 0x6f, 0xe7, 0xca, 0x9b, 0x15, 0x78, 0x5c, 0xd0,
+	0xa1, 0xbb, 0x50, 0x1b, 0x9f, 0x93, 0x88, 0x27, 0x56, 0x5d, 0xf6, 0xa7, 0xa5, 0xfa, 0x23, 0x7d,
+	0x58, 0x87, 0xd0, 0x5b, 0x50, 0xc3, 0x24, 0x49, 0x43, 0x6e, 0x35, 0xe4, 0xe9, 0x6d, 0x05, 0x52,
+	0x3e, 0xac, 0x63, 0xe8, 0x1e, 0xd4, 0x31, 0xf1, 0x48, 0x10, 0x73, 0xab, 0xa9, 0x61, 0xe2, 0x50,
+	0xed, 0xc3, 0x79, 0x10, 0x0d, 0xa0, 0x39, 0xce, 0x3c, 0x12, 0x8b, 0x1e, 0x59, 0x90, 0x6b, 0x49,
+	0x09, 0xba, 0x08, 0xe0, 0x15, 0xc6, 0xfe, 0x6d, 0x43, 0x54, 0x4b, 0xca, 0x61, 0xd5, 0x52, 0xe3,
+	0x06, 0x5b, 0xba, 0x71, 0x13, 0x2d, 0x7d, 0x07, 0x9a, 0xb2, 0x5c, 0x32, 0xbb, 0x8a, 0xcc, 0xae,
+	0xf3, 0x62, 0xd9, 0x5b, 0x39, 0xf1, 0x6a, 0x89, 0x2c, 0xa8, 0x4b, 0xe3, 0x68, 0x5f, 0x0a, 0xa0,
+	0x89, 0x73, 0xb3, 0xa4, 0x8c, 0xea, 0x7f, 0x2b, 0xa3, 0x56, 0x56, 0xc6, 0x5a, 0x2d, 0xeb, 0x57,
+	0xd7, 0xf2, 0x23, 0xf3, 0xe9, 0x2f, 0xbd, 0x5b, 0xf6, 0x8f, 0x1b, 0x50, 0x95, 0x07, 0x8a, 0xd6,
+	0xea, 0xa9, 0x36, 0xca, 0xad, 0xd5, 0x03, 0x9d, 0x97, 0xfd, 0x9e, 0x38, 0x3c, 0x4e, 0xb9, 0x1e,
+	0xfd, 0x4d, 0x05, 0x92, 0x2e, 0xa5, 0x14, 0x15, 0x46, 0x6f, 0x43, 0xed, 0x34, 0xe5, 0x02, 0x58,
+	0x29, 0xbf, 0x11, 0xca, 0xa7, 0x35, 0xa5, 0x0c, 0x74, 0x17, 0xcc, 0x4f, 0xdd, 0x30, 0x94, 0xd7,
+	0x6f, 0x0d, 0x5f, 0x53, 0x40, 0xe1, 0x51, 0x30, 0x19, 0x44, 0x7d, 0xa8, 0x1c, 0x53, 0x5f, 0x56,
+	0xa2, 0x35, 0xbc, 0xad, 0x30, 0xc7, 0xd4, 0x57, 0x10, 0x11, 0x42, 0x9f, 0x40, 0xe7, 0x80, 0x9e,
+	0x13, 0x16, 0xed, 0x79, 0x1e, 0x4d, 0x23, 0xae, 0xe7, 0xc3, 0x52, 0xd8, 0xb5, 0x90, 0xda, 0xb5,
+	0x0e, 0xd7, 0xf5, 0x78, 0x6a, 0xe4, 0x0a, 0x17, 0xf5, 0xc7, 0x84, 0xa7, 0x2c, 0x92, 0x05, 0x69,
+	0x63, 0x6d, 0x89, 0x8e, 0x1d, 0xb8, 0xc9, 0xe3, 0x84, 0xcc, 0x64, 0x11, 0x4c, 0x9c, 0x9b, 0x68,
+	0x07, 0x9a, 0x27, 0xee, 0x9c, 0x8c, 0x23, 0xce, 0x16, 0xfa, 0xde, 0x6d, 0x47, 0x3d, 0xc9, 0xd2,
+	0x87, 0x57, 0x61, 0xf4, 0x1e, 0x34, 0x3e, 0x27, 0x6c, 0xbe, 0xc7, 0xfc, 0x44, 0xdf, 0x7c, 0xcb,
+	0x29, 0xbd, 0xd2, 0x79, 0x0c, 0x17, 0x28, 0xfb, 0x2f, 0x03, 0x1a, 0xf9, 0x95, 0xd1, 0x09, 0xd4,
+	0xf7, 0x66, 0x33, 0x46, 0x92, 0x44, 0x65, 0x37, 0x7a, 0x5f, 0x6b, 0xf6, 0xdd, 0x97, 0x6b, 0xd6,
+	0x63, 0x8b, 0x98, 0x53, 0x47, 0xef, 0xc5, 0x39, 0x09, 0x3a, 0x02, 0x73, 0xdf, 0xe5, 0xee, 0xf5,
+	0x06, 0x40, 0x52, 0xa0, 0x63, 0xa8, 0x4d, 0x68, 0x1c, 0x78, 0xea, 0xa1, 0xff, 0xdf, 0x99, 0x69,
+	0xb2, 0xaf, 0x28, 0x9b, 0x0d, 0x77, 0x3f, 0xc0, 0x9a, 0xc3, 0xfe, 0xdb, 0x80, 0x66, 0x21, 0x06,
+	0xb4, 0x03, 0x0d, 0x61, 0xc8, 0x54, 0x8d, 0xb2, 0x16, 0x72, 0x2f, 0x2e, 0xe2, 0x22, 0x8f, 0x53,
+	0x16, 0xf8, 0x41, 0xa4, 0x2f, 0xf5, 0x6a, 0x15, 0xd2, 0x1c, 0xa8, 0x0b, 0xf0, 0x88, 0xbb, 0xde,
+	0x93, 0x7d, 0x12, 0x73, 0xf5, 0xf4, 0x9b, 0xb8, 0xe4, 0x11, 0x6f, 0x88, 0x56, 0x8b, 0x79, 0xad,
+	0x37, 0x44, 0x91, 0xd8, 0x5f, 0x00, 0xfa, 0xb7, 0x64, 0xd1, 0xc7, 0xd0, 0xd1, 0xf6, 0xe3, 0x78,
+	0xe6, 0x72, 0xa2, 0x6b, 0xf0, 0x86, 0x23, 0xbf, 0xe6, 0x13, 0x32, 0x8f, 0x43, 0x97, 0x13, 0x0d,
+	0xc1, 0xeb, 0x58, 0xfb, 0x5b, 0x80, 0xd5, 0x9c, 0xde, 0xb4, 0x80, 0xec, 0xef, 0xa0, 0x55, 0x1a,
+	0xee, 0x1b, 0xa7, 0xff, 0x79, 0x03, 0xd6, 0x3a, 0x2b, 0xd6, 0xfa, 0xa9, 0x7a, 0xe5, 0xce, 0x2a,
+	0x8e, 0x82, 0x8d, 0x5c, 0x4f, 0x27, 0x8a, 0xa3, 0x18, 0xa4, 0xca, 0xf5, 0x07, 0x69, 0x0b, 0xaa,
+	0x5f, 0xba, 0x61, 0x4a, 0xf4, 0x9f, 0x81, 0x32, 0xd0, 0x26, 0x54, 0x0e, 0xdc, 0x44, 0x7f, 0x13,
+	0xc4, 0x72, 0x34, 0xba, 0xb8, 0xec, 0x1a, 0xbf, 0x5f, 0x76, 0x8d, 0x3f, 0x2f, 0xbb, 0xc6, 0xaf,
+	0xcf, 0xbb, 0xc6, 0xc5, 0xf3, 0xae, 0xf1, 0xcd, 0x15, 0xe9, 0x93, 0xfc, 0xe7, 0x48, 0xae, 0xa6,
+	0x35, 0xf9, 0x2b, 0xf8, 0xe0, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xaa, 0x48, 0xa0, 0x91,
 	0x0a, 0x00, 0x00,
 }
diff --git a/execution/exec/govern_account_event.go b/execution/exec/govern_account_event.go
new file mode 100644
index 0000000000000000000000000000000000000000..81c33c44ff9ef10dc796234000963818a8686df3
--- /dev/null
+++ b/execution/exec/govern_account_event.go
@@ -0,0 +1 @@
+package exec
diff --git a/execution/exec/tx_execution.go b/execution/exec/tx_execution.go
index 493af29207bd8bf63f40c1091f6cae7c41cd91c9..9de377ffea91255ce67c5ecab96228ca8547b4c4 100644
--- a/execution/exec/tx_execution.go
+++ b/execution/exec/tx_execution.go
@@ -15,9 +15,10 @@ import (
 func EventStringAccountInput(addr crypto.Address) string  { return fmt.Sprintf("Acc/%s/Input", addr) }
 func EventStringAccountOutput(addr crypto.Address) string { return fmt.Sprintf("Acc/%s/Output", addr) }
 
-func EventStringAccountCall(addr crypto.Address) string { return fmt.Sprintf("Acc/%s/Call", addr) }
-func EventStringLogEvent(addr crypto.Address) string    { return fmt.Sprintf("Log/%s", addr) }
-func EventStringTxExecution(txHash []byte) string       { return fmt.Sprintf("Execution/Tx/%X", txHash) }
+func EventStringAccountCall(addr crypto.Address) string    { return fmt.Sprintf("Acc/%s/Call", addr) }
+func EventStringLogEvent(addr crypto.Address) string       { return fmt.Sprintf("Log/%s", addr) }
+func EventStringTxExecution(txHash []byte) string          { return fmt.Sprintf("Execution/Tx/%X", txHash) }
+func EventStringGovernAccount(addr *crypto.Address) string { return fmt.Sprintf("Govern/Acc/%v", addr) }
 
 func NewTxExecution(txEnv *txs.Envelope) *TxExecution {
 	return &TxExecution{
@@ -89,6 +90,13 @@ func (txe *TxExecution) Call(call *CallEvent, exception *errors.Exception) {
 	})
 }
 
+func (txe *TxExecution) GovernAccount(governAccount *GovernAccountEvent, exception *errors.Exception) {
+	txe.Append(&Event{
+		Header:        txe.Header(TypeCall, EventStringGovernAccount(governAccount.AccountUpdate.Address), exception),
+		GovernAccount: governAccount,
+	})
+}
+
 // Set result
 func (txe *TxExecution) Return(returnValue []byte, gasUsed uint64) {
 	if txe.Result == nil {
diff --git a/execution/execution.go b/execution/execution.go
index 23412014eddc452a9260e697a073403f8e87c123..2fe05f133416bb1f24c00f0619d127438478a920 100644
--- a/execution/execution.go
+++ b/execution/execution.go
@@ -15,27 +15,27 @@
 package execution
 
 import (
+	"context"
 	"fmt"
 	"runtime/debug"
 	"sync"
-
-	"context"
+	"time"
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/binary"
-	bcm "github.com/hyperledger/burrow/blockchain"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/event"
+	"github.com/hyperledger/burrow/execution/contexts"
 	"github.com/hyperledger/burrow/execution/evm"
 	"github.com/hyperledger/burrow/execution/exec"
-	"github.com/hyperledger/burrow/execution/executors"
 	"github.com/hyperledger/burrow/execution/names"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
 	"github.com/hyperledger/burrow/txs"
 	"github.com/hyperledger/burrow/txs/payload"
-	abciTypes "github.com/tendermint/abci/types"
+	abciTypes "github.com/tendermint/tendermint/abci/types"
 )
 
 type Executor interface {
@@ -67,13 +67,13 @@ type BatchCommitter interface {
 	BatchExecutor
 	// Commit execution results to underlying State and provide opportunity
 	// to mutate state before it is saved
-	Commit(*abciTypes.Header) (stateHash []byte, err error)
+	Commit(blockHash []byte, blockTime time.Time, header *abciTypes.Header) (stateHash []byte, err error)
 }
 
 type executor struct {
 	sync.RWMutex
 	runCall        bool
-	tip            bcm.TipInfo
+	blockchain     *bcm.Blockchain
 	state          ExecutorState
 	stateCache     *state.Cache
 	nameRegCache   *names.Cache
@@ -81,65 +81,80 @@ type executor struct {
 	blockExecution *exec.BlockExecution
 	logger         *logging.Logger
 	vmOptions      []func(*evm.VM)
-	txExecutors    map[payload.Type]Context
+	contexts       map[payload.Type]Context
 }
 
 var _ BatchExecutor = (*executor)(nil)
 
 // Wraps a cache of what is variously known as the 'check cache' and 'mempool'
-func NewBatchChecker(backend ExecutorState, tip bcm.TipInfo, logger *logging.Logger,
+func NewBatchChecker(backend ExecutorState, blockchain *bcm.Blockchain, logger *logging.Logger,
 	options ...ExecutionOption) BatchExecutor {
 
-	return newExecutor("CheckCache", false, backend, tip, event.NewNoOpPublisher(),
+	exe := newExecutor("CheckCache", false, backend, blockchain, event.NewNoOpPublisher(),
 		logger.WithScope("NewBatchExecutor"), options...)
+
+	return exe.AddContext(payload.TypeGovernance,
+		&contexts.GovernanceContext{
+			ValidatorSet: exe.blockchain.ValidatorChecker(),
+			StateWriter:  exe.stateCache,
+			Logger:       exe.logger,
+		},
+	)
 }
 
-func NewBatchCommitter(backend ExecutorState, tip bcm.TipInfo, emitter event.Publisher, logger *logging.Logger,
+func NewBatchCommitter(backend ExecutorState, blockchain *bcm.Blockchain, emitter event.Publisher, logger *logging.Logger,
 	options ...ExecutionOption) BatchCommitter {
 
-	return newExecutor("CommitCache", true, backend, tip, emitter,
+	exe := newExecutor("CommitCache", true, backend, blockchain, emitter,
 		logger.WithScope("NewBatchCommitter"), options...)
+
+	return exe.AddContext(payload.TypeGovernance,
+		&contexts.GovernanceContext{
+			ValidatorSet: exe.blockchain.ValidatorWriter(),
+			StateWriter:  exe.stateCache,
+			Logger:       exe.logger,
+		},
+	)
 }
 
-func newExecutor(name string, runCall bool, backend ExecutorState, tip bcm.TipInfo, publisher event.Publisher,
+func newExecutor(name string, runCall bool, backend ExecutorState, blockchain *bcm.Blockchain, publisher event.Publisher,
 	logger *logging.Logger, options ...ExecutionOption) *executor {
-
 	exe := &executor{
 		runCall:      runCall,
 		state:        backend,
-		tip:          tip,
+		blockchain:   blockchain,
 		stateCache:   state.NewCache(backend, state.Name(name)),
 		nameRegCache: names.NewCache(backend),
 		publisher:    publisher,
 		blockExecution: &exec.BlockExecution{
-			Height: tip.LastBlockHeight() + 1,
+			Height: blockchain.LastBlockHeight() + 1,
 		},
 		logger: logger.With(structure.ComponentKey, "Executor"),
 	}
 	for _, option := range options {
 		option(exe)
 	}
-	exe.txExecutors = map[payload.Type]Context{
-		payload.TypeSend: &executors.SendContext{
-			Tip:         tip,
+	exe.contexts = map[payload.Type]Context{
+		payload.TypeSend: &contexts.SendContext{
+			Tip:         blockchain,
 			StateWriter: exe.stateCache,
 			Logger:      exe.logger,
 		},
-		payload.TypeCall: &executors.CallContext{
-			Tip:         tip,
+		payload.TypeCall: &contexts.CallContext{
+			Tip:         blockchain,
 			StateWriter: exe.stateCache,
 			RunCall:     runCall,
 			VMOptions:   exe.vmOptions,
 			Logger:      exe.logger,
 		},
-		payload.TypeName: &executors.NameContext{
-			Tip:         tip,
+		payload.TypeName: &contexts.NameContext{
+			Tip:         blockchain,
 			StateWriter: exe.stateCache,
 			NameReg:     exe.nameRegCache,
 			Logger:      exe.logger,
 		},
-		payload.TypePermissions: &executors.PermissionsContext{
-			Tip:         tip,
+		payload.TypePermissions: &contexts.PermissionsContext{
+			Tip:         blockchain,
 			StateWriter: exe.stateCache,
 			Logger:      exe.logger,
 		},
@@ -147,6 +162,11 @@ func newExecutor(name string, runCall bool, backend ExecutorState, tip bcm.TipIn
 	return exe
 }
 
+func (exe *executor) AddContext(ty payload.Type, ctx Context) *executor {
+	exe.contexts[ty] = ctx
+	return exe
+}
+
 // If the tx is invalid, an error will be returned.
 // Unlike ExecBlock(), state will not be altered.
 func (exe *executor) Execute(txEnv *txs.Envelope) (txe *exec.TxExecution, err error) {
@@ -169,7 +189,7 @@ func (exe *executor) Execute(txEnv *txs.Envelope) (txe *exec.TxExecution, err er
 		return nil, err
 	}
 
-	if txExecutor, ok := exe.txExecutors[txEnv.Tx.Type()]; ok {
+	if txExecutor, ok := exe.contexts[txEnv.Tx.Type()]; ok {
 		// Establish new TxExecution
 		txe := exe.blockExecution.Tx(txEnv)
 		err = txExecutor.Execute(txe)
@@ -198,7 +218,8 @@ func (exe *executor) finaliseBlockExecution(header *abciTypes.Header) (*exec.Blo
 	return be, nil
 }
 
-func (exe *executor) Commit(header *abciTypes.Header) (_ []byte, err error) {
+func (exe *executor) Commit(blockHash []byte, blockTime time.Time, header *abciTypes.Header) (_ []byte, err error) {
+
 	// The write lock to the executor is controlled by the caller (e.g. abci.App) so we do not acquire it here to avoid
 	// deadlock
 	defer func() {
@@ -212,6 +233,9 @@ func (exe *executor) Commit(header *abciTypes.Header) (_ []byte, err error) {
 	if err != nil {
 		return nil, err
 	}
+
+	// First commit the app state, this app hash will not get checkpointed until the next block when we are sure
+	// that nothing in the downstream commit process could have failed. At worst we go back one block.
 	hash, err := exe.state.Update(func(ws Updatable) error {
 		// flush the caches
 		err := exe.stateCache.Flush(ws, exe.state)
@@ -240,8 +264,18 @@ func (exe *executor) Commit(header *abciTypes.Header) (_ []byte, err error) {
 	}
 	publishErr := exe.publisher.Publish(context.Background(), blockExecution, blockExecution.Tagged())
 	exe.logger.InfoMsg("Error publishing TxExecution",
-		"height", blockExecution.Height,
-		structure.ErrorKey, publishErr)
+		"height", blockExecution.Height, structure.ErrorKey, publishErr)
+	// Commit to our blockchain state which will checkpoint the previous app hash by saving it to the database
+	// (we know the previous app hash is safely committed because we are about to commit the next)
+	totalPowerChange, totalFlow, err := exe.blockchain.CommitBlock(blockTime, blockHash, hash)
+	if err != nil {
+		panic(fmt.Errorf("could not commit block to blockchain state: %v", err))
+	}
+	exe.logger.InfoMsg("Committed block",
+		"total_validator_power", exe.blockchain.CurrentValidators().TotalPower(),
+		"total_validator_power_change", totalPowerChange,
+		"total_validator_flow", totalFlow)
+
 	return hash, nil
 }
 
diff --git a/execution/execution_test.go b/execution/execution_test.go
index 608da343cfbfdb99750fc30f747d7007ff0530a0..58b28ab4bfb66407c612ab89dec0fc56a617e750 100644
--- a/execution/execution_test.go
+++ b/execution/execution_test.go
@@ -17,16 +17,15 @@ package execution
 import (
 	"bytes"
 	"fmt"
+	"runtime/debug"
 	"strconv"
 	"testing"
 	"time"
 
-	"runtime/debug"
-
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/bcm"
 	. "github.com/hyperledger/burrow/binary"
-	bcm "github.com/hyperledger/burrow/blockchain"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/event"
 	"github.com/hyperledger/burrow/execution/errors"
@@ -42,7 +41,7 @@ import (
 	"github.com/hyperledger/burrow/txs"
 	"github.com/hyperledger/burrow/txs/payload"
 	"github.com/stretchr/testify/require"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tmthrgd/go-hex"
 )
 
@@ -123,15 +122,13 @@ var testChainID = testGenesisDoc.ChainID()
 
 type testExecutor struct {
 	*executor
-	blockchain *bcm.Blockchain
 }
 
 func makeExecutor(state *State) *testExecutor {
 	blockchain := newBlockchain(testGenesisDoc)
 	return &testExecutor{
-		executor: newExecutor("makeExecutorCache", true, state, blockchain.Tip, event.NewNoOpPublisher(),
+		executor: newExecutor("makeExecutorCache", true, state, blockchain, event.NewNoOpPublisher(),
 			logger),
-		blockchain: blockchain,
 	}
 }
 
@@ -145,11 +142,8 @@ func (te *testExecutor) signExecuteCommit(tx payload.Payload, signer acm.Address
 	if err != nil {
 		return err
 	}
-	appHash, err := te.Commit(nil)
-	if err != nil {
-		return err
-	}
-	return te.blockchain.CommitBlock(time.Now(), nil, appHash)
+	_, err = te.Commit(nil, time.Now(), nil)
+	return err
 }
 
 func makeUsers(n int) []acm.AddressableSigner {
@@ -903,10 +897,10 @@ func TestSNativeTx(t *testing.T) {
 }
 
 func TestTxSequence(t *testing.T) {
-	state, privAccounts := makeGenesisState(3, true, 1000, 1, true, 1000)
-	acc0 := getAccount(state, privAccounts[0].Address())
+	st, privAccounts := makeGenesisState(3, true, 1000, 1, true, 1000)
+	acc0 := getAccount(st, privAccounts[0].Address())
 	acc0PubKey := privAccounts[0].PublicKey()
-	acc1 := getAccount(state, privAccounts[1].Address())
+	acc1 := getAccount(st, privAccounts[1].Address())
 
 	// Test a variety of sequence numbers for the tx.
 	// The tx should only pass when i == 1.
@@ -917,7 +911,7 @@ func TestTxSequence(t *testing.T) {
 		tx.AddOutput(acc1.Address(), 1)
 		txEnv := txs.Enclose(testChainID, tx)
 		require.NoError(t, txEnv.Sign(privAccounts[0]))
-		stateCopy, err := state.Copy(dbm.NewMemDB())
+		stateCopy, err := st.Copy(dbm.NewMemDB())
 		require.NoError(t, err)
 		err = execTxWithState(stateCopy, txEnv)
 		if i == 1 {
@@ -1038,7 +1032,7 @@ func TestNameTxs(t *testing.T) {
 
 	// fail to update it as non-owner
 	// Fast forward
-	for exe.blockchain.Tip.LastBlockHeight() < entry.Expires-1 {
+	for exe.blockchain.LastBlockHeight() < entry.Expires-1 {
 		commitNewBlock(st, exe.blockchain)
 	}
 	tx, _ = payload.NewNameTx(st, testPrivAccounts[1].PublicKey(), name, data, amt, fee)
@@ -1091,7 +1085,7 @@ func TestNameTxs(t *testing.T) {
 	require.NoError(t, err)
 	validateEntry(t, entry, name, data, testPrivAccounts[0].Address(), startingBlock+numDesiredBlocks)
 	// Fast forward
-	for exe.blockchain.Tip.LastBlockHeight() < entry.Expires {
+	for exe.blockchain.LastBlockHeight() < entry.Expires {
 		commitNewBlock(st, exe.blockchain)
 	}
 
@@ -1546,7 +1540,7 @@ func TestSelfDestruct(t *testing.T) {
 	tx := payload.NewCallTxWithSequence(acc0PubKey, addressPtr(acc1), nil, sendingAmount, 1000, 0, acc0.Sequence()+1)
 
 	// we use cache instead of execTxWithState so we can run the tx twice
-	exe := NewBatchCommitter(st, newBlockchain(testGenesisDoc).Tip, event.NewNoOpPublisher(), logger)
+	exe := NewBatchCommitter(st, newBlockchain(testGenesisDoc), event.NewNoOpPublisher(), logger)
 	signAndExecute(t, false, exe, testChainID, tx, privAccounts[0])
 
 	// if we do it again, we won't get an error, but the self-destruct
@@ -1555,7 +1549,8 @@ func TestSelfDestruct(t *testing.T) {
 	signAndExecute(t, false, exe, testChainID, tx, privAccounts[0])
 
 	// commit the block
-	exe.Commit(nil)
+	_, err = exe.Commit([]byte("Blocky McHash"), time.Now(), nil)
+	require.NoError(t, err)
 
 	// acc2 should receive the sent funds and the contracts balance
 	newAcc2 := getAccount(st, acc2.Address())
@@ -1585,12 +1580,12 @@ func signAndExecute(t *testing.T, shouldFail bool, exe BatchExecutor, chainID st
 }
 
 func execTxWithStateAndBlockchain(state *State, blockchain *bcm.Blockchain, txEnv *txs.Envelope) error {
-	exe := newExecutor("execTxWithStateAndBlockchainCache", true, state, blockchain.Tip,
+	exe := newExecutor("execTxWithStateAndBlockchainCache", true, state, blockchain,
 		event.NewNoOpPublisher(), logger)
 	if _, err := exe.Execute(txEnv); err != nil {
 		return err
 	} else {
-		_, err := exe.Commit(nil)
+		_, err = exe.Commit([]byte("Blocky McHash"), time.Now(), nil)
 		if err != nil {
 			return err
 		}
@@ -1617,7 +1612,7 @@ func execTxWithStateNewBlock(state *State, blockchain *bcm.Blockchain, txEnv *tx
 }
 
 func makeGenesisState(numAccounts int, randBalance bool, minBalance uint64, numValidators int, randBonded bool,
-	minBonded int64) (*State, []acm.AddressableSigner) {
+	minBonded int64) (*State, []*acm.PrivateAccount) {
 	testGenesisDoc, privAccounts, _ := deterministicGenesis.GenesisDoc(numAccounts, randBalance, minBalance,
 		numValidators, randBonded, minBonded)
 	s0, err := MakeGenesisState(dbm.NewMemDB(), testGenesisDoc)
@@ -1629,7 +1624,10 @@ func makeGenesisState(numAccounts int, randBalance bool, minBalance uint64, numV
 }
 
 func getAccount(accountGetter state.AccountGetter, address crypto.Address) *acm.MutableAccount {
-	acc, _ := state.GetMutableAccount(accountGetter, address)
+	acc, err := state.GetMutableAccount(accountGetter, address)
+	if err != nil {
+		panic(err)
+	}
 	return acc
 }
 
@@ -1660,9 +1658,9 @@ func execTxWaitAccountCall(t *testing.T, exe *testExecutor, txEnv *txs.Envelope,
 	if err != nil {
 		return nil, err
 	}
-	_, err = exe.Commit(nil)
+	_, err = exe.Commit([]byte("Blocky McHash"), time.Now(), nil)
 	require.NoError(t, err)
-	err = exe.blockchain.CommitBlock(time.Time{}, nil, nil)
+	_, _, err = exe.blockchain.CommitBlock(time.Time{}, nil, nil)
 	require.NoError(t, err)
 
 	for _, ev := range evs.TaggedEvents().Filter(qry) {
@@ -1729,7 +1727,7 @@ func testSNativeTx(t *testing.T, expectPass bool, batchCommitter *testExecutor,
 		acc.MutablePermissions().Base.Set(perm, true)
 		batchCommitter.stateCache.UpdateAccount(acc)
 	}
-	tx, _ := payload.NewPermissionsTx(batchCommitter.stateCache, users[0].PublicKey(), snativeArgs)
+	tx, _ := payload.NewPermsTx(batchCommitter.stateCache, users[0].PublicKey(), snativeArgs)
 	txEnv := txs.Enclose(testChainID, tx)
 	require.NoError(t, txEnv.Sign(users[0]))
 	_, err := batchCommitter.Execute(txEnv)
diff --git a/execution/simulated_call.go b/execution/simulated_call.go
index fc7156a41678477afcbfcdbda062376b648e7b71..0274a7393396017dc99c9d553dc9899acdb17ba1 100644
--- a/execution/simulated_call.go
+++ b/execution/simulated_call.go
@@ -6,18 +6,18 @@ import (
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/binary"
-	"github.com/hyperledger/burrow/blockchain"
 	"github.com/hyperledger/burrow/crypto"
+	"github.com/hyperledger/burrow/execution/contexts"
 	"github.com/hyperledger/burrow/execution/evm"
 	"github.com/hyperledger/burrow/execution/exec"
-	"github.com/hyperledger/burrow/execution/executors"
 	"github.com/hyperledger/burrow/logging"
 )
 
 // Run a contract's code on an isolated and unpersisted state
 // Cannot be used to create new contracts
-func CallSim(reader state.Reader, tip blockchain.TipInfo, fromAddress, address crypto.Address, data []byte,
+func CallSim(reader state.Reader, tip bcm.BlockchainInfo, fromAddress, address crypto.Address, data []byte,
 	logger *logging.Logger) (*exec.TxExecution, error) {
 
 	if evm.IsRegisteredNativeContract(address.Word256()) {
@@ -38,7 +38,7 @@ func CallSim(reader state.Reader, tip blockchain.TipInfo, fromAddress, address c
 
 // Run the given code on an isolated and unpersisted state
 // Cannot be used to create new contracts.
-func CallCodeSim(reader state.Reader, tip blockchain.TipInfo, fromAddress, address crypto.Address, code, data []byte,
+func CallCodeSim(reader state.Reader, tip bcm.BlockchainInfo, fromAddress, address crypto.Address, code, data []byte,
 	logger *logging.Logger) (_ *exec.TxExecution, err error) {
 	// This was being run against CheckTx cache, need to understand the reasoning
 	caller := acm.ConcreteAccount{Address: fromAddress}.MutableAccount()
@@ -65,11 +65,11 @@ func CallCodeSim(reader state.Reader, tip blockchain.TipInfo, fromAddress, addre
 	return txe, nil
 }
 
-func vmParams(tip blockchain.TipInfo) evm.Params {
+func vmParams(tip bcm.BlockchainInfo) evm.Params {
 	return evm.Params{
 		BlockHeight: tip.LastBlockHeight(),
 		BlockHash:   binary.LeftPadWord256(tip.LastBlockHash()),
 		BlockTime:   tip.LastBlockTime().Unix(),
-		GasLimit:    executors.GasLimit,
+		GasLimit:    contexts.GasLimit,
 	}
 }
diff --git a/execution/state.go b/execution/state.go
index 12460d7837b8e62c74da7a8b7314b3b99dc3db7b..b9717ef11d636297968783a758bdae0dac0455db 100644
--- a/execution/state.go
+++ b/execution/state.go
@@ -29,7 +29,7 @@ import (
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/permission"
 	"github.com/tendermint/iavl"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 const (
diff --git a/execution/state_test.go b/execution/state_test.go
index 7a821c340c16fa4a63c2830344c491ce008bde8f..4ed6a5c9ca0f3863ee94d9c5172bc6b07be47b3f 100644
--- a/execution/state_test.go
+++ b/execution/state_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/hyperledger/burrow/permission"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/libs/db"
 )
 
 func TestState_UpdateAccount(t *testing.T) {
diff --git a/execution/transactor.go b/execution/transactor.go
index 3a5cbd2743f598dc1421c153df1373dc9c177354..c1a12edc33179e7e73d7d3f1e69593e2d0e2f737 100644
--- a/execution/transactor.go
+++ b/execution/transactor.go
@@ -20,15 +20,16 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
-	"github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/consensus/tendermint/codes"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/event"
+	"github.com/hyperledger/burrow/execution/errors"
 	"github.com/hyperledger/burrow/execution/exec"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
 	"github.com/hyperledger/burrow/txs"
-	abciTypes "github.com/tendermint/abci/types"
+	abciTypes "github.com/tendermint/tendermint/abci/types"
 	tmTypes "github.com/tendermint/tendermint/types"
 )
 
@@ -46,7 +47,7 @@ const (
 // for a key it holds or is provided - it is down to the key-holder to manage the mutual information between transactions
 // concurrent within a new block window.
 type Transactor struct {
-	Tip             blockchain.TipInfo
+	Tip             bcm.BlockchainInfo
 	Subscribable    event.Subscribable
 	MempoolAccounts *Accounts
 	checkTxAsync    func(tx tmTypes.Tx, cb func(*abciTypes.Response)) error
@@ -54,7 +55,7 @@ type Transactor struct {
 	logger          *logging.Logger
 }
 
-func NewTransactor(tip blockchain.TipInfo, subscribable event.Subscribable, mempoolAccounts *Accounts,
+func NewTransactor(tip bcm.BlockchainInfo, subscribable event.Subscribable, mempoolAccounts *Accounts,
 	checkTxAsync func(tx tmTypes.Tx, cb func(*abciTypes.Response)) error, txEncoder txs.Encoder,
 	logger *logging.Logger) *Transactor {
 
@@ -76,7 +77,7 @@ func (trans *Transactor) BroadcastTxSync(ctx context.Context, txEnv *txs.Envelop
 		var unlock UnlockFunc
 		txEnv, unlock, err = trans.SignTxMempool(txEnv)
 		if err != nil {
-			return nil, fmt.Errorf("error signing trnasction: %v", err)
+			return nil, fmt.Errorf("error signing transaction: %v", err)
 		}
 		defer unlock()
 	}
@@ -215,8 +216,8 @@ func (trans *Transactor) CheckTxSyncRaw(txBytes []byte) (*txs.Receipt, error) {
 			}
 			return receipt, nil
 		default:
-			return nil, fmt.Errorf("error returned by Tendermint in BroadcastTxSync "+
-				"ABCI code: %v, ABCI log: %v", checkTxResponse.Code, checkTxResponse.Log)
+			return nil, errors.ErrorCodef(errors.Code(checkTxResponse.Code),
+				"error returned by Tendermint in BroadcastTxSync ABCI log: %v", checkTxResponse.Log)
 		}
 	}
 }
diff --git a/execution/transactor_test.go b/execution/transactor_test.go
index f8c6389544d60ff422c52f4293b1a890a0e6a26d..3e9f82e64ae447a2f4a40116ab100e1468b33624 100644
--- a/execution/transactor_test.go
+++ b/execution/transactor_test.go
@@ -17,11 +17,10 @@ package execution
 import (
 	"context"
 	"testing"
-	"time"
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
-	"github.com/hyperledger/burrow/blockchain"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/consensus/tendermint/codes"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/event"
@@ -32,13 +31,13 @@ import (
 	"github.com/hyperledger/burrow/txs/payload"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	abciTypes "github.com/tendermint/abci/types"
+	abciTypes "github.com/tendermint/tendermint/abci/types"
 	tmTypes "github.com/tendermint/tendermint/types"
 )
 
 func TestTransactor_BroadcastTxSync(t *testing.T) {
 	chainID := "TestChain"
-	tip := blockchain.NewTip(chainID, time.Time{}, []byte("genesis"))
+	bc := &bcm.Blockchain{}
 	logger := logging.NewNoopLogger()
 	evc := event.NewEmitter(logger)
 	txCodec := txs.NewAminoCodec()
@@ -53,7 +52,7 @@ func TestTransactor_BroadcastTxSync(t *testing.T) {
 	err := txEnv.Sign(privAccount)
 	require.NoError(t, err)
 	height := uint64(35)
-	trans := NewTransactor(tip, evc, NewAccounts(state.NewMemoryState(), mock.NewKeyClient(privAccount), 100),
+	trans := NewTransactor(bc, evc, NewAccounts(state.NewMemoryState(), mock.NewKeyClient(privAccount), 100),
 		func(tx tmTypes.Tx, cb func(*abciTypes.Response)) error {
 			txe := exec.NewTxExecution(txEnv)
 			txe.Height = height
diff --git a/forensics/block_explorer.go b/forensics/block_explorer.go
index a8d2c84c1d62b7779bd9f45cd37bfcb94a165122..ce2f492e2a01ee1b7af304b5e34f196575ed9991 100644
--- a/forensics/block_explorer.go
+++ b/forensics/block_explorer.go
@@ -6,7 +6,7 @@ import (
 	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/txs"
 	"github.com/tendermint/tendermint/blockchain"
-	"github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/libs/db"
 )
 
 type BlockExplorer struct {
diff --git a/genesis/deterministic_genesis.go b/genesis/deterministic_genesis.go
index 6c4e6a2090209fbe60a503ab5dbc0425d3b0fe18..d7da9d22b35a5234fd89179ce46a1255e7cf9bc8 100644
--- a/genesis/deterministic_genesis.go
+++ b/genesis/deterministic_genesis.go
@@ -22,10 +22,10 @@ func NewDeterministicGenesis(seed int64) *deterministicGenesis {
 }
 
 func (dg *deterministicGenesis) GenesisDoc(numAccounts int, randBalance bool, minBalance uint64, numValidators int,
-	randBonded bool, minBonded int64) (*GenesisDoc, []acm.AddressableSigner, []acm.AddressableSigner) {
+	randBonded bool, minBonded int64) (*GenesisDoc, []*acm.PrivateAccount, []*acm.PrivateAccount) {
 
 	accounts := make([]Account, numAccounts)
-	privAccounts := make([]acm.AddressableSigner, numAccounts)
+	privAccounts := make([]*acm.PrivateAccount, numAccounts)
 	defaultPerms := permission.DefaultAccountPermissions
 	for i := 0; i < numAccounts; i++ {
 		account, privAccount := dg.Account(randBalance, minBalance)
@@ -39,7 +39,7 @@ func (dg *deterministicGenesis) GenesisDoc(numAccounts int, randBalance bool, mi
 		privAccounts[i] = privAccount
 	}
 	validators := make([]Validator, numValidators)
-	privValidators := make([]acm.AddressableSigner, numValidators)
+	privValidators := make([]*acm.PrivateAccount, numValidators)
 	for i := 0; i < numValidators; i++ {
 		validator := acm.GeneratePrivateAccountFromSecret(fmt.Sprintf("val_%v", i))
 		privValidators[i] = validator
@@ -59,14 +59,14 @@ func (dg *deterministicGenesis) GenesisDoc(numAccounts int, randBalance bool, mi
 	}
 	return &GenesisDoc{
 		ChainName:   "TestChain",
-		GenesisTime: time.Unix(1506172037, 0),
+		GenesisTime: time.Unix(1506172037, 0).UTC(),
 		Accounts:    accounts,
 		Validators:  validators,
 	}, privAccounts, privValidators
 
 }
 
-func (dg *deterministicGenesis) Account(randBalance bool, minBalance uint64) (acm.Account, acm.AddressableSigner) {
+func (dg *deterministicGenesis) Account(randBalance bool, minBalance uint64) (acm.Account, *acm.PrivateAccount) {
 	privateKey, err := crypto.GeneratePrivateKey(dg.random, crypto.CurveTypeEd25519)
 	if err != nil {
 		panic(fmt.Errorf("could not generate private key deterministically"))
diff --git a/genesis/genesis.go b/genesis/genesis.go
index 6d91fd472710b9be879090eb63f55e74cf886c17..bfc30c2f995a07f8f0180ac472a71ef8926ca45f 100644
--- a/genesis/genesis.go
+++ b/genesis/genesis.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/validator"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/permission"
 )
@@ -66,10 +67,18 @@ type GenesisDoc struct {
 	Validators        []Validator
 }
 
-// JSONBytes returns the JSON (not-yet) canonical bytes for a given
-// GenesisDoc or an error.
+func (genesisDoc *GenesisDoc) JSONString() string {
+	bs, err := genesisDoc.JSONBytes()
+	if err != nil {
+		return fmt.Sprintf("error marshalling GenesisDoc: %v", err)
+	}
+	return string(bs)
+}
+
+// JSONBytes returns the JSON canonical bytes for a given GenesisDoc or an error.
 func (genesisDoc *GenesisDoc) JSONBytes() ([]byte, error) {
-	// TODO: write JSON in canonical order
+	// Just in case
+	genesisDoc.GenesisTime = genesisDoc.GenesisTime.UTC()
 	return json.MarshalIndent(genesisDoc, "", "\t")
 }
 
@@ -133,12 +142,13 @@ func (genesisAccount *Account) Clone() Account {
 //------------------------------------------------------------
 // Validator methods
 
-func (gv *Validator) Validator() acm.Validator {
-	return acm.ConcreteValidator{
-		Address:   gv.PublicKey.Address(),
+func (gv *Validator) Validator() validator.Validator {
+	address := gv.PublicKey.Address()
+	return validator.Validator{
+		Address:   &address,
 		PublicKey: gv.PublicKey,
 		Power:     uint64(gv.Amount),
-	}.Validator()
+	}
 }
 
 // Clone clones the genesis validator
@@ -175,7 +185,7 @@ func (basicAccount *BasicAccount) Clone() BasicAccount {
 // failure.  In particular MakeGenesisDocFromAccount uses the local time as a
 // timestamp for the GenesisDoc.
 func MakeGenesisDocFromAccounts(chainName string, salt []byte, genesisTime time.Time, accounts map[string]acm.Account,
-	validators map[string]acm.Validator) *GenesisDoc {
+	validators map[string]validator.Validator) *GenesisDoc {
 
 	// Establish deterministic order of accounts by name so we obtain identical GenesisDoc
 	// from identical input
@@ -202,15 +212,15 @@ func MakeGenesisDocFromAccounts(chainName string, salt []byte, genesisTime time.
 		genesisValidators = append(genesisValidators, Validator{
 			Name: name,
 			BasicAccount: BasicAccount{
-				Address:   val.Address(),
-				PublicKey: val.PublicKey(),
-				Amount:    val.Power(),
+				Address:   *val.Address,
+				PublicKey: val.PublicKey,
+				Amount:    val.Power,
 			},
 			// Simpler to just do this by convention
 			UnbondTo: []BasicAccount{
 				{
-					Amount:  val.Power(),
-					Address: val.Address(),
+					Amount:  val.Power,
+					Address: *val.Address,
 				},
 			},
 		})
diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go
index 640bf18d975c07102436609a01159c79c11ec18a..cfeb6f41e7732d43ee5b254b080a5a8d6f3aaf0a 100644
--- a/genesis/genesis_test.go
+++ b/genesis/genesis_test.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/validator"
 	"github.com/hyperledger/burrow/permission"
 	"github.com/stretchr/testify/assert"
 )
@@ -41,10 +42,11 @@ func accountMap(names ...string) map[string]acm.Account {
 	return accounts
 }
 
-func validatorMap(names ...string) map[string]acm.Validator {
-	validators := make(map[string]acm.Validator, len(names))
+func validatorMap(names ...string) map[string]validator.Validator {
+	validators := make(map[string]validator.Validator, len(names))
 	for _, name := range names {
-		validators[name] = acm.AsValidator(accountFromName(name))
+		acc := accountFromName(name)
+		validators[name] = validator.FromAccount(acc, acc.Balance())
 	}
 	return validators
 }
diff --git a/genesis/spec/genesis_spec.go b/genesis/spec/genesis_spec.go
index cb4fb76cfb3a9d8f4e1f4c30ad4b1ac29bcc4347..6990afa1273983ff9db44b1f9adc28434502e4b3 100644
--- a/genesis/spec/genesis_spec.go
+++ b/genesis/spec/genesis_spec.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/hyperledger/burrow/acm/balance"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/keys"
 	"github.com/hyperledger/burrow/permission"
@@ -68,20 +69,19 @@ func (gs *GenesisSpec) GenesisDoc(keyClient keys.KeyClient, generateNodeKeys boo
 
 	templateAccounts := gs.Accounts
 	if len(gs.Accounts) == 0 {
-		Power := DefaultPower
 		templateAccounts = append(templateAccounts, TemplateAccount{
-			Power: &Power,
+			Amounts: balance.New().Power(DefaultPower),
 		})
 	}
 
 	for i, templateAccount := range templateAccounts {
-		account, err := templateAccount.Account(keyClient, i)
+		account, err := templateAccount.GenesisAccount(keyClient, i)
 		if err != nil {
 			return nil, fmt.Errorf("could not create Account from template: %v", err)
 		}
 		genesisDoc.Accounts = append(genesisDoc.Accounts, *account)
-		// Create a corresponding validator
-		if templateAccount.Power != nil {
+
+		if templateAccount.Balances().HasPower() {
 			// Note this does not modify the input template
 			templateAccount.Address = &account.Address
 			validator, err := templateAccount.Validator(keyClient, i, generateNodeKeys)
diff --git a/genesis/spec/genesis_spec_test.go b/genesis/spec/genesis_spec_test.go
index 98665055d9ef3c1d0b4e6ed4f13bb386c2cce92b..1d52df39af6ae3157909913ba60847e2f325de65 100644
--- a/genesis/spec/genesis_spec_test.go
+++ b/genesis/spec/genesis_spec_test.go
@@ -3,6 +3,7 @@ package spec
 import (
 	"testing"
 
+	"github.com/hyperledger/burrow/acm/balance"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/keys/mock"
 	"github.com/hyperledger/burrow/permission"
@@ -17,7 +18,7 @@ func TestGenesisSpec_GenesisDoc(t *testing.T) {
 	amtBonded := uint64(100)
 	genesisSpec := GenesisSpec{
 		Accounts: []TemplateAccount{{
-			Power: &amtBonded,
+			Amounts: balance.New().Power(amtBonded),
 		}},
 	}
 
@@ -46,7 +47,7 @@ func TestGenesisSpec_GenesisDoc(t *testing.T) {
 				Address: &address,
 			},
 			{
-				Amount:      &amt,
+				Amounts:     balance.New().Native(amt),
 				Permissions: []string{permission.CreateAccountString, permission.CallString},
 			}},
 	}
diff --git a/genesis/spec/presets.go b/genesis/spec/presets.go
index 3025d98c0f4b9e9a3f995f8aea33c91f9e77f82f..223ae0f4fc89645e0e363dd7345e5ee8bdaf284f 100644
--- a/genesis/spec/presets.go
+++ b/genesis/spec/presets.go
@@ -3,6 +3,7 @@ package spec
 import (
 	"sort"
 
+	"github.com/hyperledger/burrow/acm/balance"
 	"github.com/hyperledger/burrow/permission"
 )
 
@@ -16,8 +17,7 @@ func FullAccount(name string) GenesisSpec {
 	return GenesisSpec{
 		Accounts: []TemplateAccount{{
 			Name:        name,
-			Amount:      &amount,
-			Power:       &Power,
+			Amounts:     balance.New().Native(amount).Power(Power),
 			Permissions: []string{permission.AllString},
 		},
 		},
@@ -30,7 +30,7 @@ func RootAccount(name string) GenesisSpec {
 	return GenesisSpec{
 		Accounts: []TemplateAccount{{
 			Name:        name,
-			Amount:      &amount,
+			Amounts:     balance.New().Native(amount),
 			Permissions: []string{permission.AllString},
 		},
 		},
@@ -42,8 +42,8 @@ func ParticipantAccount(name string) GenesisSpec {
 	amount := uint64(9999999999)
 	return GenesisSpec{
 		Accounts: []TemplateAccount{{
-			Name:   name,
-			Amount: &amount,
+			Name:    name,
+			Amounts: balance.New().Native(amount),
 			Permissions: []string{permission.SendString, permission.CallString, permission.NameString,
 				permission.HasRoleString},
 		}},
@@ -55,8 +55,8 @@ func DeveloperAccount(name string) GenesisSpec {
 	amount := uint64(9999999999)
 	return GenesisSpec{
 		Accounts: []TemplateAccount{{
-			Name:   name,
-			Amount: &amount,
+			Name:    name,
+			Amounts: balance.New().Native(amount),
 			Permissions: []string{permission.SendString, permission.CallString, permission.CreateContractString,
 				permission.CreateAccountString, permission.NameString, permission.HasRoleString,
 				permission.RemoveRoleString},
@@ -71,8 +71,7 @@ func ValidatorAccount(name string) GenesisSpec {
 	return GenesisSpec{
 		Accounts: []TemplateAccount{{
 			Name:        name,
-			Amount:      &amount,
-			Power:       &Power,
+			Amounts:     balance.New().Native(amount).Power(Power),
 			Permissions: []string{permission.BondString},
 		}},
 	}
@@ -146,8 +145,7 @@ func mergeAccount(base, override TemplateAccount) TemplateAccount {
 		base.Name = override.Name
 	}
 
-	base.Amount = addUint64Pointers(base.Amount, override.Amount)
-	base.Power = addUint64Pointers(base.Power, override.Power)
+	base.Amounts = base.Balances().Sum(override.Balances())
 
 	base.Permissions = mergeStrings(base.Permissions, override.Permissions)
 	base.Roles = mergeStrings(base.Roles, override.Roles)
diff --git a/genesis/spec/presets_test.go b/genesis/spec/presets_test.go
index 762e397bd3154b6368e4ae01fe5f7532632d81d5..1c11015f95ab5a5594e2e107e9edf1616243eba7 100644
--- a/genesis/spec/presets_test.go
+++ b/genesis/spec/presets_test.go
@@ -3,6 +3,7 @@ package spec
 import (
 	"testing"
 
+	"github.com/hyperledger/burrow/acm/balance"
 	"github.com/hyperledger/burrow/keys/mock"
 	"github.com/hyperledger/burrow/permission"
 	"github.com/stretchr/testify/assert"
@@ -43,7 +44,7 @@ func TestMergeGenesisSpecsRepeatedAccounts(t *testing.T) {
 		Accounts: []TemplateAccount{
 			{
 				Name:        name1,
-				Amount:      &amt1,
+				Amounts:     balance.New().Native(amt1),
 				Permissions: []string{permission.SendString, permission.CreateAccountString, permission.HasRoleString},
 				Roles:       []string{"fooer"},
 			},
@@ -53,7 +54,7 @@ func TestMergeGenesisSpecsRepeatedAccounts(t *testing.T) {
 		Accounts: []TemplateAccount{
 			{
 				Name:        name1,
-				Amount:      &amt2,
+				Amounts:     balance.New().Native(amt2),
 				Permissions: []string{permission.SendString, permission.CreateAccountString},
 				Roles:       []string{"barer"},
 			},
@@ -62,8 +63,8 @@ func TestMergeGenesisSpecsRepeatedAccounts(t *testing.T) {
 	gs3 := GenesisSpec{
 		Accounts: []TemplateAccount{
 			{
-				Name:   name3,
-				Amount: &amt3,
+				Name:    name3,
+				Amounts: balance.New().Native(amt3),
 			},
 		},
 	}
@@ -77,7 +78,7 @@ func TestMergeGenesisSpecsRepeatedAccounts(t *testing.T) {
 		Accounts: []TemplateAccount{
 			{
 				Name:        name1,
-				Amount:      &amtExpected,
+				Amounts:     balance.New().Native(amtExpected),
 				Permissions: []string{permission.CreateAccountString, permission.HasRoleString, permission.SendString},
 				Roles:       []string{"barer", "fooer"},
 			},
diff --git a/genesis/spec/spec.pb.go b/genesis/spec/spec.pb.go
index 9d568bfd4db830e6f7a3ab147d301289699a3b46..ffaaef3afa6b3f0a7b9aec1c28d93e5cfee62be9 100644
--- a/genesis/spec/spec.pb.go
+++ b/genesis/spec/spec.pb.go
@@ -18,6 +18,7 @@ import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import crypto "github.com/hyperledger/burrow/crypto"
+import balance "github.com/hyperledger/burrow/acm/balance"
 
 import github_com_hyperledger_burrow_crypto "github.com/hyperledger/burrow/crypto"
 
@@ -36,14 +37,13 @@ var _ = math.Inf
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type TemplateAccount struct {
-	Name        string                                        `protobuf:"bytes,1,opt,name=Name" json:"Name"`
-	Address     *github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,2,opt,name=Address,customtype=github.com/hyperledger/burrow/crypto.Address" json:",omitempty" toml:",omitempty"`
-	NodeAddress *github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,3,opt,name=NodeAddress,customtype=github.com/hyperledger/burrow/crypto.Address" json:",omitempty" toml:",omitempty"`
+	Name        string                                        `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
+	Address     *github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,2,opt,name=Address,proto3,customtype=github.com/hyperledger/burrow/crypto.Address" json:",omitempty" toml:",omitempty"`
+	NodeAddress *github_com_hyperledger_burrow_crypto.Address `protobuf:"bytes,3,opt,name=NodeAddress,proto3,customtype=github.com/hyperledger/burrow/crypto.Address" json:",omitempty" toml:",omitempty"`
 	PublicKey   *crypto.PublicKey                             `protobuf:"bytes,4,opt,name=PublicKey" json:",omitempty" toml:",omitempty"`
-	Amount      *uint64                                       `protobuf:"varint,5,opt,name=Amount" json:",omitempty" toml:",omitempty"`
-	Power       *uint64                                       `protobuf:"varint,6,opt,name=Power" json:",omitempty" toml:",omitempty"`
-	Permissions []string                                      `protobuf:"bytes,7,rep,name=Permissions" json:",omitempty" toml:",omitempty"`
-	Roles       []string                                      `protobuf:"bytes,8,rep,name=Roles" json:",omitempty" toml:",omitempty"`
+	Amounts     []balance.Balance                             `protobuf:"bytes,5,rep,name=Amounts" json:",omitempty" toml:",omitempty"`
+	Permissions []string                                      `protobuf:"bytes,6,rep,name=Permissions" json:",omitempty" toml:",omitempty"`
+	Roles       []string                                      `protobuf:"bytes,7,rep,name=Roles" json:",omitempty" toml:",omitempty"`
 }
 
 func (m *TemplateAccount) Reset()                    { *m = TemplateAccount{} }
@@ -65,18 +65,11 @@ func (m *TemplateAccount) GetPublicKey() *crypto.PublicKey {
 	return nil
 }
 
-func (m *TemplateAccount) GetAmount() uint64 {
-	if m != nil && m.Amount != nil {
-		return *m.Amount
-	}
-	return 0
-}
-
-func (m *TemplateAccount) GetPower() uint64 {
-	if m != nil && m.Power != nil {
-		return *m.Power
+func (m *TemplateAccount) GetAmounts() []balance.Balance {
+	if m != nil {
+		return m.Amounts
 	}
-	return 0
+	return nil
 }
 
 func (m *TemplateAccount) GetPermissions() []string {
@@ -115,10 +108,12 @@ func (m *TemplateAccount) MarshalTo(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0xa
-	i++
-	i = encodeVarintSpec(dAtA, i, uint64(len(m.Name)))
-	i += copy(dAtA[i:], m.Name)
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpec(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
 	if m.Address != nil {
 		dAtA[i] = 0x12
 		i++
@@ -149,19 +144,21 @@ func (m *TemplateAccount) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n3
 	}
-	if m.Amount != nil {
-		dAtA[i] = 0x28
-		i++
-		i = encodeVarintSpec(dAtA, i, uint64(*m.Amount))
-	}
-	if m.Power != nil {
-		dAtA[i] = 0x30
-		i++
-		i = encodeVarintSpec(dAtA, i, uint64(*m.Power))
+	if len(m.Amounts) > 0 {
+		for _, msg := range m.Amounts {
+			dAtA[i] = 0x2a
+			i++
+			i = encodeVarintSpec(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
 	}
 	if len(m.Permissions) > 0 {
 		for _, s := range m.Permissions {
-			dAtA[i] = 0x3a
+			dAtA[i] = 0x32
 			i++
 			l = len(s)
 			for l >= 1<<7 {
@@ -176,7 +173,7 @@ func (m *TemplateAccount) MarshalTo(dAtA []byte) (int, error) {
 	}
 	if len(m.Roles) > 0 {
 		for _, s := range m.Roles {
-			dAtA[i] = 0x42
+			dAtA[i] = 0x3a
 			i++
 			l = len(s)
 			for l >= 1<<7 {
@@ -205,7 +202,9 @@ func (m *TemplateAccount) Size() (n int) {
 	var l int
 	_ = l
 	l = len(m.Name)
-	n += 1 + l + sovSpec(uint64(l))
+	if l > 0 {
+		n += 1 + l + sovSpec(uint64(l))
+	}
 	if m.Address != nil {
 		l = m.Address.Size()
 		n += 1 + l + sovSpec(uint64(l))
@@ -218,11 +217,11 @@ func (m *TemplateAccount) Size() (n int) {
 		l = m.PublicKey.Size()
 		n += 1 + l + sovSpec(uint64(l))
 	}
-	if m.Amount != nil {
-		n += 1 + sovSpec(uint64(*m.Amount))
-	}
-	if m.Power != nil {
-		n += 1 + sovSpec(uint64(*m.Power))
+	if len(m.Amounts) > 0 {
+		for _, e := range m.Amounts {
+			l = e.Size()
+			n += 1 + l + sovSpec(uint64(l))
+		}
 	}
 	if len(m.Permissions) > 0 {
 		for _, s := range m.Permissions {
@@ -408,10 +407,10 @@ func (m *TemplateAccount) Unmarshal(dAtA []byte) error {
 			}
 			iNdEx = postIndex
 		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Amounts", wireType)
 			}
-			var v uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowSpec
@@ -421,33 +420,24 @@ func (m *TemplateAccount) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (uint64(b) & 0x7F) << shift
+				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			m.Amount = &v
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType)
+			if msglen < 0 {
+				return ErrInvalidLengthSpec
 			}
-			var v uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSpec
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
 			}
-			m.Power = &v
-		case 7:
+			m.Amounts = append(m.Amounts, balance.Balance{})
+			if err := m.Amounts[len(m.Amounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
 			}
@@ -476,7 +466,7 @@ func (m *TemplateAccount) Unmarshal(dAtA []byte) error {
 			}
 			m.Permissions = append(m.Permissions, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
-		case 8:
+		case 7:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
 			}
@@ -635,28 +625,28 @@ func init() { proto.RegisterFile("spec.proto", fileDescriptorSpec) }
 func init() { golang_proto.RegisterFile("spec.proto", fileDescriptorSpec) }
 
 var fileDescriptorSpec = []byte{
-	// 359 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0x3f, 0x6f, 0xe2, 0x30,
-	0x18, 0x87, 0xf1, 0x11, 0xe0, 0x30, 0x48, 0x27, 0x3c, 0x59, 0x0c, 0x49, 0xc4, 0x2d, 0xd1, 0x89,
-	0x23, 0xd2, 0x6d, 0x47, 0x97, 0x12, 0xa9, 0x53, 0x25, 0x84, 0x28, 0x53, 0x37, 0x92, 0xbc, 0x0d,
-	0x91, 0x62, 0x1c, 0xd9, 0x8e, 0x50, 0x3e, 0x4e, 0xbf, 0x49, 0x47, 0x46, 0xe6, 0x0e, 0x51, 0x05,
-	0x43, 0xa5, 0x8e, 0xfd, 0x04, 0x55, 0x42, 0x28, 0x4c, 0x55, 0x96, 0x6e, 0xef, 0x1f, 0x3d, 0xbf,
-	0xc7, 0xb2, 0x8d, 0xb1, 0x8c, 0xc1, 0x1b, 0xc5, 0x82, 0x2b, 0x4e, 0xb4, 0xbc, 0xee, 0xff, 0x0d,
-	0x42, 0xb5, 0x4a, 0xdc, 0x91, 0xc7, 0x99, 0x1d, 0xf0, 0x80, 0xdb, 0xc5, 0xd2, 0x4d, 0x1e, 0x8a,
-	0xae, 0x68, 0x8a, 0xea, 0x08, 0xf5, 0xbb, 0x9e, 0x48, 0x63, 0x55, 0x76, 0x83, 0x57, 0x0d, 0xff,
-	0x5a, 0x00, 0x8b, 0xa3, 0xa5, 0x82, 0x89, 0xe7, 0xf1, 0x64, 0xad, 0x08, 0xc5, 0xda, 0x74, 0xc9,
-	0x80, 0x22, 0x13, 0x59, 0x6d, 0x47, 0xdb, 0x66, 0x46, 0x6d, 0x5e, 0x4c, 0x08, 0xc3, 0xad, 0x89,
-	0xef, 0x0b, 0x90, 0x92, 0xfe, 0x30, 0x91, 0xd5, 0x75, 0xee, 0x9e, 0x33, 0x63, 0x78, 0xe1, 0x5f,
-	0xa5, 0x31, 0x88, 0x08, 0xfc, 0x00, 0x84, 0xed, 0x26, 0x42, 0xf0, 0x8d, 0x5d, 0xea, 0x4a, 0xee,
-	0x2d, 0x33, 0xf0, 0x90, 0xb3, 0x50, 0x01, 0x8b, 0x55, 0xfa, 0x9e, 0x19, 0x3d, 0xc5, 0x59, 0x34,
-	0x1e, 0x9c, 0x67, 0x83, 0xf9, 0xc9, 0x41, 0x12, 0xdc, 0x99, 0x72, 0x1f, 0x4e, 0xca, 0xfa, 0xf7,
-	0x29, 0x2f, 0x3d, 0x64, 0x81, 0xdb, 0xb3, 0xc4, 0x8d, 0x42, 0xef, 0x16, 0x52, 0xaa, 0x99, 0xc8,
-	0xea, 0xfc, 0xeb, 0x8d, 0xca, 0xcc, 0xcf, 0x85, 0xf3, 0xbb, 0x4a, 0xee, 0x39, 0x88, 0x5c, 0xe1,
-	0xe6, 0x84, 0xe5, 0xf7, 0x4b, 0x1b, 0x26, 0xb2, 0xb4, 0x6a, 0x7c, 0x89, 0x90, 0xff, 0xb8, 0x31,
-	0xe3, 0x1b, 0x10, 0xb4, 0x59, 0x9d, 0x3d, 0x12, 0xe4, 0x06, 0x77, 0x66, 0x20, 0x58, 0x28, 0x65,
-	0xc8, 0xd7, 0x92, 0xb6, 0xcc, 0xba, 0xd5, 0xae, 0x16, 0x70, 0xc9, 0xe5, 0x27, 0x98, 0xf3, 0x08,
-	0x24, 0xfd, 0x59, 0x3d, 0xe0, 0x48, 0x8c, 0xb5, 0xdd, 0xa3, 0x51, 0x73, 0xae, 0xb7, 0x7b, 0x1d,
-	0xed, 0xf6, 0x3a, 0x7a, 0xd9, 0xeb, 0xe8, 0xe9, 0xa0, 0xa3, 0xed, 0x41, 0x47, 0xf7, 0x7f, 0xbe,
-	0x7e, 0xc9, 0x00, 0xd6, 0x20, 0x43, 0x69, 0xe7, 0x1f, 0xfd, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4b,
-	0xcc, 0xe3, 0x66, 0xfb, 0x02, 0x00, 0x00,
+	// 367 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xbf, 0x6f, 0xda, 0x40,
+	0x14, 0xee, 0x15, 0x03, 0xe2, 0x4c, 0xd5, 0x72, 0x93, 0xc5, 0x60, 0x5b, 0x74, 0xa8, 0x55, 0x51,
+	0x5b, 0xa2, 0x53, 0x3b, 0x15, 0x4b, 0x9d, 0x22, 0x21, 0x04, 0x4c, 0xd9, 0xfc, 0xe3, 0xc5, 0x58,
+	0xf2, 0xf9, 0xac, 0xbb, 0xb3, 0x22, 0xff, 0x67, 0x19, 0x33, 0x32, 0x66, 0xce, 0x60, 0x45, 0xb0,
+	0x65, 0xcc, 0x5f, 0x10, 0x61, 0xe3, 0xc0, 0x14, 0x79, 0xc9, 0x74, 0xdf, 0x77, 0x4f, 0xdf, 0xf7,
+	0x3d, 0xbd, 0xf7, 0x30, 0x16, 0x19, 0x04, 0x76, 0xc6, 0x99, 0x64, 0x44, 0x39, 0xe2, 0xf1, 0xaf,
+	0x28, 0x96, 0xdb, 0xdc, 0xb7, 0x03, 0x46, 0x9d, 0x88, 0x45, 0xcc, 0xa9, 0x8a, 0x7e, 0x7e, 0x53,
+	0xb1, 0x8a, 0x54, 0xa8, 0x16, 0x8d, 0x87, 0x01, 0x2f, 0x32, 0xd9, 0xb0, 0x2f, 0xbe, 0x97, 0x78,
+	0x69, 0x00, 0x35, 0x9d, 0xdc, 0x29, 0xf8, 0xeb, 0x06, 0x68, 0x96, 0x78, 0x12, 0xe6, 0x41, 0xc0,
+	0xf2, 0x54, 0x12, 0x82, 0x95, 0x85, 0x47, 0x41, 0x43, 0x26, 0xb2, 0x06, 0xab, 0x0a, 0x13, 0x8a,
+	0xfb, 0xf3, 0x30, 0xe4, 0x20, 0x84, 0xf6, 0xd9, 0x44, 0xd6, 0xd0, 0x5d, 0x3f, 0x96, 0xc6, 0xf4,
+	0xa2, 0x91, 0x6d, 0x91, 0x01, 0x4f, 0x20, 0x8c, 0x80, 0x3b, 0x7e, 0xce, 0x39, 0xbb, 0x75, 0x4e,
+	0xb9, 0x27, 0xdd, 0x73, 0x69, 0xe0, 0x29, 0xa3, 0xb1, 0x04, 0x9a, 0xc9, 0xe2, 0xa5, 0x34, 0x46,
+	0x92, 0xd1, 0xe4, 0xef, 0xe4, 0xfc, 0x37, 0x59, 0x35, 0x19, 0x24, 0xc7, 0xea, 0x82, 0x85, 0xd0,
+	0x44, 0x76, 0x3e, 0x2e, 0xf2, 0x32, 0x87, 0x6c, 0xf0, 0x60, 0x99, 0xfb, 0x49, 0x1c, 0x5c, 0x41,
+	0xa1, 0x29, 0x26, 0xb2, 0xd4, 0xd9, 0xc8, 0x3e, 0x79, 0xbe, 0x15, 0xdc, 0xef, 0x6d, 0x7c, 0xcf,
+	0x46, 0x64, 0x8d, 0xfb, 0x73, 0x7a, 0x9c, 0xac, 0xd0, 0xba, 0x66, 0xc7, 0x52, 0x67, 0xdf, 0xec,
+	0x66, 0x09, 0x6e, 0xfd, 0xba, 0x3f, 0x76, 0xa5, 0xf1, 0xa9, 0xdd, 0x84, 0x6a, 0x27, 0xf2, 0x1f,
+	0xab, 0x4b, 0xe0, 0x34, 0x16, 0x22, 0x66, 0xa9, 0xd0, 0x7a, 0x66, 0xc7, 0x1a, 0xb4, 0xeb, 0xec,
+	0x52, 0x47, 0xfe, 0xe0, 0xee, 0x8a, 0x25, 0x20, 0xb4, 0x7e, 0x7b, 0x83, 0x5a, 0xe1, 0xfe, 0xdb,
+	0xed, 0x75, 0xf4, 0xb0, 0xd7, 0xd1, 0xd3, 0x5e, 0x47, 0xf7, 0x07, 0x1d, 0xed, 0x0e, 0x3a, 0xba,
+	0xfe, 0xf9, 0xfe, 0x82, 0x22, 0x48, 0x41, 0xc4, 0xc2, 0x39, 0x1e, 0xb2, 0xdf, 0xab, 0x6e, 0xf0,
+	0xf7, 0x6b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xc3, 0x66, 0xb6, 0xe3, 0x02, 0x00, 0x00,
 }
diff --git a/genesis/spec/template_account.go b/genesis/spec/template_account.go
index 1362ec117623dcd038f82460883ccecfb378bf15..152a8097f0ceb81f9eada960e7232bbe431a8b03 100644
--- a/genesis/spec/template_account.go
+++ b/genesis/spec/template_account.go
@@ -3,6 +3,7 @@ package spec
 import (
 	"fmt"
 
+	"github.com/hyperledger/burrow/acm/balance"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/keys"
@@ -24,11 +25,7 @@ func (ta TemplateAccount) Validator(keyClient keys.KeyClient, index int, generat
 		}
 		ta.NodeAddress = &address
 	}
-	if ta.Power == nil {
-		gv.Amount = DefaultPower
-	} else {
-		gv.Amount = *ta.Power
-	}
+	gv.Amount = ta.Balances().GetPower(DefaultPower)
 	if ta.Name == "" {
 		gv.Name = accountNameFromIndex(index)
 	} else {
@@ -55,18 +52,14 @@ func (ta TemplateAccount) AccountPermissions() (permission.AccountPermissions, e
 	}, nil
 }
 
-func (ta TemplateAccount) Account(keyClient keys.KeyClient, index int) (*genesis.Account, error) {
+func (ta TemplateAccount) GenesisAccount(keyClient keys.KeyClient, index int) (*genesis.Account, error) {
 	var err error
 	ga := new(genesis.Account)
 	ga.PublicKey, ga.Address, err = ta.RealisePubKeyAndAddress(keyClient)
 	if err != nil {
 		return nil, err
 	}
-	if ta.Amount == nil {
-		ga.Amount = DefaultAmount
-	} else {
-		ga.Amount = *ta.Amount
-	}
+	ga.Amount = ta.Balances().GetNative(DefaultAmount)
 	if ta.Name == "" {
 		ga.Name = accountNameFromIndex(index)
 	} else {
@@ -111,3 +104,7 @@ func (ta TemplateAccount) RealisePubKeyAndAddress(keyClient keys.KeyClient) (pub
 	}
 	return
 }
+
+func (ta TemplateAccount) Balances() balance.Balances {
+	return ta.Amounts
+}
diff --git a/governance/governance.go b/governance/governance.go
index b3e56f5b8307a645379c05d533e2eb64361700b7..5ea2e125092f4377563b199507c1781a68b257f3 100644
--- a/governance/governance.go
+++ b/governance/governance.go
@@ -2,6 +2,14 @@
 // validators, and network forks.
 package governance
 
+import (
+	"github.com/hyperledger/burrow/acm/balance"
+	"github.com/hyperledger/burrow/crypto"
+	"github.com/hyperledger/burrow/genesis/spec"
+	"github.com/hyperledger/burrow/permission"
+	"github.com/hyperledger/burrow/txs/payload"
+)
+
 // TODO:
 // - Set validator power
 // - Set account amount(s)
@@ -15,3 +23,33 @@ package governance
 // - Network administered proxies (i.e. instead of keys have password authentication for identities - allow calls to originate as if from address without key?)
 // Subject to:
 // - Less than 1/3 validator power change per block
+
+// Creates a GovTx that alters the validator power of id to the power passed
+func AlterPowerTx(inputAddress crypto.Address, id crypto.Addressable, power uint64) *payload.GovTx {
+	return AlterBalanceTx(inputAddress, id, balance.New().Power(power))
+}
+
+func AlterBalanceTx(inputAddress crypto.Address, id crypto.Addressable, bal balance.Balances) *payload.GovTx {
+	publicKey := id.PublicKey()
+	return UpdateAccountTx(inputAddress, &spec.TemplateAccount{
+		PublicKey: &publicKey,
+		Amounts:   bal,
+	})
+}
+
+func AlterPermissionsTx(inputAddress crypto.Address, id crypto.Addressable, perms permission.PermFlag) *payload.GovTx {
+	address := id.Address()
+	return UpdateAccountTx(inputAddress, &spec.TemplateAccount{
+		Address:     &address,
+		Permissions: permission.PermFlagToStringList(perms),
+	})
+}
+
+func UpdateAccountTx(inputAddress crypto.Address, updates ...*spec.TemplateAccount) *payload.GovTx {
+	return &payload.GovTx{
+		Inputs: []*payload.TxInput{{
+			Address: inputAddress,
+		}},
+		AccountUpdates: updates,
+	}
+}
diff --git a/governance/governance_test.go b/governance/governance_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..42d4d396fd4c01834c55bd1b08fcc5c7f5fa6e92
--- /dev/null
+++ b/governance/governance_test.go
@@ -0,0 +1,23 @@
+package governance
+
+import (
+	"testing"
+
+	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/execution/exec"
+	"github.com/hyperledger/burrow/txs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSerialise(t *testing.T) {
+	priv := acm.GeneratePrivateAccountFromSecret("hhelo")
+	tx := AlterPowerTx(priv.Address(), priv, 3242323)
+	txEnv := txs.Enclose("OOh", tx)
+
+	txe := exec.NewTxExecution(txEnv)
+	bs, err := txe.Marshal()
+	require.NoError(t, err)
+	txeOut := new(exec.TxExecution)
+	err = txeOut.Unmarshal(bs)
+	require.NoError(t, err)
+}
diff --git a/integration/core/kernel_test.go b/integration/core/kernel_test.go
index d4003e2127c916b97fd2bc9bf5feae91e6f7b928..3850fbc92d6119ebb44241abc5865819fc7643b6 100644
--- a/integration/core/kernel_test.go
+++ b/integration/core/kernel_test.go
@@ -5,73 +5,75 @@ package core
 import (
 	"context"
 	"fmt"
-	"os"
 	"testing"
 	"time"
 
 	"github.com/hyperledger/burrow/config"
-	"github.com/hyperledger/burrow/consensus/tendermint/validator"
+	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/core"
 	"github.com/hyperledger/burrow/event"
 	"github.com/hyperledger/burrow/execution/exec"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/integration"
+	"github.com/hyperledger/burrow/integration/rpctest"
 	"github.com/hyperledger/burrow/keys"
+	"github.com/hyperledger/burrow/keys/mock"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	tmConfig "github.com/tendermint/tendermint/config"
 	tmTypes "github.com/tendermint/tendermint/types"
 )
 
-const testDir = "./test_scratch/kernel_test"
+var genesisDoc, privateAccounts, privateValidators = genesis.NewDeterministicGenesis(123).GenesisDoc(1, true, 1000, 1, true, 1000)
 
 func TestBootThenShutdown(t *testing.T) {
-	os.RemoveAll(testDir)
-	os.MkdirAll(testDir, 0777)
-	os.Chdir(testDir)
-	tmConf := tmConfig.DefaultConfig()
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
 	//logger, _, _ := lifecycle.NewStdErrLogger()
 	logger := logging.NewNoopLogger()
-	genesisDoc, _, privateValidators := genesis.NewDeterministicGenesis(123).GenesisDoc(1, true, 1000, 1, true, 1000)
-	privValidator := validator.NewPrivValidatorMemory(privateValidators[0], privateValidators[0])
-	assert.NoError(t, bootWaitBlocksShutdown(privValidator, integration.NewTestConfig(genesisDoc), tmConf, logger, nil))
+	privValidator := tendermint.NewPrivValidatorMemory(privateValidators[0], privateValidators[0])
+	assert.NoError(t, bootWaitBlocksShutdown(t, privValidator, integration.NewTestConfig(genesisDoc), logger, nil))
 }
 
 func TestBootShutdownResume(t *testing.T) {
-	os.RemoveAll(testDir)
-	os.MkdirAll(testDir, 0777)
-	os.Chdir(testDir)
-	tmConf := tmConfig.DefaultConfig()
-	//logger, _, _ := lifecycle.NewStdErrLogger()
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	//logger, _ := lifecycle.NewStdErrLogger()
 	logger := logging.NewNoopLogger()
-	genesisDoc, _, privateValidators := genesis.NewDeterministicGenesis(123).GenesisDoc(1, true, 1000, 1, true, 1000)
-	privValidator := validator.NewPrivValidatorMemory(privateValidators[0], privateValidators[0])
+	privValidator := tendermint.NewPrivValidatorMemory(privateValidators[0], privateValidators[0])
 
+	testConfig := integration.NewTestConfig(genesisDoc)
 	i := uint64(0)
 	// asserts we get a consecutive run of blocks
 	blockChecker := func(block *exec.BlockExecution) bool {
-		assert.Equal(t, i+1, block.Height)
+		if i == 0 {
+			// We send some synchronous transactions so catch up to latest block
+			i = block.Height - 1
+		}
+		require.Equal(t, i+1, block.Height)
 		i++
 		// stop every third block
-		return i%3 != 0
+		if i%3 == 0 {
+			i = 0
+			return false
+		}
+		return true
 	}
-	testConfig := integration.NewTestConfig(genesisDoc)
 	// First run
-	require.NoError(t, bootWaitBlocksShutdown(privValidator, testConfig, tmConf, logger, blockChecker))
+	require.NoError(t, bootWaitBlocksShutdown(t, privValidator, testConfig, logger, blockChecker))
 	// Resume and check we pick up where we left off
-	require.NoError(t, bootWaitBlocksShutdown(privValidator, testConfig, tmConf, logger, blockChecker))
+	require.NoError(t, bootWaitBlocksShutdown(t, privValidator, testConfig, logger, blockChecker))
 	// Resuming with mismatched genesis should fail
 	genesisDoc.Salt = []byte("foo")
-	assert.Error(t, bootWaitBlocksShutdown(privValidator, testConfig, tmConf, logger, blockChecker))
+	assert.Error(t, bootWaitBlocksShutdown(t, privValidator, testConfig, logger, blockChecker))
 }
 
-func bootWaitBlocksShutdown(privValidator tmTypes.PrivValidator, testConfig *config.BurrowConfig,
-	tmConf *tmConfig.Config, logger *logging.Logger,
+func bootWaitBlocksShutdown(t testing.TB, privValidator tmTypes.PrivValidator, testConfig *config.BurrowConfig,
+	logger *logging.Logger,
 	blockChecker func(block *exec.BlockExecution) (cont bool)) error {
 
 	keyStore := keys.NewKeyStore(keys.DefaultKeysDir, false, logger)
-	keyClient := keys.NewLocalKeyClient(keyStore, logging.NewNoopLogger())
+	keyClient := mock.NewKeyClient(privateAccounts...)
 	kern, err := core.NewKernel(context.Background(), keyClient, privValidator,
 		testConfig.GenesisDoc,
 		testConfig.Tendermint.TendermintConfig(),
@@ -87,6 +89,13 @@ func bootWaitBlocksShutdown(privValidator tmTypes.PrivValidator, testConfig *con
 		return err
 	}
 
+	inputAddress := privateAccounts[0].Address()
+	tcli := rpctest.NewTransactClient(t, testConfig.RPC.GRPC.ListenAddress)
+	// Generate a few transactions
+	for i := 0; i < 3; i++ {
+		rpctest.CreateContract(t, tcli, inputAddress)
+	}
+
 	subID := event.GenSubID()
 	ch, err := kern.Emitter.Subscribe(context.Background(), subID, exec.QueryForBlockExecution(), 10)
 	if err != nil {
diff --git a/integration/governance/governance_test.go b/integration/governance/governance_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b188331c516d685c19ca9c7c82f61148caa0c90e
--- /dev/null
+++ b/integration/governance/governance_test.go
@@ -0,0 +1,172 @@
+// +build integration
+
+package governance
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"testing"
+
+	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/balance"
+	"github.com/hyperledger/burrow/acm/validator"
+	"github.com/hyperledger/burrow/crypto"
+	"github.com/hyperledger/burrow/execution/errors"
+	"github.com/hyperledger/burrow/execution/exec"
+	"github.com/hyperledger/burrow/governance"
+	"github.com/hyperledger/burrow/integration/rpctest"
+	"github.com/hyperledger/burrow/permission"
+	"github.com/hyperledger/burrow/rpc/rpcevents"
+	"github.com/hyperledger/burrow/rpc/rpcquery"
+	"github.com/hyperledger/burrow/rpc/rpctransact"
+	"github.com/hyperledger/burrow/txs/payload"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/rpc/core"
+)
+
+func TestAlterValidators(t *testing.T) {
+	inputAddress := privateAccounts[0].Address()
+	grpcAddress := testConfigs[0].RPC.GRPC.ListenAddress
+	tcli := rpctest.NewTransactClient(t, grpcAddress)
+	qcli := rpctest.NewQueryClient(t, grpcAddress)
+	ecli := rpctest.NewExecutionEventsClient(t, grpcAddress)
+
+	// Build a batch of validator alterations to make
+	vs := validator.NewTrimSet()
+	alterPower(vs, 3, 2131)
+	alterPower(vs, 2, 4561)
+	alterPower(vs, 5, 7831)
+	alterPower(vs, 8, 9931)
+
+	vs.Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		_, err := govSync(t, tcli, governance.AlterPowerTx(inputAddress, id, power.Uint64()))
+		require.NoError(t, err)
+		return
+	})
+
+	vsOut := getValidatorHistory(t, qcli)
+	// Include the genesis validator and compare the sets
+	alterPower(vs, 0, genesisDoc.Validators[0].Amount)
+	assertValidatorsEqual(t, vs, vsOut)
+
+	// Remove validator from chain
+	txe, err := govSync(t, tcli, governance.AlterPowerTx(inputAddress, account(3), 0))
+	// Mirror in our check set
+	alterPower(vs, 3, 0)
+	fmt.Println(txe.Events)
+	vsOut = getValidatorHistory(t, qcli)
+	assertValidatorsEqual(t, vs, vsOut)
+
+	waitNBlocks(t, ecli, 3)
+	height := int64(kernels[4].Blockchain.LastBlockHeight())
+	kernels[4].Node.ConfigureRPC()
+	tmVals, err := core.Validators(&height)
+	require.NoError(t, err)
+	vsOut = validator.NewTrimSet()
+
+	for _, v := range tmVals.Validators {
+		publicKey, err := crypto.PublicKeyFromTendermintPubKey(v.PubKey)
+		require.NoError(t, err)
+		vsOut.AlterPower(publicKey, big.NewInt(v.VotingPower))
+	}
+	assertValidatorsEqual(t, vs, vsOut)
+}
+
+func TestNoRootPermission(t *testing.T) {
+	grpcAddress := testConfigs[0].RPC.GRPC.ListenAddress
+	tcli := rpctest.NewTransactClient(t, grpcAddress)
+	// Account does not have Root permission
+	inputAddress := privateAccounts[4].Address()
+	_, err := govSync(t, tcli, governance.AlterPowerTx(inputAddress, account(5), 3433))
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), errors.ErrorCodePermissionDenied.Error())
+}
+
+func TestAlterAmount(t *testing.T) {
+	inputAddress := privateAccounts[0].Address()
+	grpcAddress := testConfigs[0].RPC.GRPC.ListenAddress
+	tcli := rpctest.NewTransactClient(t, grpcAddress)
+	qcli := rpctest.NewQueryClient(t, grpcAddress)
+	var amount uint64 = 18889
+	acc := account(5)
+	_, err := govSync(t, tcli, governance.AlterBalanceTx(inputAddress, acc, balance.New().Native(amount)))
+	require.NoError(t, err)
+	ca, err := qcli.GetAccount(context.Background(), &rpcquery.GetAccountParam{Address: acc.Address()})
+	require.NoError(t, err)
+	assert.Equal(t, amount, ca.Balance)
+}
+
+func TestAlterPermissions(t *testing.T) {
+	inputAddress := privateAccounts[0].Address()
+	grpcAddress := testConfigs[0].RPC.GRPC.ListenAddress
+	tcli := rpctest.NewTransactClient(t, grpcAddress)
+	qcli := rpctest.NewQueryClient(t, grpcAddress)
+	acc := account(5)
+	_, err := govSync(t, tcli, governance.AlterPermissionsTx(inputAddress, acc, permission.Send))
+	require.NoError(t, err)
+	ca, err := qcli.GetAccount(context.Background(), &rpcquery.GetAccountParam{Address: acc.Address()})
+	require.NoError(t, err)
+	assert.Equal(t, permission.AccountPermissions{
+		Base: permission.BasePermissions{
+			Perms:  permission.Send,
+			SetBit: permission.Send,
+		},
+	}, ca.Permissions)
+}
+
+func TestCreateAccount(t *testing.T) {
+	inputAddress := privateAccounts[0].Address()
+	grpcAddress := testConfigs[0].RPC.GRPC.ListenAddress
+	tcli := rpctest.NewTransactClient(t, grpcAddress)
+	qcli := rpctest.NewQueryClient(t, grpcAddress)
+	var amount uint64 = 18889
+	acc := acm.GeneratePrivateAccountFromSecret("we almost certainly don't exist")
+	_, err := govSync(t, tcli, governance.AlterBalanceTx(inputAddress, acc, balance.New().Native(amount)))
+	require.NoError(t, err)
+	ca, err := qcli.GetAccount(context.Background(), &rpcquery.GetAccountParam{Address: acc.Address()})
+	require.NoError(t, err)
+	assert.Equal(t, amount, ca.Balance)
+}
+
+func getValidatorHistory(t testing.TB, qcli rpcquery.QueryClient) *validator.Set {
+	history, err := qcli.GetValidatorSet(context.Background(), &rpcquery.GetValidatorSetParam{
+		IncludeHistory: true,
+	})
+	require.NoError(t, err)
+
+	// Include the genesis validator and compare the sets
+	return validator.UnpersistSet(history.Set)
+}
+
+func account(i int) *acm.PrivateAccount {
+	return rpctest.PrivateAccounts[i]
+}
+
+func govSync(t testing.TB, cli rpctransact.TransactClient, tx *payload.GovTx) (*exec.TxExecution, error) {
+	return cli.BroadcastTxSync(context.Background(), &rpctransact.TxEnvelopeParam{
+		Payload: tx.Any(),
+	})
+}
+
+func assertValidatorsEqual(t testing.TB, expected, actual *validator.Set) {
+	if !assert.True(t, expected.Equal(actual), "sets should be equal") {
+		fmt.Printf("Expected:\n%v\nActual:\n%v\n", expected, actual)
+	}
+}
+
+func alterPower(vs *validator.Set, i int, power uint64) {
+	vs.AlterPower(account(i), new(big.Int).SetUint64(power))
+}
+
+func waitNBlocks(t testing.TB, ecli rpcevents.ExecutionEventsClient, n int) {
+	stream, err := ecli.GetBlocks(context.Background(), &rpcevents.BlocksRequest{
+		BlockRange: rpcevents.NewBlockRange(rpcevents.LatestBound(), rpcevents.StreamBound()),
+	})
+	defer stream.CloseSend()
+	for i := 0; i < n; i++ {
+		require.NoError(t, err)
+		_, err = stream.Recv()
+	}
+}
diff --git a/integration/governance/main_test.go b/integration/governance/main_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..65e338e35545bc22764c38d3780dfb23fbb69ea7
--- /dev/null
+++ b/integration/governance/main_test.go
@@ -0,0 +1,79 @@
+// +build integration
+
+// Space above here matters
+// Copyright 2017 Monax Industries Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package governance
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/hyperledger/burrow/config"
+	"github.com/hyperledger/burrow/core"
+	"github.com/hyperledger/burrow/integration"
+	"github.com/hyperledger/burrow/logging/logconfig"
+	"github.com/hyperledger/burrow/permission"
+)
+
+var privateAccounts = integration.MakePrivateAccounts(10) // make keys
+var genesisDoc = integration.TestGenesisDoc(privateAccounts)
+var _ = integration.ClaimPorts()
+var testConfigs []*config.BurrowConfig
+var kernels []*core.Kernel
+
+// Needs to be in a _test.go file to be picked up
+func TestMain(m *testing.M) {
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	testConfigs = make([]*config.BurrowConfig, len(privateAccounts))
+	kernels = make([]*core.Kernel, len(privateAccounts))
+	genesisDoc.Accounts[4].Permissions = permission.NewAccountPermissions(permission.Send | permission.Call)
+	for i, acc := range privateAccounts {
+		testConfig := integration.NewTestConfig(genesisDoc)
+		testConfigs[i] = testConfig
+		kernels[i] = integration.TestKernel(acc, privateAccounts, testConfigs[i],
+			logconfig.New().Root(func(sink *logconfig.SinkConfig) *logconfig.SinkConfig {
+				return sink.SetTransform(logconfig.FilterTransform(logconfig.IncludeWhenAllMatch,
+					"total_validator")).SetOutput(logconfig.StdoutOutput())
+			}))
+		err := kernels[i].Boot()
+		if err != nil {
+			panic(err)
+		}
+		// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
+		defer func() {
+			kernels[i].Shutdown(context.Background())
+		}()
+	}
+	time.Sleep(1 * time.Second)
+	for i := 0; i < len(kernels); i++ {
+		for j := i; j < len(kernels); j++ {
+			if i != j {
+				connectKernels(kernels[i], kernels[j])
+			}
+		}
+	}
+	os.Exit(m.Run())
+}
+
+func connectKernels(k1, k2 *core.Kernel) {
+	err := k1.Node.Switch().DialPeerWithAddress(k2.Node.NodeInfo().NetAddress(), false)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/integration/integration.go b/integration/integration.go
index 388cae7d617b7b366195479fc85357c404616894..69267521513c2ac9b14ec1f9aebf9283f44d451d 100644
--- a/integration/integration.go
+++ b/integration/integration.go
@@ -25,27 +25,25 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/validator"
 	"github.com/hyperledger/burrow/config"
-	"github.com/hyperledger/burrow/consensus/tendermint/validator"
+	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/core"
 	"github.com/hyperledger/burrow/execution/evm/sha3"
 	"github.com/hyperledger/burrow/genesis"
 	"github.com/hyperledger/burrow/keys/mock"
 	"github.com/hyperledger/burrow/logging"
-	lConfig "github.com/hyperledger/burrow/logging/config"
 	"github.com/hyperledger/burrow/logging/lifecycle"
-	"github.com/hyperledger/burrow/logging/structure"
+	lConfig "github.com/hyperledger/burrow/logging/logconfig"
 	"github.com/hyperledger/burrow/permission"
 )
 
 const (
-	chainName = "Integration_Test_Chain"
+	ChainName = "Integration_Test_Chain"
 	testDir   = "./test_scratch/tm_test"
 )
 
 // Enable logger output during tests
-//var debugLogging = true
-var debugLogging = false
 
 // Starting point for assigning range of ports for tests
 // Start at unprivileged port (hoping for the best)
@@ -58,40 +56,25 @@ const startingPortBuckets = 1000
 // Mutable port to assign to next claimant
 var port = uint32(startingPort)
 
-// We use this to wrap tests
-func TestWrapper(privateAccounts []*acm.PrivateAccount, testConfig *config.BurrowConfig, runner func(*core.Kernel) int) int {
-	fmt.Println("Running with integration TestWrapper (core/integration/integration.go)...")
-
-	os.RemoveAll(testDir)
-	os.MkdirAll(testDir, 0777)
-	os.Chdir(testDir)
+var node uint64 = 0
 
-	os.MkdirAll("config", 0777)
+// We use this to wrap tests
+func TestKernel(validatorAccount *acm.PrivateAccount, keysAccounts []*acm.PrivateAccount,
+	testConfig *config.BurrowConfig, loggingConfig *lConfig.LoggingConfig) *core.Kernel {
+	fmt.Println("Creating integration test Kernel...")
 
 	logger := logging.NewNoopLogger()
-	if debugLogging {
+	if loggingConfig != nil {
 		var err error
 		// Change config as needed
-		logger, err = lifecycle.NewLoggerFromLoggingConfig(&lConfig.LoggingConfig{
-			ExcludeTrace: false,
-			RootSink: lConfig.Sink().
-				SetTransform(lConfig.FilterTransform(lConfig.IncludeWhenAnyMatches,
-					structure.ComponentKey, "Tendermint",
-					structure.ScopeKey, "executor.Execute\\(tx txs.Tx\\)",
-				)).
-				//AddSinks(config.Sink().SetTransform(config.FilterTransform(config.ExcludeWhenAnyMatches, "run_call", "false")).
-				AddSinks(lConfig.Sink().SetTransform(lConfig.PruneTransform("log_channel", "trace", "scope", "returns", "run_id", "args")).
-					AddSinks(lConfig.Sink().SetTransform(lConfig.SortTransform("tx_hash", "time", "message", "method")).
-						SetOutput(lConfig.StdoutOutput()))),
-		})
+		logger, err = lifecycle.NewLoggerFromLoggingConfig(loggingConfig)
 		if err != nil {
 			panic(err)
 		}
 	}
 
-	validatorAccount := privateAccounts[0]
-	privValidator := validator.NewPrivValidatorMemory(validatorAccount, validatorAccount)
-	keyClient := mock.NewKeyClient(privateAccounts...)
+	privValidator := tendermint.NewPrivValidatorMemory(validatorAccount, validatorAccount)
+	keyClient := mock.NewKeyClient(keysAccounts...)
 	kernel, err := core.NewKernel(context.Background(), keyClient, privValidator,
 		testConfig.GenesisDoc,
 		testConfig.Tendermint.TendermintConfig(),
@@ -101,17 +84,16 @@ func TestWrapper(privateAccounts []*acm.PrivateAccount, testConfig *config.Burro
 	if err != nil {
 		panic(err)
 	}
-	// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
-	defer func() {
-		kernel.Shutdown(context.Background())
-	}()
 
-	err = kernel.Boot()
-	if err != nil {
-		panic(err)
-	}
+	return kernel
+}
 
-	return runner(kernel)
+func EnterTestDirectory() (cleanup func()) {
+	os.RemoveAll(testDir)
+	os.MkdirAll(testDir, 0777)
+	os.Chdir(testDir)
+	os.MkdirAll("config", 0777)
+	return func() { os.RemoveAll(testDir) }
 }
 
 func TestGenesisDoc(addressables []*acm.PrivateAccount) *genesis.GenesisDoc {
@@ -126,9 +108,9 @@ func TestGenesisDoc(addressables []*acm.PrivateAccount) *genesis.GenesisDoc {
 	if err != nil {
 		panic("could not parse test genesis time")
 	}
-	return genesis.MakeGenesisDocFromAccounts(chainName, nil, genesisTime, accounts,
-		map[string]acm.Validator{
-			"genesis_validator": acm.AsValidator(accounts["user_0"]),
+	return genesis.MakeGenesisDocFromAccounts(ChainName, nil, genesisTime, accounts,
+		map[string]validator.Validator{
+			"genesis_validator": validator.FromAccount(accounts["user_0"], 1<<16),
 		})
 }
 
@@ -160,18 +142,28 @@ func GetPort() uint16 {
 	return uint16(atomic.AddUint32(&port, 1))
 }
 
+// Gets a name based on an incrementing counter for running multiple nodes
+func GetName() string {
+	nodeNumber := atomic.AddUint64(&node, 1)
+	return fmt.Sprintf("node_%03d", nodeNumber)
+}
+
 func GetLocalAddress() string {
-	return fmt.Sprintf("localhost:%v", GetPort())
+	return fmt.Sprintf("127.0.0.1:%v", GetPort())
 }
 
 func GetTCPLocalAddress() string {
-	return fmt.Sprintf("tcp://localhost:%v", GetPort())
+	return fmt.Sprintf("tcp://127.0.0.1:%v", GetPort())
 }
 
 func NewTestConfig(genesisDoc *genesis.GenesisDoc) *config.BurrowConfig {
+	name := GetName()
 	cnf := config.DefaultBurrowConfig()
 	cnf.GenesisDoc = genesisDoc
+	cnf.Tendermint.Moniker = name
+	cnf.Tendermint.TendermintRoot = fmt.Sprintf(".burrow_%s", name)
 	cnf.Tendermint.ListenAddress = GetTCPLocalAddress()
+	cnf.Tendermint.ExternalAddress = cnf.Tendermint.ListenAddress
 	cnf.RPC.GRPC.ListenAddress = GetLocalAddress()
 	cnf.RPC.Metrics.ListenAddress = GetTCPLocalAddress()
 	cnf.RPC.TM.ListenAddress = GetTCPLocalAddress()
diff --git a/integration/integration_test.go b/integration/integration_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..485a5bd912aefd3ed5c39ee9a40cecb86b7d8599
--- /dev/null
+++ b/integration/integration_test.go
@@ -0,0 +1,28 @@
+// Copyright 2017 Monax Industries Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNamesAndPorts(t *testing.T) {
+	assert.Equal(t, "node_001", GetName())
+	assert.Equal(t, "node_002", GetName())
+	assert.Equal(t, startingPort+1, GetPort())
+	assert.Equal(t, startingPort+2, GetPort())
+}
diff --git a/integration/rpcevents/main_test.go b/integration/rpcevents/main_test.go
index 8418fbd740cffae49f2d109dbfadad1a6f739f15..1acbd46f3e00d1b3d2e5e56e55decaea2bd00a95 100644
--- a/integration/rpcevents/main_test.go
+++ b/integration/rpcevents/main_test.go
@@ -18,6 +18,7 @@
 package rpcevents
 
 import (
+	"context"
 	"os"
 	"testing"
 
@@ -33,11 +34,16 @@ var kern *core.Kernel
 
 // Needs to be in a _test.go file to be picked up
 func TestMain(m *testing.M) {
-	returnValue := integration.TestWrapper(rpctest.PrivateAccounts, testConfig,
-		func(k *core.Kernel) int {
-			kern = k
-			return m.Run()
-		})
-
-	os.Exit(returnValue)
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	kern = integration.TestKernel(rpctest.PrivateAccounts[0], rpctest.PrivateAccounts, testConfig, nil)
+	err := kern.Boot()
+	if err != nil {
+		panic(err)
+	}
+	// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
+	defer func() {
+		kern.Shutdown(context.Background())
+	}()
+	os.Exit(m.Run())
 }
diff --git a/integration/rpcquery/main_test.go b/integration/rpcquery/main_test.go
index 423a22bd5b0791222a3733bdf84e54c44d8c9b62..d6e9694eabd32c32f4110bdba8fecdbb1f7941da 100644
--- a/integration/rpcquery/main_test.go
+++ b/integration/rpcquery/main_test.go
@@ -18,6 +18,7 @@
 package rpcquery
 
 import (
+	"context"
 	"os"
 	"testing"
 
@@ -32,11 +33,16 @@ var kern *core.Kernel
 
 // Needs to be in a _test.go file to be picked up
 func TestMain(m *testing.M) {
-	returnValue := integration.TestWrapper(rpctest.PrivateAccounts, testConfig,
-		func(k *core.Kernel) int {
-			kern = k
-			return m.Run()
-		})
-
-	os.Exit(returnValue)
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	kern = integration.TestKernel(rpctest.PrivateAccounts[0], rpctest.PrivateAccounts, testConfig, nil)
+	err := kern.Boot()
+	if err != nil {
+		panic(err)
+	}
+	// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
+	defer func() {
+		kern.Shutdown(context.Background())
+	}()
+	os.Exit(m.Run())
 }
diff --git a/integration/rpctransact/main_test.go b/integration/rpctransact/main_test.go
index bda89290e649e0046a1564904829e1072ffee103..5b0d3d05a02b0c8ee8d7b57f5573f8720f9822c6 100644
--- a/integration/rpctransact/main_test.go
+++ b/integration/rpctransact/main_test.go
@@ -18,6 +18,7 @@
 package rpctransact
 
 import (
+	"context"
 	"os"
 	"testing"
 
@@ -32,11 +33,16 @@ var kern *core.Kernel
 
 // Needs to be in a _test.go file to be picked up
 func TestMain(m *testing.M) {
-	returnValue := integration.TestWrapper(rpctest.PrivateAccounts, testConfig,
-		func(k *core.Kernel) int {
-			kern = k
-			return m.Run()
-		})
-
-	os.Exit(returnValue)
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	kern = integration.TestKernel(rpctest.PrivateAccounts[0], rpctest.PrivateAccounts, testConfig, nil)
+	err := kern.Boot()
+	if err != nil {
+		panic(err)
+	}
+	// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
+	defer func() {
+		kern.Shutdown(context.Background())
+	}()
+	os.Exit(m.Run())
 }
diff --git a/integration/rpctransact/transact_server_test.go b/integration/rpctransact/transact_server_test.go
index 6ab30c545161e70bafbab0efaf558776399e8a23..1132faf6a6b6fb54bb174e9f4908b995fe8b51f5 100644
--- a/integration/rpctransact/transact_server_test.go
+++ b/integration/rpctransact/transact_server_test.go
@@ -88,7 +88,7 @@ func TestBroadcastTxLocallySigned(t *testing.T) {
 
 func TestFormulateTx(t *testing.T) {
 	cli := rpctest.NewTransactClient(t, testConfig.RPC.GRPC.ListenAddress)
-	txEnv, err := cli.FormulateTx(context.Background(), &rpctransact.PayloadParam{
+	txEnv, err := cli.FormulateTx(context.Background(), &payload.Any{
 		CallTx: &payload.CallTx{
 			Input: &payload.TxInput{
 				Address: inputAddress,
@@ -102,7 +102,7 @@ func TestFormulateTx(t *testing.T) {
 	require.NoError(t, err)
 	// We should see the sign bytes embedded
 	if !assert.Contains(t, string(bs), fmt.Sprintf("{\"ChainID\":\"%s\",\"Type\":\"CallTx\","+
-		"\"Payload\":{\"Input\":{\"Address\":\"4A6DFB649EF0D50780998A686BD69AB175C08E26\",\"Amount\":230},"+
+		"\"Payload\":{\"Input\":{\"Address\":\"E80BB91C2F0F4C3C39FC53E89BF8416B219BE6E4\",\"Amount\":230},"+
 		"\"Data\":\"0203060403\"}}", rpctest.GenesisDoc.ChainID())) {
 		fmt.Println(string(bs))
 	}
diff --git a/integration/tm/main_test.go b/integration/tm/main_test.go
index cdee8c090e6460240c21842b944fc1116daf8f4d..44e733ab8e8bddb1cb1babd1e9932a70a17b4ad9 100644
--- a/integration/tm/main_test.go
+++ b/integration/tm/main_test.go
@@ -18,6 +18,7 @@
 package tm
 
 import (
+	"context"
 	"os"
 	"testing"
 
@@ -40,10 +41,16 @@ var clients = map[string]tmClient.RPCClient{
 
 // Needs to be in a _test.go file to be picked up
 func TestMain(m *testing.M) {
-	returnValue := integration.TestWrapper(rpctest.PrivateAccounts, testConfig, func(k *core.Kernel) int {
-		kern = k
-		return m.Run()
-	})
-
-	os.Exit(returnValue)
+	cleanup := integration.EnterTestDirectory()
+	defer cleanup()
+	kern = integration.TestKernel(rpctest.PrivateAccounts[0], rpctest.PrivateAccounts, testConfig, nil)
+	err := kern.Boot()
+	if err != nil {
+		panic(err)
+	}
+	// Sometimes better to not shutdown as logging errors on shutdown may obscure real issue
+	defer func() {
+		kern.Shutdown(context.Background())
+	}()
+	os.Exit(m.Run())
 }
diff --git a/keys/server.go b/keys/server.go
index 5613a7b529a90cea442250101c69dd0952eb147d..1ba471afb4d05ce96093ed3b26d90037c13b4f53 100644
--- a/keys/server.go
+++ b/keys/server.go
@@ -75,8 +75,8 @@ func (k *KeyStore) Export(ctx context.Context, in *ExportRequest) (*ExportRespon
 	return &ExportResponse{
 		Address:    addrB[:],
 		CurveType:  key.CurveType.String(),
-		Publickey:  key.PublicKey.PublicKey[:],
-		Privatekey: key.PrivateKey.PrivateKey[:],
+		Publickey:  key.PublicKey.Key[:],
+		Privatekey: key.PrivateKey.Key[:],
 	}, nil
 }
 
diff --git a/keys/server_test.go b/keys/server_test.go
index 68ed229b225c1d2a6c3ea5a2104e4b14950ff5fb..68485dcd58514ba0218a70cfd5f4ffa24ddbe0e8 100644
--- a/keys/server_test.go
+++ b/keys/server_test.go
@@ -14,7 +14,7 @@ import (
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/execution/evm/sha3"
 	"github.com/hyperledger/burrow/logging"
-	tm_crypto "github.com/tendermint/go-crypto"
+	tm_crypto "github.com/tendermint/tendermint/crypto"
 	"google.golang.org/grpc"
 )
 
diff --git a/logging/config/presets/instructions_test.go b/logging/config/presets/instructions_test.go
deleted file mode 100644
index c8398cc5c76183f09ae40f6480ff738272ae2df7..0000000000000000000000000000000000000000
--- a/logging/config/presets/instructions_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package presets
-
-import (
-	"testing"
-
-	"github.com/hyperledger/burrow/logging/config"
-	"github.com/hyperledger/burrow/logging/loggers"
-	"github.com/hyperledger/burrow/logging/structure"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestBuildSinkConfig(t *testing.T) {
-	builtSink, err := BuildSinkConfig(IncludeAny, Info, Stdout, Terminal, Down, Down, Info, Stdout, Up, Info, Stderr)
-	require.NoError(t, err)
-	expectedSink := config.Sink().
-		SetTransform(config.FilterTransform(config.IncludeWhenAnyMatches,
-			structure.ChannelKey, structure.InfoChannelName)).SetOutput(config.StdoutOutput().SetFormat(loggers.TerminalFormat)).AddSinks(
-		config.Sink().SetTransform(config.FilterTransform(config.NoFilterMode,
-			structure.ChannelKey, structure.InfoChannelName)).SetOutput(config.StderrOutput()).AddSinks(
-			config.Sink().SetTransform(config.FilterTransform(config.NoFilterMode,
-				structure.ChannelKey, structure.InfoChannelName)).SetOutput(config.StdoutOutput())))
-
-	//fmt.Println(config.JSONString(expectedSink), "\n", config.JSONString(builtSink))
-	assert.Equal(t, config.JSONString(expectedSink), config.JSONString(builtSink))
-}
-
-func TestMinimalPreset(t *testing.T) {
-	builtSink, err := BuildSinkConfig(Minimal, Stderr)
-	require.NoError(t, err)
-	expectedSink := config.Sink().
-		AddSinks(config.Sink().SetTransform(config.PruneTransform(structure.TraceKey, structure.RunId)).
-			AddSinks(config.Sink().SetTransform(config.FilterTransform(config.IncludeWhenAllMatch,
-				structure.ChannelKey, structure.InfoChannelName)).
-				AddSinks(config.Sink().SetTransform(config.FilterTransform(config.ExcludeWhenAnyMatches,
-					structure.ComponentKey, "Tendermint",
-					"module", "p2p",
-					"module", "mempool")).SetOutput(config.StderrOutput()))))
-	//fmt.Println(config.TOMLString(expectedSink), "\n", config.TOMLString(builtSink))
-	assert.Equal(t, config.TOMLString(expectedSink), config.TOMLString(builtSink))
-}
-
-func TestFileOutput(t *testing.T) {
-	path := "foo.log"
-	builtSink, err := BuildSinkConfig(Down, File, path, JSON)
-	require.NoError(t, err)
-	expectedSink := config.Sink().
-		AddSinks(config.Sink().SetOutput(config.FileOutput(path).SetFormat(loggers.JSONFormat)))
-	//fmt.Println(config.TOMLString(expectedSink), "\n", config.TOMLString(builtSink))
-	assert.Equal(t, config.TOMLString(expectedSink), config.TOMLString(builtSink))
-}
diff --git a/logging/config/sort.go b/logging/config/sort.go
deleted file mode 100644
index d912156bec00a9f00850ab2ec3a3baf1016c2141..0000000000000000000000000000000000000000
--- a/logging/config/sort.go
+++ /dev/null
@@ -1 +0,0 @@
-package config
diff --git a/logging/lifecycle/lifecycle.go b/logging/lifecycle/lifecycle.go
index f17622e694153483e4cccd99aa686687cb430982..911525bfeb6c8383cdb44bacac956a485dfee849 100644
--- a/logging/lifecycle/lifecycle.go
+++ b/logging/lifecycle/lifecycle.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/logging/adapters/stdlib"
-	"github.com/hyperledger/burrow/logging/config"
+	"github.com/hyperledger/burrow/logging/logconfig"
 	"github.com/hyperledger/burrow/logging/loggers"
 	"github.com/hyperledger/burrow/logging/structure"
 
@@ -37,7 +37,7 @@ import (
 // to set up their root logger and capture any other logging output.
 
 // Obtain a logger from a LoggingConfig
-func NewLoggerFromLoggingConfig(loggingConfig *config.LoggingConfig) (*logging.Logger, error) {
+func NewLoggerFromLoggingConfig(loggingConfig *logconfig.LoggingConfig) (*logging.Logger, error) {
 	if loggingConfig == nil {
 		return NewStdErrLogger()
 	} else {
@@ -61,7 +61,7 @@ func NewLoggerFromLoggingConfig(loggingConfig *config.LoggingConfig) (*logging.L
 
 // Hot swap logging config by replacing output loggers of passed InfoTraceLogger
 // with those built from loggingConfig
-func SwapOutputLoggersFromLoggingConfig(logger *logging.Logger, loggingConfig *config.LoggingConfig) (error, channels.Channel) {
+func SwapOutputLoggersFromLoggingConfig(logger *logging.Logger, loggingConfig *logconfig.LoggingConfig) (error, channels.Channel) {
 	outputLogger, errCh, err := loggerFromLoggingConfig(loggingConfig)
 	if err != nil {
 		return err, channels.NewDeadChannel()
@@ -99,7 +99,7 @@ func CaptureStdlibLogOutput(infoTraceLogger *logging.Logger) {
 }
 
 // Helpers
-func loggerFromLoggingConfig(loggingConfig *config.LoggingConfig) (kitlog.Logger, channels.Channel, error) {
+func loggerFromLoggingConfig(loggingConfig *logconfig.LoggingConfig) (kitlog.Logger, channels.Channel, error) {
 	outputLogger, _, err := loggingConfig.RootSink.BuildLogger()
 	if err != nil {
 		return nil, nil, err
diff --git a/logging/config/config.go b/logging/logconfig/config.go
similarity index 84%
rename from logging/config/config.go
rename to logging/logconfig/config.go
index 8616aeb7ac0aec1a18098c127f9bc6daf343d8af..d5ffd0e89929664949699fa0288ffd741cc6813c 100644
--- a/logging/config/config.go
+++ b/logging/logconfig/config.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"bytes"
@@ -39,6 +39,20 @@ func DefaultClientLoggingConfig() *LoggingConfig {
 	}
 }
 
+func New() *LoggingConfig {
+	return &LoggingConfig{}
+}
+
+func (lc *LoggingConfig) NoTrace() *LoggingConfig {
+	lc.ExcludeTrace = true
+	return lc
+}
+
+func (lc *LoggingConfig) Root(configure func(sink *SinkConfig) *SinkConfig) *LoggingConfig {
+	lc.RootSink = configure(Sink())
+	return lc
+}
+
 // Returns the TOML for a top-level logging config wrapped with [logging]
 func (lc *LoggingConfig) RootTOMLString() string {
 	return TOMLString(LoggingConfigWrapper{lc})
diff --git a/logging/config/config_test.go b/logging/logconfig/config_test.go
similarity index 97%
rename from logging/config/config_test.go
rename to logging/logconfig/config_test.go
index 089710aa7e9cf5ec24ffcfba1977df09443704d6..af0c3a5e738984c37a94fb2a60ea643939f98c90 100644
--- a/logging/config/config_test.go
+++ b/logging/logconfig/config_test.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"testing"
diff --git a/logging/config/filter.go b/logging/logconfig/filter.go
similarity index 99%
rename from logging/config/filter.go
rename to logging/logconfig/filter.go
index d4d4aede83b8751c76407d2f2234685b388315a1..a048ebc8c2c53e982224c70ee831169e1ba215db 100644
--- a/logging/config/filter.go
+++ b/logging/logconfig/filter.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"fmt"
diff --git a/logging/config/filter_test.go b/logging/logconfig/filter_test.go
similarity index 99%
rename from logging/config/filter_test.go
rename to logging/logconfig/filter_test.go
index 9602fa71e5a41bdaeef8c7c703f0b7ca4265a099..967d0c0d7c7ed4c5e30c98a4a2e9d13ed76048f3 100644
--- a/logging/config/filter_test.go
+++ b/logging/logconfig/filter_test.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"testing"
diff --git a/logging/config/presets/instructions.go b/logging/logconfig/presets/instructions.go
similarity index 66%
rename from logging/config/presets/instructions.go
rename to logging/logconfig/presets/instructions.go
index 91065d2faa9effbfc8bf796175b7fa7eb691deae..6bc4c4f7b42650c5c82dbce32baa7a21d9e497a3 100644
--- a/logging/config/presets/instructions.go
+++ b/logging/logconfig/presets/instructions.go
@@ -3,7 +3,7 @@ package presets
 import (
 	"fmt"
 
-	"github.com/hyperledger/burrow/logging/config"
+	"github.com/hyperledger/burrow/logging/logconfig"
 	"github.com/hyperledger/burrow/logging/loggers"
 	"github.com/hyperledger/burrow/logging/structure"
 )
@@ -20,7 +20,7 @@ type Instruction struct {
 	// by mutating the sink at the top of the stack and may move the cursor or by pushing child sinks
 	// to the stack. The builder may also return a modified ops slice whereby it may insert Instruction calls
 	// acting as a macro or consume ops as arguments.
-	builder func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error)
+	builder func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error)
 }
 
 func (i Instruction) Name() string {
@@ -48,26 +48,26 @@ var instructions = []Instruction{
 	{
 		name: Up,
 		desc: "Ascend the sink tree by travelling up the stack to the previous sink recorded on the stack",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			return pop(stack), nil
 		},
 	},
 	{
 		name: Down,
 		desc: "Descend the sink tree by inserting a sink as a child to the current sink and adding it to the stack",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
-			return push(stack, config.Sink()), nil
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
+			return push(stack, logconfig.Sink()), nil
 		},
 	},
 	{
 		name: Minimal,
 		desc: "A generally less chatty log output, follow with output options",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			return push(stack,
-				config.Sink().SetTransform(config.PruneTransform(structure.TraceKey, structure.RunId)),
-				config.Sink().SetTransform(config.FilterTransform(config.IncludeWhenAllMatch,
+				logconfig.Sink().SetTransform(logconfig.PruneTransform(structure.TraceKey, structure.RunId)),
+				logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.IncludeWhenAllMatch,
 					structure.ChannelKey, structure.InfoChannelName)),
-				config.Sink().SetTransform(config.FilterTransform(config.ExcludeWhenAnyMatches,
+				logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.ExcludeWhenAnyMatches,
 					structure.ComponentKey, "Tendermint",
 					"module", "p2p",
 					"module", "mempool"))), nil
@@ -76,17 +76,17 @@ var instructions = []Instruction{
 	{
 		name: IncludeAny,
 		desc: "Establish an 'include when any predicate matches' filter transform at this this sink",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureFilter(sink)
-			sink.Transform.FilterConfig.FilterMode = config.IncludeWhenAnyMatches
+			sink.Transform.FilterConfig.FilterMode = logconfig.IncludeWhenAnyMatches
 			return stack, nil
 		},
 	},
 	{
 		name: Info,
 		desc: "Add a filter predicate to match the Info logging channel",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureFilter(sink)
 			sink.Transform.FilterConfig.AddPredicate(structure.ChannelKey, structure.InfoChannelName)
@@ -96,27 +96,27 @@ var instructions = []Instruction{
 	{
 		name: Stdout,
 		desc: "Use Stdout output for this sink",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureOutput(sink)
-			sink.Output.OutputType = config.Stdout
+			sink.Output.OutputType = logconfig.Stdout
 			return stack, nil
 		},
 	},
 	{
 		name: Stderr,
 		desc: "Use Stderr output for this sink",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureOutput(sink)
-			sink.Output.OutputType = config.Stderr
+			sink.Output.OutputType = logconfig.Stderr
 			return stack, nil
 		},
 	},
 	{
 		name: Terminal,
 		desc: "Use the the terminal output format for this sink",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureOutput(sink)
 			sink.Output.Format = loggers.TerminalFormat
@@ -126,7 +126,7 @@ var instructions = []Instruction{
 	{
 		name: JSON,
 		desc: "Use the the terminal output format for this sink",
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureOutput(sink)
 			sink.Output.Format = loggers.JSONFormat
@@ -137,11 +137,11 @@ var instructions = []Instruction{
 		name:  File,
 		desc:  "Use the the terminal output format for this sink",
 		nargs: 1,
-		builder: func(stack []*config.SinkConfig, args []string) ([]*config.SinkConfig, error) {
+		builder: func(stack []*logconfig.SinkConfig, args []string) ([]*logconfig.SinkConfig, error) {
 			sink := peek(stack)
 			ensureOutput(sink)
-			sink.Output.OutputType = config.File
-			sink.Output.FileConfig = &config.FileConfig{
+			sink.Output.OutputType = logconfig.File
+			sink.Output.FileConfig = &logconfig.FileConfig{
 				Path: args[0],
 			}
 			return stack, nil
@@ -172,8 +172,8 @@ func Describe(name string) string {
 	return preset.desc
 }
 
-func BuildSinkConfig(ops ...string) (*config.SinkConfig, error) {
-	stack := []*config.SinkConfig{config.Sink()}
+func BuildSinkConfig(ops ...string) (*logconfig.SinkConfig, error) {
+	stack := []*logconfig.SinkConfig{logconfig.Sink()}
 	var err error
 	pos := 0
 	for len(ops) > 0 {
@@ -199,24 +199,24 @@ func BuildSinkConfig(ops ...string) (*config.SinkConfig, error) {
 	return stack[0], nil
 }
 
-func ensureFilter(sinkConfig *config.SinkConfig) {
+func ensureFilter(sinkConfig *logconfig.SinkConfig) {
 	if sinkConfig.Transform == nil {
-		sinkConfig.Transform = &config.TransformConfig{}
+		sinkConfig.Transform = &logconfig.TransformConfig{}
 	}
 	if sinkConfig.Transform.FilterConfig == nil {
-		sinkConfig.Transform.FilterConfig = &config.FilterConfig{}
+		sinkConfig.Transform.FilterConfig = &logconfig.FilterConfig{}
 	}
-	sinkConfig.Transform.TransformType = config.Filter
+	sinkConfig.Transform.TransformType = logconfig.Filter
 }
 
-func ensureOutput(sinkConfig *config.SinkConfig) {
+func ensureOutput(sinkConfig *logconfig.SinkConfig) {
 	if sinkConfig.Output == nil {
-		sinkConfig.Output = &config.OutputConfig{}
+		sinkConfig.Output = &logconfig.OutputConfig{}
 	}
 }
 
 // Push a path sequence of sinks onto the stack
-func push(stack []*config.SinkConfig, sinkConfigs ...*config.SinkConfig) []*config.SinkConfig {
+func push(stack []*logconfig.SinkConfig, sinkConfigs ...*logconfig.SinkConfig) []*logconfig.SinkConfig {
 	for _, sinkConfig := range sinkConfigs {
 		peek(stack).AddSinks(sinkConfig)
 		stack = append(stack, sinkConfig)
@@ -224,10 +224,10 @@ func push(stack []*config.SinkConfig, sinkConfigs ...*config.SinkConfig) []*conf
 	return stack
 }
 
-func pop(stack []*config.SinkConfig) []*config.SinkConfig {
+func pop(stack []*logconfig.SinkConfig) []*logconfig.SinkConfig {
 	return stack[:len(stack)-1]
 }
 
-func peek(stack []*config.SinkConfig) *config.SinkConfig {
+func peek(stack []*logconfig.SinkConfig) *logconfig.SinkConfig {
 	return stack[len(stack)-1]
 }
diff --git a/logging/logconfig/presets/instructions_test.go b/logging/logconfig/presets/instructions_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7cd1adb123fa83c8c0897c6d967a7d88dfe02a9c
--- /dev/null
+++ b/logging/logconfig/presets/instructions_test.go
@@ -0,0 +1,51 @@
+package presets
+
+import (
+	"testing"
+
+	"github.com/hyperledger/burrow/logging/logconfig"
+	"github.com/hyperledger/burrow/logging/loggers"
+	"github.com/hyperledger/burrow/logging/structure"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBuildSinkConfig(t *testing.T) {
+	builtSink, err := BuildSinkConfig(IncludeAny, Info, Stdout, Terminal, Down, Down, Info, Stdout, Up, Info, Stderr)
+	require.NoError(t, err)
+	expectedSink := logconfig.Sink().
+		SetTransform(logconfig.FilterTransform(logconfig.IncludeWhenAnyMatches,
+			structure.ChannelKey, structure.InfoChannelName)).SetOutput(logconfig.StdoutOutput().SetFormat(loggers.TerminalFormat)).AddSinks(
+		logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.NoFilterMode,
+			structure.ChannelKey, structure.InfoChannelName)).SetOutput(logconfig.StderrOutput()).AddSinks(
+			logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.NoFilterMode,
+				structure.ChannelKey, structure.InfoChannelName)).SetOutput(logconfig.StdoutOutput())))
+
+	//fmt.Println(config.JSONString(expectedSink), "\n", config.JSONString(builtSink))
+	assert.Equal(t, logconfig.JSONString(expectedSink), logconfig.JSONString(builtSink))
+}
+
+func TestMinimalPreset(t *testing.T) {
+	builtSink, err := BuildSinkConfig(Minimal, Stderr)
+	require.NoError(t, err)
+	expectedSink := logconfig.Sink().
+		AddSinks(logconfig.Sink().SetTransform(logconfig.PruneTransform(structure.TraceKey, structure.RunId)).
+			AddSinks(logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.IncludeWhenAllMatch,
+				structure.ChannelKey, structure.InfoChannelName)).
+				AddSinks(logconfig.Sink().SetTransform(logconfig.FilterTransform(logconfig.ExcludeWhenAnyMatches,
+					structure.ComponentKey, "Tendermint",
+					"module", "p2p",
+					"module", "mempool")).SetOutput(logconfig.StderrOutput()))))
+	//fmt.Println(config.TOMLString(expectedSink), "\n", config.TOMLString(builtSink))
+	assert.Equal(t, logconfig.TOMLString(expectedSink), logconfig.TOMLString(builtSink))
+}
+
+func TestFileOutput(t *testing.T) {
+	path := "foo.log"
+	builtSink, err := BuildSinkConfig(Down, File, path, JSON)
+	require.NoError(t, err)
+	expectedSink := logconfig.Sink().
+		AddSinks(logconfig.Sink().SetOutput(logconfig.FileOutput(path).SetFormat(loggers.JSONFormat)))
+	//fmt.Println(config.TOMLString(expectedSink), "\n", config.TOMLString(builtSink))
+	assert.Equal(t, logconfig.TOMLString(expectedSink), logconfig.TOMLString(builtSink))
+}
diff --git a/logging/config/sinks.go b/logging/logconfig/sinks.go
similarity index 97%
rename from logging/config/sinks.go
rename to logging/logconfig/sinks.go
index aa30a69e1e07172d6e9751b7a21cd8515ded60ed..e323fdabd304c497c29aa8514f2e47a74416592d 100644
--- a/logging/config/sinks.go
+++ b/logging/logconfig/sinks.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"fmt"
@@ -236,14 +236,17 @@ func PruneTransform(keys ...string) *TransformConfig {
 	}
 }
 
-func FilterTransform(fmode filterMode, keyvalueRegexes ...string) *TransformConfig {
-	length := len(keyvalueRegexes) / 2
+func FilterTransform(fmode filterMode, keyValueRegexes ...string) *TransformConfig {
+	if len(keyValueRegexes)%2 == 1 {
+		keyValueRegexes = append(keyValueRegexes, "")
+	}
+	length := len(keyValueRegexes) / 2
 	predicates := make([]*KeyValuePredicateConfig, length)
 	for i := 0; i < length; i++ {
 		kv := i * 2
 		predicates[i] = &KeyValuePredicateConfig{
-			KeyRegex:   keyvalueRegexes[kv],
-			ValueRegex: keyvalueRegexes[kv+1],
+			KeyRegex:   keyValueRegexes[kv],
+			ValueRegex: keyValueRegexes[kv+1],
 		}
 	}
 	return &TransformConfig{
diff --git a/logging/config/sinks_test.go b/logging/logconfig/sinks_test.go
similarity index 99%
rename from logging/config/sinks_test.go
rename to logging/logconfig/sinks_test.go
index ba722a9d09341b5ab7be85ab9467eace0fbda70d..8eba26284d62823af676d3cf5168fb084c75bc02 100644
--- a/logging/config/sinks_test.go
+++ b/logging/logconfig/sinks_test.go
@@ -1,4 +1,4 @@
-package config
+package logconfig
 
 import (
 	"encoding/json"
diff --git a/logging/logconfig/sort.go b/logging/logconfig/sort.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b6aa674123b5711f8e44b9712a036b1bba59bde
--- /dev/null
+++ b/logging/logconfig/sort.go
@@ -0,0 +1 @@
+package logconfig
diff --git a/logging/loggers/burrow_format_logger.go b/logging/loggers/burrow_format_logger.go
index f32d0933de7c18fd8abe6d640f529460158253a1..1a8070bf39147e0a9b660b3500e4589e777c96ff 100644
--- a/logging/loggers/burrow_format_logger.go
+++ b/logging/loggers/burrow_format_logger.go
@@ -20,6 +20,7 @@ import (
 
 	kitlog "github.com/go-kit/kit/log"
 	"github.com/hyperledger/burrow/logging/structure"
+	"github.com/tmthrgd/go-hex"
 )
 
 // Logger that implements some formatting conventions for burrow and burrow-client
@@ -48,7 +49,7 @@ func (bfl *burrowFormatLogger) Log(keyvals ...interface{}) error {
 			case fmt.Stringer:
 				value = v.String()
 			case []byte:
-				value = fmt.Sprintf("%X", v)
+				value = hex.EncodeUpperToString(v)
 			case time.Time:
 				value = v.Format(time.RFC3339Nano)
 			}
diff --git a/permission/account_permissions.go b/permission/account_permissions.go
index 6de0a44312c892bfbe1639caeba610ddb3ba0727..fdc3042730a8e6c736f696780e84637fdd0747cf 100644
--- a/permission/account_permissions.go
+++ b/permission/account_permissions.go
@@ -2,6 +2,19 @@ package permission
 
 import "github.com/hyperledger/burrow/binary"
 
+func NewAccountPermissions(pss ...PermFlag) AccountPermissions {
+	var perms PermFlag
+	for _, ps := range pss {
+		perms |= ps
+	}
+	return AccountPermissions{
+		Base: BasePermissions{
+			Perms:  perms,
+			SetBit: perms,
+		},
+	}
+}
+
 // Returns true if the role is found
 func (ap AccountPermissions) HasRole(role string) bool {
 	role = string(binary.RightPadBytes([]byte(role), 32))
diff --git a/permission/perm_flag.go b/permission/perm_flag.go
index 617aa561fcc425fdff99ffc3e5d84bf0f3fb79c3..2ddece98d2f22553646764e01a7b7f8094abbcc1 100644
--- a/permission/perm_flag.go
+++ b/permission/perm_flag.go
@@ -76,6 +76,11 @@ const (
 // A particular permission
 type PermFlag uint64
 
+// Checks if a permission flag is valid (a known base chain or snative permission)
+func (pf PermFlag) IsValid() bool {
+	return pf <= AllPermFlags
+}
+
 // Returns the string name of a single bit non-composite PermFlag, or otherwise UnknownString
 // See BasePermissionsToStringList to generate a string representation of a composite PermFlag
 func (pf PermFlag) String() string {
diff --git a/permission/util.go b/permission/util.go
index 6f4ba1ebe412695f6e8bb79b265999414f4bd037..0436f93902c2439ce97cee0c44d959a3038b73d0 100644
--- a/permission/util.go
+++ b/permission/util.go
@@ -15,7 +15,6 @@
 package permission
 
 import (
-	"fmt"
 	"strings"
 )
 
@@ -83,40 +82,28 @@ func PermFlagFromStringList(permissions []string) (PermFlag, error) {
 
 // Builds a list of set permissions from a BasePermission by creating a list of permissions strings
 // from the resultant permissions of basePermissions
-func BasePermissionsToStringList(basePermissions BasePermissions) ([]string, error) {
+func BasePermissionsToStringList(basePermissions BasePermissions) []string {
 	return PermFlagToStringList(basePermissions.ResultantPerms())
 }
 
 // Creates a list of individual permission flag strings from a possibly composite PermFlag
 // by projecting out each bit and adding its permission string if it is set
-func PermFlagToStringList(permFlag PermFlag) ([]string, error) {
+func PermFlagToStringList(permFlag PermFlag) []string {
 	permStrings := make([]string, 0, NumPermissions)
-	if permFlag > AllPermFlags {
-		return nil, fmt.Errorf("resultant permission 0b%b is invalid: has permission flag set above top flag 0b%b",
-			permFlag, TopPermFlag)
-	}
 	for i := uint(0); i < NumPermissions; i++ {
 		permFlag := permFlag & (1 << i)
 		if permFlag > 0 {
 			permStrings = append(permStrings, permFlag.String())
 		}
 	}
-	return permStrings, nil
+	return permStrings
 }
 
 // Generates a human readable string from the resultant permissions of basePermission
 func BasePermissionsString(basePermissions BasePermissions) string {
-	permStrings, err := BasePermissionsToStringList(basePermissions)
-	if err != nil {
-		return UnknownString
-	}
-	return strings.Join(permStrings, " | ")
+	return strings.Join(BasePermissionsToStringList(basePermissions), " | ")
 }
 
 func String(permFlag PermFlag) string {
-	permStrings, err := PermFlagToStringList(permFlag)
-	if err != nil {
-		return UnknownString
-	}
-	return strings.Join(permStrings, " | ")
+	return strings.Join(PermFlagToStringList(permFlag), " | ")
 }
diff --git a/permission/util_test.go b/permission/util_test.go
index 1055361206e1a36745c7611439cf87f9a049571e..86f0ed6259c6885e9564dc4f8cd77c604142449f 100644
--- a/permission/util_test.go
+++ b/permission/util_test.go
@@ -25,17 +25,15 @@ func TestBasePermissionsFromStringList(t *testing.T) {
 }
 
 func TestBasePermissionsToStringList(t *testing.T) {
-	permStrings, err := BasePermissionsToStringList(allSetBasePermission(Root | HasRole | SetBase | Call))
-	require.NoError(t, err)
+	permStrings := BasePermissionsToStringList(allSetBasePermission(Root | HasRole | SetBase | Call))
 	assert.Equal(t, []string{"root", "call", "setBase", "hasRole"}, permStrings)
 
-	permStrings, err = BasePermissionsToStringList(allSetBasePermission(AllPermFlags))
-	require.NoError(t, err)
+	permStrings = BasePermissionsToStringList(allSetBasePermission(AllPermFlags))
 	assert.Equal(t, []string{"root", "send", "call", "createContract", "createAccount", "bond", "name", "hasBase",
 		"setBase", "unsetBase", "setGlobal", "hasRole", "addRole", "removeRole"}, permStrings)
 
-	permStrings, err = BasePermissionsToStringList(allSetBasePermission(AllPermFlags + 1))
-	assert.Error(t, err)
+	permStrings = BasePermissionsToStringList(allSetBasePermission(AllPermFlags + 1))
+	assert.Equal(t, []string{}, permStrings)
 }
 
 func TestBasePermissionsString(t *testing.T) {
diff --git a/protobuf/acm.proto b/protobuf/acm.proto
index 8a52a277de260e10ea9fe44d7f271db2b8d88561..1fef3bd9a6d4d5c80e5acc43cc23c45e0791f55b 100644
--- a/protobuf/acm.proto
+++ b/protobuf/acm.proto
@@ -9,15 +9,10 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "permission.proto";
 import "crypto.proto";
 
-// Enable custom Marshal method.
 option (gogoproto.marshaler_all) = true;
-// Enable custom Unmarshal method.
 option (gogoproto.unmarshaler_all) = true;
-// Enable custom Size method (Required by Marshal and Unmarshal).
 option (gogoproto.sizer_all) = true;
-// Enable registration with golang/protobuf for the grpc-gateway.
 option (gogoproto.goproto_registration) = true;
-// Enable generation of XXX_MessageName methods for grpc-go/status.
 option (gogoproto.messagename_all) = true;
 
 message ConcreteAccount {
@@ -28,4 +23,4 @@ message ConcreteAccount {
     uint64 Balance = 4;
     bytes Code = 5 [(gogoproto.customtype) = "Bytecode", (gogoproto.nullable) = false];
     permission.AccountPermissions Permissions = 6 [(gogoproto.nullable) = false];
-}
\ No newline at end of file
+}
diff --git a/protobuf/balance.proto b/protobuf/balance.proto
new file mode 100644
index 0000000000000000000000000000000000000000..ea71f6c4a5a0bd02bbd707d4484ae7e931803081
--- /dev/null
+++ b/protobuf/balance.proto
@@ -0,0 +1,20 @@
+// Uses proto3; unlike spec.proto's history, no proto2 pointer fields are needed here
+syntax = 'proto3';
+
+option go_package = "github.com/hyperledger/burrow/acm/balance";
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+package balance;
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.goproto_registration) = true;
+option (gogoproto.messagename_all) = true;
+
+message Balance {
+    option (gogoproto.goproto_stringer) = false;
+    uint32 Type = 1 [(gogoproto.casttype) = "Type"];
+    uint64 Amount = 2;
+}
\ No newline at end of file
diff --git a/protobuf/crypto.proto b/protobuf/crypto.proto
index 425236f4c87154b12bd7d762edde8ab0fb2396c8..9b95283b39a5b78fdcdfbc4f6c8e86a12f7131e9 100644
--- a/protobuf/crypto.proto
+++ b/protobuf/crypto.proto
@@ -16,7 +16,7 @@ option (gogoproto.messagename_all) = true;
 message PublicKey {
     option (gogoproto.goproto_stringer) = false;
     uint32 CurveType = 1 [(gogoproto.casttype) = "CurveType"];
-    bytes PublicKey = 2;
+    bytes Key = 2;
 }
 
 message PrivateKey {
@@ -25,5 +25,5 @@ message PrivateKey {
     uint32 CurveType = 1 [(gogoproto.casttype) = "CurveType"];
     // Note may need initialisation
     bytes PublicKey = 2;
-    bytes PrivateKey = 3;
+    bytes Key = 3;
 }
diff --git a/protobuf/exec.proto b/protobuf/exec.proto
index 6eb935117ab1d814db1aadc9bfa348aaf7774de7..fde241498f95dc1c6366f8a608cce142c1f3b724 100644
--- a/protobuf/exec.proto
+++ b/protobuf/exec.proto
@@ -5,12 +5,13 @@ package exec;
 option go_package = "github.com/hyperledger/burrow/execution/exec";
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "github.com/tendermint/abci/types/types.proto";
+//import "github.com/tendermint/tendermint/abci/types/types.proto";
 
 import "errors.proto";
 import "names.proto";
 import "txs.proto";
 import "permission.proto";
+import "spec.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
@@ -33,9 +34,6 @@ message BlockHeader {
 }
 
 message TxExecution {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     // Transaction type
     uint32 TxType = 2 [(gogoproto.casttype) = "github.com/hyperledger/burrow/txs/payload.Type"];
     // The hash of the transaction that caused this event to be generated
@@ -58,9 +56,6 @@ message TxExecution {
 
 message Header {
     option (gogoproto.goproto_stringer) = false;
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     // Transaction type
     uint32 TxType = 1 [(gogoproto.casttype) = "github.com/hyperledger/burrow/txs/payload.Type"];
     // The hash of the transaction that caused this event to be generated
@@ -78,22 +73,17 @@ message Header {
 }
 
 message Event {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     option (gogoproto.goproto_stringer) = false;
     Header Header = 1;
     InputEvent Input = 2;
     OutputEvent Output = 3;
     CallEvent Call = 4;
     LogEvent Log = 5;
+    GovernAccountEvent GovernAccount = 6;
 }
 
 // Could structure this further if needed - sum type of various results relevant to different transaction types
 message Result {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     // EVM execution return
     bytes Return = 1;
     // Gas used in computation
@@ -105,42 +95,31 @@ message Result {
 }
 
 message LogEvent {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     bytes Address = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
     bytes Data = 2 [(gogoproto.customtype) = "github.com/hyperledger/burrow/binary.HexBytes", (gogoproto.nullable) = false];
     repeated bytes Topics = 3 [(gogoproto.customtype) = "github.com/hyperledger/burrow/binary.Word256", (gogoproto.nullable) = false];
 }
 
 message CallEvent {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     CallData CallData = 1;
     bytes Origin = 2 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
     uint64 StackDepth = 3;
     bytes Return = 4 [(gogoproto.customtype) = "github.com/hyperledger/burrow/binary.HexBytes", (gogoproto.nullable) = false];
 }
 
+message GovernAccountEvent {
+    spec.TemplateAccount AccountUpdate = 1;
+}
+
 message InputEvent {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     bytes Address = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
 }
 
 message OutputEvent {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     bytes Address = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
 }
 
 message CallData {
-    option (gogoproto.sizer) = true;
-    option (gogoproto.marshaler) = true;
-    option (gogoproto.unmarshaler) = true;
     bytes Caller = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
     bytes Callee = 2 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.nullable) = false];
     bytes Data = 3 [(gogoproto.customtype) = "github.com/hyperledger/burrow/binary.HexBytes", (gogoproto.nullable) = false];
diff --git a/protobuf/payload.proto b/protobuf/payload.proto
index 1e75fdf40072f6f150d40c022e77e26d64492a1a..55938021e6544c6b571a656e10b1308fb3b2daba 100644
--- a/protobuf/payload.proto
+++ b/protobuf/payload.proto
@@ -20,12 +20,14 @@ option (gogoproto.goproto_registration) = true;
 // Enable generation of XXX_MessageName methods for grpc-go/status.
 option (gogoproto.messagename_all) = true;
 
-message AnyPayload {
+message Any {
     CallTx CallTx = 1;
     SendTx SendTx = 2;
     NameTx NameTx = 3;
-    PermissionsTx PermissionsTx = 4;
-    GovernanceTx GovernanceTx = 5;
+    PermsTx PermsTx = 4;
+    GovTx GovTx = 5;
+    BondTx BondTx = 6;
+    UnbondTx UnbondTx = 7;
 }
 
 // An input to a transaction that may carry an Amount as a charge and whose sequence number must be one greater than
@@ -76,7 +78,7 @@ message SendTx {
 }
 
 // An update to the on-chain permissions
-message PermissionsTx {
+message PermsTx {
     option (gogoproto.goproto_stringer) = false;
     // The permission moderator
     TxInput Input = 1;
@@ -114,7 +116,7 @@ message UnbondTx {
     uint64 Height = 3;
 }
 
-message GovernanceTx {
+message GovTx {
     option (gogoproto.goproto_stringer) = false;
     option (gogoproto.goproto_getters) = false;
 
diff --git a/protobuf/rpcquery.proto b/protobuf/rpcquery.proto
index bbb5122559808e119f59aa259996752c69dc6cb5..fc8b66262e5fe309df5e52657782c93f87365473 100644
--- a/protobuf/rpcquery.proto
+++ b/protobuf/rpcquery.proto
@@ -8,6 +8,7 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 
 import "names.proto";
 import "acm.proto";
+import "validator.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
@@ -20,6 +21,7 @@ service Query {
     rpc ListAccounts (ListAccountsParam) returns (stream acm.ConcreteAccount);
     rpc GetName (GetNameParam) returns (names.Entry);
     rpc ListNames (ListNamesParam) returns (stream names.Entry);
+    rpc GetValidatorSet (GetValidatorSetParam) returns (ValidatorSet);
 }
 
 message GetAccountParam {
@@ -38,3 +40,16 @@ message ListNamesParam {
     string Query = 1;
 }
 
+message GetValidatorSetParam {
+    bool IncludeHistory = 1;
+}
+
+message ValidatorSet {
+    uint64 height = 1;
+    repeated validator.Validator Set = 2;
+    repeated ValidatorSetDeltas History = 3;
+}
+
+message ValidatorSetDeltas {
+    repeated validator.Validator Validators = 2;
+}
\ No newline at end of file
diff --git a/protobuf/rpctransact.proto b/protobuf/rpctransact.proto
index b2753a171b9a9980935671964bc7d0200b16fa4c..64b778213e72b39f95275f26a7568def8b5cb6ff 100644
--- a/protobuf/rpctransact.proto
+++ b/protobuf/rpctransact.proto
@@ -32,7 +32,7 @@ service Transact {
     // Sign transaction server-side
     rpc SignTx (TxEnvelopeParam) returns (TxEnvelope);
     // Formulate a transaction from a Payload and retrun the envelop with the Tx bytes ready to sign
-    rpc FormulateTx (PayloadParam) returns (TxEnvelope);
+    rpc FormulateTx (payload.Any) returns (TxEnvelope);
 
     // Formulate and sign a CallTx transaction signed server-side and wait for it to be included in a block, retrieving response
     rpc CallTxSync (payload.CallTx) returns (exec.TxExecution);
@@ -61,12 +61,6 @@ message CallCodeParam {
     bytes Data = 3;
 }
 
-message PayloadParam {
-    payload.CallTx CallTx = 1;
-    payload.SendTx SendTx = 2;
-    payload.NameTx NameTx = 3;
-}
-
 message TxEnvelope {
     txs.Envelope Envelope = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/txs.Envelope"];
 }
@@ -75,6 +69,6 @@ message TxEnvelopeParam {
     // An existing Envelope - either signed or unsigned - if the latter will be signed server-side
     txs.Envelope Envelope = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/txs.Envelope"];
     // If no Envelope provided then one will be generated from the provided payload and signed server-side
-    PayloadParam Payload = 2;
+    payload.Any Payload = 2;
 }
 
diff --git a/protobuf/spec.proto b/protobuf/spec.proto
index 7f0c6abec74ff569deef8370c0bf7ded2ed7825d..03a2090b9fd29fe57ffb0da7187722e9f383ef0a 100644
--- a/protobuf/spec.proto
+++ b/protobuf/spec.proto
@@ -1,35 +1,28 @@
 // Needed to proto2 rather than proto3 to get pointer field for PermArg
-syntax = 'proto2';
+syntax = 'proto3';
 
 option go_package = "github.com/hyperledger/burrow/genesis/spec";
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 
 import "crypto.proto";
+import "balance.proto";
 
 package spec;
 
-// Enable custom Marshal method.
 option (gogoproto.marshaler_all) = true;
-// Enable custom Unmarshal method.
 option (gogoproto.unmarshaler_all) = true;
-// Enable custom Size method (Required by Marshal and Unmarshal).
 option (gogoproto.sizer_all) = true;
-// Enable registration with golang/protobuf for the grpc-gateway.
 option (gogoproto.goproto_registration) = true;
-// Enable generation of XXX_MessageName methods for grpc-go/status.
 option (gogoproto.messagename_all) = true;
 
 message TemplateAccount {
-    option (gogoproto.goproto_unrecognized) = false;
-
-    optional string Name = 1 [(gogoproto.nullable) = false];
-    optional bytes Address = 2 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    optional bytes NodeAddress = 3 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    optional crypto.PublicKey PublicKey = 4 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    optional uint64 Amount = 5 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    optional uint64 Power = 6 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    repeated string Permissions = 7 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
-    repeated string Roles = 8 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    string Name = 1;
+    bytes Address = 2 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    bytes NodeAddress = 3 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address", (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    crypto.PublicKey PublicKey = 4 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    repeated balance.Balance Amounts = 5 [(gogoproto.nullable) = false, (gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    repeated string Permissions = 6 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
+    repeated string Roles = 7 [(gogoproto.jsontag) = ",omitempty", (gogoproto.moretags) = "toml:\",omitempty\""];
 }
 
diff --git a/protobuf/validator.proto b/protobuf/validator.proto
new file mode 100644
index 0000000000000000000000000000000000000000..dc621ab7ac092feff80c90acb2ced1950940d3a5
--- /dev/null
+++ b/protobuf/validator.proto
@@ -0,0 +1,22 @@
+syntax = 'proto3';
+
+package validator;
+
+option go_package = "github.com/hyperledger/burrow/acm/validator";
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+import "crypto.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.goproto_registration) = true;
+option (gogoproto.messagename_all) = true;
+
+message Validator {
+    option (gogoproto.goproto_stringer) = false;
+    bytes Address = 1 [(gogoproto.customtype) = "github.com/hyperledger/burrow/crypto.Address"];
+    crypto.PublicKey PublicKey = 2 [(gogoproto.nullable) = false];
+    uint64 Power = 3;
+}
diff --git a/rpc/config.go b/rpc/config.go
index 44acb555525c94fa93c0a686b434845322f417d2..23bfbdbb763c48fae661f22ae9d686147866b45d 100644
--- a/rpc/config.go
+++ b/rpc/config.go
@@ -1,5 +1,11 @@
 package rpc
 
+import "fmt"
+
+// 'localhost' gets interpreted as ipv6
+// TODO: revisit this
+const localhost = "127.0.0.1"
+
 type RPCConfig struct {
 	TM       *ServerConfig  `json:",omitempty" toml:",omitempty"`
 	Profiler *ServerConfig  `json:",omitempty" toml:",omitempty"`
@@ -41,28 +47,28 @@ func DefaultRPCConfig() *RPCConfig {
 func DefaultTMConfig() *ServerConfig {
 	return &ServerConfig{
 		Enabled:       true,
-		ListenAddress: "tcp://localhost:26658",
+		ListenAddress: fmt.Sprintf("tcp://%s:26658", localhost),
 	}
 }
 
 func DefaultGRPCConfig() *ServerConfig {
 	return &ServerConfig{
 		Enabled:       true,
-		ListenAddress: "localhost:10997",
+		ListenAddress: fmt.Sprintf("%s:10997", localhost),
 	}
 }
 
 func DefaultProfilerConfig() *ServerConfig {
 	return &ServerConfig{
 		Enabled:       false,
-		ListenAddress: "tcp://localhost:6060",
+		ListenAddress: fmt.Sprintf("tcp://%s:6060", localhost),
 	}
 }
 
 func DefaultMetricsConfig() *MetricsConfig {
 	return &MetricsConfig{
 		Enabled:         false,
-		ListenAddress:   "tcp://localhost:9102",
+		ListenAddress:   fmt.Sprintf("tcp://%s:9102", localhost),
 		MetricsPath:     "/metrics",
 		BlockSampleSize: 100,
 	}
diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go
index e93f069087b988383c8aeaade16faf076479f4bb..751de2708941281058bcbbafe9df2700389a5ad3 100644
--- a/rpc/lib/client/ws_client.go
+++ b/rpc/lib/client/ws_client.go
@@ -13,7 +13,7 @@ import (
 	"github.com/hyperledger/burrow/rpc/lib/types"
 	"github.com/pkg/errors"
 	metrics "github.com/rcrowley/go-metrics"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go
index 1d21c21a1298eaaeac9a7fb4354856e486ba3a3e..db1f99a973cf10dec44e2e1b0be18d6f86e8ade3 100644
--- a/rpc/lib/rpc_test.go
+++ b/rpc/lib/rpc_test.go
@@ -20,8 +20,8 @@ import (
 	"github.com/hyperledger/burrow/rpc/lib/types"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 // Client and Server should work over tcp or unix sockets
diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go
index 506425b725eaa372428e9fb5f165a93ac80aa6d7..274ee30f207d9d5041e1d73e51a9cbab1906a34c 100644
--- a/rpc/lib/server/handlers.go
+++ b/rpc/lib/server/handlers.go
@@ -20,7 +20,7 @@ import (
 	"github.com/hyperledger/burrow/logging/structure"
 	"github.com/hyperledger/burrow/rpc/lib/types"
 	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions.
diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go
index 0b5550c23a06878f1a0a5523fbb7317a68f95d79..6f3f7dcbebe9b39c3a23e9b5e5857d6d80c1f8ea 100644
--- a/rpc/lib/server/parse_test.go
+++ b/rpc/lib/server/parse_test.go
@@ -6,7 +6,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func TestParseJSONMap(t *testing.T) {
diff --git a/rpc/result.go b/rpc/result.go
index af777d9406642929bb03fbe31f34ea858ea07ade..35d21892f7d912c622a8cacc710a56ecdf66763e 100644
--- a/rpc/result.go
+++ b/rpc/result.go
@@ -18,6 +18,7 @@ import (
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
+	"github.com/hyperledger/burrow/acm/validator"
 	"github.com/hyperledger/burrow/binary"
 	"github.com/hyperledger/burrow/crypto"
 	"github.com/hyperledger/burrow/execution/names"
@@ -96,7 +97,7 @@ func (b *Block) UnmarshalJSON(data []byte) (err error) {
 type ResultStatus struct {
 	NodeInfo          p2p.NodeInfo
 	GenesisHash       binary.HexBytes
-	PubKey            crypto.PublicKey
+	PublicKey         crypto.PublicKey
 	LatestBlockHash   binary.HexBytes
 	LatestBlockHeight uint64
 	LatestBlockTime   int64
@@ -130,6 +131,7 @@ type Peer struct {
 }
 
 type ResultNetInfo struct {
+	ThisNode  p2p.NodeInfo
 	Listening bool
 	Listeners []string
 	Peers     []*Peer
@@ -137,8 +139,8 @@ type ResultNetInfo struct {
 
 type ResultListValidators struct {
 	BlockHeight         uint64
-	BondedValidators    []*acm.ConcreteValidator
-	UnbondingValidators []*acm.ConcreteValidator
+	BondedValidators    []*validator.Validator
+	UnbondingValidators []*validator.Validator
 }
 
 type ResultDumpConsensusState struct {
diff --git a/rpc/result_test.go b/rpc/result_test.go
index ca972de49f7f1357ca6646a7bc7bddc72aba3ca7..9c66f47ad838dd8aaab94e017f7c7da4e23749e5 100644
--- a/rpc/result_test.go
+++ b/rpc/result_test.go
@@ -26,10 +26,10 @@ import (
 	"github.com/hyperledger/burrow/binary"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	goCrypto "github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/consensus/types"
+	goCrypto "github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/libs/common"
 	tmTypes "github.com/tendermint/tendermint/types"
-	"github.com/tendermint/tmlibs/common"
 )
 
 func TestResultListAccounts(t *testing.T) {
diff --git a/rpc/rpcevents/execution_events_server.go b/rpc/rpcevents/execution_events_server.go
index 2b042c77f19851db816fe367e0b12775355b5bd4..d8f48e1470de1a8ffad6a333276ddb74d42238a1 100644
--- a/rpc/rpcevents/execution_events_server.go
+++ b/rpc/rpcevents/execution_events_server.go
@@ -7,7 +7,7 @@ import (
 	"io"
 
 	"github.com/gogo/protobuf/proto"
-	bcm "github.com/hyperledger/burrow/blockchain"
+	bcm "github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/event"
 	"github.com/hyperledger/burrow/event/query"
 	"github.com/hyperledger/burrow/execution/exec"
@@ -29,12 +29,12 @@ type Provider interface {
 type executionEventsServer struct {
 	eventsProvider Provider
 	subscribable   event.Subscribable
-	tip            bcm.TipInfo
+	tip            bcm.BlockchainInfo
 	logger         *logging.Logger
 }
 
 func NewExecutionEventsServer(eventsProvider Provider, subscribable event.Subscribable,
-	tip bcm.TipInfo, logger *logging.Logger) ExecutionEventsServer {
+	tip bcm.BlockchainInfo, logger *logging.Logger) ExecutionEventsServer {
 
 	return &executionEventsServer{
 		eventsProvider: eventsProvider,
diff --git a/rpc/rpcquery/query_server.go b/rpc/rpcquery/query_server.go
index edcb968b6b1810329395acfb90bb5d1d8b11c97e..debdc05b53f06473b252cff097b76e1512b56e62 100644
--- a/rpc/rpcquery/query_server.go
+++ b/rpc/rpcquery/query_server.go
@@ -5,24 +5,36 @@ import (
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/bcm"
+	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/event/query"
 	"github.com/hyperledger/burrow/execution/names"
+	"github.com/hyperledger/burrow/logging"
 )
 
 type queryServer struct {
-	accounts state.IterableReader
-	nameReg  names.IterableReader
+	accounts   state.IterableReader
+	nameReg    names.IterableReader
+	blockchain bcm.BlockchainInfo
+	nodeView   *tendermint.NodeView
+	logger     *logging.Logger
 }
 
 var _ QueryServer = &queryServer{}
 
-func NewQueryServer(state state.IterableReader, nameReg names.IterableReader) *queryServer {
+func NewQueryServer(state state.IterableReader, nameReg names.IterableReader, blockchain bcm.BlockchainInfo,
+	nodeView *tendermint.NodeView, logger *logging.Logger) *queryServer {
 	return &queryServer{
-		accounts: state,
-		nameReg:  nameReg,
+		accounts:   state,
+		nameReg:    nameReg,
+		blockchain: blockchain,
+		nodeView:   nodeView,
+		logger:     logger,
 	}
 }
 
+// Account state
+
 func (qs *queryServer) GetAccount(ctx context.Context, param *GetAccountParam) (*acm.ConcreteAccount, error) {
 	acc, err := qs.accounts.GetAccount(param.Address)
 	if err != nil {
@@ -74,3 +86,20 @@ func (qs *queryServer) ListNames(param *ListNamesParam, stream Query_ListNamesSe
 	}
 	return streamErr
 }
+
+func (qs *queryServer) GetValidatorSet(ctx context.Context, param *GetValidatorSetParam) (*ValidatorSet, error) {
+	set, deltas, height := qs.blockchain.ValidatorsHistory()
+	vs := &ValidatorSet{
+		Height: height,
+		Set:    set.Validators(),
+	}
+	if param.IncludeHistory {
+		vs.History = make([]*ValidatorSetDeltas, len(deltas))
+		for i, d := range deltas {
+			vs.History[i] = &ValidatorSetDeltas{
+				Validators: d.Validators(),
+			}
+		}
+	}
+	return vs, nil
+}
diff --git a/rpc/rpcquery/rpcquery.pb.go b/rpc/rpcquery/rpcquery.pb.go
index ce9b9f17d5a7fc89e4b3eb2efbe19c953b6dc72a..ec9b81710c9f6a760e4d693965d95b4f70cbf1f0 100644
--- a/rpc/rpcquery/rpcquery.pb.go
+++ b/rpc/rpcquery/rpcquery.pb.go
@@ -12,6 +12,9 @@
 		ListAccountsParam
 		GetNameParam
 		ListNamesParam
+		GetValidatorSetParam
+		ValidatorSet
+		ValidatorSetDeltas
 */
 package rpcquery
 
@@ -22,6 +25,7 @@ import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import names "github.com/hyperledger/burrow/execution/names"
 import acm "github.com/hyperledger/burrow/acm"
+import validator "github.com/hyperledger/burrow/acm/validator"
 
 import github_com_hyperledger_burrow_crypto "github.com/hyperledger/burrow/crypto"
 
@@ -114,6 +118,82 @@ func (m *ListNamesParam) GetQuery() string {
 func (*ListNamesParam) XXX_MessageName() string {
 	return "rpcquery.ListNamesParam"
 }
+
+type GetValidatorSetParam struct {
+	IncludeHistory bool `protobuf:"varint,1,opt,name=IncludeHistory,proto3" json:"IncludeHistory,omitempty"`
+}
+
+func (m *GetValidatorSetParam) Reset()                    { *m = GetValidatorSetParam{} }
+func (m *GetValidatorSetParam) String() string            { return proto.CompactTextString(m) }
+func (*GetValidatorSetParam) ProtoMessage()               {}
+func (*GetValidatorSetParam) Descriptor() ([]byte, []int) { return fileDescriptorRpcquery, []int{4} }
+
+func (m *GetValidatorSetParam) GetIncludeHistory() bool {
+	if m != nil {
+		return m.IncludeHistory
+	}
+	return false
+}
+
+func (*GetValidatorSetParam) XXX_MessageName() string {
+	return "rpcquery.GetValidatorSetParam"
+}
+
+type ValidatorSet struct {
+	Height  uint64                 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
+	Set     []*validator.Validator `protobuf:"bytes,2,rep,name=Set" json:"Set,omitempty"`
+	History []*ValidatorSetDeltas  `protobuf:"bytes,3,rep,name=History" json:"History,omitempty"`
+}
+
+func (m *ValidatorSet) Reset()                    { *m = ValidatorSet{} }
+func (m *ValidatorSet) String() string            { return proto.CompactTextString(m) }
+func (*ValidatorSet) ProtoMessage()               {}
+func (*ValidatorSet) Descriptor() ([]byte, []int) { return fileDescriptorRpcquery, []int{5} }
+
+func (m *ValidatorSet) GetHeight() uint64 {
+	if m != nil {
+		return m.Height
+	}
+	return 0
+}
+
+func (m *ValidatorSet) GetSet() []*validator.Validator {
+	if m != nil {
+		return m.Set
+	}
+	return nil
+}
+
+func (m *ValidatorSet) GetHistory() []*ValidatorSetDeltas {
+	if m != nil {
+		return m.History
+	}
+	return nil
+}
+
+func (*ValidatorSet) XXX_MessageName() string {
+	return "rpcquery.ValidatorSet"
+}
+
+type ValidatorSetDeltas struct {
+	Validators []*validator.Validator `protobuf:"bytes,2,rep,name=Validators" json:"Validators,omitempty"`
+}
+
+func (m *ValidatorSetDeltas) Reset()                    { *m = ValidatorSetDeltas{} }
+func (m *ValidatorSetDeltas) String() string            { return proto.CompactTextString(m) }
+func (*ValidatorSetDeltas) ProtoMessage()               {}
+func (*ValidatorSetDeltas) Descriptor() ([]byte, []int) { return fileDescriptorRpcquery, []int{6} }
+
+func (m *ValidatorSetDeltas) GetValidators() []*validator.Validator {
+	if m != nil {
+		return m.Validators
+	}
+	return nil
+}
+
+func (*ValidatorSetDeltas) XXX_MessageName() string {
+	return "rpcquery.ValidatorSetDeltas"
+}
 func init() {
 	proto.RegisterType((*GetAccountParam)(nil), "rpcquery.GetAccountParam")
 	golang_proto.RegisterType((*GetAccountParam)(nil), "rpcquery.GetAccountParam")
@@ -123,6 +203,12 @@ func init() {
 	golang_proto.RegisterType((*GetNameParam)(nil), "rpcquery.GetNameParam")
 	proto.RegisterType((*ListNamesParam)(nil), "rpcquery.ListNamesParam")
 	golang_proto.RegisterType((*ListNamesParam)(nil), "rpcquery.ListNamesParam")
+	proto.RegisterType((*GetValidatorSetParam)(nil), "rpcquery.GetValidatorSetParam")
+	golang_proto.RegisterType((*GetValidatorSetParam)(nil), "rpcquery.GetValidatorSetParam")
+	proto.RegisterType((*ValidatorSet)(nil), "rpcquery.ValidatorSet")
+	golang_proto.RegisterType((*ValidatorSet)(nil), "rpcquery.ValidatorSet")
+	proto.RegisterType((*ValidatorSetDeltas)(nil), "rpcquery.ValidatorSetDeltas")
+	golang_proto.RegisterType((*ValidatorSetDeltas)(nil), "rpcquery.ValidatorSetDeltas")
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -140,6 +226,7 @@ type QueryClient interface {
 	ListAccounts(ctx context.Context, in *ListAccountsParam, opts ...grpc.CallOption) (Query_ListAccountsClient, error)
 	GetName(ctx context.Context, in *GetNameParam, opts ...grpc.CallOption) (*names.Entry, error)
 	ListNames(ctx context.Context, in *ListNamesParam, opts ...grpc.CallOption) (Query_ListNamesClient, error)
+	GetValidatorSet(ctx context.Context, in *GetValidatorSetParam, opts ...grpc.CallOption) (*ValidatorSet, error)
 }
 
 type queryClient struct {
@@ -232,6 +319,15 @@ func (x *queryListNamesClient) Recv() (*names.Entry, error) {
 	return m, nil
 }
 
+func (c *queryClient) GetValidatorSet(ctx context.Context, in *GetValidatorSetParam, opts ...grpc.CallOption) (*ValidatorSet, error) {
+	out := new(ValidatorSet)
+	err := grpc.Invoke(ctx, "/rpcquery.Query/GetValidatorSet", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Server API for Query service
 
 type QueryServer interface {
@@ -239,6 +335,7 @@ type QueryServer interface {
 	ListAccounts(*ListAccountsParam, Query_ListAccountsServer) error
 	GetName(context.Context, *GetNameParam) (*names.Entry, error)
 	ListNames(*ListNamesParam, Query_ListNamesServer) error
+	GetValidatorSet(context.Context, *GetValidatorSetParam) (*ValidatorSet, error)
 }
 
 func RegisterQueryServer(s *grpc.Server, srv QueryServer) {
@@ -323,6 +420,24 @@ func (x *queryListNamesServer) Send(m *names.Entry) error {
 	return x.ServerStream.SendMsg(m)
 }
 
+func _Query_GetValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetValidatorSetParam)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QueryServer).GetValidatorSet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/rpcquery.Query/GetValidatorSet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QueryServer).GetValidatorSet(ctx, req.(*GetValidatorSetParam))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _Query_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "rpcquery.Query",
 	HandlerType: (*QueryServer)(nil),
@@ -335,6 +450,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{
 			MethodName: "GetName",
 			Handler:    _Query_GetName_Handler,
 		},
+		{
+			MethodName: "GetValidatorSet",
+			Handler:    _Query_GetValidatorSet_Handler,
+		},
 	},
 	Streams: []grpc.StreamDesc{
 		{
@@ -449,6 +568,111 @@ func (m *ListNamesParam) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *GetValidatorSetParam) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetValidatorSetParam) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.IncludeHistory {
+		dAtA[i] = 0x8
+		i++
+		if m.IncludeHistory {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *ValidatorSet) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ValidatorSet) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Height != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintRpcquery(dAtA, i, uint64(m.Height))
+	}
+	if len(m.Set) > 0 {
+		for _, msg := range m.Set {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintRpcquery(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.History) > 0 {
+		for _, msg := range m.History {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintRpcquery(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *ValidatorSetDeltas) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ValidatorSetDeltas) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Validators) > 0 {
+		for _, msg := range m.Validators {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintRpcquery(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
 func encodeVarintRpcquery(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -496,6 +720,48 @@ func (m *ListNamesParam) Size() (n int) {
 	return n
 }
 
+func (m *GetValidatorSetParam) Size() (n int) {
+	var l int
+	_ = l
+	if m.IncludeHistory {
+		n += 2
+	}
+	return n
+}
+
+func (m *ValidatorSet) Size() (n int) {
+	var l int
+	_ = l
+	if m.Height != 0 {
+		n += 1 + sovRpcquery(uint64(m.Height))
+	}
+	if len(m.Set) > 0 {
+		for _, e := range m.Set {
+			l = e.Size()
+			n += 1 + l + sovRpcquery(uint64(l))
+		}
+	}
+	if len(m.History) > 0 {
+		for _, e := range m.History {
+			l = e.Size()
+			n += 1 + l + sovRpcquery(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ValidatorSetDeltas) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Validators) > 0 {
+		for _, e := range m.Validators {
+			l = e.Size()
+			n += 1 + l + sovRpcquery(uint64(l))
+		}
+	}
+	return n
+}
+
 func sovRpcquery(x uint64) (n int) {
 	for {
 		n++
@@ -826,6 +1092,288 @@ func (m *ListNamesParam) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *GetValidatorSetParam) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpcquery
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetValidatorSetParam: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetValidatorSetParam: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IncludeHistory", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpcquery
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IncludeHistory = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpcquery(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ValidatorSet) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpcquery
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ValidatorSet: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ValidatorSet: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+			}
+			m.Height = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpcquery
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Height |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpcquery
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Set = append(m.Set, &validator.Validator{})
+			if err := m.Set[len(m.Set)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field History", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpcquery
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.History = append(m.History, &ValidatorSetDeltas{})
+			if err := m.History[len(m.History)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpcquery(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ValidatorSetDeltas) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpcquery
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ValidatorSetDeltas: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ValidatorSetDeltas: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpcquery
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Validators = append(m.Validators, &validator.Validator{})
+			if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpcquery(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpcquery
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipRpcquery(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
@@ -935,27 +1483,36 @@ func init() { proto.RegisterFile("rpcquery.proto", fileDescriptorRpcquery) }
 func init() { golang_proto.RegisterFile("rpcquery.proto", fileDescriptorRpcquery) }
 
 var fileDescriptorRpcquery = []byte{
-	// 349 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcd, 0x4a, 0xc3, 0x40,
-	0x14, 0x85, 0x1d, 0x50, 0x6b, 0xaf, 0xa1, 0xe2, 0x50, 0xa4, 0x46, 0x48, 0x25, 0x0b, 0x51, 0xd1,
-	0xa4, 0xf8, 0xb3, 0x14, 0x6c, 0x8b, 0x74, 0x23, 0x45, 0xbb, 0x74, 0x97, 0x4c, 0xc7, 0xb4, 0x60,
-	0x32, 0xf1, 0x66, 0x82, 0xe4, 0xed, 0x5c, 0x76, 0xe9, 0xda, 0x45, 0x91, 0xf6, 0x11, 0x7c, 0x01,
-	0xc9, 0x24, 0xe9, 0x8f, 0x4a, 0x77, 0xf7, 0x84, 0x7b, 0xce, 0x99, 0x6f, 0x26, 0x50, 0xc1, 0x90,
-	0xbd, 0xc6, 0x1c, 0x13, 0x2b, 0x44, 0x21, 0x05, 0xdd, 0x2a, 0xb4, 0x7e, 0xee, 0x0d, 0xe5, 0x20,
-	0x76, 0x2d, 0x26, 0x7c, 0xdb, 0x13, 0x9e, 0xb0, 0xd5, 0x82, 0x1b, 0x3f, 0x2b, 0xa5, 0x84, 0x9a,
-	0x32, 0xa3, 0xbe, 0x1d, 0x38, 0x3e, 0x8f, 0x72, 0x51, 0x76, 0x98, 0x9f, 0x8d, 0xa6, 0x03, 0x3b,
-	0x1d, 0x2e, 0x9b, 0x8c, 0x89, 0x38, 0x90, 0x0f, 0x0e, 0x3a, 0x3e, 0xed, 0x42, 0xa9, 0xd9, 0xef,
-	0x23, 0x8f, 0xa2, 0x1a, 0x39, 0x24, 0xc7, 0x5a, 0xeb, 0x6a, 0x34, 0xae, 0xaf, 0x7d, 0x8e, 0xeb,
-	0x67, 0x0b, 0x95, 0x83, 0x24, 0xe4, 0xf8, 0xc2, 0xfb, 0x1e, 0x47, 0xdb, 0x8d, 0x11, 0xc5, 0x9b,
-	0xcd, 0x30, 0x09, 0xa5, 0xb0, 0x72, 0x6f, 0xaf, 0x08, 0x31, 0x4f, 0x60, 0xf7, 0x7e, 0x18, 0x15,
-	0x1d, 0x51, 0x56, 0x52, 0x85, 0x8d, 0xc7, 0x94, 0x43, 0x55, 0x94, 0x7b, 0x99, 0x30, 0x4d, 0xd0,
-	0x3a, 0x5c, 0x76, 0x1d, 0x9f, 0x67, 0x5b, 0x14, 0xd6, 0x53, 0x91, 0x2f, 0xa9, 0xd9, 0x3c, 0x82,
-	0x4a, 0x1a, 0x97, 0xce, 0xab, 0xb2, 0x2e, 0xbe, 0x49, 0xfe, 0x99, 0xde, 0x00, 0xcc, 0x19, 0xe9,
-	0xbe, 0x35, 0xbb, 0xd3, 0x5f, 0xe4, 0x7a, 0xd5, 0x4a, 0x2f, 0xa6, 0x2d, 0x02, 0x86, 0x5c, 0xf2,
-	0xc2, 0xd0, 0x06, 0x6d, 0xf1, 0xfc, 0xf4, 0x60, 0x1e, 0xf0, 0x87, 0xeb, 0xff, 0x88, 0x06, 0xa1,
-	0x36, 0x94, 0x72, 0x32, 0xba, 0xb7, 0x74, 0x80, 0x19, 0xac, 0xae, 0x59, 0xd9, 0x1b, 0xdd, 0x05,
-	0x12, 0x13, 0x7a, 0x0d, 0xe5, 0x19, 0x26, 0xad, 0x2d, 0x57, 0xce, 0xd9, 0x97, 0x4d, 0x0d, 0xd2,
-	0xba, 0x1d, 0x4d, 0x0c, 0xf2, 0x31, 0x31, 0xc8, 0xd7, 0xc4, 0x20, 0xef, 0x53, 0x83, 0x8c, 0xa6,
-	0x06, 0x79, 0x3a, 0x5d, 0xfd, 0x72, 0x18, 0x32, 0xbb, 0x28, 0x70, 0x37, 0xd5, 0x8f, 0x71, 0xf9,
-	0x13, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xef, 0x21, 0x21, 0x7b, 0x02, 0x00, 0x00,
+	// 487 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
+	0x1c, 0xc5, 0xeb, 0x58, 0xd7, 0xdf, 0xaa, 0x4e, 0x58, 0x55, 0x55, 0x02, 0xca, 0xa6, 0x1c, 0xaa,
+	0x81, 0x20, 0x99, 0xc6, 0xe0, 0x06, 0x62, 0x1b, 0xa8, 0x0c, 0xa1, 0x09, 0x32, 0x89, 0x03, 0x37,
+	0xd7, 0x31, 0x69, 0xa4, 0x26, 0x0e, 0x8e, 0x03, 0xea, 0x17, 0xe0, 0x63, 0x21, 0x8e, 0x3d, 0x72,
+	0xe6, 0x30, 0xa1, 0xee, 0x8b, 0xa0, 0x38, 0xce, 0xbf, 0x75, 0xea, 0xcd, 0xcf, 0x79, 0xef, 0xf7,
+	0xec, 0xbc, 0x67, 0xe8, 0x89, 0x98, 0x7e, 0x4b, 0x99, 0x98, 0xdb, 0xb1, 0xe0, 0x92, 0xe3, 0xed,
+	0x02, 0x1b, 0x4f, 0xfd, 0x40, 0x4e, 0xd3, 0x89, 0x4d, 0x79, 0xe8, 0xf8, 0xdc, 0xe7, 0x8e, 0x22,
+	0x4c, 0xd2, 0xaf, 0x0a, 0x29, 0xa0, 0x56, 0xb9, 0xd0, 0xd8, 0x89, 0x48, 0xc8, 0x12, 0x0d, 0x3a,
+	0x84, 0x86, 0x7a, 0xb9, 0xfb, 0x9d, 0xcc, 0x02, 0x8f, 0x48, 0x2e, 0xf2, 0x0d, 0x8b, 0xc0, 0xee,
+	0x98, 0xc9, 0x13, 0x4a, 0x79, 0x1a, 0xc9, 0x8f, 0x44, 0x90, 0x10, 0x5f, 0x40, 0xfb, 0xc4, 0xf3,
+	0x04, 0x4b, 0x92, 0x21, 0xda, 0x47, 0x07, 0xdd, 0xd3, 0xe3, 0xc5, 0xd5, 0xde, 0x9d, 0xbf, 0x57,
+	0x7b, 0x4f, 0x6a, 0x67, 0x98, 0xce, 0x63, 0x26, 0x66, 0xcc, 0xf3, 0x99, 0x70, 0x26, 0xa9, 0x10,
+	0xfc, 0x87, 0x43, 0xc5, 0x3c, 0x96, 0xdc, 0xd6, 0x5a, 0xb7, 0x18, 0x62, 0x3d, 0x82, 0x7b, 0x1f,
+	0x82, 0xa4, 0xf0, 0x48, 0x72, 0x93, 0x3e, 0xdc, 0xfd, 0x94, 0x5d, 0x4c, 0x59, 0x74, 0xdc, 0x1c,
+	0x58, 0x16, 0x74, 0xc7, 0x4c, 0x5e, 0x90, 0x90, 0xe5, 0x2c, 0x0c, 0x9b, 0x19, 0xd0, 0x24, 0xb5,
+	0xb6, 0x46, 0xd0, 0xcb, 0xc6, 0x65, 0xeb, 0xb5, 0xb3, 0x5e, 0x41, 0x7f, 0xcc, 0xe4, 0xe7, 0xe2,
+	0xbe, 0x97, 0x4c, 0x5f, 0x6f, 0x04, 0xbd, 0xf3, 0x88, 0xce, 0x52, 0x8f, 0xbd, 0x0b, 0x12, 0xc9,
+	0xb5, 0x6c, 0xdb, 0xbd, 0xb1, 0x6b, 0xfd, 0x44, 0xd0, 0xad, 0xab, 0xf1, 0x00, 0xb6, 0xa6, 0x2c,
+	0xf0, 0xa7, 0x52, 0x09, 0x36, 0x5d, 0x8d, 0xf0, 0x08, 0x5a, 0x97, 0x4c, 0x0e, 0x37, 0xf6, 0x5b,
+	0x07, 0x3b, 0x47, 0x7d, 0xbb, 0xfa, 0xc3, 0xa5, 0xda, 0xcd, 0x08, 0xf8, 0x05, 0xb4, 0x0b, 0xc7,
+	0x96, 0xe2, 0x3e, 0xb4, 0xcb, 0xb8, 0xeb, 0x46, 0x6f, 0xd8, 0x4c, 0x92, 0xc4, 0x2d, 0xc8, 0xd6,
+	0x7b, 0xc0, 0xab, 0x9f, 0xf1, 0x31, 0x40, 0xb9, 0x9b, 0xac, 0x35, 0xaf, 0xf1, 0x8e, 0x7e, 0x6d,
+	0xe8, 0x7f, 0x85, 0x5f, 0x02, 0x54, 0xc1, 0xe3, 0xfb, 0xd5, 0x51, 0x6e, 0xd4, 0xc1, 0xe8, 0xdb,
+	0x59, 0x7d, 0xce, 0x78, 0x44, 0x05, 0x93, 0xac, 0x10, 0x9c, 0x41, 0xb7, 0x1e, 0x2a, 0x7e, 0x50,
+	0x0d, 0x58, 0x09, 0xfb, 0xf6, 0x11, 0x87, 0x08, 0x3b, 0xd0, 0xd6, 0x71, 0xe3, 0x41, 0xe3, 0x00,
+	0x65, 0x03, 0x8c, 0xae, 0x9d, 0x37, 0xf9, 0x6d, 0x24, 0xc5, 0x1c, 0x3f, 0x87, 0x4e, 0x99, 0x3d,
+	0x1e, 0x36, 0x2d, 0xab, 0x42, 0x34, 0x45, 0x87, 0x08, 0x9f, 0xab, 0x92, 0x37, 0xc2, 0x34, 0x1b,
+	0x7e, 0x2b, 0x2d, 0x31, 0x06, 0xb7, 0x67, 0x73, 0xfa, 0x7a, 0xb1, 0x34, 0xd1, 0x9f, 0xa5, 0x89,
+	0xfe, 0x2d, 0x4d, 0xf4, 0xfb, 0xda, 0x44, 0x8b, 0x6b, 0x13, 0x7d, 0x79, 0xbc, 0xfe, 0x65, 0x88,
+	0x98, 0x3a, 0xc5, 0xb8, 0xc9, 0x96, 0x7a, 0x78, 0xcf, 0xfe, 0x07, 0x00, 0x00, 0xff, 0xff, 0xfd,
+	0xa4, 0xb6, 0xf9, 0xec, 0x03, 0x00, 0x00,
 }
diff --git a/rpc/rpctransact/rpctransact.pb.go b/rpc/rpctransact/rpctransact.pb.go
index 903e977b455a69b1ca70085ad713b1995aac5134..98861547663c8cbb57bdf24af1887b5f5c3cbe98 100644
--- a/rpc/rpctransact/rpctransact.pb.go
+++ b/rpc/rpctransact/rpctransact.pb.go
@@ -9,7 +9,6 @@
 
 	It has these top-level messages:
 		CallCodeParam
-		PayloadParam
 		TxEnvelope
 		TxEnvelopeParam
 */
@@ -73,42 +72,6 @@ func (*CallCodeParam) XXX_MessageName() string {
 	return "rpctransact.CallCodeParam"
 }
 
-type PayloadParam struct {
-	CallTx *payload.CallTx `protobuf:"bytes,1,opt,name=CallTx" json:"CallTx,omitempty"`
-	SendTx *payload.SendTx `protobuf:"bytes,2,opt,name=SendTx" json:"SendTx,omitempty"`
-	NameTx *payload.NameTx `protobuf:"bytes,3,opt,name=NameTx" json:"NameTx,omitempty"`
-}
-
-func (m *PayloadParam) Reset()                    { *m = PayloadParam{} }
-func (m *PayloadParam) String() string            { return proto.CompactTextString(m) }
-func (*PayloadParam) ProtoMessage()               {}
-func (*PayloadParam) Descriptor() ([]byte, []int) { return fileDescriptorRpctransact, []int{1} }
-
-func (m *PayloadParam) GetCallTx() *payload.CallTx {
-	if m != nil {
-		return m.CallTx
-	}
-	return nil
-}
-
-func (m *PayloadParam) GetSendTx() *payload.SendTx {
-	if m != nil {
-		return m.SendTx
-	}
-	return nil
-}
-
-func (m *PayloadParam) GetNameTx() *payload.NameTx {
-	if m != nil {
-		return m.NameTx
-	}
-	return nil
-}
-
-func (*PayloadParam) XXX_MessageName() string {
-	return "rpctransact.PayloadParam"
-}
-
 type TxEnvelope struct {
 	Envelope *github_com_hyperledger_burrow_txs.Envelope `protobuf:"bytes,1,opt,name=Envelope,customtype=github.com/hyperledger/burrow/txs.Envelope" json:"Envelope,omitempty"`
 }
@@ -116,7 +79,7 @@ type TxEnvelope struct {
 func (m *TxEnvelope) Reset()                    { *m = TxEnvelope{} }
 func (m *TxEnvelope) String() string            { return proto.CompactTextString(m) }
 func (*TxEnvelope) ProtoMessage()               {}
-func (*TxEnvelope) Descriptor() ([]byte, []int) { return fileDescriptorRpctransact, []int{2} }
+func (*TxEnvelope) Descriptor() ([]byte, []int) { return fileDescriptorRpctransact, []int{1} }
 
 func (*TxEnvelope) XXX_MessageName() string {
 	return "rpctransact.TxEnvelope"
@@ -126,15 +89,15 @@ type TxEnvelopeParam struct {
 	// An existing Envelope - either signed or unsigned - if the latter will be signed server-side
 	Envelope *github_com_hyperledger_burrow_txs.Envelope `protobuf:"bytes,1,opt,name=Envelope,customtype=github.com/hyperledger/burrow/txs.Envelope" json:"Envelope,omitempty"`
 	// If no Envelope provided then one will be generated from the provided payload and signed server-side
-	Payload *PayloadParam `protobuf:"bytes,2,opt,name=Payload" json:"Payload,omitempty"`
+	Payload *payload.Any `protobuf:"bytes,2,opt,name=Payload" json:"Payload,omitempty"`
 }
 
 func (m *TxEnvelopeParam) Reset()                    { *m = TxEnvelopeParam{} }
 func (m *TxEnvelopeParam) String() string            { return proto.CompactTextString(m) }
 func (*TxEnvelopeParam) ProtoMessage()               {}
-func (*TxEnvelopeParam) Descriptor() ([]byte, []int) { return fileDescriptorRpctransact, []int{3} }
+func (*TxEnvelopeParam) Descriptor() ([]byte, []int) { return fileDescriptorRpctransact, []int{2} }
 
-func (m *TxEnvelopeParam) GetPayload() *PayloadParam {
+func (m *TxEnvelopeParam) GetPayload() *payload.Any {
 	if m != nil {
 		return m.Payload
 	}
@@ -147,8 +110,6 @@ func (*TxEnvelopeParam) XXX_MessageName() string {
 func init() {
 	proto.RegisterType((*CallCodeParam)(nil), "rpctransact.CallCodeParam")
 	golang_proto.RegisterType((*CallCodeParam)(nil), "rpctransact.CallCodeParam")
-	proto.RegisterType((*PayloadParam)(nil), "rpctransact.PayloadParam")
-	golang_proto.RegisterType((*PayloadParam)(nil), "rpctransact.PayloadParam")
 	proto.RegisterType((*TxEnvelope)(nil), "rpctransact.TxEnvelope")
 	golang_proto.RegisterType((*TxEnvelope)(nil), "rpctransact.TxEnvelope")
 	proto.RegisterType((*TxEnvelopeParam)(nil), "rpctransact.TxEnvelopeParam")
@@ -174,7 +135,7 @@ type TransactClient interface {
 	// Sign transaction server-side
 	SignTx(ctx context.Context, in *TxEnvelopeParam, opts ...grpc.CallOption) (*TxEnvelope, error)
 	// Formulate a transaction from a Payload and retrun the envelop with the Tx bytes ready to sign
-	FormulateTx(ctx context.Context, in *PayloadParam, opts ...grpc.CallOption) (*TxEnvelope, error)
+	FormulateTx(ctx context.Context, in *payload.Any, opts ...grpc.CallOption) (*TxEnvelope, error)
 	// Formulate and sign a CallTx transaction signed server-side and wait for it to be included in a block, retrieving response
 	CallTxSync(ctx context.Context, in *payload.CallTx, opts ...grpc.CallOption) (*exec.TxExecution, error)
 	// Formulate and sign a CallTx transaction signed server-side
@@ -229,7 +190,7 @@ func (c *transactClient) SignTx(ctx context.Context, in *TxEnvelopeParam, opts .
 	return out, nil
 }
 
-func (c *transactClient) FormulateTx(ctx context.Context, in *PayloadParam, opts ...grpc.CallOption) (*TxEnvelope, error) {
+func (c *transactClient) FormulateTx(ctx context.Context, in *payload.Any, opts ...grpc.CallOption) (*TxEnvelope, error) {
 	out := new(TxEnvelope)
 	err := grpc.Invoke(ctx, "/rpctransact.Transact/FormulateTx", in, out, c.cc, opts...)
 	if err != nil {
@@ -321,7 +282,7 @@ type TransactServer interface {
 	// Sign transaction server-side
 	SignTx(context.Context, *TxEnvelopeParam) (*TxEnvelope, error)
 	// Formulate a transaction from a Payload and retrun the envelop with the Tx bytes ready to sign
-	FormulateTx(context.Context, *PayloadParam) (*TxEnvelope, error)
+	FormulateTx(context.Context, *payload.Any) (*TxEnvelope, error)
 	// Formulate and sign a CallTx transaction signed server-side and wait for it to be included in a block, retrieving response
 	CallTxSync(context.Context, *payload.CallTx) (*exec.TxExecution, error)
 	// Formulate and sign a CallTx transaction signed server-side
@@ -400,7 +361,7 @@ func _Transact_SignTx_Handler(srv interface{}, ctx context.Context, dec func(int
 }
 
 func _Transact_FormulateTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(PayloadParam)
+	in := new(payload.Any)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
@@ -412,7 +373,7 @@ func _Transact_FormulateTx_Handler(srv interface{}, ctx context.Context, dec fun
 		FullMethod: "/rpctransact.Transact/FormulateTx",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(TransactServer).FormulateTx(ctx, req.(*PayloadParam))
+		return srv.(TransactServer).FormulateTx(ctx, req.(*payload.Any))
 	}
 	return interceptor(ctx, in, info, handler)
 }
@@ -656,54 +617,6 @@ func (m *CallCodeParam) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func (m *PayloadParam) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PayloadParam) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if m.CallTx != nil {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintRpctransact(dAtA, i, uint64(m.CallTx.Size()))
-		n2, err := m.CallTx.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n2
-	}
-	if m.SendTx != nil {
-		dAtA[i] = 0x12
-		i++
-		i = encodeVarintRpctransact(dAtA, i, uint64(m.SendTx.Size()))
-		n3, err := m.SendTx.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n3
-	}
-	if m.NameTx != nil {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintRpctransact(dAtA, i, uint64(m.NameTx.Size()))
-		n4, err := m.NameTx.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n4
-	}
-	return i, nil
-}
-
 func (m *TxEnvelope) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -723,11 +636,11 @@ func (m *TxEnvelope) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintRpctransact(dAtA, i, uint64(m.Envelope.Size()))
-		n5, err := m.Envelope.MarshalTo(dAtA[i:])
+		n2, err := m.Envelope.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n5
+		i += n2
 	}
 	return i, nil
 }
@@ -751,21 +664,21 @@ func (m *TxEnvelopeParam) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintRpctransact(dAtA, i, uint64(m.Envelope.Size()))
-		n6, err := m.Envelope.MarshalTo(dAtA[i:])
+		n3, err := m.Envelope.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n6
+		i += n3
 	}
 	if m.Payload != nil {
 		dAtA[i] = 0x12
 		i++
 		i = encodeVarintRpctransact(dAtA, i, uint64(m.Payload.Size()))
-		n7, err := m.Payload.MarshalTo(dAtA[i:])
+		n4, err := m.Payload.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n7
+		i += n4
 	}
 	return i, nil
 }
@@ -795,24 +708,6 @@ func (m *CallCodeParam) Size() (n int) {
 	return n
 }
 
-func (m *PayloadParam) Size() (n int) {
-	var l int
-	_ = l
-	if m.CallTx != nil {
-		l = m.CallTx.Size()
-		n += 1 + l + sovRpctransact(uint64(l))
-	}
-	if m.SendTx != nil {
-		l = m.SendTx.Size()
-		n += 1 + l + sovRpctransact(uint64(l))
-	}
-	if m.NameTx != nil {
-		l = m.NameTx.Size()
-		n += 1 + l + sovRpctransact(uint64(l))
-	}
-	return n
-}
-
 func (m *TxEnvelope) Size() (n int) {
 	var l int
 	_ = l
@@ -992,155 +887,6 @@ func (m *CallCodeParam) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *PayloadParam) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpctransact
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PayloadParam: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PayloadParam: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CallTx", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpctransact
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpctransact
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.CallTx == nil {
-				m.CallTx = &payload.CallTx{}
-			}
-			if err := m.CallTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SendTx", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpctransact
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpctransact
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.SendTx == nil {
-				m.SendTx = &payload.SendTx{}
-			}
-			if err := m.SendTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NameTx", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpctransact
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpctransact
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.NameTx == nil {
-				m.NameTx = &payload.NameTx{}
-			}
-			if err := m.NameTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpctransact(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpctransact
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
 func (m *TxEnvelope) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -1313,7 +1059,7 @@ func (m *TxEnvelopeParam) Unmarshal(dAtA []byte) error {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Payload == nil {
-				m.Payload = &PayloadParam{}
+				m.Payload = &payload.Any{}
 			}
 			if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1449,40 +1195,37 @@ func init() { proto.RegisterFile("rpctransact.proto", fileDescriptorRpctransact)
 func init() { golang_proto.RegisterFile("rpctransact.proto", fileDescriptorRpctransact) }
 
 var fileDescriptorRpctransact = []byte{
-	// 552 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcd, 0x6e, 0xd3, 0x40,
-	0x10, 0xc6, 0xfc, 0x84, 0x76, 0x92, 0x28, 0x74, 0x2f, 0x84, 0x08, 0x25, 0x28, 0x17, 0x10, 0x6a,
-	0xed, 0xa8, 0xe5, 0x88, 0x40, 0x49, 0x68, 0x8f, 0xa8, 0x72, 0x2c, 0x24, 0xb8, 0x6d, 0xec, 0xc5,
-	0x8d, 0x64, 0x7b, 0xad, 0xf5, 0x06, 0x36, 0x8f, 0xc0, 0x89, 0x67, 0xe8, 0x9b, 0x70, 0xcc, 0x91,
-	0x73, 0x0f, 0x11, 0x4a, 0x5f, 0x04, 0xed, 0x4f, 0x60, 0x6d, 0x9a, 0x96, 0x4b, 0x6f, 0xe3, 0x6f,
-	0x66, 0xbe, 0x99, 0xef, 0xd3, 0x8e, 0x61, 0x8f, 0xe5, 0x21, 0x67, 0x38, 0x2b, 0x70, 0xc8, 0xdd,
-	0x9c, 0x51, 0x4e, 0x51, 0xdd, 0x82, 0x3a, 0x07, 0xf1, 0x8c, 0x9f, 0xcd, 0xa7, 0x6e, 0x48, 0x53,
-	0x2f, 0xa6, 0x31, 0xf5, 0x54, 0xcd, 0x74, 0xfe, 0x59, 0x7d, 0xa9, 0x0f, 0x15, 0xe9, 0xde, 0x0e,
-	0x10, 0x41, 0x42, 0x13, 0x37, 0x73, 0xbc, 0x48, 0x28, 0x8e, 0xcc, 0xe7, 0x2e, 0x17, 0x85, 0x0e,
-	0xfb, 0xdf, 0x1d, 0x68, 0x8e, 0x71, 0x92, 0x8c, 0x69, 0x44, 0x4e, 0x31, 0xc3, 0x29, 0xfa, 0x00,
-	0xf5, 0x13, 0x46, 0xd3, 0x61, 0x14, 0x31, 0x52, 0x14, 0x6d, 0xe7, 0x99, 0xf3, 0xa2, 0x31, 0x7a,
-	0xb5, 0x5c, 0xf5, 0xee, 0x5c, 0xac, 0x7a, 0xfb, 0xd6, 0x0e, 0x67, 0x8b, 0x9c, 0xb0, 0x84, 0x44,
-	0x31, 0x61, 0xde, 0x74, 0xce, 0x18, 0xfd, 0xea, 0x85, 0x6c, 0x91, 0x73, 0xea, 0x9a, 0x5e, 0xdf,
-	0x26, 0x42, 0x08, 0xee, 0xcb, 0x21, 0xed, 0xbb, 0x92, 0xd0, 0x57, 0xb1, 0xc4, 0xde, 0x61, 0x8e,
-	0xdb, 0xf7, 0x34, 0x26, 0xe3, 0xfe, 0x37, 0x07, 0x1a, 0xa7, 0x7a, 0x5d, 0xbd, 0xd0, 0x73, 0xa8,
-	0xc9, 0x0d, 0x03, 0xa1, 0x76, 0xa9, 0x1f, 0xb6, 0xdc, 0x8d, 0x1a, 0x0d, 0xfb, 0x26, 0x2d, 0x0b,
-	0x27, 0x24, 0x8b, 0x02, 0xa1, 0x66, 0xd8, 0x85, 0x1a, 0xf6, 0x4d, 0x5a, 0x16, 0xbe, 0xc7, 0x29,
-	0x09, 0x84, 0x1a, 0x6c, 0x17, 0x6a, 0xd8, 0x37, 0xe9, 0x7e, 0x0c, 0x10, 0x88, 0xe3, 0xec, 0x0b,
-	0x49, 0x68, 0x4e, 0xd0, 0x47, 0xd8, 0xd9, 0xc4, 0x66, 0x95, 0xa6, 0x2b, 0x9d, 0xdc, 0x80, 0x23,
-	0xf7, 0x62, 0xd5, 0x7b, 0x79, 0xbd, 0x43, 0x76, 0xbd, 0xff, 0x87, 0xae, 0x7f, 0xee, 0x40, 0xeb,
-	0xef, 0x24, 0xad, 0xfb, 0xf6, 0xc6, 0xa1, 0x23, 0x78, 0x68, 0x2c, 0x36, 0x56, 0x3d, 0x71, 0xed,
-	0xc7, 0x67, 0xdb, 0xef, 0x6f, 0x2a, 0x0f, 0xcf, 0x1f, 0xc0, 0x4e, 0x60, 0x4a, 0xd0, 0x08, 0x5a,
-	0x23, 0x46, 0x71, 0x14, 0xe2, 0x82, 0x07, 0x62, 0xb2, 0xc8, 0x42, 0xf4, 0xb4, 0xc4, 0x51, 0x51,
-	0xd3, 0xd9, 0x73, 0xd5, 0x7b, 0x0c, 0xc4, 0xb1, 0x20, 0xe1, 0x9c, 0xcf, 0x68, 0x86, 0xde, 0xc0,
-	0x23, 0x8b, 0x63, 0x58, 0xdc, 0x4c, 0xd2, 0x50, 0x06, 0xf8, 0x24, 0x24, 0xb3, 0x9c, 0xa3, 0xb7,
-	0x50, 0x9b, 0xcc, 0xe2, 0x2c, 0x10, 0x37, 0x74, 0x3d, 0xde, 0x92, 0x45, 0x43, 0xa8, 0x9f, 0x50,
-	0x96, 0xce, 0x13, 0xcc, 0x49, 0x20, 0xd0, 0x76, 0x13, 0xb6, 0x53, 0x0c, 0x00, 0xf4, 0xeb, 0x53,
-	0x16, 0x54, 0x9f, 0xe6, 0x55, 0xaa, 0xf7, 0xa1, 0xae, 0x93, 0x5a, 0xf0, 0x3f, 0x2d, 0x65, 0x8d,
-	0x1e, 0xec, 0x1a, 0xfe, 0x59, 0xfa, 0x5f, 0xf4, 0xaf, 0x35, 0xbd, 0x3c, 0x2f, 0xd9, 0xd2, 0x29,
-	0x2d, 0x5e, 0xba, 0xf4, 0xab, 0xba, 0x07, 0x00, 0xfa, 0x46, 0x2a, 0x72, 0x34, 0xb8, 0x45, 0x8e,
-	0x4e, 0x56, 0xe5, 0x98, 0x96, 0xb2, 0x9c, 0x01, 0x80, 0x3e, 0xad, 0x0a, 0xbf, 0x06, 0xb7, 0xf0,
-	0xeb, 0x64, 0x95, 0xdf, 0xb4, 0x94, 0xf8, 0x47, 0xe3, 0xe5, 0xba, 0xeb, 0xfc, 0x5c, 0x77, 0x9d,
-	0x5f, 0xeb, 0xae, 0xf3, 0xe3, 0xb2, 0xeb, 0x2c, 0x2f, 0xbb, 0xce, 0xa7, 0x83, 0xeb, 0x8f, 0x84,
-	0xe5, 0xa1, 0x67, 0xb9, 0x34, 0xad, 0xa9, 0x5f, 0xe3, 0xd1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
-	0x0f, 0x15, 0x24, 0xfe, 0x91, 0x05, 0x00, 0x00,
+	// 506 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcf, 0x6f, 0xd3, 0x30,
+	0x18, 0x25, 0xfc, 0x18, 0xdb, 0x97, 0x56, 0x65, 0xbe, 0x50, 0x55, 0x28, 0x45, 0x3d, 0x20, 0x84,
+	0xb6, 0xa4, 0x2a, 0x3b, 0x22, 0x50, 0x53, 0xb6, 0x23, 0x9a, 0xd2, 0x08, 0x09, 0x6e, 0xae, 0x63,
+	0xb2, 0x48, 0x49, 0x1c, 0x39, 0x0e, 0x24, 0x7f, 0x05, 0x97, 0xfd, 0x41, 0x1c, 0x7b, 0xe4, 0xbc,
+	0x43, 0x85, 0xba, 0x7f, 0x04, 0xc5, 0x4e, 0x47, 0xd2, 0xae, 0x2b, 0x97, 0xdd, 0x9e, 0xdf, 0x97,
+	0xf7, 0xfc, 0x3d, 0x7f, 0x76, 0xe0, 0x90, 0x27, 0x44, 0x70, 0x1c, 0xa7, 0x98, 0x08, 0x33, 0xe1,
+	0x4c, 0x30, 0xa4, 0xd7, 0xa8, 0xde, 0xb1, 0x1f, 0x88, 0x8b, 0x6c, 0x66, 0x12, 0x16, 0x59, 0x3e,
+	0xf3, 0x99, 0x25, 0xbf, 0x99, 0x65, 0xdf, 0xe4, 0x4a, 0x2e, 0x24, 0x52, 0xda, 0x1e, 0xd0, 0x9c,
+	0x92, 0x0a, 0xb7, 0x13, 0x5c, 0x84, 0x0c, 0x7b, 0xd5, 0xf2, 0x40, 0xe4, 0xa9, 0x82, 0x83, 0x9f,
+	0x1a, 0xb4, 0x27, 0x38, 0x0c, 0x27, 0xcc, 0xa3, 0xe7, 0x98, 0xe3, 0x08, 0x7d, 0x06, 0xfd, 0x8c,
+	0xb3, 0x68, 0xec, 0x79, 0x9c, 0xa6, 0x69, 0x57, 0x7b, 0xa9, 0xbd, 0x6e, 0xd9, 0x27, 0xf3, 0x45,
+	0xff, 0xc1, 0xd5, 0xa2, 0x7f, 0x54, 0xeb, 0xe1, 0xa2, 0x48, 0x28, 0x0f, 0xa9, 0xe7, 0x53, 0x6e,
+	0xcd, 0x32, 0xce, 0xd9, 0x0f, 0x8b, 0xf0, 0x22, 0x11, 0xcc, 0xac, 0xb4, 0x4e, 0xdd, 0x08, 0x21,
+	0x78, 0x5c, 0x6e, 0xd2, 0x7d, 0x58, 0x1a, 0x3a, 0x12, 0x97, 0xdc, 0x47, 0x2c, 0x70, 0xf7, 0x91,
+	0xe2, 0x4a, 0x3c, 0xf0, 0x01, 0xdc, 0xfc, 0x34, 0xfe, 0x4e, 0x43, 0x96, 0x50, 0xf4, 0x05, 0xf6,
+	0x57, 0x58, 0xb6, 0xa2, 0x8f, 0xda, 0x66, 0xd9, 0xfd, 0x8a, 0xb4, 0xcd, 0xab, 0x45, 0xff, 0xcd,
+	0xdd, 0x5d, 0xd5, 0xbf, 0x77, 0x6e, 0xec, 0x06, 0x97, 0x1a, 0x74, 0xfe, 0xed, 0xa4, 0xc2, 0xdf,
+	0xdf, 0x76, 0xe8, 0x15, 0x3c, 0x3d, 0x57, 0x53, 0x90, 0x47, 0xa0, 0x8f, 0x5a, 0xe6, 0x6a, 0x2a,
+	0xe3, 0xb8, 0x70, 0x56, 0xc5, 0xd1, 0xe5, 0x13, 0xd8, 0x77, 0xab, 0x99, 0x23, 0x1b, 0x3a, 0x36,
+	0x67, 0xd8, 0x23, 0x38, 0x15, 0x6e, 0x3e, 0x2d, 0x62, 0x82, 0x5e, 0x98, 0xf5, 0x7b, 0xb2, 0x16,
+	0xa0, 0x77, 0x68, 0xca, 0xb1, 0xbb, 0xf9, 0x69, 0x4e, 0x49, 0x26, 0x02, 0x16, 0xa3, 0xf7, 0xf0,
+	0xac, 0xe6, 0x31, 0x4e, 0x77, 0x9b, 0xb4, 0x64, 0x66, 0x87, 0x12, 0x1a, 0x24, 0x02, 0x7d, 0x80,
+	0xbd, 0x69, 0xe0, 0xc7, 0x6e, 0xbe, 0x43, 0xf5, 0x7c, 0x4b, 0x15, 0x9d, 0x80, 0x7e, 0xc6, 0x78,
+	0x94, 0x85, 0x58, 0x50, 0x37, 0x47, 0x8d, 0xdc, 0xdb, 0x55, 0x43, 0x80, 0xf2, 0x62, 0x56, 0xa9,
+	0x3b, 0x37, 0x22, 0x45, 0xde, 0x16, 0xf4, 0x08, 0x74, 0x55, 0x54, 0x19, 0x37, 0x24, 0xcd, 0x58,
+	0x16, 0x1c, 0x54, 0xfe, 0x41, 0xf4, 0x5f, 0xf6, 0xef, 0x94, 0x7d, 0x79, 0x71, 0x4b, 0x49, 0xaf,
+	0xd1, 0x78, 0xe3, 0x0d, 0xdd, 0xa6, 0x1e, 0x02, 0x4c, 0x69, 0xec, 0x6d, 0xc4, 0x51, 0xe4, 0x96,
+	0x38, 0xaa, 0xb8, 0x1e, 0xa7, 0x92, 0x34, 0xe3, 0x0c, 0x01, 0x3e, 0xe1, 0x88, 0x6e, 0xf8, 0x2b,
+	0x72, 0x8b, 0xbf, 0x2a, 0xae, 0xfb, 0x57, 0x92, 0x86, 0xbf, 0x3d, 0x99, 0x2f, 0x0d, 0xed, 0xf7,
+	0xd2, 0xd0, 0xfe, 0x2c, 0x0d, 0xed, 0xd7, 0xb5, 0xa1, 0xcd, 0xaf, 0x0d, 0xed, 0xeb, 0xf1, 0xdd,
+	0x4f, 0x81, 0x27, 0xc4, 0xaa, 0x9d, 0xd2, 0x6c, 0x4f, 0xfe, 0x74, 0xde, 0xfe, 0x0d, 0x00, 0x00,
+	0xff, 0xff, 0xd4, 0x01, 0x16, 0xdf, 0xeb, 0x04, 0x00, 0x00,
 }
diff --git a/rpc/rpctransact/transact_server.go b/rpc/rpctransact/transact_server.go
index ab3a8c6ea712dfebd4d81f08345b3b5070ef98d2..d2e0a9b45684dfecc9e3277745c47c2c370f0404 100644
--- a/rpc/rpctransact/transact_server.go
+++ b/rpc/rpctransact/transact_server.go
@@ -52,8 +52,8 @@ func (ts *transactServer) SignTx(ctx context.Context, param *TxEnvelopeParam) (*
 	}, nil
 }
 
-func (ts *transactServer) FormulateTx(ctx context.Context, param *PayloadParam) (*TxEnvelope, error) {
-	txEnv := param.Envelope(ts.transactor.Tip.ChainID())
+func (ts *transactServer) FormulateTx(ctx context.Context, param *payload.Any) (*TxEnvelope, error) {
+	txEnv := EnvelopeFromAny(ts.transactor.Tip.ChainID(), param)
 	if txEnv == nil {
 		return nil, fmt.Errorf("no payload provided to FormulateTx")
 	}
@@ -63,11 +63,11 @@ func (ts *transactServer) FormulateTx(ctx context.Context, param *PayloadParam)
 }
 
 func (ts *transactServer) CallTxSync(ctx context.Context, param *payload.CallTx) (*exec.TxExecution, error) {
-	return ts.BroadcastTxSync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxSync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (ts *transactServer) CallTxAsync(ctx context.Context, param *payload.CallTx) (*txs.Receipt, error) {
-	return ts.BroadcastTxAsync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxAsync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (ts *transactServer) CallTxSim(ctx context.Context, param *payload.CallTx) (*exec.TxExecution, error) {
@@ -82,19 +82,19 @@ func (ts *transactServer) CallCodeSim(ctx context.Context, param *CallCodeParam)
 }
 
 func (ts *transactServer) SendTxSync(ctx context.Context, param *payload.SendTx) (*exec.TxExecution, error) {
-	return ts.BroadcastTxSync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxSync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (ts *transactServer) SendTxAsync(ctx context.Context, param *payload.SendTx) (*txs.Receipt, error) {
-	return ts.BroadcastTxAsync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxAsync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (ts *transactServer) NameTxSync(ctx context.Context, param *payload.NameTx) (*exec.TxExecution, error) {
-	return ts.BroadcastTxSync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxSync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (ts *transactServer) NameTxAsync(ctx context.Context, param *payload.NameTx) (*txs.Receipt, error) {
-	return ts.BroadcastTxAsync(ctx, txEnvelopeParam(param))
+	return ts.BroadcastTxAsync(ctx, &TxEnvelopeParam{Payload: param.Any()})
 }
 
 func (te *TxEnvelopeParam) GetEnvelope(chainID string) *txs.Envelope {
@@ -105,44 +105,26 @@ func (te *TxEnvelopeParam) GetEnvelope(chainID string) *txs.Envelope {
 		return te.Envelope
 	}
 	if te.Payload != nil {
-		return te.Payload.Envelope(chainID)
+		return EnvelopeFromAny(chainID, te.Payload)
 	}
 	return nil
 }
 
-func (pp *PayloadParam) Envelope(chainID string) *txs.Envelope {
-	if pp.CallTx != nil {
-		return txs.Enclose(chainID, pp.CallTx)
+func EnvelopeFromAny(chainID string, p *payload.Any) *txs.Envelope {
+	if p.CallTx != nil {
+		return txs.Enclose(chainID, p.CallTx)
 	}
-	if pp.SendTx != nil {
-		return txs.Enclose(chainID, pp.SendTx)
+	if p.SendTx != nil {
+		return txs.Enclose(chainID, p.SendTx)
 	}
-	if pp.NameTx != nil {
-		return txs.Enclose(chainID, pp.NameTx)
+	if p.NameTx != nil {
+		return txs.Enclose(chainID, p.NameTx)
 	}
-	return nil
-}
-
-func txEnvelopeParam(pl payload.Payload) *TxEnvelopeParam {
-	switch tx := pl.(type) {
-	case *payload.CallTx:
-		return &TxEnvelopeParam{
-			Payload: &PayloadParam{
-				CallTx: tx,
-			},
-		}
-	case *payload.SendTx:
-		return &TxEnvelopeParam{
-			Payload: &PayloadParam{
-				SendTx: tx,
-			},
-		}
-	case *payload.NameTx:
-		return &TxEnvelopeParam{
-			Payload: &PayloadParam{
-				NameTx: tx,
-			},
-		}
+	if p.PermsTx != nil {
+		return txs.Enclose(chainID, p.PermsTx)
+	}
+	if p.GovTx != nil {
+		return txs.Enclose(chainID, p.GovTx)
 	}
 	return nil
 }
diff --git a/rpc/service.go b/rpc/service.go
index 098dd150386500ea623adbdb9cf31aff8a2d27a8..3c266ad257c5576ff0136dfe80c673e2feec43c5 100644
--- a/rpc/service.go
+++ b/rpc/service.go
@@ -17,15 +17,16 @@ package rpc
 import (
 	"encoding/json"
 	"fmt"
+	"math/big"
 	"time"
 
 	"github.com/hyperledger/burrow/acm"
 	"github.com/hyperledger/burrow/acm/state"
+	"github.com/hyperledger/burrow/acm/validator"
+	"github.com/hyperledger/burrow/bcm"
 	"github.com/hyperledger/burrow/binary"
-	bcm "github.com/hyperledger/burrow/blockchain"
-	"github.com/hyperledger/burrow/consensus/tendermint/query"
+	"github.com/hyperledger/burrow/consensus/tendermint"
 	"github.com/hyperledger/burrow/crypto"
-	"github.com/hyperledger/burrow/execution"
 	"github.com/hyperledger/burrow/execution/names"
 	"github.com/hyperledger/burrow/logging"
 	"github.com/hyperledger/burrow/logging/structure"
@@ -44,31 +45,24 @@ type Service struct {
 	state      state.IterableReader
 	nameReg    names.IterableReader
 	blockchain bcm.BlockchainInfo
-	transactor *execution.Transactor
-	nodeView   *query.NodeView
+	nodeView   *tendermint.NodeView
 	logger     *logging.Logger
 }
 
-func NewService(state state.IterableReader, nameReg names.IterableReader,
-	blockchain bcm.BlockchainInfo, transactor *execution.Transactor, nodeView *query.NodeView,
-	logger *logging.Logger) *Service {
+// Service provides an internal query and information service with serialisable return types on which can accomodate
+// a number of transport front ends
+func NewService(state state.IterableReader, nameReg names.IterableReader, blockchain bcm.BlockchainInfo,
+	nodeView *tendermint.NodeView, logger *logging.Logger) *Service {
 
 	return &Service{
 		state:      state,
 		nameReg:    nameReg,
 		blockchain: blockchain,
-		transactor: transactor,
 		nodeView:   nodeView,
 		logger:     logger.With(structure.ComponentKey, "Service"),
 	}
 }
 
-// Get a Transactor providing methods for delegating signing and the core BroadcastTx function for publishing
-// transactions to the network
-func (s *Service) Transactor() *execution.Transactor {
-	return s.transactor
-}
-
 func (s *Service) State() state.Reader {
 	return s.state
 }
@@ -116,7 +110,7 @@ func (s *Service) Status() (*ResultStatus, error) {
 	return &ResultStatus{
 		NodeInfo:          s.nodeView.NodeInfo(),
 		GenesisHash:       s.blockchain.GenesisHash(),
-		PubKey:            publicKey,
+		PublicKey:         publicKey,
 		LatestBlockHash:   latestBlockHash,
 		LatestBlockHeight: latestHeight,
 		LatestBlockTime:   latestBlockTime,
@@ -156,6 +150,7 @@ func (s *Service) NetInfo() (*ResultNetInfo, error) {
 		return nil, err
 	}
 	return &ResultNetInfo{
+		ThisNode:  s.nodeView.NodeInfo(),
 		Listening: listening,
 		Listeners: listeners,
 		Peers:     peers.Peers,
@@ -241,7 +236,7 @@ func (s *Service) GetAccountHumanReadable(address crypto.Address) (*ResultGetAcc
 	if acc == nil {
 		return &ResultGetAccountHumanReadable{}, nil
 	}
-	perms, err := permission.BasePermissionsToStringList(acc.Permissions().Base)
+	perms := permission.BasePermissionsToStringList(acc.Permissions().Base)
 	if acc == nil {
 		return &ResultGetAccountHumanReadable{}, nil
 	}
@@ -322,18 +317,19 @@ func (s *Service) ListBlocks(minHeight, maxHeight uint64) (*ResultListBlocks, er
 }
 
 func (s *Service) ListValidators() (*ResultListValidators, error) {
-	concreteValidators := make([]*acm.ConcreteValidator, 0, s.blockchain.NumValidators())
-	s.blockchain.IterateValidators(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
-		concreteValidators = append(concreteValidators, &acm.ConcreteValidator{
-			Address:   publicKey.Address(),
-			PublicKey: publicKey,
-			Power:     power,
+	validators := make([]*validator.Validator, 0, s.blockchain.NumValidators())
+	s.blockchain.Validators().Iterate(func(id crypto.Addressable, power *big.Int) (stop bool) {
+		address := id.Address()
+		validators = append(validators, &validator.Validator{
+			Address:   &address,
+			PublicKey: id.PublicKey(),
+			Power:     power.Uint64(),
 		})
 		return
 	})
 	return &ResultListValidators{
 		BlockHeight:         s.blockchain.LastBlockHeight(),
-		BondedValidators:    concreteValidators,
+		BondedValidators:    validators,
 		UnbondingValidators: nil,
 	}, nil
 }
diff --git a/txs/amino_codec.go b/txs/amino_codec.go
index 9d85480b206a5b818c080d5ba87c332e4db98d3b..2ff423317dbc551256ca808f153009ae81641d31 100644
--- a/txs/amino_codec.go
+++ b/txs/amino_codec.go
@@ -18,9 +18,9 @@ func NewAminoCodec() *aminoCodec {
 	registerTx(cdc, &payload.CallTx{})
 	registerTx(cdc, &payload.BondTx{})
 	registerTx(cdc, &payload.UnbondTx{})
-	registerTx(cdc, &payload.PermissionsTx{})
+	registerTx(cdc, &payload.PermsTx{})
 	registerTx(cdc, &payload.NameTx{})
-	registerTx(cdc, &payload.GovernanceTx{})
+	registerTx(cdc, &payload.GovTx{})
 	return &aminoCodec{cdc}
 }
 
diff --git a/txs/payload/bond_tx.go b/txs/payload/bond_tx.go
index 59585b6e01611c145006aaca73ed30701d92ad0c..f81b213368ecb62eecc29223cc3f65b6b279b8e0 100644
--- a/txs/payload/bond_tx.go
+++ b/txs/payload/bond_tx.go
@@ -54,3 +54,9 @@ func (tx *BondTx) AddOutput(addr crypto.Address, amt uint64) error {
 	})
 	return nil
 }
+
+func (tx *BondTx) Any() *Any {
+	return &Any{
+		BondTx: tx,
+	}
+}
diff --git a/txs/payload/call_tx.go b/txs/payload/call_tx.go
index 68638de3e46eca620b55d658801dc54437b4f804..9855e296df7b63c2869850412d6d26edfd8a59ac 100644
--- a/txs/payload/call_tx.go
+++ b/txs/payload/call_tx.go
@@ -59,3 +59,9 @@ func (tx *CallTx) CreatesContractAddress() *crypto.Address {
 	address := crypto.NewContractAddress(tx.Input.Address, tx.Input.Sequence)
 	return &address
 }
+
+func (tx *CallTx) Any() *Any {
+	return &Any{
+		CallTx: tx,
+	}
+}
diff --git a/txs/payload/gov_tx.go b/txs/payload/gov_tx.go
new file mode 100644
index 0000000000000000000000000000000000000000..eb4e5a1d40e2730c723d74441ab761874cc0d8bf
--- /dev/null
+++ b/txs/payload/gov_tx.go
@@ -0,0 +1,23 @@
+package payload
+
+import (
+	"fmt"
+)
+
+func (tx *GovTx) Type() Type {
+	return TypeGovernance
+}
+
+func (tx *GovTx) GetInputs() []*TxInput {
+	return tx.Inputs
+}
+
+func (tx *GovTx) String() string {
+	return fmt.Sprintf("GovTx{%v -> %v}", tx.Inputs, tx.AccountUpdates)
+}
+
+func (tx *GovTx) Any() *Any {
+	return &Any{
+		GovTx: tx,
+	}
+}
diff --git a/txs/payload/governance_tx.go b/txs/payload/governance_tx.go
deleted file mode 100644
index 046f912916a4397f147ffbc717ac4c9c30bc38b1..0000000000000000000000000000000000000000
--- a/txs/payload/governance_tx.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package payload
-
-import (
-	"fmt"
-
-	"github.com/hyperledger/burrow/acm/state"
-	"github.com/hyperledger/burrow/crypto"
-	"github.com/hyperledger/burrow/genesis/spec"
-)
-
-func NewGovTx(st state.AccountGetter, from crypto.Address, accounts ...*spec.TemplateAccount) (*GovernanceTx, error) {
-	acc, err := st.GetAccount(from)
-	if err != nil {
-		return nil, err
-	}
-	if acc == nil {
-		return nil, fmt.Errorf("could not get account %v", from)
-	}
-
-	sequence := acc.Sequence() + 1
-	return NewGovTxWithSequence(from, sequence, accounts), nil
-}
-
-func NewGovTxWithSequence(from crypto.Address, sequence uint64, accounts []*spec.TemplateAccount) *GovernanceTx {
-	return &GovernanceTx{
-		Inputs: []*TxInput{{
-			Address:  from,
-			Sequence: sequence,
-		}},
-		AccountUpdates: accounts,
-	}
-}
-
-func (tx *GovernanceTx) Type() Type {
-	return TypeGovernance
-}
-
-func (tx *GovernanceTx) GetInputs() []*TxInput {
-	return tx.Inputs
-}
-
-func (tx *GovernanceTx) String() string {
-	return fmt.Sprintf("GovernanceTx{%v -> %v}", tx.Inputs, tx.AccountUpdates)
-}
diff --git a/txs/payload/name_tx.go b/txs/payload/name_tx.go
index 334fa6df32d0a2d940be81e65ec935e9517d581d..13f85318fd2181b885e5d0cdcb13873ef12cf8e6 100644
--- a/txs/payload/name_tx.go
+++ b/txs/payload/name_tx.go
@@ -47,3 +47,9 @@ func (tx *NameTx) GetInputs() []*TxInput {
 func (tx *NameTx) String() string {
 	return fmt.Sprintf("NameTx{%v -> %s: %s}", tx.Input, tx.Name, tx.Data)
 }
+
+func (tx *NameTx) Any() *Any {
+	return &Any{
+		NameTx: tx,
+	}
+}
diff --git a/txs/payload/payload.go b/txs/payload/payload.go
index f415d8f8331e06f85e3dc8d9488fe07a56ffcc74..d55a8cadd48e940fa2fccfa4f1e8fa3230a7736d 100644
--- a/txs/payload/payload.go
+++ b/txs/payload/payload.go
@@ -1,5 +1,7 @@
 package payload
 
+import "fmt"
+
 /*
 Payload (Transaction) is an atomic operation on the ledger state.
 
@@ -13,7 +15,7 @@ Validation Txs:
  - UnbondTx       Validator leaves
 
 Admin Txs:
- - PermissionsTx
+ - PermsTx
 */
 
 type Type uint32
@@ -42,8 +44,8 @@ var nameFromType = map[Type]string{
 	TypeName:        "NameTx",
 	TypeBond:        "BondTx",
 	TypeUnbond:      "UnbondTx",
-	TypePermissions: "PermissionsTx",
-	TypeGovernance:  "GovernanceTx",
+	TypePermissions: "PermsTx",
+	TypeGovernance:  "GovTx",
 }
 
 var typeFromName = make(map[string]Type)
@@ -88,24 +90,50 @@ type Payload interface {
 	String() string
 	GetInputs() []*TxInput
 	Type() Type
+	Any() *Any
 	// The serialised size in bytes
 	Size() int
 }
 
-func New(txType Type) Payload {
+type UnknownTx struct {
+}
+
+func (UnknownTx) String() string {
+	panic("implement me")
+}
+
+func (UnknownTx) GetInputs() []*TxInput {
+	panic("implement me")
+}
+
+func (UnknownTx) Type() Type {
+	panic("implement me")
+}
+
+func (UnknownTx) Any() *Any {
+	panic("implement me")
+}
+
+func (UnknownTx) Size() int {
+	panic("implement me")
+}
+
+func New(txType Type) (Payload, error) {
 	switch txType {
 	case TypeSend:
-		return &SendTx{}
+		return &SendTx{}, nil
 	case TypeCall:
-		return &CallTx{}
+		return &CallTx{}, nil
 	case TypeName:
-		return &NameTx{}
+		return &NameTx{}, nil
 	case TypeBond:
-		return &BondTx{}
+		return &BondTx{}, nil
 	case TypeUnbond:
-		return &UnbondTx{}
+		return &UnbondTx{}, nil
 	case TypePermissions:
-		return &PermissionsTx{}
+		return &PermsTx{}, nil
+	case TypeGovernance:
+		return &GovTx{}, nil
 	}
-	return nil
+	return nil, fmt.Errorf("unknown payload type: %d", txType)
 }
diff --git a/txs/payload/payload.pb.go b/txs/payload/payload.pb.go
index a5e27c742767e7080a284510272574ec29ecf60a..f1e5755ef999a398482a3c933ae67b3e7c7f29a2 100644
--- a/txs/payload/payload.pb.go
+++ b/txs/payload/payload.pb.go
@@ -8,16 +8,16 @@
 		payload.proto
 
 	It has these top-level messages:
-		AnyPayload
+		Any
 		TxInput
 		TxOutput
 		CallTx
 		SendTx
-		PermissionsTx
+		PermsTx
 		NameTx
 		BondTx
 		UnbondTx
-		GovernanceTx
+		GovTx
 */
 package payload
 
@@ -46,56 +46,72 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
-type AnyPayload struct {
-	CallTx        *CallTx        `protobuf:"bytes,1,opt,name=CallTx" json:"CallTx,omitempty"`
-	SendTx        *SendTx        `protobuf:"bytes,2,opt,name=SendTx" json:"SendTx,omitempty"`
-	NameTx        *NameTx        `protobuf:"bytes,3,opt,name=NameTx" json:"NameTx,omitempty"`
-	PermissionsTx *PermissionsTx `protobuf:"bytes,4,opt,name=PermissionsTx" json:"PermissionsTx,omitempty"`
-	GovernanceTx  *GovernanceTx  `protobuf:"bytes,5,opt,name=GovernanceTx" json:"GovernanceTx,omitempty"`
+type Any struct {
+	CallTx   *CallTx   `protobuf:"bytes,1,opt,name=CallTx" json:"CallTx,omitempty"`
+	SendTx   *SendTx   `protobuf:"bytes,2,opt,name=SendTx" json:"SendTx,omitempty"`
+	NameTx   *NameTx   `protobuf:"bytes,3,opt,name=NameTx" json:"NameTx,omitempty"`
+	PermsTx  *PermsTx  `protobuf:"bytes,4,opt,name=PermsTx" json:"PermsTx,omitempty"`
+	GovTx    *GovTx    `protobuf:"bytes,5,opt,name=GovTx" json:"GovTx,omitempty"`
+	BondTx   *BondTx   `protobuf:"bytes,6,opt,name=BondTx" json:"BondTx,omitempty"`
+	UnbondTx *UnbondTx `protobuf:"bytes,7,opt,name=UnbondTx" json:"UnbondTx,omitempty"`
 }
 
-func (m *AnyPayload) Reset()                    { *m = AnyPayload{} }
-func (m *AnyPayload) String() string            { return proto.CompactTextString(m) }
-func (*AnyPayload) ProtoMessage()               {}
-func (*AnyPayload) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{0} }
+func (m *Any) Reset()                    { *m = Any{} }
+func (m *Any) String() string            { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()               {}
+func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{0} }
 
-func (m *AnyPayload) GetCallTx() *CallTx {
+func (m *Any) GetCallTx() *CallTx {
 	if m != nil {
 		return m.CallTx
 	}
 	return nil
 }
 
-func (m *AnyPayload) GetSendTx() *SendTx {
+func (m *Any) GetSendTx() *SendTx {
 	if m != nil {
 		return m.SendTx
 	}
 	return nil
 }
 
-func (m *AnyPayload) GetNameTx() *NameTx {
+func (m *Any) GetNameTx() *NameTx {
 	if m != nil {
 		return m.NameTx
 	}
 	return nil
 }
 
-func (m *AnyPayload) GetPermissionsTx() *PermissionsTx {
+func (m *Any) GetPermsTx() *PermsTx {
 	if m != nil {
-		return m.PermissionsTx
+		return m.PermsTx
 	}
 	return nil
 }
 
-func (m *AnyPayload) GetGovernanceTx() *GovernanceTx {
+func (m *Any) GetGovTx() *GovTx {
 	if m != nil {
-		return m.GovernanceTx
+		return m.GovTx
 	}
 	return nil
 }
 
-func (*AnyPayload) XXX_MessageName() string {
-	return "payload.AnyPayload"
+func (m *Any) GetBondTx() *BondTx {
+	if m != nil {
+		return m.BondTx
+	}
+	return nil
+}
+
+func (m *Any) GetUnbondTx() *UnbondTx {
+	if m != nil {
+		return m.UnbondTx
+	}
+	return nil
+}
+
+func (*Any) XXX_MessageName() string {
+	return "payload.Any"
 }
 
 // An input to a transaction that may carry an Amount as a charge and whose sequence number must be one greater than
@@ -214,33 +230,33 @@ func (*SendTx) XXX_MessageName() string {
 }
 
 // An update to the on-chain permissions
-type PermissionsTx struct {
+type PermsTx struct {
 	// The permission moderator
 	Input *TxInput `protobuf:"bytes,1,opt,name=Input" json:"Input,omitempty"`
 	// The modified permissions
 	PermArgs permission.PermArgs `protobuf:"bytes,2,opt,name=PermArgs" json:"PermArgs"`
 }
 
-func (m *PermissionsTx) Reset()                    { *m = PermissionsTx{} }
-func (*PermissionsTx) ProtoMessage()               {}
-func (*PermissionsTx) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{5} }
+func (m *PermsTx) Reset()                    { *m = PermsTx{} }
+func (*PermsTx) ProtoMessage()               {}
+func (*PermsTx) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{5} }
 
-func (m *PermissionsTx) GetInput() *TxInput {
+func (m *PermsTx) GetInput() *TxInput {
 	if m != nil {
 		return m.Input
 	}
 	return nil
 }
 
-func (m *PermissionsTx) GetPermArgs() permission.PermArgs {
+func (m *PermsTx) GetPermArgs() permission.PermArgs {
 	if m != nil {
 		return m.PermArgs
 	}
 	return permission.PermArgs{}
 }
 
-func (*PermissionsTx) XXX_MessageName() string {
-	return "payload.PermissionsTx"
+func (*PermsTx) XXX_MessageName() string {
+	return "payload.PermsTx"
 }
 
 // A request to claim a globally unique name across the entire chain with some optional data storage leased for a fee
@@ -318,21 +334,21 @@ func (*UnbondTx) XXX_MessageName() string {
 	return "payload.UnbondTx"
 }
 
-type GovernanceTx struct {
+type GovTx struct {
 	Inputs         []*TxInput              `protobuf:"bytes,1,rep,name=Inputs" json:"Inputs,omitempty"`
 	AccountUpdates []*spec.TemplateAccount `protobuf:"bytes,2,rep,name=AccountUpdates" json:"AccountUpdates,omitempty"`
 }
 
-func (m *GovernanceTx) Reset()                    { *m = GovernanceTx{} }
-func (*GovernanceTx) ProtoMessage()               {}
-func (*GovernanceTx) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{9} }
+func (m *GovTx) Reset()                    { *m = GovTx{} }
+func (*GovTx) ProtoMessage()               {}
+func (*GovTx) Descriptor() ([]byte, []int) { return fileDescriptorPayload, []int{9} }
 
-func (*GovernanceTx) XXX_MessageName() string {
-	return "payload.GovernanceTx"
+func (*GovTx) XXX_MessageName() string {
+	return "payload.GovTx"
 }
 func init() {
-	proto.RegisterType((*AnyPayload)(nil), "payload.AnyPayload")
-	golang_proto.RegisterType((*AnyPayload)(nil), "payload.AnyPayload")
+	proto.RegisterType((*Any)(nil), "payload.Any")
+	golang_proto.RegisterType((*Any)(nil), "payload.Any")
 	proto.RegisterType((*TxInput)(nil), "payload.TxInput")
 	golang_proto.RegisterType((*TxInput)(nil), "payload.TxInput")
 	proto.RegisterType((*TxOutput)(nil), "payload.TxOutput")
@@ -341,18 +357,18 @@ func init() {
 	golang_proto.RegisterType((*CallTx)(nil), "payload.CallTx")
 	proto.RegisterType((*SendTx)(nil), "payload.SendTx")
 	golang_proto.RegisterType((*SendTx)(nil), "payload.SendTx")
-	proto.RegisterType((*PermissionsTx)(nil), "payload.PermissionsTx")
-	golang_proto.RegisterType((*PermissionsTx)(nil), "payload.PermissionsTx")
+	proto.RegisterType((*PermsTx)(nil), "payload.PermsTx")
+	golang_proto.RegisterType((*PermsTx)(nil), "payload.PermsTx")
 	proto.RegisterType((*NameTx)(nil), "payload.NameTx")
 	golang_proto.RegisterType((*NameTx)(nil), "payload.NameTx")
 	proto.RegisterType((*BondTx)(nil), "payload.BondTx")
 	golang_proto.RegisterType((*BondTx)(nil), "payload.BondTx")
 	proto.RegisterType((*UnbondTx)(nil), "payload.UnbondTx")
 	golang_proto.RegisterType((*UnbondTx)(nil), "payload.UnbondTx")
-	proto.RegisterType((*GovernanceTx)(nil), "payload.GovernanceTx")
-	golang_proto.RegisterType((*GovernanceTx)(nil), "payload.GovernanceTx")
+	proto.RegisterType((*GovTx)(nil), "payload.GovTx")
+	golang_proto.RegisterType((*GovTx)(nil), "payload.GovTx")
 }
-func (m *AnyPayload) Marshal() (dAtA []byte, err error) {
+func (m *Any) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalTo(dAtA)
@@ -362,7 +378,7 @@ func (m *AnyPayload) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *AnyPayload) MarshalTo(dAtA []byte) (int, error) {
+func (m *Any) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
@@ -397,26 +413,46 @@ func (m *AnyPayload) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n3
 	}
-	if m.PermissionsTx != nil {
+	if m.PermsTx != nil {
 		dAtA[i] = 0x22
 		i++
-		i = encodeVarintPayload(dAtA, i, uint64(m.PermissionsTx.Size()))
-		n4, err := m.PermissionsTx.MarshalTo(dAtA[i:])
+		i = encodeVarintPayload(dAtA, i, uint64(m.PermsTx.Size()))
+		n4, err := m.PermsTx.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
 		i += n4
 	}
-	if m.GovernanceTx != nil {
+	if m.GovTx != nil {
 		dAtA[i] = 0x2a
 		i++
-		i = encodeVarintPayload(dAtA, i, uint64(m.GovernanceTx.Size()))
-		n5, err := m.GovernanceTx.MarshalTo(dAtA[i:])
+		i = encodeVarintPayload(dAtA, i, uint64(m.GovTx.Size()))
+		n5, err := m.GovTx.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
 		i += n5
 	}
+	if m.BondTx != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintPayload(dAtA, i, uint64(m.BondTx.Size()))
+		n6, err := m.BondTx.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	if m.UnbondTx != nil {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintPayload(dAtA, i, uint64(m.UnbondTx.Size()))
+		n7, err := m.UnbondTx.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n7
+	}
 	return i, nil
 }
 
@@ -438,11 +474,11 @@ func (m *TxInput) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintPayload(dAtA, i, uint64(m.Address.Size()))
-	n6, err := m.Address.MarshalTo(dAtA[i:])
+	n8, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n6
+	i += n8
 	if m.Amount != 0 {
 		dAtA[i] = 0x10
 		i++
@@ -474,11 +510,11 @@ func (m *TxOutput) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintPayload(dAtA, i, uint64(m.Address.Size()))
-	n7, err := m.Address.MarshalTo(dAtA[i:])
+	n9, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n7
+	i += n9
 	if m.Amount != 0 {
 		dAtA[i] = 0x10
 		i++
@@ -506,21 +542,21 @@ func (m *CallTx) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintPayload(dAtA, i, uint64(m.Input.Size()))
-		n8, err := m.Input.MarshalTo(dAtA[i:])
+		n10, err := m.Input.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n8
+		i += n10
 	}
 	if m.Address != nil {
 		dAtA[i] = 0x12
 		i++
 		i = encodeVarintPayload(dAtA, i, uint64(m.Address.Size()))
-		n9, err := m.Address.MarshalTo(dAtA[i:])
+		n11, err := m.Address.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n9
+		i += n11
 	}
 	if m.GasLimit != 0 {
 		dAtA[i] = 0x18
@@ -535,11 +571,11 @@ func (m *CallTx) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0x2a
 	i++
 	i = encodeVarintPayload(dAtA, i, uint64(m.Data.Size()))
-	n10, err := m.Data.MarshalTo(dAtA[i:])
+	n12, err := m.Data.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n10
+	i += n12
 	return i, nil
 }
 
@@ -585,7 +621,7 @@ func (m *SendTx) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func (m *PermissionsTx) Marshal() (dAtA []byte, err error) {
+func (m *PermsTx) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalTo(dAtA)
@@ -595,7 +631,7 @@ func (m *PermissionsTx) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *PermissionsTx) MarshalTo(dAtA []byte) (int, error) {
+func (m *PermsTx) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
@@ -604,20 +640,20 @@ func (m *PermissionsTx) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintPayload(dAtA, i, uint64(m.Input.Size()))
-		n11, err := m.Input.MarshalTo(dAtA[i:])
+		n13, err := m.Input.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n11
+		i += n13
 	}
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintPayload(dAtA, i, uint64(m.PermArgs.Size()))
-	n12, err := m.PermArgs.MarshalTo(dAtA[i:])
+	n14, err := m.PermArgs.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n12
+	i += n14
 	return i, nil
 }
 
@@ -640,11 +676,11 @@ func (m *NameTx) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintPayload(dAtA, i, uint64(m.Input.Size()))
-		n13, err := m.Input.MarshalTo(dAtA[i:])
+		n15, err := m.Input.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n13
+		i += n15
 	}
 	if len(m.Name) > 0 {
 		dAtA[i] = 0x12
@@ -727,20 +763,20 @@ func (m *UnbondTx) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintPayload(dAtA, i, uint64(m.Input.Size()))
-		n14, err := m.Input.MarshalTo(dAtA[i:])
+		n16, err := m.Input.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n14
+		i += n16
 	}
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintPayload(dAtA, i, uint64(m.Address.Size()))
-	n15, err := m.Address.MarshalTo(dAtA[i:])
+	n17, err := m.Address.MarshalTo(dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n15
+	i += n17
 	if m.Height != 0 {
 		dAtA[i] = 0x18
 		i++
@@ -749,7 +785,7 @@ func (m *UnbondTx) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func (m *GovernanceTx) Marshal() (dAtA []byte, err error) {
+func (m *GovTx) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalTo(dAtA)
@@ -759,7 +795,7 @@ func (m *GovernanceTx) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *GovernanceTx) MarshalTo(dAtA []byte) (int, error) {
+func (m *GovTx) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
@@ -800,7 +836,7 @@ func encodeVarintPayload(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	return offset + 1
 }
-func (m *AnyPayload) Size() (n int) {
+func (m *Any) Size() (n int) {
 	var l int
 	_ = l
 	if m.CallTx != nil {
@@ -815,12 +851,20 @@ func (m *AnyPayload) Size() (n int) {
 		l = m.NameTx.Size()
 		n += 1 + l + sovPayload(uint64(l))
 	}
-	if m.PermissionsTx != nil {
-		l = m.PermissionsTx.Size()
+	if m.PermsTx != nil {
+		l = m.PermsTx.Size()
 		n += 1 + l + sovPayload(uint64(l))
 	}
-	if m.GovernanceTx != nil {
-		l = m.GovernanceTx.Size()
+	if m.GovTx != nil {
+		l = m.GovTx.Size()
+		n += 1 + l + sovPayload(uint64(l))
+	}
+	if m.BondTx != nil {
+		l = m.BondTx.Size()
+		n += 1 + l + sovPayload(uint64(l))
+	}
+	if m.UnbondTx != nil {
+		l = m.UnbondTx.Size()
 		n += 1 + l + sovPayload(uint64(l))
 	}
 	return n
@@ -891,7 +935,7 @@ func (m *SendTx) Size() (n int) {
 	return n
 }
 
-func (m *PermissionsTx) Size() (n int) {
+func (m *PermsTx) Size() (n int) {
 	var l int
 	_ = l
 	if m.Input != nil {
@@ -957,7 +1001,7 @@ func (m *UnbondTx) Size() (n int) {
 	return n
 }
 
-func (m *GovernanceTx) Size() (n int) {
+func (m *GovTx) Size() (n int) {
 	var l int
 	_ = l
 	if len(m.Inputs) > 0 {
@@ -988,7 +1032,7 @@ func sovPayload(x uint64) (n int) {
 func sozPayload(x uint64) (n int) {
 	return sovPayload(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
-func (m *AnyPayload) Unmarshal(dAtA []byte) error {
+func (m *Any) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -1011,10 +1055,10 @@ func (m *AnyPayload) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: AnyPayload: wiretype end group for non-group")
+			return fmt.Errorf("proto: Any: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AnyPayload: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
@@ -1118,7 +1162,7 @@ func (m *AnyPayload) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PermissionsTx", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field PermsTx", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -1142,16 +1186,82 @@ func (m *AnyPayload) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.PermissionsTx == nil {
-				m.PermissionsTx = &PermissionsTx{}
+			if m.PermsTx == nil {
+				m.PermsTx = &PermsTx{}
 			}
-			if err := m.PermissionsTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.PermsTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field GovernanceTx", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field GovTx", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPayload
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPayload
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GovTx == nil {
+				m.GovTx = &GovTx{}
+			}
+			if err := m.GovTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BondTx", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPayload
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPayload
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BondTx == nil {
+				m.BondTx = &BondTx{}
+			}
+			if err := m.BondTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UnbondTx", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -1175,10 +1285,10 @@ func (m *AnyPayload) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.GovernanceTx == nil {
-				m.GovernanceTx = &GovernanceTx{}
+			if m.UnbondTx == nil {
+				m.UnbondTx = &UnbondTx{}
 			}
-			if err := m.GovernanceTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.UnbondTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1715,7 +1825,7 @@ func (m *SendTx) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *PermissionsTx) Unmarshal(dAtA []byte) error {
+func (m *PermsTx) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -1738,10 +1848,10 @@ func (m *PermissionsTx) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: PermissionsTx: wiretype end group for non-group")
+			return fmt.Errorf("proto: PermsTx: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PermissionsTx: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: PermsTx: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
@@ -2232,7 +2342,7 @@ func (m *UnbondTx) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *GovernanceTx) Unmarshal(dAtA []byte) error {
+func (m *GovTx) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -2255,10 +2365,10 @@ func (m *GovernanceTx) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: GovernanceTx: wiretype end group for non-group")
+			return fmt.Errorf("proto: GovTx: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: GovernanceTx: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: GovTx: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
@@ -2453,46 +2563,47 @@ func init() { proto.RegisterFile("payload.proto", fileDescriptorPayload) }
 func init() { golang_proto.RegisterFile("payload.proto", fileDescriptorPayload) }
 
 var fileDescriptorPayload = []byte{
-	// 652 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xbf, 0x6f, 0xd3, 0x40,
-	0x14, 0xee, 0x35, 0x6e, 0x62, 0x8e, 0x16, 0xca, 0x89, 0x56, 0x51, 0x86, 0x04, 0x75, 0x80, 0x22,
-	0xa8, 0x83, 0xf8, 0x25, 0x51, 0x21, 0xa1, 0xb8, 0x88, 0xb6, 0x08, 0x85, 0xea, 0xea, 0x2e, 0x6c,
-	0xfe, 0x71, 0x38, 0x16, 0xb1, 0xcf, 0xd8, 0x67, 0xb0, 0x37, 0x36, 0xd8, 0x59, 0x18, 0x3b, 0xf0,
-	0x87, 0x30, 0x66, 0x64, 0x66, 0xa8, 0x50, 0xfa, 0x67, 0xb0, 0xa0, 0x3b, 0x9f, 0x5d, 0x27, 0x40,
-	0x95, 0x82, 0xc4, 0x76, 0xef, 0x7d, 0xdf, 0xbb, 0xf7, 0xde, 0x77, 0xef, 0xd9, 0x70, 0x29, 0x34,
-	0xb3, 0x21, 0x35, 0x1d, 0x2d, 0x8c, 0x28, 0xa3, 0xa8, 0x21, 0xcd, 0xd6, 0x86, 0xeb, 0xb1, 0x41,
-	0x62, 0x69, 0x36, 0xf5, 0xbb, 0x2e, 0x75, 0x69, 0x57, 0xe0, 0x56, 0xf2, 0x52, 0x58, 0xc2, 0x10,
-	0xa7, 0x3c, 0xae, 0xb5, 0x1c, 0x92, 0xc8, 0xf7, 0xe2, 0xd8, 0xa3, 0x81, 0xf4, 0xc0, 0x38, 0x24,
-	0x76, 0x7e, 0x5e, 0x7b, 0x37, 0x0f, 0x61, 0x2f, 0xc8, 0xf6, 0xf2, 0xbb, 0xd1, 0x35, 0x58, 0xdf,
-	0x32, 0x87, 0x43, 0x23, 0x6d, 0x82, 0x2b, 0x60, 0xfd, 0xfc, 0xed, 0x8b, 0x5a, 0x51, 0x44, 0xee,
-	0xc6, 0x12, 0xe6, 0xc4, 0x7d, 0x12, 0x38, 0x46, 0xda, 0x9c, 0x9f, 0x22, 0xe6, 0x6e, 0x2c, 0x61,
-	0x4e, 0xec, 0x9b, 0x3e, 0x31, 0xd2, 0x66, 0x6d, 0x8a, 0x98, 0xbb, 0xb1, 0x84, 0xd1, 0x43, 0xb8,
-	0xb4, 0x57, 0x56, 0x1a, 0x1b, 0x69, 0x53, 0x11, 0xfc, 0xd5, 0x92, 0x3f, 0x81, 0xe2, 0x49, 0x32,
-	0x7a, 0x00, 0x17, 0xb7, 0xe9, 0x1b, 0x12, 0x05, 0x66, 0x60, 0xf3, 0x64, 0x0b, 0x22, 0x78, 0xa5,
-	0x0c, 0xae, 0x82, 0x78, 0x82, 0xba, 0xf6, 0x11, 0xc0, 0x86, 0x91, 0xee, 0x06, 0x61, 0xc2, 0x50,
-	0x1f, 0x36, 0x7a, 0x8e, 0x13, 0x91, 0x38, 0x16, 0x02, 0x2c, 0xea, 0x77, 0x47, 0x47, 0x9d, 0xb9,
-	0x6f, 0x47, 0x9d, 0x9b, 0x15, 0xd1, 0x07, 0x59, 0x48, 0xa2, 0x21, 0x71, 0x5c, 0x12, 0x75, 0xad,
-	0x24, 0x8a, 0xe8, 0xdb, 0xae, 0x1d, 0x65, 0x21, 0xa3, 0x9a, 0x8c, 0xc5, 0xc5, 0x25, 0x68, 0x15,
-	0xd6, 0x7b, 0x3e, 0x4d, 0x02, 0x26, 0x64, 0x52, 0xb0, 0xb4, 0x50, 0x0b, 0xaa, 0xfb, 0xe4, 0x75,
-	0x42, 0x02, 0x9b, 0x08, 0x5d, 0x14, 0x5c, 0xda, 0x9b, 0xca, 0xa7, 0xc3, 0xce, 0xdc, 0x5a, 0x0a,
-	0x55, 0x23, 0x7d, 0x9e, 0xb0, 0xff, 0x58, 0x95, 0xcc, 0xfc, 0x03, 0x14, 0x43, 0x80, 0xae, 0xc2,
-	0x05, 0xa1, 0x8b, 0x9c, 0x86, 0xe5, 0x52, 0x4e, 0xa9, 0x17, 0xce, 0x61, 0xf4, 0xf4, 0xa4, 0xc0,
-	0x79, 0x51, 0xe0, 0xad, 0xbf, 0x2f, 0xae, 0x05, 0xd5, 0x6d, 0x33, 0x7e, 0xe6, 0xf9, 0x1e, 0x2b,
-	0xa4, 0x29, 0x6c, 0xb4, 0x0c, 0x6b, 0x4f, 0x08, 0x11, 0x93, 0xa1, 0x60, 0x7e, 0x44, 0xbb, 0x50,
-	0x79, 0x6c, 0x32, 0x53, 0xbc, 0xf7, 0xa2, 0x7e, 0x4f, 0xea, 0xb2, 0x71, 0x7a, 0x6a, 0xcb, 0x0b,
-	0xcc, 0x28, 0xd3, 0x76, 0x48, 0xaa, 0x67, 0x8c, 0xc4, 0x58, 0x5c, 0x21, 0xbb, 0xf7, 0x8a, 0xc1,
-	0x46, 0xeb, 0xb0, 0x2e, 0xba, 0xe3, 0xa2, 0xd7, 0x7e, 0xdb, 0xbd, 0xc4, 0xd1, 0x0d, 0xd8, 0xc8,
-	0x5f, 0x8a, 0xb7, 0xcf, 0xa9, 0x97, 0x2a, 0xd4, 0x1c, 0xc1, 0x05, 0x63, 0x53, 0xfd, 0x70, 0xd8,
-	0x99, 0x13, 0xa9, 0x92, 0xa9, 0x89, 0x9f, 0x59, 0xee, 0xfb, 0x50, 0xe5, 0x81, 0xbd, 0xc8, 0x8d,
-	0xe5, 0xfa, 0x5d, 0xd6, 0x2a, 0x5b, 0x5e, 0x60, 0xba, 0xc2, 0xe5, 0xc0, 0x25, 0x57, 0x76, 0x18,
-	0x16, 0x1b, 0x39, 0x73, 0x3e, 0x04, 0x15, 0x1e, 0x21, 0x72, 0x9d, 0xc3, 0xe2, 0xcc, 0x7d, 0x42,
-	0xf8, 0x5a, 0xee, 0xe3, 0xe7, 0x5f, 0x9f, 0x47, 0x66, 0x7c, 0x05, 0xeb, 0x3a, 0x3d, 0xa3, 0xa6,
-	0x1b, 0x50, 0x3d, 0x08, 0x2c, 0x1e, 0x45, 0xff, 0x2c, 0x6a, 0x49, 0xa9, 0xa8, 0xfa, 0x19, 0x94,
-	0x91, 0xb3, 0x77, 0xd8, 0x9f, 0x1e, 0xe0, 0x7f, 0xdf, 0xb0, 0x1d, 0xe2, 0xb9, 0x83, 0x62, 0x84,
-	0xa5, 0x55, 0x29, 0xf3, 0x3d, 0x98, 0xfc, 0x62, 0x9d, 0x41, 0x9a, 0x2d, 0x78, 0xa1, 0x67, 0xdb,
-	0x7c, 0x63, 0x0f, 0x42, 0xc7, 0x64, 0xa4, 0x98, 0xba, 0x15, 0x4d, 0x7c, 0xd8, 0x0d, 0xe2, 0x87,
-	0x43, 0x93, 0x11, 0xc9, 0x11, 0x53, 0x00, 0xf0, 0x54, 0xc8, 0x49, 0x25, 0xfa, 0xa3, 0xd1, 0xb8,
-	0x0d, 0xbe, 0x8e, 0xdb, 0xe0, 0xfb, 0xb8, 0x0d, 0xbe, 0x1c, 0xb7, 0xc1, 0xe8, 0xb8, 0x0d, 0x5e,
-	0x5c, 0x3f, 0xbd, 0x71, 0x96, 0xc6, 0x5d, 0x59, 0x9f, 0x55, 0x17, 0xbf, 0x92, 0x3b, 0x3f, 0x03,
-	0x00, 0x00, 0xff, 0xff, 0xac, 0xa5, 0xe3, 0x1c, 0xb1, 0x06, 0x00, 0x00,
+	// 665 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x3b, 0x6f, 0xd4, 0x40,
+	0x10, 0xce, 0xe6, 0x9c, 0xbb, 0x63, 0x09, 0x21, 0xac, 0x00, 0x9d, 0xae, 0xb8, 0x43, 0x11, 0x82,
+	0xf0, 0x88, 0x0f, 0xf1, 0x2a, 0xd2, 0xa0, 0xbb, 0x20, 0x92, 0x20, 0x14, 0xd0, 0xc6, 0x69, 0xe8,
+	0xfc, 0x58, 0x1c, 0x8b, 0xb3, 0xd7, 0xd8, 0x6b, 0xb0, 0x3b, 0x4a, 0x7a, 0x1a, 0xca, 0x14, 0x54,
+	0xfc, 0x0a, 0xca, 0x94, 0xd4, 0x14, 0x11, 0x4a, 0x7e, 0x06, 0x0d, 0xda, 0xf1, 0xae, 0xef, 0x38,
+	0x20, 0xba, 0x80, 0x44, 0xb7, 0x33, 0xdf, 0xb7, 0x9e, 0x99, 0x6f, 0x66, 0xd6, 0xf8, 0x4c, 0x6c,
+	0x17, 0x43, 0x6e, 0x7b, 0x66, 0x9c, 0x70, 0xc1, 0x49, 0x43, 0x99, 0xed, 0x15, 0x3f, 0x10, 0xbb,
+	0x99, 0x63, 0xba, 0x3c, 0xec, 0xf9, 0xdc, 0xe7, 0x3d, 0xc0, 0x9d, 0xec, 0x05, 0x58, 0x60, 0xc0,
+	0xa9, 0xbc, 0xd7, 0x5e, 0x8c, 0x59, 0x12, 0x06, 0x69, 0x1a, 0xf0, 0x48, 0x79, 0x70, 0x1a, 0x33,
+	0xb7, 0x3c, 0x2f, 0x7d, 0x9a, 0xc5, 0xb5, 0x7e, 0x54, 0x90, 0xab, 0xb8, 0xbe, 0x66, 0x0f, 0x87,
+	0x56, 0xde, 0x42, 0x97, 0xd0, 0xf2, 0xe9, 0xdb, 0x67, 0x4d, 0x1d, 0xbd, 0x74, 0x53, 0x05, 0x4b,
+	0xe2, 0x36, 0x8b, 0x3c, 0x2b, 0x6f, 0xcd, 0x4e, 0x10, 0x4b, 0x37, 0x55, 0xb0, 0x24, 0x6e, 0xd9,
+	0x21, 0xb3, 0xf2, 0x56, 0x6d, 0x82, 0x58, 0xba, 0xa9, 0x82, 0xc9, 0x75, 0xdc, 0x78, 0xc6, 0x92,
+	0x30, 0xb5, 0xf2, 0x96, 0x01, 0xcc, 0xc5, 0x8a, 0xa9, 0xfc, 0x54, 0x13, 0xc8, 0x65, 0x3c, 0xb7,
+	0xce, 0x5f, 0x5b, 0x79, 0x6b, 0x0e, 0x98, 0x0b, 0x15, 0x13, 0xbc, 0xb4, 0x04, 0x65, 0xe8, 0x01,
+	0x87, 0x1c, 0xeb, 0x13, 0xa1, 0x4b, 0x37, 0x55, 0x30, 0x59, 0xc1, 0xcd, 0x9d, 0xc8, 0x29, 0xa9,
+	0x0d, 0xa0, 0x9e, 0xab, 0xa8, 0x1a, 0xa0, 0x15, 0x65, 0xe9, 0x3d, 0xc2, 0x0d, 0x2b, 0xdf, 0x8c,
+	0xe2, 0x4c, 0x90, 0x2d, 0xdc, 0xe8, 0x7b, 0x5e, 0xc2, 0xd2, 0x14, 0x14, 0x9b, 0x1f, 0xdc, 0xdd,
+	0x3f, 0xe8, 0xce, 0x7c, 0x3d, 0xe8, 0xde, 0x1c, 0x6b, 0xcf, 0x6e, 0x11, 0xb3, 0x64, 0xc8, 0x3c,
+	0x9f, 0x25, 0x3d, 0x27, 0x4b, 0x12, 0xfe, 0xa6, 0xe7, 0x26, 0x45, 0x2c, 0xb8, 0xa9, 0xee, 0x52,
+	0xfd, 0x11, 0x72, 0x11, 0xd7, 0xfb, 0x21, 0xcf, 0x22, 0x01, 0xba, 0x1a, 0x54, 0x59, 0xa4, 0x8d,
+	0x9b, 0xdb, 0xec, 0x55, 0xc6, 0x22, 0x97, 0x81, 0x90, 0x06, 0xad, 0xec, 0x55, 0xe3, 0xc3, 0x5e,
+	0x77, 0x66, 0x29, 0xc7, 0x4d, 0x2b, 0x7f, 0x9a, 0x89, 0xff, 0x98, 0x95, 0x8a, 0xfc, 0x1d, 0xe9,
+	0xa9, 0x21, 0x57, 0xf0, 0x1c, 0xe8, 0xa2, 0xc6, 0x67, 0xd4, 0x42, 0xa5, 0x17, 0x2d, 0x61, 0xf2,
+	0x78, 0x94, 0xe0, 0x2c, 0x24, 0x78, 0xeb, 0xef, 0x93, 0x6b, 0xe3, 0xe6, 0xba, 0x9d, 0x3e, 0x09,
+	0xc2, 0x40, 0x68, 0x69, 0xb4, 0x4d, 0x16, 0x71, 0xed, 0x11, 0x63, 0x30, 0x50, 0x06, 0x95, 0x47,
+	0xb2, 0x89, 0x8d, 0x87, 0xb6, 0xb0, 0x61, 0x72, 0xe6, 0x07, 0xf7, 0x94, 0x2e, 0x2b, 0xc7, 0x87,
+	0x76, 0x82, 0xc8, 0x4e, 0x0a, 0x73, 0x83, 0xe5, 0x83, 0x42, 0xb0, 0x94, 0xc2, 0x27, 0x54, 0xf5,
+	0x81, 0xde, 0x04, 0xb2, 0x8c, 0xeb, 0x50, 0x9d, 0x14, 0xbd, 0xf6, 0xdb, 0xea, 0x15, 0x4e, 0x6e,
+	0xe0, 0x46, 0xd9, 0x29, 0x59, 0x7e, 0xed, 0xa7, 0x79, 0xd3, 0x3d, 0xa4, 0x9a, 0xb1, 0xda, 0x7c,
+	0xb7, 0xd7, 0x9d, 0x81, 0x50, 0xbc, 0x5a, 0x91, 0xa9, 0x85, 0xbe, 0x8f, 0x9b, 0xf2, 0x4a, 0x3f,
+	0xf1, 0x53, 0xb5, 0xa9, 0xe7, 0xcd, 0xb1, 0x97, 0x40, 0x63, 0x03, 0x43, 0x0a, 0x41, 0x2b, 0xae,
+	0xaa, 0x2d, 0xd6, 0xcb, 0x3b, 0x75, 0x3c, 0x82, 0x0d, 0x79, 0x03, 0x62, 0x9d, 0xa2, 0x70, 0x96,
+	0x3e, 0x90, 0xbc, 0x56, 0xfa, 0xe4, 0xf9, 0xd7, 0xc6, 0xa8, 0x88, 0x2f, 0xf5, 0xce, 0x9e, 0x40,
+	0xcd, 0xd1, 0xfa, 0xf2, 0x3f, 0xcb, 0x59, 0x51, 0xc6, 0xf4, 0xfc, 0x88, 0x46, 0x8b, 0x3f, 0x75,
+	0x85, 0x5b, 0x93, 0xa3, 0xfb, 0xef, 0xbb, 0xb5, 0xc1, 0x02, 0x7f, 0x57, 0x0f, 0xaf, 0xb2, 0xc6,
+	0xd2, 0x7c, 0x8b, 0xd4, 0x73, 0x77, 0x02, 0x4d, 0xd6, 0xf0, 0x42, 0xdf, 0x75, 0xe5, 0x92, 0xee,
+	0xc4, 0x9e, 0x2d, 0x98, 0x1e, 0xb4, 0x0b, 0x26, 0xbc, 0xfa, 0x16, 0x0b, 0xe3, 0xa1, 0x2d, 0x98,
+	0xe2, 0x40, 0xfb, 0x11, 0x9d, 0xb8, 0x32, 0x4a, 0x61, 0xf0, 0x60, 0xff, 0xb0, 0x83, 0xbe, 0x1c,
+	0x76, 0xd0, 0xb7, 0xc3, 0x0e, 0xfa, 0x7c, 0xd4, 0x41, 0xfb, 0x47, 0x1d, 0xf4, 0xfc, 0xda, 0xf1,
+	0x15, 0x8b, 0x3c, 0xed, 0xa9, 0xfc, 0x9c, 0x3a, 0xfc, 0x67, 0xee, 0xfc, 0x08, 0x00, 0x00, 0xff,
+	0xff, 0xf8, 0x92, 0x62, 0xb5, 0xce, 0x06, 0x00, 0x00,
 }
diff --git a/txs/payload/permission_tx.go b/txs/payload/perms_tx.go
similarity index 53%
rename from txs/payload/permission_tx.go
rename to txs/payload/perms_tx.go
index d22984f93d4b03efac04f0ff469f3a7d0335daa6..5dbff87e7fedd6c0e0fbe354add291fcf3652e64 100644
--- a/txs/payload/permission_tx.go
+++ b/txs/payload/perms_tx.go
@@ -8,7 +8,7 @@ import (
 	"github.com/hyperledger/burrow/permission"
 )
 
-func NewPermissionsTx(st state.AccountGetter, from crypto.PublicKey, args permission.PermArgs) (*PermissionsTx, error) {
+func NewPermsTx(st state.AccountGetter, from crypto.PublicKey, args permission.PermArgs) (*PermsTx, error) {
 	addr := from.Address()
 	acc, err := st.GetAccount(addr)
 	if err != nil {
@@ -19,30 +19,36 @@ func NewPermissionsTx(st state.AccountGetter, from crypto.PublicKey, args permis
 	}
 
 	sequence := acc.Sequence() + 1
-	return NewPermissionsTxWithSequence(from, args, sequence), nil
+	return NewPermsTxWithSequence(from, args, sequence), nil
 }
 
-func NewPermissionsTxWithSequence(from crypto.PublicKey, args permission.PermArgs, sequence uint64) *PermissionsTx {
+func NewPermsTxWithSequence(from crypto.PublicKey, args permission.PermArgs, sequence uint64) *PermsTx {
 	input := &TxInput{
 		Address:  from.Address(),
 		Amount:   1, // NOTE: amounts can't be 0 ...
 		Sequence: sequence,
 	}
 
-	return &PermissionsTx{
+	return &PermsTx{
 		Input:    input,
 		PermArgs: args,
 	}
 }
 
-func (tx *PermissionsTx) Type() Type {
+func (tx *PermsTx) Type() Type {
 	return TypePermissions
 }
 
-func (tx *PermissionsTx) GetInputs() []*TxInput {
+func (tx *PermsTx) GetInputs() []*TxInput {
 	return []*TxInput{tx.Input}
 }
 
-func (tx *PermissionsTx) String() string {
-	return fmt.Sprintf("PermissionsTx{%v -> %v}", tx.Input, tx.PermArgs)
+func (tx *PermsTx) String() string {
+	return fmt.Sprintf("PermsTx{%v -> %v}", tx.Input, tx.PermArgs)
+}
+
+func (tx *PermsTx) Any() *Any {
+	return &Any{
+		PermsTx: tx,
+	}
 }
diff --git a/txs/payload/send_tx.go b/txs/payload/send_tx.go
index f83fcf953956effa402bf5f0cbcdfe8d9f379fc6..c67babef836035a8bfdbe6fb0f6efa2b35d7965f 100644
--- a/txs/payload/send_tx.go
+++ b/txs/payload/send_tx.go
@@ -55,3 +55,9 @@ func (tx *SendTx) AddOutput(addr crypto.Address, amt uint64) error {
 	})
 	return nil
 }
+
+func (tx *SendTx) Any() *Any {
+	return &Any{
+		SendTx: tx,
+	}
+}
diff --git a/txs/payload/unbond_tx.go b/txs/payload/unbond_tx.go
index c6fe036fa7bd07e813d8c1fe3cc3ece83df8ee82..c295cfad46486e06ab322e3b564fca9197023265 100644
--- a/txs/payload/unbond_tx.go
+++ b/txs/payload/unbond_tx.go
@@ -24,3 +24,9 @@ func (tx *UnbondTx) GetInputs() []*TxInput {
 func (tx *UnbondTx) String() string {
 	return fmt.Sprintf("UnbondTx{%v -> %s,%v}", tx.Input, tx.Address, tx.Height)
 }
+
+func (tx *UnbondTx) Any() *Any {
+	return &Any{
+		UnbondTx: tx,
+	}
+}
diff --git a/txs/tx.go b/txs/tx.go
index ee773c3cce87d2feaea4d4f18f29f52dfa10042f..45b15ca85d815ca92bf71fcc5861665589b06b50 100644
--- a/txs/tx.go
+++ b/txs/tx.go
@@ -103,7 +103,7 @@ func (tx *Tx) UnmarshalJSON(data []byte) error {
 	}
 	tx.ChainID = w.ChainID
 	// Now we know the Type we can deserialise the Payload
-	tx.Payload = payload.New(w.Type)
+	if tx.Payload, err = payload.New(w.Type); err != nil { return err }
 	return json.Unmarshal(w.Payload, tx.Payload)
 }
 
diff --git a/txs/tx_test.go b/txs/tx_test.go
index 4716bc28d5a356878a3592bdea62578d5bb48750..b3d0d4a41239efbb06f7a83c7db754fea978eecc 100644
--- a/txs/tx_test.go
+++ b/txs/tx_test.go
@@ -153,7 +153,7 @@ func TestUnbondTxSignable(t *testing.T) {
 }
 
 func TestPermissionsTxSignable(t *testing.T) {
-	permsTx := &payload.PermissionsTx{
+	permsTx := &payload.PermsTx{
 		Input: &payload.TxInput{
 			Address:  makePrivateAccount("input1").Address(),
 			Amount:   12345,
@@ -191,7 +191,7 @@ func TestTxWrapper_MarshalJSON(t *testing.T) {
 func TestNewPermissionsTxWithSequence(t *testing.T) {
 	privateAccount := makePrivateAccount("shhhhh")
 	args := permission.SetBaseArgs(privateAccount.PublicKey().Address(), permission.HasRole, true)
-	permTx := payload.NewPermissionsTxWithSequence(privateAccount.PublicKey(), args, 1)
+	permTx := payload.NewPermsTxWithSequence(privateAccount.PublicKey(), args, 1)
 	testTxMarshalJSON(t, permTx)
 	testTxSignVerify(t, permTx)
 }
diff --git a/util/logging/cmd/main.go b/util/logging/cmd/main.go
index cc6960b6bb389c396bdbc18b9d7662d8a9769165..d0b8acda284f4ddf28472048b5d640521cc59432 100644
--- a/util/logging/cmd/main.go
+++ b/util/logging/cmd/main.go
@@ -17,7 +17,7 @@ package main
 import (
 	"fmt"
 
-	. "github.com/hyperledger/burrow/logging/config"
+	. "github.com/hyperledger/burrow/logging/logconfig"
 )
 
 // Dump an example logging configuration
diff --git a/vendor/github.com/btcsuite/btcd/btcec/pubkey.go b/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
index b74917718f9f713597fb4271d59b19476d956fad..cf49807522573a78b61b1ae0926c893af47d7939 100644
--- a/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
+++ b/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
@@ -32,8 +32,9 @@ func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, erro
 	x3 := new(big.Int).Mul(x, x)
 	x3.Mul(x3, x)
 	x3.Add(x3, curve.Params().B)
+	x3.Mod(x3, curve.Params().P)
 
-	// now calculate sqrt mod p of x2 + B
+	// Now calculate sqrt mod p of x^3 + B
 	// This code used to do a full sqrt based on tonelli/shanks,
 	// but this was replaced by the algorithms referenced in
 	// https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
@@ -42,9 +43,19 @@ func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, erro
 	if ybit != isOdd(y) {
 		y.Sub(curve.Params().P, y)
 	}
+
+	// Check that y is a square root of x^3 + B.
+	y2 := new(big.Int).Mul(y, y)
+	y2.Mod(y2, curve.Params().P)
+	if y2.Cmp(x3) != 0 {
+		return nil, fmt.Errorf("invalid square root")
+	}
+
+	// Verify that y-coord has expected parity.
 	if ybit != isOdd(y) {
 		return nil, fmt.Errorf("ybit doesn't match oddness")
 	}
+
 	return y, nil
 }
 
diff --git a/vendor/github.com/go-kit/kit/metrics/discard/discard.go b/vendor/github.com/go-kit/kit/metrics/discard/discard.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0d3b14946b9109046886398c14f0347879a8088
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/discard/discard.go
@@ -0,0 +1,40 @@
+// Package discard provides a no-op metrics backend.
+package discard
+
+import "github.com/go-kit/kit/metrics"
+
+type counter struct{}
+
+// NewCounter returns a new no-op counter.
+func NewCounter() metrics.Counter { return counter{} }
+
+// With implements Counter.
+func (c counter) With(labelValues ...string) metrics.Counter { return c }
+
+// Add implements Counter.
+func (c counter) Add(delta float64) {}
+
+type gauge struct{}
+
+// NewGauge returns a new no-op gauge.
+func NewGauge() metrics.Gauge { return gauge{} }
+
+// With implements Gauge.
+func (g gauge) With(labelValues ...string) metrics.Gauge { return g }
+
+// Set implements Gauge.
+func (g gauge) Set(value float64) {}
+
+// Add implements metrics.Gauge.
+func (g gauge) Add(delta float64) {}
+
+type histogram struct{}
+
+// NewHistogram returns a new no-op histogram.
+func NewHistogram() metrics.Histogram { return histogram{} }
+
+// With implements Histogram.
+func (h histogram) With(labelValues ...string) metrics.Histogram { return h }
+
+// Observe implements histogram.
+func (h histogram) Observe(value float64) {}
diff --git a/vendor/github.com/go-kit/kit/metrics/doc.go b/vendor/github.com/go-kit/kit/metrics/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..25cda4f7c81253ab208eae10d4f2a12b1fe80fc9
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/doc.go
@@ -0,0 +1,97 @@
+// Package metrics provides a framework for application instrumentation. It's
+// primarily designed to help you get started with good and robust
+// instrumentation, and to help you migrate from a less-capable system like
+// Graphite to a more-capable system like Prometheus. If your organization has
+// already standardized on an instrumentation system like Prometheus, and has no
+// plans to change, it may make sense to use that system's instrumentation
+// library directly.
+//
+// This package provides three core metric abstractions (Counter, Gauge, and
+// Histogram) and implementations for almost all common instrumentation
+// backends. Each metric has an observation method (Add, Set, or Observe,
+// respectively) used to record values, and a With method to "scope" the
+// observation by various parameters. For example, you might have a Histogram to
+// record request durations, parameterized by the method that's being called.
+//
+//    var requestDuration metrics.Histogram
+//    // ...
+//    requestDuration.With("method", "MyMethod").Observe(time.Since(begin))
+//
+// This allows a single high-level metrics object (requestDuration) to work with
+// many code paths somewhat dynamically. The concept of With is fully supported
+// in some backends like Prometheus, and not supported in other backends like
+// Graphite. So, With may be a no-op, depending on the concrete implementation
+// you choose. Please check the implementation to know for sure. For
+// implementations that don't provide With, it's necessary to fully parameterize
+// each metric in the metric name, e.g.
+//
+//    // Statsd
+//    c := statsd.NewCounter("request_duration_MyMethod_200")
+//    c.Add(1)
+//
+//    // Prometheus
+//    c := prometheus.NewCounter(stdprometheus.CounterOpts{
+//        Name: "request_duration",
+//        ...
+//    }, []string{"method", "status_code"})
+//    c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1)
+//
+// Usage
+//
+// Metrics are dependencies, and should be passed to the components that need
+// them in the same way you'd construct and pass a database handle, or reference
+// to another component. Metrics should *not* be created in the global scope.
+// Instead, instantiate metrics in your func main, using whichever concrete
+// implementation is appropriate for your organization.
+//
+//    latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
+//        Namespace: "myteam",
+//        Subsystem: "foosvc",
+//        Name:      "request_latency_seconds",
+//        Help:      "Incoming request latency in seconds.",
+//    }, []string{"method", "status_code"})
+//
+// Write your components to take the metrics they will use as parameters to
+// their constructors. Use the interface types, not the concrete types. That is,
+//
+//    // NewAPI takes metrics.Histogram, not *prometheus.Summary
+//    func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API {
+//        // ...
+//    }
+//
+//    func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) {
+//        begin := time.Now()
+//        // ...
+//        a.latency.Observe(time.Since(begin).Seconds())
+//    }
+//
+// Finally, pass the metrics as dependencies when building your object graph.
+// This should happen in func main, not in the global scope.
+//
+//    api := NewAPI(store, logger, latency)
+//    http.ListenAndServe("/", api)
+//
+// Note that metrics are "write-only" interfaces.
+//
+// Implementation details
+//
+// All metrics are safe for concurrent use. Considerable design influence has
+// been taken from https://github.com/codahale/metrics and
+// https://prometheus.io.
+//
+// Each telemetry system has different semantics for label values, push vs.
+// pull, support for histograms, etc. These properties influence the design of
+// their respective packages. This table attempts to summarize the key points of
+// distinction.
+//
+//    SYSTEM      DIM  COUNTERS               GAUGES                 HISTOGRAMS
+//    dogstatsd   n    batch, push-aggregate  batch, push-aggregate  native, batch, push-each
+//    statsd      1    batch, push-aggregate  batch, push-aggregate  native, batch, push-each
+//    graphite    1    batch, push-aggregate  batch, push-aggregate  synthetic, batch, push-aggregate
+//    expvar      1    atomic                 atomic                 synthetic, batch, in-place expose
+//    influx      n    custom                 custom                 custom
+//    prometheus  n    native                 native                 native
+//    pcp         1    native                 native                 native
+//    cloudwatch  n    batch push-aggregate   batch push-aggregate   synthetic, batch, push-aggregate
+//
+package metrics
diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bb1ba0941487e6eabbcaf046dd4f8ba9f05d721
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
@@ -0,0 +1,14 @@
+package lv
+
+// LabelValues is a type alias that provides validation on its With method.
+// Metrics may include it as a member to help them satisfy With semantics and
+// save some code duplication.
+type LabelValues []string
+
+// With validates the input, and returns a new aggregate labelValues.
+func (lvs LabelValues) With(labelValues ...string) LabelValues {
+	if len(labelValues)%2 != 0 {
+		labelValues = append(labelValues, "unknown")
+	}
+	return append(lvs, labelValues...)
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
new file mode 100644
index 0000000000000000000000000000000000000000..672c900749beac1d62280817b7cb4bacb8504e36
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
@@ -0,0 +1,145 @@
+package lv
+
+import "sync"
+
+// NewSpace returns an N-dimensional vector space.
+func NewSpace() *Space {
+	return &Space{}
+}
+
+// Space represents an N-dimensional vector space. Each name and unique label
+// value pair establishes a new dimension and point within that dimension. Order
+// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1].
+type Space struct {
+	mtx   sync.RWMutex
+	nodes map[string]*node
+}
+
+// Observe locates the time series identified by the name and label values in
+// the vector space, and appends the value to the list of observations.
+func (s *Space) Observe(name string, lvs LabelValues, value float64) {
+	s.nodeFor(name).observe(lvs, value)
+}
+
+// Add locates the time series identified by the name and label values in
+// the vector space, and appends the delta to the last value in the list of
+// observations.
+func (s *Space) Add(name string, lvs LabelValues, delta float64) {
+	s.nodeFor(name).add(lvs, delta)
+}
+
+// Walk traverses the vector space and invokes fn for each non-empty time series
+// which is encountered. Return false to abort the traversal.
+func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+	for name, node := range s.nodes {
+		f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) }
+		if !node.walk(LabelValues{}, f) {
+			return
+		}
+	}
+}
+
+// Reset empties the current space and returns a new Space with the old
+// contents. Reset a Space to get an immutable copy suitable for walking.
+func (s *Space) Reset() *Space {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	n := NewSpace()
+	n.nodes, s.nodes = s.nodes, n.nodes
+	return n
+}
+
+func (s *Space) nodeFor(name string) *node {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	if s.nodes == nil {
+		s.nodes = map[string]*node{}
+	}
+	n, ok := s.nodes[name]
+	if !ok {
+		n = &node{}
+		s.nodes[name] = n
+	}
+	return n
+}
+
+// node exists at a specific point in the N-dimensional vector space of all
+// possible label values. The node collects observations and has child nodes
+// with greater specificity.
+type node struct {
+	mtx          sync.RWMutex
+	observations []float64
+	children     map[pair]*node
+}
+
+type pair struct{ label, value string }
+
+func (n *node) observe(lvs LabelValues, value float64) {
+	n.mtx.Lock()
+	defer n.mtx.Unlock()
+	if len(lvs) == 0 {
+		n.observations = append(n.observations, value)
+		return
+	}
+	if len(lvs) < 2 {
+		panic("too few LabelValues; programmer error!")
+	}
+	head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
+	if n.children == nil {
+		n.children = map[pair]*node{}
+	}
+	child, ok := n.children[head]
+	if !ok {
+		child = &node{}
+		n.children[head] = child
+	}
+	child.observe(tail, value)
+}
+
+func (n *node) add(lvs LabelValues, delta float64) {
+	n.mtx.Lock()
+	defer n.mtx.Unlock()
+	if len(lvs) == 0 {
+		var value float64
+		if len(n.observations) > 0 {
+			value = last(n.observations) + delta
+		} else {
+			value = delta
+		}
+		n.observations = append(n.observations, value)
+		return
+	}
+	if len(lvs) < 2 {
+		panic("too few LabelValues; programmer error!")
+	}
+	head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
+	if n.children == nil {
+		n.children = map[pair]*node{}
+	}
+	child, ok := n.children[head]
+	if !ok {
+		child = &node{}
+		n.children[head] = child
+	}
+	child.add(tail, delta)
+}
+
+func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool {
+	n.mtx.RLock()
+	defer n.mtx.RUnlock()
+	if len(n.observations) > 0 && !fn(lvs, n.observations) {
+		return false
+	}
+	for p, child := range n.children {
+		if !child.walk(append(lvs, p.label, p.value), fn) {
+			return false
+		}
+	}
+	return true
+}
+
+func last(a []float64) float64 {
+	return a[len(a)-1]
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7ba1b1fe3ff060acded5b0c769c2394c96d81dc
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/metrics.go
@@ -0,0 +1,25 @@
+package metrics
+
+// Counter describes a metric that accumulates values monotonically.
+// An example of a counter is the number of received HTTP requests.
+type Counter interface {
+	With(labelValues ...string) Counter
+	Add(delta float64)
+}
+
+// Gauge describes a metric that takes specific values over time.
+// An example of a gauge is the current depth of a job queue.
+type Gauge interface {
+	With(labelValues ...string) Gauge
+	Set(value float64)
+	Add(delta float64)
+}
+
+// Histogram describes a metric that takes repeated observations of the same
+// kind of thing, and produces a statistical summary of those observations,
+// typically expressed as quantiles or buckets. An example of a histogram is
+// HTTP request latencies.
+type Histogram interface {
+	With(labelValues ...string) Histogram
+	Observe(value float64)
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a364c316affa0eb50550dcf5ecbf7e8ef057017
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go
@@ -0,0 +1,165 @@
+// Package prometheus provides Prometheus implementations for metrics.
+// Individual metrics are mapped to their Prometheus counterparts, and
+// (depending on the constructor used) may be automatically registered in the
+// global Prometheus metrics registry.
+package prometheus
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/internal/lv"
+)
+
+// Counter implements Counter, via a Prometheus CounterVec.
+type Counter struct {
+	cv  *prometheus.CounterVec
+	lvs lv.LabelValues
+}
+
+// NewCounterFrom constructs and registers a Prometheus CounterVec,
+// and returns a usable Counter object.
+func NewCounterFrom(opts prometheus.CounterOpts, labelNames []string) *Counter {
+	cv := prometheus.NewCounterVec(opts, labelNames)
+	prometheus.MustRegister(cv)
+	return NewCounter(cv)
+}
+
+// NewCounter wraps the CounterVec and returns a usable Counter object.
+func NewCounter(cv *prometheus.CounterVec) *Counter {
+	return &Counter{
+		cv: cv,
+	}
+}
+
+// With implements Counter.
+func (c *Counter) With(labelValues ...string) metrics.Counter {
+	return &Counter{
+		cv:  c.cv,
+		lvs: c.lvs.With(labelValues...),
+	}
+}
+
+// Add implements Counter.
+func (c *Counter) Add(delta float64) {
+	c.cv.With(makeLabels(c.lvs...)).Add(delta)
+}
+
+// Gauge implements Gauge, via a Prometheus GaugeVec.
+type Gauge struct {
+	gv  *prometheus.GaugeVec
+	lvs lv.LabelValues
+}
+
+// NewGaugeFrom constructs and registers a Prometheus GaugeVec,
+// and returns a usable Gauge object.
+func NewGaugeFrom(opts prometheus.GaugeOpts, labelNames []string) *Gauge {
+	gv := prometheus.NewGaugeVec(opts, labelNames)
+	prometheus.MustRegister(gv)
+	return NewGauge(gv)
+}
+
+// NewGauge wraps the GaugeVec and returns a usable Gauge object.
+func NewGauge(gv *prometheus.GaugeVec) *Gauge {
+	return &Gauge{
+		gv: gv,
+	}
+}
+
+// With implements Gauge.
+func (g *Gauge) With(labelValues ...string) metrics.Gauge {
+	return &Gauge{
+		gv:  g.gv,
+		lvs: g.lvs.With(labelValues...),
+	}
+}
+
+// Set implements Gauge.
+func (g *Gauge) Set(value float64) {
+	g.gv.With(makeLabels(g.lvs...)).Set(value)
+}
+
+// Add is supported by Prometheus GaugeVecs.
+func (g *Gauge) Add(delta float64) {
+	g.gv.With(makeLabels(g.lvs...)).Add(delta)
+}
+
+// Summary implements Histogram, via a Prometheus SummaryVec. The difference
+// between a Summary and a Histogram is that Summaries don't require predefined
+// quantile buckets, but cannot be statistically aggregated.
+type Summary struct {
+	sv  *prometheus.SummaryVec
+	lvs lv.LabelValues
+}
+
+// NewSummaryFrom constructs and registers a Prometheus SummaryVec,
+// and returns a usable Summary object.
+func NewSummaryFrom(opts prometheus.SummaryOpts, labelNames []string) *Summary {
+	sv := prometheus.NewSummaryVec(opts, labelNames)
+	prometheus.MustRegister(sv)
+	return NewSummary(sv)
+}
+
+// NewSummary wraps the SummaryVec and returns a usable Summary object.
+func NewSummary(sv *prometheus.SummaryVec) *Summary {
+	return &Summary{
+		sv: sv,
+	}
+}
+
+// With implements Histogram.
+func (s *Summary) With(labelValues ...string) metrics.Histogram {
+	return &Summary{
+		sv:  s.sv,
+		lvs: s.lvs.With(labelValues...),
+	}
+}
+
+// Observe implements Histogram.
+func (s *Summary) Observe(value float64) {
+	s.sv.With(makeLabels(s.lvs...)).Observe(value)
+}
+
+// Histogram implements Histogram via a Prometheus HistogramVec. The difference
+// between a Histogram and a Summary is that Histograms require predefined
+// quantile buckets, and can be statistically aggregated.
+type Histogram struct {
+	hv  *prometheus.HistogramVec
+	lvs lv.LabelValues
+}
+
+// NewHistogramFrom constructs and registers a Prometheus HistogramVec,
+// and returns a usable Histogram object.
+func NewHistogramFrom(opts prometheus.HistogramOpts, labelNames []string) *Histogram {
+	hv := prometheus.NewHistogramVec(opts, labelNames)
+	prometheus.MustRegister(hv)
+	return NewHistogram(hv)
+}
+
+// NewHistogram wraps the HistogramVec and returns a usable Histogram object.
+func NewHistogram(hv *prometheus.HistogramVec) *Histogram {
+	return &Histogram{
+		hv: hv,
+	}
+}
+
+// With implements Histogram.
+func (h *Histogram) With(labelValues ...string) metrics.Histogram {
+	return &Histogram{
+		hv:  h.hv,
+		lvs: h.lvs.With(labelValues...),
+	}
+}
+
+// Observe implements Histogram.
+func (h *Histogram) Observe(value float64) {
+	h.hv.With(makeLabels(h.lvs...)).Observe(value)
+}
+
+func makeLabels(labelValues ...string) prometheus.Labels {
+	labels := prometheus.Labels{}
+	for i := 0; i < len(labelValues); i += 2 {
+		labels[labelValues[i]] = labelValues[i+1]
+	}
+	return labels
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/timer.go b/vendor/github.com/go-kit/kit/metrics/timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e12d9cd5c49b204511e7b230878f25f32d6b9e77
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/timer.go
@@ -0,0 +1,36 @@
+package metrics
+
+import "time"
+
+// Timer acts as a stopwatch, sending observations to a wrapped histogram.
+// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)).
+type Timer struct {
+	h Histogram
+	t time.Time
+	u time.Duration
+}
+
+// NewTimer wraps the given histogram and records the current time.
+func NewTimer(h Histogram) *Timer {
+	return &Timer{
+		h: h,
+		t: time.Now(),
+		u: time.Second,
+	}
+}
+
+// ObserveDuration captures the number of seconds since the timer was
+// constructed, and forwards that observation to the histogram.
+func (t *Timer) ObserveDuration() {
+	d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
+	if d < 0 {
+		d = 0
+	}
+	t.h.Observe(d)
+}
+
+// Unit sets the unit of the float64 emitted by the timer.
+// By default, the timer emits seconds.
+func (t *Timer) Unit(u time.Duration) {
+	t.u = u
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 623d3d83fefc849444c4efcb18c4db7a27f8affa..3c9bae24b9eb58a032a584dabb8c0401eeaccc52 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -29,24 +29,35 @@ type Collector interface {
 	// collected by this Collector to the provided channel and returns once
 	// the last descriptor has been sent. The sent descriptors fulfill the
 	// consistency and uniqueness requirements described in the Desc
-	// documentation. (It is valid if one and the same Collector sends
-	// duplicate descriptors. Those duplicates are simply ignored. However,
-	// two different Collectors must not send duplicate descriptors.) This
-	// method idempotently sends the same descriptors throughout the
-	// lifetime of the Collector. If a Collector encounters an error while
-	// executing this method, it must send an invalid descriptor (created
-	// with NewInvalidDesc) to signal the error to the registry.
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
 	Describe(chan<- *Desc)
 	// Collect is called by the Prometheus registry when collecting
 	// metrics. The implementation sends each collected metric via the
 	// provided channel and returns once the last metric has been sent. The
-	// descriptor of each sent metric is one of those returned by
-	// Describe. Returned metrics that share the same descriptor must differ
-	// in their variable label values. This method may be called
-	// concurrently and must therefore be implemented in a concurrency safe
-	// way. Blocking occurs at the expense of total performance of rendering
-	// all registered metrics. Ideally, Collector implementations support
-	// concurrent readers.
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
 	Collect(chan<- Metric)
 }
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 83c3657d764a88f6c107e0362e37f479b9bdbd93..5d9525defc8dfd35b6bf990c17fe71b04faec6bf 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -121,7 +121,17 @@
 // NewConstSummary (and their respective Must… versions). That will happen in
 // the Collect method. The Describe method has to return separate Desc
 // instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
 //
 // The Collector example illustrates the use case. You can also look at the
 // source code of the processCollector (mirroring process metrics), the
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c1c66dcc7f2c374d937cb2054ade3717db4028e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+const (
+	closeNotifier = 1 << iota
+	flusher
+	hijacker
+	readerFrom
+	pusher
+)
+
+type delegator interface {
+	http.ResponseWriter
+
+	Status() int
+	Written() int64
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	handler, method    string
+	status             int
+	written            int64
+	wroteHeader        bool
+	observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+	return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+	return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+	if r.observeWriteHeader != nil {
+		r.observeWriteHeader(code)
+	}
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
+	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d *flusherDelegator) Flush() {
+	d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+	if !d.wroteHeader {
+		d.WriteHeader(http.StatusOK)
+	}
+	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+	d.written += n
+	return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+	// TODO(beorn7): Code generation would help here.
+	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+		return d
+	}
+	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+		return &closeNotifierDelegator{d}
+	}
+	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+		return &flusherDelegator{d}
+	}
+	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+		return struct {
+			*responseWriterDelegator
+			http.Flusher
+			http.CloseNotifier
+		}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+		return &hijackerDelegator{d}
+	}
+	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+		}{d, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+		return readerFromDelegator{d}
+	}
+	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+		}{d, &readerFromDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 0000000000000000000000000000000000000000..75a905e2f4cfc326830867ef4850a78b8f928d70
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return &pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, &pusherDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bb9b8b68f8b4a721d75b2d6e3e4f22af9088489
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..01357374feb1699d6d3c43d5d880fa9cfc8a42ee
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,311 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+	buf := bufPool.Get()
+	if buf == nil {
+		return &bytes.Buffer{}
+	}
+	return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+	buf.Reset()
+	bufPool.Put(buf)
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	var inFlightSem chan struct{}
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+
+	h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(w, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			case HTTPErrorOnError:
+				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
+				case HTTPErrorOnError:
+					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+			opts.ErrorLog.Println("error while sending encoded metrics:", err)
+		}
+		// TODO(beorn7): Consider streaming serving of metrics.
+	})
+
+	if opts.Timeout <= 0 {
+		return h
+	}
+	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+		"Exceeded configured timeout of %v.\n",
+		opts.Timeout,
+	))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+	cnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "promhttp_metric_handler_requests_total",
+			Help: "Total number of scrapes by HTTP status code.",
+		},
+		[]string{"code"},
+	)
+	// Initialize the most likely HTTP status codes.
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+	cnt.WithLabelValues("503")
+	if err := reg.Register(cnt); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "promhttp_metric_handler_requests_in_flight",
+		Help: "Current number of scrapes being served.",
+	})
+	if err := reg.Register(gge); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			gge = are.ExistingCollector.(prometheus.Gauge)
+		} else {
+			panic(err)
+		}
+	}
+
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Serve an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError HandlerErrorHandling = iota
+	// Ignore errors and try to serve as many metrics as possible.  However,
+	// if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. It is recommended to at least
+	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
+	// errors completely.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling provided ErrorLog
+	// is not nil.
+	ErrorHandling HandlerErrorHandling
+	// If DisableCompression is true, the handler will never compress the
+	// response, even if requested by the client.
+	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 ServiceUnavailable and a suitable Message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP requests as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested.  It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+	if compressionDisabled {
+		return writer, ""
+	}
+	header := request.Header.Get(acceptEncodingHeader)
+	parts := strings.Split(header, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return gzip.NewWriter(writer), "gzip"
+		}
+	}
+	return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..86fd564470f8161f9696beab0bb51d00c69d1c1b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec.  The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 0000000000000000000000000000000000000000..a034d1ec0f189a54e6269a24d04c39cfc7ce63d2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 0000000000000000000000000000000000000000..9db24380533ad27f27d4f191a8e644f5ec4e1cde
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		g.Inc()
+		defer g.Dec()
+		next.ServeHTTP(w, r)
+	})
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			now := time.Now()
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		next.ServeHTTP(w, r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+	})
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec.  The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(counter)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			counter.With(labels(code, method, r.Method, d.Status())).Inc()
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		counter.With(labels(code, method, r.Method, 0)).Inc()
+	})
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		d := newDelegator(w, func(status int) {
+			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+		})
+		next.ServeHTTP(d, r)
+	})
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			size := computeApproximateRequestSize(r)
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		size := computeApproximateRequestSize(r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+	})
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+	code, method := checkLabels(obs)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		d := newDelegator(w, nil)
+		next.ServeHTTP(d, r)
+		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+	})
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+	// TODO(beorn7): Remove this hacky way to check for instance labels
+	// once Descriptors can have their dimensionality queried.
+	var (
+		desc *prometheus.Desc
+		m    prometheus.Metric
+		pm   dto.Metric
+		lvs  []string
+	)
+
+	// Get the Desc from the Collector.
+	descc := make(chan *prometheus.Desc, 1)
+	c.Describe(descc)
+
+	select {
+	case desc = <-descc:
+	default:
+		panic("no description provided by collector")
+	}
+	select {
+	case <-descc:
+		panic("more than one description provided by collector")
+	default:
+	}
+
+	close(descc)
+
+	// Create a ConstMetric with the Desc. Since we don't know how many
+	// variable labels there are, try for as long as it needs.
+	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+	}
+
+	// Write out the metric into a proto message and look at the labels.
+	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
+	// If the label is curried, it doesn't interest us.
+	// In all other cases, only "code" or "method" is allowed.
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index fdb7badf7385d37913e11eb2f397aa9fda35ae01..5c5cdfe2afd03b8b2b6bacbe082cbba839e92e53 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,7 +15,6 @@ package prometheus
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"os"
 	"runtime"
@@ -68,7 +67,8 @@ func NewRegistry() *Registry {
 
 // NewPedanticRegistry returns a registry that checks during collection if each
 // collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe methed does not yield any descriptors) are excluded from the check.
 //
 // Usually, a Registry will be happy as long as the union of all collected
 // Metrics is consistent and valid even if some metrics are not consistent with
@@ -98,6 +98,14 @@ type Registerer interface {
 	// returned error is an instance of AlreadyRegisteredError, which
 	// contains the previously registered Collector.
 	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	//
 	// It is in general not safe to register the same Collector multiple
 	// times concurrently.
 	Register(Collector) error
@@ -108,7 +116,9 @@ type Registerer interface {
 	// Unregister unregisters the Collector that equals the Collector passed
 	// in as an argument.  (Two Collectors are considered equal if their
 	// Describe method yields the same set of descriptors.) The function
-	// returns whether a Collector was unregistered.
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
 	//
 	// Note that even after unregistering, it will not be possible to
 	// register a new Collector that is inconsistent with the unregistered
@@ -243,6 +253,7 @@ type Registry struct {
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
 	descIDs               map[uint64]struct{}
 	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
 	pedanticChecksEnabled bool
 }
 
@@ -300,9 +311,10 @@ func (r *Registry) Register(c Collector) error {
 			}
 		}
 	}
-	// Did anything happen at all?
+	// A Collector yielding no Desc at all is considered unchecked.
 	if len(newDescIDs) == 0 {
-		return errors.New("collector has no descriptors")
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
 		return AlreadyRegisteredError{
@@ -376,19 +388,24 @@ func (r *Registry) MustRegister(cs ...Collector) {
 // Gather implements Gatherer.
 func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	var (
-		metricChan        = make(chan Metric, capMetricChan)
-		metricHashes      = map[uint64]struct{}{}
-		wg                sync.WaitGroup
-		errs              MultiError          // The collected errors to return in the end.
-		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
 	)
 
 	r.mtx.RLock()
-	goroutineBudget := len(r.collectorsByID)
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
 	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
-	collectors := make(chan Collector, len(r.collectorsByID))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
 	for _, collector := range r.collectorsByID {
-		collectors <- collector
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
 	}
 	// In case pedantic checks are enabled, we have to copy the map before
 	// giving up the RLock.
@@ -405,12 +422,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	collectWorker := func() {
 		for {
 			select {
-			case collector := <-collectors:
-				collector.Collect(metricChan)
-				wg.Done()
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
 			default:
 				return
 			}
+			wg.Done()
 		}
 	}
 
@@ -418,51 +437,94 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	go collectWorker()
 	goroutineBudget--
 
-	// Close the metricChan once all collectors are collected.
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
 	go func() {
 		wg.Wait()
-		close(metricChan)
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
 	}()
 
-	// Drain metricChan in case of premature return.
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
 	defer func() {
-		for range metricChan {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
 		}
 	}()
 
-collectLoop:
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
 	for {
 		select {
-		case metric, ok := <-metricChan:
+		case metric, ok := <-cmc:
 			if !ok {
-				// metricChan is closed, we are done.
-				break collectLoop
+				cmc = nil
+				break
 			}
 			errs.Append(processMetric(
 				metric, metricFamiliesByName,
 				metricHashes,
 				registeredDescIDs,
 			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
 		default:
-			if goroutineBudget <= 0 || len(collectors) == 0 {
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
 				// All collectors are already being worked on or
 				// we have already as many goroutines started as
-				// there are collectors. Just process metrics
-				// from now on.
-				for metric := range metricChan {
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
 					errs.Append(processMetric(
 						metric, metricFamiliesByName,
 						metricHashes,
 						registeredDescIDs,
 					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
 				}
-				break collectLoop
+				break
 			}
 			// Start more workers.
 			go collectWorker()
 			goroutineBudget--
 			runtime.Gosched()
 		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
 	}
 	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index b065f8683f0bd79dc34e57b81035195bda05a813..9805432c2a4cb8dc0ea8dc7cb62d6b3ca23205da 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,34 +1,23 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: metrics.proto
-// DO NOT EDIT!
-
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
-	metrics.proto
-
-It has these top-level messages:
-	LabelPair
-	Gauge
-	Counter
-	Quantile
-	Summary
-	Untyped
-	Histogram
-	Bucket
-	Metric
-	MetricFamily
-*/
-package io_prometheus_client
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
 
 import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
 import math "math"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
+var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
 type MetricType int32
 
 const (
@@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 	*x = MetricType(value)
 	return nil
 }
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
 
 type LabelPair struct {
-	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Value            *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte  `json:"-"`
+	Name                 *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value                *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+	return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
 
 func (m *LabelPair) GetName() string {
 	if m != nil && m.Name != nil {
@@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string {
 }
 
 type Gauge struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+	return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+	xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
 
 func (m *Gauge) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 {
 }
 
 type Counter struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (*Counter) ProtoMessage()    {}
+func (*Counter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+	return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
 
 func (m *Counter) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 {
 }
 
 type Quantile struct {
-	Quantile         *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
-	Value            *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Quantile) Reset()         { *m = Quantile{} }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (*Quantile) ProtoMessage()    {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+	return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+	xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
 
 func (m *Quantile) GetQuantile() float64 {
 	if m != nil && m.Quantile != nil {
@@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
 }
 
 type Summary struct {
-	SampleCount      *uint64     `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
-	SampleSum        *float64    `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
-	Quantile         []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
+	SampleCount          *uint64     `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum            *float64    `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Quantile             []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
 func (m *Summary) Reset()         { *m = Summary{} }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (*Summary) ProtoMessage()    {}
+func (*Summary) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+	return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+	xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
 
 func (m *Summary) GetSampleCount() uint64 {
 	if m != nil && m.SampleCount != nil {
@@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile {
 }
 
 type Untyped struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Untyped) Reset()         { *m = Untyped{} }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (*Untyped) ProtoMessage()    {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+	return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+	xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
 
 func (m *Untyped) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
 }
 
 type Histogram struct {
-	SampleCount      *uint64   `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
-	SampleSum        *float64  `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
-	Bucket           []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
-	XXX_unrecognized []byte    `json:"-"`
+	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
 func (m *Histogram) Reset()         { *m = Histogram{} }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (*Histogram) ProtoMessage()    {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+	return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
 
 func (m *Histogram) GetSampleCount() uint64 {
 	if m != nil && m.SampleCount != nil {
@@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
 }
 
 type Bucket struct {
-	CumulativeCount  *uint64  `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
-	UpperBound       *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	CumulativeCount      *uint64  `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+	UpperBound           *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (*Bucket) ProtoMessage()    {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+	return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+	xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
 
 func (m *Bucket) GetCumulativeCount() uint64 {
 	if m != nil && m.CumulativeCount != nil {
@@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 {
 }
 
 type Metric struct {
-	Label            []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
-	Gauge            *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
-	Counter          *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
-	Summary          *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
-	Untyped          *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
-	Histogram        *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
-	TimestampMs      *int64       `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
-	XXX_unrecognized []byte       `json:"-"`
+	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+	Counter              *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+	Summary              *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+	Untyped              *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+	Histogram            *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+	TimestampMs          *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
 }
 
 func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
+func (*Metric) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+	return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
 
 func (m *Metric) GetLabel() []*LabelPair {
 	if m != nil {
@@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 {
 }
 
 type MetricFamily struct {
-	Name             *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Help             *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
-	Type             *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
-	Metric           []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
+	Name                 *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Help                 *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+	Type                 *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+	Metric               []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
 func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (*MetricFamily) ProtoMessage()    {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+	return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
 
 func (m *MetricFamily) GetName() string {
 	if m != nil && m.Name != nil {
@@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
 }
 
 func init() {
+	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+	proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+	proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
 	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
 }
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+	// 591 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+	0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+	0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+	0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+	0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+	0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+	0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+	0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+	0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+	0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+	0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+	0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+	0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+	0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+	0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+	0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+	0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+	0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+	0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+	0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+	0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+	0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+	0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+	0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+	0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+	0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+	0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+	0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+	0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+	0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+	0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+	0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+	0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+	0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+	0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+	0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+	0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index e95ddbc67c068083dc87a1b885d3fd24e1c4d7ec..7a8a1e0990143b5ccde82f9fd59a8a787ed5f606 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -39,8 +39,11 @@ const (
 	statVersion10 = "1.0"
 	statVersion11 = "1.1"
 
-	fieldTransport10Len = 10
-	fieldTransport11Len = 13
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -186,6 +189,8 @@ type NFSOperationStats struct {
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
 // responses.
 type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
 	// The local port used for the NFS mount.
 	Port uint64
 	// Number of times the client has had to establish a connection from scratch
@@ -360,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
 			}
 
-			tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
 			if err != nil {
 				return nil, err
 			}
@@ -522,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 // parseNFSTransportStats parses a NFSTransportStats line using an input set of
 // integer fields matched to a specific stats version.
 func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
 	switch statVersion {
 	case statVersion10:
-		if len(ss) != fieldTransport10Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
 		}
 	case statVersion11:
-		if len(ss) != fieldTransport11Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
 		}
 	default:
@@ -536,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
-	// in a v1.0 response.
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
 	//
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11Len)
+	ns := make([]uint64, fieldTransport11TCPLen)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -551,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		ns[i] = n
 	}
 
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
 	return &NFSTransportStats{
+		Protocol:                 protocol,
 		Port:                     ns[0],
 		Bind:                     ns[1],
 		Connect:                  ns[2],
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index b6563e87e475a72265f2cdb42e127ed0473dce4a..28e50906adb6cf15fa09a3a73dc46668357a42a6 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool {
 	return v.needCompaction()
 }
 
+// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted.
+func (db *DB) resumeWrite() bool {
+	v := db.s.version()
+	defer v.release()
+	if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+		return true
+	}
+	return false
+}
+
 func (db *DB) pauseCompaction(ch chan<- struct{}) {
 	select {
 	case ch <- struct{}{}:
@@ -653,6 +663,7 @@ type cCmd interface {
 }
 
 type cAuto struct {
+	// Note for table compaction, an empty ackC represents it's a compaction waiting command.
 	ackC chan<- error
 }
 
@@ -765,8 +776,10 @@ func (db *DB) mCompaction() {
 }
 
 func (db *DB) tCompaction() {
-	var x cCmd
-	var ackQ []cCmd
+	var (
+		x           cCmd
+		ackQ, waitQ []cCmd
+	)
 
 	defer func() {
 		if x := recover(); x != nil {
@@ -778,6 +791,10 @@ func (db *DB) tCompaction() {
 			ackQ[i].ack(ErrClosed)
 			ackQ[i] = nil
 		}
+		for i := range waitQ {
+			waitQ[i].ack(ErrClosed)
+			waitQ[i] = nil
+		}
 		if x != nil {
 			x.ack(ErrClosed)
 		}
@@ -795,12 +812,25 @@ func (db *DB) tCompaction() {
 				return
 			default:
 			}
+			// Resume write operation as soon as possible.
+			if len(waitQ) > 0 && db.resumeWrite() {
+				for i := range waitQ {
+					waitQ[i].ack(nil)
+					waitQ[i] = nil
+				}
+				waitQ = waitQ[:0]
+			}
 		} else {
 			for i := range ackQ {
 				ackQ[i].ack(nil)
 				ackQ[i] = nil
 			}
 			ackQ = ackQ[:0]
+			for i := range waitQ {
+				waitQ[i].ack(nil)
+				waitQ[i] = nil
+			}
+			waitQ = waitQ[:0]
 			select {
 			case x = <-db.tcompCmdC:
 			case ch := <-db.tcompPauseC:
@@ -813,7 +843,11 @@ func (db *DB) tCompaction() {
 		if x != nil {
 			switch cmd := x.(type) {
 			case cAuto:
-				ackQ = append(ackQ, x)
+				if cmd.ackC != nil {
+					waitQ = append(waitQ, x)
+				} else {
+					ackQ = append(ackQ, x)
+				}
 			case cRange:
 				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
 			default:
diff --git a/vendor/github.com/tendermint/go-amino/amino.go b/vendor/github.com/tendermint/go-amino/amino.go
index 93831f074effc8d03e698877f2c7d46eceec3112..1f0f2a5313bbfbb74157702cb21d8baca1ac4774 100644
--- a/vendor/github.com/tendermint/go-amino/amino.go
+++ b/vendor/github.com/tendermint/go-amino/amino.go
@@ -11,62 +11,105 @@ import (
 )
 
 //----------------------------------------
-// Typ3 and Typ4
+// Global methods for global sealed codec.
+var gcdc *Codec
+
+func init() {
+	gcdc = NewCodec().Seal()
+}
+
+func MarshalBinary(o interface{}) ([]byte, error) {
+	return gcdc.MarshalBinary(o)
+}
+
+func MarshalBinaryWriter(w io.Writer, o interface{}) (n int64, err error) {
+	return gcdc.MarshalBinaryWriter(w, o)
+}
+
+func MustMarshalBinary(o interface{}) []byte {
+	return gcdc.MustMarshalBinary(o)
+}
+
+func MarshalBinaryBare(o interface{}) ([]byte, error) {
+	return gcdc.MarshalBinaryBare(o)
+}
+
+func MustMarshalBinaryBare(o interface{}) []byte {
+	return gcdc.MustMarshalBinaryBare(o)
+}
+
+func UnmarshalBinary(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalBinary(bz, ptr)
+}
+
+func UnmarshalBinaryReader(r io.Reader, ptr interface{}, maxSize int64) (n int64, err error) {
+	return gcdc.UnmarshalBinaryReader(r, ptr, maxSize)
+}
+
+func MustUnmarshalBinary(bz []byte, ptr interface{}) {
+	gcdc.MustUnmarshalBinary(bz, ptr)
+}
+
+func UnmarshalBinaryBare(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalBinaryBare(bz, ptr)
+}
+
+func MustUnmarshalBinaryBare(bz []byte, ptr interface{}) {
+	gcdc.MustUnmarshalBinaryBare(bz, ptr)
+}
+
+func MarshalJSON(o interface{}) ([]byte, error) {
+	return gcdc.MarshalJSON(o)
+}
+
+func UnmarshalJSON(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalJSON(bz, ptr)
+}
+
+func MarshalJSONIndent(o interface{}, prefix, indent string) ([]byte, error) {
+	return gcdc.MarshalJSONIndent(o, prefix, indent)
+}
+
+//----------------------------------------
+// Typ3
 
 type Typ3 uint8
-type Typ4 uint8 // Typ3 | 0x08 (pointer bit)
 
 const (
 	// Typ3 types
 	Typ3_Varint     = Typ3(0)
 	Typ3_8Byte      = Typ3(1)
 	Typ3_ByteLength = Typ3(2)
-	Typ3_Struct     = Typ3(3)
-	Typ3_StructTerm = Typ3(4)
-	Typ3_4Byte      = Typ3(5)
-	Typ3_List       = Typ3(6)
-	Typ3_Interface  = Typ3(7)
-
-	// Typ4 bit
-	Typ4_Pointer = Typ4(0x08)
+	//Typ3_Struct     = Typ3(3)
+	//Typ3_StructTerm = Typ3(4)
+	Typ3_4Byte = Typ3(5)
+	//Typ3_List       = Typ3(6)
+	//Typ3_Interface  = Typ3(7)
 )
 
 func (typ Typ3) String() string {
 	switch typ {
 	case Typ3_Varint:
-		return "Varint"
+		return "(U)Varint"
 	case Typ3_8Byte:
 		return "8Byte"
 	case Typ3_ByteLength:
 		return "ByteLength"
-	case Typ3_Struct:
-		return "Struct"
-	case Typ3_StructTerm:
-		return "StructTerm"
+	//case Typ3_Struct:
+	//	return "Struct"
+	//case Typ3_StructTerm:
+	//	return "StructTerm"
 	case Typ3_4Byte:
 		return "4Byte"
-	case Typ3_List:
-		return "List"
-	case Typ3_Interface:
-		return "Interface"
+	//case Typ3_List:
+	//	return "List"
+	//case Typ3_Interface:
+	//	return "Interface"
 	default:
 		return fmt.Sprintf("<Invalid Typ3 %X>", byte(typ))
 	}
 }
 
-func (typ Typ4) Typ3() Typ3      { return Typ3(typ & 0x07) }
-func (typ Typ4) IsPointer() bool { return (typ & 0x08) > 0 }
-func (typ Typ4) String() string {
-	if typ&0xF0 != 0 {
-		return fmt.Sprintf("<Invalid Typ4 %X>", byte(typ))
-	}
-	if typ&0x08 != 0 {
-		return "*" + Typ3(typ&0x07).String()
-	} else {
-		return Typ3(typ).String()
-	}
-}
-
 //----------------------------------------
 // *Codec methods
 
@@ -146,12 +189,18 @@ func (cdc *Codec) MarshalBinaryBare(o interface{}) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	err = cdc.encodeReflectBinary(buf, info, rv, FieldOptions{})
+	err = cdc.encodeReflectBinary(buf, info, rv, FieldOptions{}, true)
 	if err != nil {
 		return nil, err
 	}
 	bz = buf.Bytes()
 
+	// If registered concrete, prepend prefix bytes.
+	if info.Registered {
+		pb := info.Prefix.Bytes()
+		bz = append(pb, bz...)
+	}
+
 	return bz, nil
 }
 
@@ -256,25 +305,34 @@ func (cdc *Codec) MustUnmarshalBinary(bz []byte, ptr interface{}) {
 
 // UnmarshalBinaryBare will panic if ptr is a nil-pointer.
 func (cdc *Codec) UnmarshalBinaryBare(bz []byte, ptr interface{}) error {
-	if len(bz) == 0 {
-		return errors.New("UnmarshalBinaryBare cannot decode empty bytes")
-	}
 
-	rv, rt := reflect.ValueOf(ptr), reflect.TypeOf(ptr)
+	rv := reflect.ValueOf(ptr)
 	if rv.Kind() != reflect.Ptr {
 		panic("Unmarshal expects a pointer")
 	}
-	rv, rt = rv.Elem(), rt.Elem()
+	rv = rv.Elem()
+	rt := rv.Type()
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return err
 	}
-	n, err := cdc.decodeReflectBinary(bz, info, rv, FieldOptions{})
+	// If registered concrete, consume and verify prefix bytes.
+	if info.Registered {
+		pb := info.Prefix.Bytes()
+		if len(bz) < 4 {
+			return fmt.Errorf("UnmarshalBinaryBare expected to read prefix bytes %X (since it is registered concrete) but got %X", pb, bz)
+		} else if !bytes.Equal(bz[:4], pb) {
+			return fmt.Errorf("UnmarshalBinaryBare expected to read prefix bytes %X (since it is registered concrete) but got %X...", pb, bz[:4])
+		}
+		bz = bz[4:]
+	}
+	// Decode contents into rv.
+	n, err := cdc.decodeReflectBinary(bz, info, rv, FieldOptions{}, true)
 	if err != nil {
-		return err
+		return fmt.Errorf("unmarshal to %v failed after %d bytes (%v): %X", info.Type, n, err, bz)
 	}
 	if n != len(bz) {
-		return fmt.Errorf("Unmarshal didn't read all bytes. Expected to read %v, only read %v", len(bz), n)
+		return fmt.Errorf("unmarshal to %v didn't read all bytes. Expected to read %v, only read %v: %X", info.Type, len(bz), n, bz)
 	}
 	return nil
 }
@@ -293,25 +351,37 @@ func (cdc *Codec) MarshalJSON(o interface{}) ([]byte, error) {
 		return []byte("null"), nil
 	}
 	rt := rv.Type()
-
-	// Note that we can't yet skip directly
-	// to checking if a type implements
-	// json.Marshaler because in some cases
-	// var s GenericInterface = t1(v1)
-	// var t GenericInterface = t2(v1)
-	// but we need to be able to encode
-	// both s and t disambiguated, so:
-	//    {"type":<disfix>, "value":<data>}
-	// for the above case.
-
 	w := new(bytes.Buffer)
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return nil, err
 	}
+
+	// Write the disfix wrapper if it is a registered concrete type.
+	if info.Registered {
+		// Part 1:
+		err = writeStr(w, _fmt(`{"type":"%s","value":`, info.Name))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Write the rest from rv.
 	if err := cdc.encodeReflectJSON(w, info, rv, FieldOptions{}); err != nil {
 		return nil, err
 	}
+
+	// disfix wrapper continued...
+	if info.Registered {
+		// Part 2:
+		if err != nil {
+			return nil, err
+		}
+		err = writeStr(w, `}`)
+		if err != nil {
+			return nil, err
+		}
+	}
 	return w.Bytes(), nil
 }
 
@@ -324,20 +394,25 @@ func (cdc *Codec) UnmarshalJSON(bz []byte, ptr interface{}) error {
 	if rv.Kind() != reflect.Ptr {
 		return errors.New("UnmarshalJSON expects a pointer")
 	}
-
-	// If the type implements json.Unmarshaler, just
-	// automatically respect that and skip to it.
-	// if rv.Type().Implements(jsonUnmarshalerType) {
-	// 	return rv.Interface().(json.Unmarshaler).UnmarshalJSON(bz)
-	// }
-
-	// 1. Dereference until we find the first addressable type.
 	rv = rv.Elem()
 	rt := rv.Type()
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return err
 	}
+	// If registered concrete, consume and verify type wrapper.
+	if info.Registered {
+		// Consume type wrapper info.
+		name, bz_, err := decodeInterfaceJSON(bz)
+		if err != nil {
+			return err
+		}
+		// Check name against info.
+		if name != info.Name {
+			return fmt.Errorf("UnmarshalJSON wants to decode a %v but found a %v", info.Name, name)
+		}
+		bz = bz_
+	}
 	return cdc.decodeReflectJSON(bz, info, rv, FieldOptions{})
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/binary-decode.go b/vendor/github.com/tendermint/go-amino/binary-decode.go
index c1fac922afae9c3d34dd21b23811e3b7815b086c..6e18bc37183d5878b15b655d2e3826db1ce96997 100644
--- a/vendor/github.com/tendermint/go-amino/binary-decode.go
+++ b/vendor/github.com/tendermint/go-amino/binary-decode.go
@@ -6,17 +6,18 @@ import (
 	"reflect"
 	"time"
 
+	"encoding/binary"
 	"github.com/davecgh/go-spew/spew"
 )
 
 //----------------------------------------
 // cdc.decodeReflectBinary
 
-// This is the main entrypoint for decoding all types from binary form.  This
+// This is the main entrypoint for decoding all types from binary form. This
 // function calls decodeReflectBinary*, and generally those functions should
 // only call this one, for the prefix bytes are consumed here when present.
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -24,62 +25,12 @@ func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Valu
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(D) decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(D) decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), fopts: %v)\n",
+			bz, info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(D) -> n: %v, err: %v\n", n, err)
 		}()
 	}
-
-	// TODO Read the disamb bytes here if necessary.
-	// e.g. rv isn't an interface, and
-	// info.ConcreteType.AlwaysDisambiguate.  But we don't support
-	// this yet.
-
-	// Read prefix+typ3 bytes if registered.
-	if info.Registered {
-		if len(bz) < PrefixBytesLen {
-			err = errors.New("EOF skipping prefix bytes.")
-			return
-		}
-		// Check prefix bytes.
-		prefix3 := NewPrefixBytes(bz[:PrefixBytesLen])
-		var prefix, typ = prefix3.SplitTyp3()
-		if info.Prefix != prefix {
-			panic("should not happen")
-		}
-		// Check that typ3 in prefix bytes is correct.
-		err = checkTyp3(info.Type, typ, opts)
-		if err != nil {
-			return
-		}
-		// Consume prefix.  Yum.
-		bz = bz[PrefixBytesLen:]
-		n += PrefixBytesLen
-	}
-
-	_n := 0
-	_n, err = cdc._decodeReflectBinary(bz, info, rv, opts)
-	slide(&bz, &n, _n)
-	return
-}
-
-// CONTRACT: any immediate disamb/prefix bytes have been consumed/stripped.
-// CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
-	if !rv.CanAddr() {
-		panic("rv not addressable")
-	}
-	if info.Type.Kind() == reflect.Interface && rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> n: %v, err: %v\n", n, err)
-		}()
-	}
 	var _n int
 
 	// TODO consider the binary equivalent of json.Unmarshaller.
@@ -102,7 +53,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		if err != nil {
 			return
 		}
-		_n, err = cdc._decodeReflectBinary(bz, rinfo, rrv, opts)
+		_n, err = cdc.decodeReflectBinary(bz, rinfo, rrv, fopts, bare)
 		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
@@ -122,17 +73,17 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 	// Complex
 
 	case reflect.Interface:
-		_n, err = cdc.decodeReflectBinaryInterface(bz, info, rv, opts)
+		_n, err = cdc.decodeReflectBinaryInterface(bz, info, rv, fopts, bare)
 		n += _n
 		return
 
 	case reflect.Array:
 		ert := info.Type.Elem()
 		if ert.Kind() == reflect.Uint8 {
-			_n, err = cdc.decodeReflectBinaryByteArray(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryByteArray(bz, info, rv, fopts)
 			n += _n
 		} else {
-			_n, err = cdc.decodeReflectBinaryArray(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryArray(bz, info, rv, fopts, bare)
 			n += _n
 		}
 		return
@@ -140,16 +91,16 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 	case reflect.Slice:
 		ert := info.Type.Elem()
 		if ert.Kind() == reflect.Uint8 {
-			_n, err = cdc.decodeReflectBinaryByteSlice(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryByteSlice(bz, info, rv, fopts)
 			n += _n
 		} else {
-			_n, err = cdc.decodeReflectBinarySlice(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinarySlice(bz, info, rv, fopts, bare)
 			n += _n
 		}
 		return
 
 	case reflect.Struct:
-		_n, err = cdc.decodeReflectBinaryStruct(bz, info, rv, opts)
+		_n, err = cdc.decodeReflectBinaryStruct(bz, info, rv, fopts, bare)
 		n += _n
 		return
 
@@ -158,14 +109,14 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Int64:
 		var num int64
-		if opts.BinVarint {
-			num, _n, err = DecodeVarint(bz)
+		if fopts.BinFixed64 {
+			num, _n, err = DecodeInt64(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 			rv.SetInt(num)
 		} else {
-			num, _n, err = DecodeInt64(bz)
+			num, _n, err = DecodeVarint(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
@@ -174,12 +125,21 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		return
 
 	case reflect.Int32:
-		var num int32
-		num, _n, err = DecodeInt32(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		if fopts.BinFixed32 {
+			var num int32
+			num, _n, err = DecodeInt32(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetInt(int64(num))
+		} else {
+			var num int64
+			num, _n, err = DecodeVarint(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetInt(int64(num))
 		}
-		rv.SetInt(int64(num))
 		return
 
 	case reflect.Int16:
@@ -214,14 +174,14 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Uint64:
 		var num uint64
-		if opts.BinVarint {
-			num, _n, err = DecodeUvarint(bz)
+		if fopts.BinFixed64 {
+			num, _n, err = DecodeUint64(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 			rv.SetUint(num)
 		} else {
-			num, _n, err = DecodeUint64(bz)
+			num, _n, err = DecodeUvarint(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
@@ -230,12 +190,21 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		return
 
 	case reflect.Uint32:
-		var num uint32
-		num, _n, err = DecodeUint32(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		if fopts.BinFixed32 {
+			var num uint32
+			num, _n, err = DecodeUint32(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetUint(uint64(num))
+		} else {
+			var num uint64
+			num, _n, err = DecodeUvarint(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetUint(uint64(num))
 		}
-		rv.SetUint(uint64(num))
 		return
 
 	case reflect.Uint16:
@@ -279,7 +248,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Float64:
 		var f float64
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Float support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -292,7 +261,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Float32:
 		var f float32
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Float support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -319,7 +288,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -337,15 +306,21 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Consume disambiguation / prefix+typ3 bytes.
-	disamb, hasDisamb, prefix, typ, hasPrefix, isNil, _n, err := DecodeDisambPrefixBytes(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// Special case for nil.
-	if isNil {
-		rv.Set(iinfo.ZeroValue)
+	// Consume disambiguation / prefix bytes.
+	disamb, hasDisamb, prefix, hasPrefix, _n, err := DecodeDisambPrefixBytes(bz)
+	if slide(&bz, &n, _n) && err != nil {
 		return
 	}
 
@@ -362,23 +337,23 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Check and consume typ3 byte.
-	// It cannot be a typ4 byte because it cannot be nil.
-	err = checkTyp3(cinfo.Type, typ, opts)
-	if err != nil {
-		return
-	}
-
 	// Construct the concrete type.
 	var crv, irvSet = constructConcreteType(cinfo)
 
 	// Decode into the concrete type.
-	_n, err = cdc._decodeReflectBinary(bz, cinfo, crv, opts)
+	_n, err = cdc.decodeReflectBinary(bz, cinfo, crv, fopts, true)
 	if slide(&bz, &n, _n) && err != nil {
 		rv.Set(irvSet) // Helps with debugging
 		return
 	}
 
+	// Earlier, we set bz to the byteslice read from buf.
+	// Ensure that all of bz was consumed.
+	if len(bz) > 0 {
+		err = errors.New("bytes left over after reading interface contents")
+		return
+	}
+
 	// We need to set here, for when !PointerPreferred and the type
 	// is say, an array of bytes (e.g. [32]byte), then we must call
 	// rv.Set() *after* the value was acquired.
@@ -389,7 +364,7 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -426,7 +401,7 @@ func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv ref
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -441,65 +416,106 @@ func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect
 		panic("should not happen")
 	}
 	length := info.Type.Len()
-	einfo := (*TypeInfo)(nil)
-	einfo, err = cdc.getTypeInfo_wlock(ert)
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Check and consume typ4 byte.
-	var ptr, _n = false, int(0)
-	ptr, _n, err = decodeTyp4AndCheck(ert, bz, opts)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-
-	// Read number of items.
-	var count = uint64(0)
-	count, _n, err = DecodeUvarint(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-	if int(count) != length {
-		err = fmt.Errorf("Expected num items of %v, decoded %v", length, count)
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// NOTE: Unlike decodeReflectBinarySlice,
-	// there is nothing special to do for
-	// zero-length arrays.  Is that even possible?
-
-	// Read each item.
-	for i := 0; i < length; i++ {
-		var erv, _n = rv.Index(i), int(0)
-		// Maybe read nil.
-		if ptr {
-			numNil := int64(0)
-			numNil, _n, err = decodeNumNilBytes(bz)
+	// If elem is not already a ByteLength type, read in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Read elements in packed form.
+		for i := 0; i < length; i++ {
+			var erv, _n = rv.Index(i), int(0)
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, fopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
+			}
+			// Special case when reading default value, prefer nil.
+			if erv.Kind() == reflect.Ptr {
+				_, isDefault := isDefaultValue(erv)
+				if isDefault {
+					erv.Set(reflect.Zero(erv.Type()))
+					continue
+				}
+			}
+		}
+		// Ensure that we read the whole buffer.
+		if len(bz) > 0 {
+			err = errors.New("bytes left over after reading array contents")
+			return
+		}
+	} else {
+		// Read elements in unpacked form.
+		for i := 0; i < length; i++ {
+			// Read field key (number and type).
+			var fnum, typ, _n = uint32(0), Typ3(0x00), int(0)
+			fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+			// Validate field number and typ3.
+			if fnum != fopts.BinFieldNum {
+				err = errors.New(fmt.Sprintf("expected repeated field number %v, got %v", fopts.BinFieldNum, fnum))
+				return
+			}
+			if typ != Typ3_ByteLength {
+				err = errors.New(fmt.Sprintf("expected repeated field type %v, got %v", Typ3_ByteLength, typ))
+				return
+			}
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
-			if numNil == 0 {
-				// Good, continue decoding item.
-			} else if numNil == 1 {
-				// Set nil/zero.
+			// Decode the next ByteLength bytes into erv.
+			var erv = rv.Index(i)
+			// Special case if next ByteLength bytes are 0x00, set nil.
+			if len(bz) > 0 && bz[0] == 0x00 {
+				slide(&bz, &n, 1)
 				erv.Set(reflect.Zero(erv.Type()))
 				continue
-			} else {
-				panic("should not happen")
+			}
+			// Normal case, read next non-nil element from bz.
+			// In case of any inner lists in unpacked form.
+			efopts := fopts
+			efopts.BinFieldNum = 1
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, efopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
 			}
 		}
-		// Decode non-nil value.
-		_n, err = cdc.decodeReflectBinary(bz, einfo, erv, opts)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		// Ensure that there are no more elements left,
+		// and no field number regression either.
+		// This is to provide better error messages.
+		if len(bz) > 0 {
+			var fnum = uint32(0)
+			fnum, _, _, err = decodeFieldNumberAndTyp3(bz)
+			if err != nil {
+				return
+			}
+			if fnum <= fopts.BinFieldNum {
+				err = fmt.Errorf("unexpected field number %v after repeated field number %v", fnum, fopts.BinFieldNum)
+				return
+			}
 		}
 	}
 	return
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -531,7 +547,7 @@ func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv ref
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -545,72 +561,98 @@ func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect
 	if ert.Kind() == reflect.Uint8 {
 		panic("should not happen")
 	}
-	einfo := (*TypeInfo)(nil)
-	einfo, err = cdc.getTypeInfo_wlock(ert)
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Check and consume typ4 byte.
-	var ptr, _n = false, int(0)
-	ptr, _n, err = decodeTyp4AndCheck(ert, bz, opts)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
+	// Construct slice to collect decoded items to.
+	// NOTE: This is due to Proto3.  How to best optimize?
+	esrt := reflect.SliceOf(ert)
+	var srv = reflect.Zero(esrt)
 
-	// Read number of items.
-	var count = uint64(0)
-	count, _n, err = DecodeUvarint(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-	if int(count) < 0 {
-		err = fmt.Errorf("Impossible number of elements (%v)", count)
-		return
-	}
-	if int(count) > len(bz) { // Currently, each item takes at least 1 byte.
-		err = fmt.Errorf("Impossible number of elements (%v) compared to buffer length (%v)",
-			count, len(bz))
-		return
-	}
-
-	// Special case when length is 0.
-	// NOTE: We prefer nil slices.
-	if count == 0 {
-		rv.Set(info.ZeroValue)
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// Read each item.
-	// NOTE: Unlike decodeReflectBinaryArray,
-	// we need to construct a new slice before
-	// we populate it. Arrays on the other hand
-	// reserve space in the value itself.
-	var esrt = reflect.SliceOf(ert) // TODO could be optimized.
-	var srv = reflect.MakeSlice(esrt, int(count), int(count))
-	for i := 0; i < int(count); i++ {
-		var erv, _n = srv.Index(i), int(0)
-		// Maybe read nil.
-		if ptr {
-			var numNil = int64(0)
-			numNil, _n, err = decodeNumNilBytes(bz)
+	// If elem is not already a ByteLength type, read in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Read elems in packed form.
+		for {
+			if len(bz) == 0 {
+				break
+			}
+			erv, _n := reflect.New(ert).Elem(), int(0)
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, fopts, false)
 			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
 				return
 			}
-			if numNil == 0 {
-				// Good, continue decoding item.
-			} else if numNil == 1 {
-				// Set nil/zero.
+			// Special case when reading default value, prefer nil.
+			if ert.Kind() == reflect.Ptr {
+				_, isDefault := isDefaultValue(erv)
+				if isDefault {
+					srv = reflect.Append(srv, reflect.Zero(ert))
+					continue
+				}
+			}
+			// Otherwise append to slice.
+			srv = reflect.Append(srv, erv)
+		}
+	} else {
+		// Read elements in unpacked form.
+		for {
+			if len(bz) == 0 {
+				break
+			}
+			// Read field key (number and type).
+			var fnum, typ, _n = uint32(0), Typ3(0x00), int(0)
+			fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+			// Validate field number and typ3.
+			if fnum < fopts.BinFieldNum {
+				err = errors.New(fmt.Sprintf("expected repeated field number %v or greater, got %v", fopts.BinFieldNum, fnum))
+				return
+			}
+			if fnum > fopts.BinFieldNum {
+				break
+			}
+			if typ != Typ3_ByteLength {
+				err = errors.New(fmt.Sprintf("expected repeated field type %v, got %v", Typ3_ByteLength, typ))
+				return
+			}
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			// Decode the next ByteLength bytes into erv.
+			erv, _n := reflect.New(ert).Elem(), int(0)
+			// Special case if next ByteLength bytes are 0x00, set nil.
+			if len(bz) > 0 && bz[0] == 0x00 {
+				slide(&bz, &n, 1)
 				erv.Set(reflect.Zero(erv.Type()))
+				srv = reflect.Append(srv, erv)
 				continue
-			} else {
-				panic("should not happen")
 			}
-		}
-		// Decode non-nil value.
-		_n, err = cdc.decodeReflectBinary(bz, einfo, erv, opts)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+			// Normal case, read next non-nil element from bz.
+			// In case of any inner lists in unpacked form.
+			efopts := fopts
+			efopts.BinFieldNum = 1
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, efopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
+			}
+			srv = reflect.Append(srv, erv)
 		}
 	}
 	rv.Set(srv)
@@ -618,7 +660,7 @@ func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflect.Value, _ FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflect.Value, _ FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -630,9 +672,21 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 	}
 	_n := 0 // nolint: ineffassign
 
-	// The "Struct" typ3 doesn't get read here.
+	// NOTE: The "Struct" typ3 doesn't get read here.
 	// It's already implied, either by struct-key or list-element-type-byte.
 
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
+	}
+
 	switch info.Type {
 
 	case timeType:
@@ -643,9 +697,10 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 			return
 		}
 		rv.Set(reflect.ValueOf(t))
-		return
 
 	default:
+		// Track the last seen field number.
+		var lastFieldNum uint32
 		// Read each field.
 		for _, field := range info.Fields {
 
@@ -657,81 +712,157 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 				return
 			}
 
-			// Read field key (number and type).
-			var fieldNum, typ = uint32(0), Typ3(0x00)
-			fieldNum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
-			if field.BinFieldNum < fieldNum {
-				// Set nil field value.
+			// We're done if we've consumed all the bytes.
+			if len(bz) == 0 {
 				frv.Set(reflect.Zero(frv.Type()))
 				continue
-				// Do not slide, we will read it again.
 			}
-			if fieldNum == 0 {
-				// Probably a StructTerm.
-				break
+
+			if field.UnpackedList {
+				// This is a list that was encoded unpacked, e.g.
+				// with repeated field entries for each list item.
+				_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions, true)
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
+			} else {
+				// Read field key (number and type).
+				var fnum, typ = uint32(0), Typ3(0x00)
+				fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+				if field.BinFieldNum < fnum {
+					// Set zero field value.
+					frv.Set(reflect.Zero(frv.Type()))
+					continue
+					// Do not slide, we will read it again.
+				}
+				if fnum <= lastFieldNum {
+					err = fmt.Errorf("encountered fieldnNum: %v, but we have already seen fnum: %v\nbytes:%X",
+						fnum, lastFieldNum, bz)
+					return
+				}
+				lastFieldNum = fnum
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
+
+				// Validate fnum and typ.
+				// NOTE: In the future, we'll support upgradeability.
+				// So in the future, this may not match,
+				// so we will need to remove this sanity check.
+				if field.BinFieldNum != fnum {
+					err = errors.New(fmt.Sprintf("expected field # %v of %v, got %v",
+						field.BinFieldNum, info.Type, fnum))
+					return
+				}
+				typWanted := typeToTyp3(finfo.Type, field.FieldOptions)
+				if typ != typWanted {
+					err = errors.New(fmt.Sprintf("expected field type %v for # %v of %v, got %v",
+						typWanted, fnum, info.Type, typ))
+					return
+				}
+
+				// Decode field into frv.
+				_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions, false)
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
 			}
+		}
+
+		// Consume any remaining fields.
+		var _n, fnum = 0, uint32(0)
+		var typ3 Typ3
+		for len(bz) > 0 {
+			fnum, typ3, _n, err = decodeFieldNumberAndTyp3(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
-			// NOTE: In the future, we'll support upgradeability.
-			// So in the future, this may not match,
-			// so we will need to remove this sanity check.
-			if field.BinFieldNum != fieldNum {
-				err = errors.New(fmt.Sprintf("Expected field number %v, got %v", field.BinFieldNum, fieldNum))
-				return
-			}
-			typWanted := typeToTyp4(field.Type, field.FieldOptions).Typ3()
-			if typ != typWanted {
-				err = errors.New(fmt.Sprintf("Expected field type %X, got %X", typWanted, typ))
+			if fnum <= lastFieldNum {
+				err = fmt.Errorf("encountered fieldnNum: %v, but we have already seen fnum: %v\nbytes:%X",
+					fnum, lastFieldNum, bz)
 				return
 			}
+			lastFieldNum = fnum
 
-			// Decode field into frv.
-			_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions)
+			_n, err = consumeAny(typ3, bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 		}
+	}
+	return
+}
+
+//----------------------------------------
+// consume* for skipping struct fields
 
-		// Read "StructTerm".
-		// NOTE: In the future, we'll need to break out of a loop
-		// when encoutering an StructTerm typ3 byte.
-		var typ = Typ3(0x00)
-		typ, _n, err = decodeTyp3(bz)
+// Read everything without doing anything with it. Report errors if they occur.
+func consumeAny(typ3 Typ3, bz []byte) (n int, err error) {
+	var _n int
+	switch typ3 {
+	case Typ3_Varint:
+		_, _n, err = DecodeVarint(bz)
+	case Typ3_8Byte:
+		_, _n, err = DecodeInt64(bz)
+	case Typ3_ByteLength:
+		_, _n, err = DecodeByteSlice(bz)
+	case Typ3_4Byte:
+		_, _n, err = DecodeInt32(bz)
+	default:
+		err = fmt.Errorf("invalid typ3 bytes %v", typ3)
+		return
+	}
+	if err != nil {
+		// do not slide
+		return
+	}
+	slide(&bz, &n, _n)
+	return
+}
+
+func consumeStruct(bz []byte) (n int, err error) {
+	var _n, typ = int(0), Typ3(0x00)
+	for {
+		typ, _n, err = consumeFieldKey(bz)
 		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
-		if typ != Typ3_StructTerm {
-			err = errors.New(fmt.Sprintf("Expected StructTerm typ3 byte, got %X", typ))
+		_n, err = consumeAny(typ, bz)
+		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
+	}
+	return
+}
+
+func consumeFieldKey(bz []byte) (typ Typ3, n int, err error) {
+	var u64 uint64
+	u64, n = binary.Uvarint(bz)
+	if n < 0 {
+		n = 0
+		err = errors.New("error decoding uvarint")
 		return
 	}
+	typ = Typ3(u64 & 0x07)
+	return
 }
 
 //----------------------------------------
 
-func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBytes, typ Typ3, hasPb bool, isNil bool, n int, err error) {
-	// Special case: nil
-	if len(bz) >= 2 && bz[0] == 0x00 && bz[1] == 0x00 {
-		isNil = true
-		n = 2
-		return
-	}
+func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBytes, hasPb bool, n int, err error) {
 	// Validate
 	if len(bz) < 4 {
-		err = errors.New("EOF reading prefix bytes.")
+		err = errors.New("EOF while reading prefix bytes.")
 		return // hasPb = false
 	}
 	if bz[0] == 0x00 { // Disfix
 		// Validate
 		if len(bz) < 8 {
-			err = errors.New("EOF reading disamb bytes.")
+			err = errors.New("EOF while reading disamb bytes.")
 			return // hasPb = false
 		}
 		copy(db[0:3], bz[1:4])
 		copy(pb[0:4], bz[4:8])
-		pb, typ = pb.SplitTyp3()
 		hasDb = true
 		hasPb = true
 		n = 8
@@ -739,7 +870,6 @@ func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBy
 	} else { // Prefix
 		// General case with no disambiguation
 		copy(pb[0:4], bz[0:4])
-		pb, typ = pb.SplitTyp3()
 		hasDb = false
 		hasPb = true
 		n = 4
@@ -764,49 +894,18 @@ func decodeFieldNumberAndTyp3(bz []byte) (num uint32, typ Typ3, n int, err error
 	var num64 uint64
 	num64 = value64 >> 3
 	if num64 > (1<<29 - 1) {
-		err = errors.New(fmt.Sprintf("invalid field num %v", num64))
+		err = fmt.Errorf("invalid field num %v", num64)
 		return
 	}
 	num = uint32(num64)
 	return
 }
 
-// Consume typ4 byte and error if it doesn't match rt.
-func decodeTyp4AndCheck(rt reflect.Type, bz []byte, opts FieldOptions) (ptr bool, n int, err error) {
-	var typ = Typ4(0x00)
-	typ, n, err = decodeTyp4(bz)
-	if err != nil {
-		return
-	}
-	var typWanted = typeToTyp4(rt, opts)
-	if typWanted != typ {
-		err = errors.New(fmt.Sprintf("Typ4 mismatch.  Expected %X, got %X", typWanted, typ))
-		return
-	}
-	ptr = (typ & 0x08) != 0
-	return
-}
-
-// Read Typ4 byte.
-func decodeTyp4(bz []byte) (typ Typ4, n int, err error) {
-	if len(bz) == 0 {
-		err = errors.New(fmt.Sprintf("EOF reading typ4 byte"))
-		return
-	}
-	if bz[0]&0xF0 != 0 {
-		err = errors.New(fmt.Sprintf("Invalid non-zero nibble reading typ4 byte"))
-		return
-	}
-	typ = Typ4(bz[0])
-	n = 1
-	return
-}
-
 // Error if typ doesn't match rt.
-func checkTyp3(rt reflect.Type, typ Typ3, opts FieldOptions) (err error) {
-	typWanted := typeToTyp3(rt, opts)
+func checkTyp3(rt reflect.Type, typ Typ3, fopts FieldOptions) (err error) {
+	typWanted := typeToTyp3(rt, fopts)
 	if typ != typWanted {
-		err = fmt.Errorf("Typ3 mismatch.  Expected %X, got %X", typWanted, typ)
+		err = fmt.Errorf("unexpected Typ3. want %v, got %v", typWanted, typ)
 	}
 	return
 }
@@ -814,11 +913,11 @@ func checkTyp3(rt reflect.Type, typ Typ3, opts FieldOptions) (err error) {
 // Read typ3 byte.
 func decodeTyp3(bz []byte) (typ Typ3, n int, err error) {
 	if len(bz) == 0 {
-		err = fmt.Errorf("EOF reading typ3 byte")
+		err = fmt.Errorf("EOF while reading typ3 byte")
 		return
 	}
 	if bz[0]&0xF8 != 0 {
-		err = fmt.Errorf("Invalid typ3 byte")
+		err = fmt.Errorf("invalid typ3 byte: %v", Typ3(bz[0]).String())
 		return
 	}
 	typ = Typ3(bz[0])
@@ -831,7 +930,7 @@ func decodeTyp3(bz []byte) (typ Typ3, n int, err error) {
 // other values will error.
 func decodeNumNilBytes(bz []byte) (numNil int64, n int, err error) {
 	if len(bz) == 0 {
-		err = errors.New("EOF reading nil byte(s)")
+		err = errors.New("EOF while reading nil byte(s)")
 		return
 	}
 	if bz[0] == 0x00 {
@@ -842,6 +941,6 @@ func decodeNumNilBytes(bz []byte) (numNil int64, n int, err error) {
 		numNil, n = 1, 1
 		return
 	}
-	n, err = 0, fmt.Errorf("Unexpected nil byte %X (sparse lists not supported)", bz[0])
+	n, err = 0, fmt.Errorf("unexpected nil byte, want: either '0x00' or '0x01' got: %X (sparse lists not supported)", bz[0])
 	return
 }
diff --git a/vendor/github.com/tendermint/go-amino/binary-encode.go b/vendor/github.com/tendermint/go-amino/binary-encode.go
index e141d4d224bf90fb2cf532c87a292952f51993ab..9f7e5831ac896e8adad9972bc67ebcb40f4820d3 100644
--- a/vendor/github.com/tendermint/go-amino/binary-encode.go
+++ b/vendor/github.com/tendermint/go-amino/binary-encode.go
@@ -1,6 +1,7 @@
 package amino
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -21,7 +22,7 @@ import (
 // The following contracts apply to all similar encode methods.
 // CONTRACT: rv is not a pointer
 // CONTRACT: rv is valid.
-func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if rv.Kind() == reflect.Ptr {
 		panic("should not happen")
 	}
@@ -29,42 +30,13 @@ func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Va
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(E) encodeReflectBinary(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(E) encodeReflectBinary(info: %v, rv: %#v (%v), fopts: %v)\n",
+			info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(E) -> err: %v\n", err)
 		}()
 	}
 
-	// Maybe write prefix+typ3 bytes.
-	if info.Registered {
-		var typ = typeToTyp4(info.Type, opts).Typ3()
-		_, err = w.Write(info.Prefix.WithTyp3(typ).Bytes())
-		if err != nil {
-			return
-		}
-	}
-
-	err = cdc._encodeReflectBinary(w, info, rv, opts)
-	return
-}
-
-// CONTRACT: any disamb/prefix+typ3 bytes have already been written.
-func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if !rv.IsValid() {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _encodeReflectBinary(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Handle override if rv implements json.Marshaler.
 	if info.IsAminoMarshaler {
 		// First, encode rv into repr instance.
@@ -78,7 +50,7 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 			return
 		}
 		// Then, encode the repr instance.
-		err = cdc._encodeReflectBinary(w, rinfo, rrv, opts)
+		err = cdc.encodeReflectBinary(w, rinfo, rrv, fopts, bare)
 		return
 	}
 
@@ -88,37 +60,41 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	// Complex
 
 	case reflect.Interface:
-		err = cdc.encodeReflectBinaryInterface(w, info, rv, opts)
+		err = cdc.encodeReflectBinaryInterface(w, info, rv, fopts, bare)
 
 	case reflect.Array:
 		if info.Type.Elem().Kind() == reflect.Uint8 {
-			err = cdc.encodeReflectBinaryByteArray(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryByteArray(w, info, rv, fopts)
 		} else {
-			err = cdc.encodeReflectBinaryList(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryList(w, info, rv, fopts, bare)
 		}
 
 	case reflect.Slice:
 		if info.Type.Elem().Kind() == reflect.Uint8 {
-			err = cdc.encodeReflectBinaryByteSlice(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryByteSlice(w, info, rv, fopts)
 		} else {
-			err = cdc.encodeReflectBinaryList(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryList(w, info, rv, fopts, bare)
 		}
 
 	case reflect.Struct:
-		err = cdc.encodeReflectBinaryStruct(w, info, rv, opts)
+		err = cdc.encodeReflectBinaryStruct(w, info, rv, fopts, bare)
 
 	//----------------------------------------
 	// Signed
 
 	case reflect.Int64:
-		if opts.BinVarint {
-			err = EncodeVarint(w, rv.Int())
-		} else {
+		if fopts.BinFixed64 {
 			err = EncodeInt64(w, rv.Int())
+		} else {
+			err = EncodeVarint(w, rv.Int())
 		}
 
 	case reflect.Int32:
-		err = EncodeInt32(w, int32(rv.Int()))
+		if fopts.BinFixed32 {
+			err = EncodeInt32(w, int32(rv.Int()))
+		} else {
+			err = EncodeVarint(w, rv.Int())
+		}
 
 	case reflect.Int16:
 		err = EncodeInt16(w, int16(rv.Int()))
@@ -133,14 +109,18 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	// Unsigned
 
 	case reflect.Uint64:
-		if opts.BinVarint {
-			err = EncodeUvarint(w, rv.Uint())
-		} else {
+		if fopts.BinFixed64 {
 			err = EncodeUint64(w, rv.Uint())
+		} else {
+			err = EncodeUvarint(w, rv.Uint())
 		}
 
 	case reflect.Uint32:
-		err = EncodeUint32(w, uint32(rv.Uint()))
+		if fopts.BinFixed32 {
+			err = EncodeUint32(w, uint32(rv.Uint()))
+		} else {
+			err = EncodeUvarint(w, rv.Uint())
+		}
 
 	case reflect.Uint16:
 		err = EncodeUint16(w, uint16(rv.Uint()))
@@ -158,14 +138,14 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 		err = EncodeBool(w, rv.Bool())
 
 	case reflect.Float64:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Amino float* support requires `amino:\"unsafe\"`.")
 			return
 		}
 		err = EncodeFloat64(w, rv.Float())
 
 	case reflect.Float32:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Amino float* support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -184,7 +164,7 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryInterface")
 		defer func() {
@@ -192,9 +172,9 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		}()
 	}
 
-	// Special case when rv is nil, write 0x0000.
+	// Special case when rv is nil, write 0x00 to denote an empty byteslice.
 	if rv.IsNil() {
-		_, err = w.Write([]byte{0x00, 0x00})
+		_, err = w.Write([]byte{0x00})
 		return
 	}
 
@@ -221,6 +201,9 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		return
 	}
 
+	// For Proto3 compatibility, encode interfaces as ByteLength.
+	buf := bytes.NewBuffer(nil)
+
 	// Write disambiguation bytes if needed.
 	var needDisamb bool = false
 	if iinfo.AlwaysDisambiguate {
@@ -229,25 +212,35 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		needDisamb = true
 	}
 	if needDisamb {
-		_, err = w.Write(append([]byte{0x00}, cinfo.Disamb[:]...))
+		_, err = buf.Write(append([]byte{0x00}, cinfo.Disamb[:]...))
 		if err != nil {
 			return
 		}
 	}
 
-	// Write prefix+typ3 bytes.
-	var typ = typeToTyp3(crt, opts)
-	_, err = w.Write(cinfo.Prefix.WithTyp3(typ).Bytes())
+	// Write prefix bytes.
+	_, err = buf.Write(cinfo.Prefix.Bytes())
 	if err != nil {
 		return
 	}
 
 	// Write actual concrete value.
-	err = cdc._encodeReflectBinary(w, cinfo, crv, opts)
+	err = cdc.encodeReflectBinary(buf, cinfo, crv, fopts, true)
+	if err != nil {
+		return
+	}
+
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	ert := info.Type.Elem()
 	if ert.Kind() != reflect.Uint8 {
 		panic("should not happen")
@@ -268,7 +261,7 @@ func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv r
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryList")
 		defer func() {
@@ -279,57 +272,71 @@ func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflec
 	if ert.Kind() == reflect.Uint8 {
 		panic("should not happen")
 	}
-
-	// Write element Typ4 byte.
-	var typ = typeToTyp4(ert, opts)
-	err = EncodeByte(w, byte(typ))
-	if err != nil {
-		return
-	}
-
-	// Write length.
-	err = EncodeUvarint(w, uint64(rv.Len()))
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Write elems.
-	var einfo *TypeInfo
-	einfo, err = cdc.getTypeInfo_wlock(ert)
-	if err != nil {
-		return
-	}
-	for i := 0; i < rv.Len(); i++ {
-		// Get dereferenced element value and info.
-		var erv, void = isVoid(rv.Index(i))
-		if typ.IsPointer() {
-			// We must write a byte to denote whether element is nil.
-			if void {
-				// Value is nil or empty.
-				// e.g. nil pointer, nil/empty slice, pointer to nil/empty slice, pointer
-				// to nil pointer.  Write 0x01 for "is nil".
-				// NOTE: Do not use a pointer to nil/empty slices to denote
-				// existence or not.  We have to make a design choice here, and
-				// here we discourage using pointers to denote existence.
-				_, err = w.Write([]byte{0x01})
-				continue
-			} else {
-				// Value is not nil or empty.  Write 0x00 for "not nil/empty".
-				_, err = w.Write([]byte{0x00})
+	// Proto3 byte-length prefixing incurs alloc cost on the encoder.
+	// Here we incur it for unpacked form for ease of dev.
+	buf := bytes.NewBuffer(nil)
+
+	// If elem is not already a ByteLength type, write in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.  Please?  :)
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Write elems in packed form.
+		for i := 0; i < rv.Len(); i++ {
+			// Get dereferenced element value (or zero).
+			var erv, _, _ = derefPointersZero(rv.Index(i))
+			// Write the element value.
+			err = cdc.encodeReflectBinary(buf, einfo, erv, fopts, false)
+			if err != nil {
+				return
 			}
 		}
-		// Write the element value.
-		// It may be a nil interface, but not a nil pointer.
-		err = cdc.encodeReflectBinary(w, einfo, erv, opts)
-		if err != nil {
-			return
+	} else {
+		// Write elems in unpacked form.
+		for i := 0; i < rv.Len(); i++ {
+			// Write elements as repeated fields of the parent struct.
+			err = encodeFieldNumberAndTyp3(buf, fopts.BinFieldNum, Typ3_ByteLength)
+			if err != nil {
+				return
+			}
+			// Get dereferenced element value and info.
+			var erv, isDefault = isDefaultValue(rv.Index(i))
+			if isDefault {
+				// Nothing to encode, so the length is 0.
+				err = EncodeByte(buf, byte(0x00))
+				if err != nil {
+					return
+				}
+			} else {
+				// Write the element value as a ByteLength.
+				// In case of any inner lists in unpacked form.
+				efopts := fopts
+				efopts.BinFieldNum = 1
+				err = cdc.encodeReflectBinary(buf, einfo, erv, efopts, false)
+				if err != nil {
+					return
+				}
+			}
 		}
 	}
+
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
 	return
 }
 
 // CONTRACT: info.Type.Elem().Kind() == reflect.Uint8
-func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryByteSlice")
 		defer func() {
@@ -347,7 +354,7 @@ func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv r
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryBinaryStruct")
 		defer func() {
@@ -355,51 +362,62 @@ func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv refl
 		}()
 	}
 
-	// The "Struct" Typ3 doesn't get written here.
-	// It's already implied, either by struct-key or list-element-type-byte.
+	// Proto3 incurs a cost in writing non-root structs.
+	// Here we incur it for root structs as well for ease of dev.
+	buf := bytes.NewBuffer(nil)
 
 	switch info.Type {
 
 	case timeType:
 		// Special case: time.Time
-		err = EncodeTime(w, rv.Interface().(time.Time))
-		return
+		err = EncodeTime(buf, rv.Interface().(time.Time))
+		if err != nil {
+			return
+		}
 
 	default:
 		for _, field := range info.Fields {
-			// Get dereferenced field value and info.
-			var frv, void = isVoid(rv.Field(field.Index))
-			if void {
-				// Do not encode nil or empty fields.
-				continue
-			}
+			// Get type info for field.
 			var finfo *TypeInfo
 			finfo, err = cdc.getTypeInfo_wlock(field.Type)
 			if err != nil {
 				return
 			}
-			// TODO Maybe allow omitempty somehow.
-			// Write field key (number and type).
-			err = encodeFieldNumberAndTyp3(w, field.BinFieldNum, field.BinTyp3)
-			if err != nil {
-				return
+			// Get dereferenced field value and info.
+			var frv, isDefault = isDefaultValue(rv.Field(field.Index))
+			if isDefault {
+				// Do not encode default value fields.
+				continue
 			}
-			// Write field from rv.
-			err = cdc.encodeReflectBinary(w, finfo, frv, field.FieldOptions)
-			if err != nil {
-				return
+			if field.UnpackedList {
+				// Write repeated field entries for each list item.
+				err = cdc.encodeReflectBinaryList(buf, finfo, frv, field.FieldOptions, true)
+				if err != nil {
+					return
+				}
+			} else {
+				// Write field key (number and type).
+				err = encodeFieldNumberAndTyp3(buf, field.BinFieldNum, typeToTyp3(finfo.Type, field.FieldOptions))
+				if err != nil {
+					return
+				}
+				// Write field from rv.
+				err = cdc.encodeReflectBinary(buf, finfo, frv, field.FieldOptions, false)
+				if err != nil {
+					return
+				}
 			}
 		}
-
-		// Write "StructTerm".
-		err = EncodeByte(w, byte(Typ3_StructTerm))
-		if err != nil {
-			return
-		}
-		return
-
 	}
 
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
+	return
 }
 
 //----------------------------------------
@@ -408,7 +426,7 @@ func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv refl
 // Write field key.
 func encodeFieldNumberAndTyp3(w io.Writer, num uint32, typ Typ3) (err error) {
 	if (typ & 0xF8) != 0 {
-		panic(fmt.Sprintf("invalid Typ3 byte %X", typ))
+		panic(fmt.Sprintf("invalid Typ3 byte %v", typ))
 	}
 	if num < 0 || num > (1<<29-1) {
 		panic(fmt.Sprintf("invalid field number %v", num))
diff --git a/vendor/github.com/tendermint/go-amino/codec.go b/vendor/github.com/tendermint/go-amino/codec.go
index 30af7e7e8ae891a55444f595d97a7a9a0b7c1e75..605b9993ab6a1a44fc1f53ed16bd28fb69d028ca 100644
--- a/vendor/github.com/tendermint/go-amino/codec.go
+++ b/vendor/github.com/tendermint/go-amino/codec.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/sha256"
 	"fmt"
+	"io"
 	"reflect"
 	"strings"
 	"sync"
@@ -34,14 +35,8 @@ func NewPrefixBytes(prefixBytes []byte) PrefixBytes {
 	return pb
 }
 
-func (pb PrefixBytes) Bytes() []byte                 { return pb[:] }
-func (pb PrefixBytes) EqualBytes(bz []byte) bool     { return bytes.Equal(pb[:], bz) }
-func (pb PrefixBytes) WithTyp3(typ Typ3) PrefixBytes { pb[3] |= byte(typ); return pb }
-func (pb PrefixBytes) SplitTyp3() (PrefixBytes, Typ3) {
-	typ := Typ3(pb[3] & 0x07)
-	pb[3] &= 0xF8
-	return pb, typ
-}
+func (pb PrefixBytes) Bytes() []byte             { return pb[:] }
+func (pb PrefixBytes) EqualBytes(bz []byte) bool { return bytes.Equal(pb[:], bz) }
 func (db DisambBytes) Bytes() []byte             { return db[:] }
 func (db DisambBytes) EqualBytes(bz []byte) bool { return bytes.Equal(db[:], bz) }
 func (df DisfixBytes) Bytes() []byte             { return df[:] }
@@ -79,12 +74,13 @@ type InterfaceOptions struct {
 type ConcreteInfo struct {
 
 	// These fields are only set when registered (as implementing an interface).
-	Registered       bool        // Registered with RegisterConcrete().
-	PointerPreferred bool        // Deserialize to pointer type if possible.
-	Name             string      // Registered name.
-	Disamb           DisambBytes // Disambiguation bytes derived from name.
-	Prefix           PrefixBytes // Prefix bytes derived from name.
-	ConcreteOptions              // Registration options.
+	Registered       bool // Registered with RegisterConcrete().
+	PointerPreferred bool // Deserialize to pointer type if possible.
+	// NilPreferred     bool        // Deserialize to nil for empty structs if PointerPreferred.
+	Name            string      // Registered name.
+	Disamb          DisambBytes // Disambiguation bytes derived from name.
+	Prefix          PrefixBytes // Prefix bytes derived from name.
+	ConcreteOptions             // Registration options.
 
 	// These fields get set for all concrete types,
 	// even those not manually registered (e.g. are never interface values).
@@ -110,14 +106,15 @@ type FieldInfo struct {
 	Type         reflect.Type  // Struct field type
 	Index        int           // Struct field index
 	ZeroValue    reflect.Value // Could be nil pointer unlike TypeInfo.ZeroValue.
+	UnpackedList bool          // True iff this field should be encoded as an unpacked list.
 	FieldOptions               // Encoding options
-	BinTyp3      Typ3          // (Binary) Typ3 byte
 }
 
 type FieldOptions struct {
 	JSONName      string // (JSON) field name
 	JSONOmitEmpty bool   // (JSON) omitempty
-	BinVarint     bool   // (Binary) Use length-prefixed encoding for (u)int64.
+	BinFixed64    bool   // (Binary) Encode as fixed64
+	BinFixed32    bool   // (Binary) Encode as fixed32
 	BinFieldNum   uint32 // (Binary) max 1<<29-1
 	Unsafe        bool   // e.g. if this field is a float.
 }
@@ -127,16 +124,20 @@ type FieldOptions struct {
 
 type Codec struct {
 	mtx              sync.RWMutex
+	sealed           bool
 	typeInfos        map[reflect.Type]*TypeInfo
 	interfaceInfos   []*TypeInfo
 	concreteInfos    []*TypeInfo
 	disfixToTypeInfo map[DisfixBytes]*TypeInfo
+	nameToTypeInfo   map[string]*TypeInfo
 }
 
 func NewCodec() *Codec {
 	cdc := &Codec{
+		sealed:           false,
 		typeInfos:        make(map[reflect.Type]*TypeInfo),
 		disfixToTypeInfo: make(map[DisfixBytes]*TypeInfo),
+		nameToTypeInfo:   make(map[string]*TypeInfo),
 	}
 	return cdc
 }
@@ -145,7 +146,8 @@ func NewCodec() *Codec {
 // encoded/decoded by go-amino.
 // Usage:
 // `amino.RegisterInterface((*MyInterface1)(nil), nil)`
-func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
+func (cdc *Codec) RegisterInterface(ptr interface{}, iopts *InterfaceOptions) {
+	cdc.assertNotSealed()
 
 	// Get reflect.Type from ptr.
 	rt := getTypeFromPointer(ptr)
@@ -154,7 +156,7 @@ func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
 	}
 
 	// Construct InterfaceInfo
-	var info = cdc.newTypeInfoFromInterfaceType(rt, opts)
+	var info = cdc.newTypeInfoFromInterfaceType(rt, iopts)
 
 	// Finally, check conflicts and register.
 	func() {
@@ -199,7 +201,8 @@ func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
 // interface fields/elements to be encoded/decoded by go-amino.
 // Usage:
 // `amino.RegisterConcrete(MyStruct1{}, "com.tendermint/MyStruct1", nil)`
-func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOptions) {
+func (cdc *Codec) RegisterConcrete(o interface{}, name string, copts *ConcreteOptions) {
+	cdc.assertNotSealed()
 
 	var pointerPreferred bool
 
@@ -222,7 +225,7 @@ func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOpt
 	}
 
 	// Construct ConcreteInfo.
-	var info = cdc.newTypeInfoFromRegisteredConcreteType(rt, pointerPreferred, name, opts)
+	var info = cdc.newTypeInfoFromRegisteredConcreteType(rt, pointerPreferred, name, copts)
 
 	// Finally, check conflicts and register.
 	func() {
@@ -234,8 +237,95 @@ func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOpt
 	}()
 }
 
+func (cdc *Codec) Seal() *Codec {
+	cdc.mtx.Lock()
+	defer cdc.mtx.Unlock()
+
+	cdc.sealed = true
+	return cdc
+}
+
+// PrintTypes writes all registered types in a markdown-style table.
+// The table's header is:
+//
+// | Type  | Name | Prefix | Notes |
+//
+// Where Type is the golang type name and Name is the name the type was registered with.
+func (cdc Codec) PrintTypes(out io.Writer) error {
+	cdc.mtx.RLock()
+	defer cdc.mtx.RUnlock()
+	// print header
+	if _, err := io.WriteString(out, "| Type | Name | Prefix | Length | Notes |\n"); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(out, "| ---- | ---- | ------ | ----- | ------ |\n"); err != nil {
+		return err
+	}
+	// only print concrete types for now (if we want everything, we can iterate over the typeInfos map instead)
+	for _, i := range cdc.concreteInfos {
+		io.WriteString(out, "| ")
+		// TODO(ismail): optionally create a link to code on github:
+		if _, err := io.WriteString(out, i.Type.Name()); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, i.Name); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, fmt.Sprintf("0x%X", i.Prefix)); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+
+		if _, err := io.WriteString(out, getLengthStr(i)); err != nil {
+			return err
+		}
+
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		// empty notes table data by default // TODO(ismail): make this configurable
+
+		io.WriteString(out, " |\n")
+	}
+	// finish table
+	return nil
+}
+
+// A heuristic to guess the size of a registered type and return it as a string.
+// If the size is not fixed it returns "variable".
+func getLengthStr(info *TypeInfo) string {
+	switch info.Type.Kind() {
+	case reflect.Array,
+		reflect.Int8,
+		reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Float32, reflect.Float64,
+		reflect.Complex64, reflect.Complex128:
+		s := info.Type.Size()
+		return fmt.Sprintf("0x%X", s)
+	default:
+		return "variable"
+	}
+}
+
 //----------------------------------------
 
+func (cdc *Codec) assertNotSealed() {
+	cdc.mtx.Lock()
+	defer cdc.mtx.Unlock()
+
+	if cdc.sealed {
+		panic("codec sealed")
+	}
+}
+
 func (cdc *Codec) setTypeInfo_nolock(info *TypeInfo) {
 
 	if info.Type.Kind() == reflect.Ptr {
@@ -254,7 +344,11 @@ func (cdc *Codec) setTypeInfo_nolock(info *TypeInfo) {
 		if existing, ok := cdc.disfixToTypeInfo[disfix]; ok {
 			panic(fmt.Sprintf("disfix <%X> already registered for %v", disfix, existing.Type))
 		}
+		if existing, ok := cdc.nameToTypeInfo[info.Name]; ok {
+			panic(fmt.Sprintf("name <%s> already registered for %v", info.Name, existing.Type))
+		}
 		cdc.disfixToTypeInfo[disfix] = info
+		cdc.nameToTypeInfo[info.Name] = info
 		//cdc.prefixToTypeInfos[prefix] =
 		//	append(cdc.prefixToTypeInfos[prefix], info)
 	}
@@ -294,7 +388,7 @@ func (cdc *Codec) getTypeInfoFromPrefix_rlock(iinfo *TypeInfo, pb PrefixBytes) (
 		return
 	}
 	if len(infos) > 1 {
-		err = fmt.Errorf("Conflicting concrete types registered for %X: e.g. %v and %v.", pb, infos[0].Type, infos[1].Type)
+		err = fmt.Errorf("conflicting concrete types registered for %X: e.g. %v and %v", pb, infos[0].Type, infos[1].Type)
 		return
 	}
 	info = infos[0]
@@ -313,6 +407,18 @@ func (cdc *Codec) getTypeInfoFromDisfix_rlock(df DisfixBytes) (info *TypeInfo, e
 	return
 }
 
+func (cdc *Codec) getTypeInfoFromName_rlock(name string) (info *TypeInfo, err error) {
+	cdc.mtx.RLock()
+	defer cdc.mtx.RUnlock()
+
+	info, ok := cdc.nameToTypeInfo[name]
+	if !ok {
+		err = fmt.Errorf("unrecognized concrete type name %s", name)
+		return
+	}
+	return
+}
+
 func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	if rt.Kind() != reflect.Struct {
 		panic("should not happen")
@@ -322,23 +428,40 @@ func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	for i := 0; i < rt.NumField(); i++ {
 		var field = rt.Field(i)
 		var ftype = field.Type
+		var unpackedList = false
 		if !isExported(field) {
 			continue // field is unexported
 		}
-		skip, opts := cdc.parseFieldOptions(field)
+		skip, fopts := cdc.parseFieldOptions(field)
 		if skip {
 			continue // e.g. json:"-"
 		}
+		if ftype.Kind() == reflect.Array || ftype.Kind() == reflect.Slice {
+			if ftype.Elem().Kind() == reflect.Uint8 {
+				// These get handled by our optimized methods,
+				// encodeReflectBinaryByte[Slice/Array].
+				unpackedList = false
+			} else {
+				etype := ftype.Elem()
+				for etype.Kind() == reflect.Ptr {
+					etype = etype.Elem()
+				}
+				typ3 := typeToTyp3(etype, fopts)
+				if typ3 == Typ3_ByteLength {
+					unpackedList = true
+				}
+			}
+		}
 		// NOTE: This is going to change a bit.
 		// NOTE: BinFieldNum starts with 1.
-		opts.BinFieldNum = uint32(len(infos) + 1)
+		fopts.BinFieldNum = uint32(len(infos) + 1)
 		fieldInfo := FieldInfo{
 			Name:         field.Name, // Mostly for debugging.
 			Index:        i,
 			Type:         ftype,
 			ZeroValue:    reflect.Zero(ftype),
-			FieldOptions: opts,
-			BinTyp3:      typeToTyp4(ftype, opts).Typ3(),
+			UnpackedList: unpackedList,
+			FieldOptions: fopts,
 		}
 		checkUnsafe(fieldInfo)
 		infos = append(infos, fieldInfo)
@@ -347,7 +470,7 @@ func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	return
 }
 
-func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, opts FieldOptions) {
+func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, fopts FieldOptions) {
 	binTag := field.Tag.Get("binary")
 	aminoTag := field.Tag.Get("amino")
 	jsonTag := field.Tag.Get("json")
@@ -362,26 +485,28 @@ func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, opts
 	// Get JSON field name.
 	jsonTagParts := strings.Split(jsonTag, ",")
 	if jsonTagParts[0] == "" {
-		opts.JSONName = field.Name
+		fopts.JSONName = field.Name
 	} else {
-		opts.JSONName = jsonTagParts[0]
+		fopts.JSONName = jsonTagParts[0]
 	}
 
 	// Get JSON omitempty.
 	if len(jsonTagParts) > 1 {
 		if jsonTagParts[1] == "omitempty" {
-			opts.JSONOmitEmpty = true
+			fopts.JSONOmitEmpty = true
 		}
 	}
 
 	// Parse binary tags.
-	if binTag == "varint" { // TODO: extend
-		opts.BinVarint = true
+	if binTag == "fixed64" { // TODO: extend
+		fopts.BinFixed64 = true
+	} else if binTag == "fixed32" {
+		fopts.BinFixed32 = true
 	}
 
 	// Parse amino tags.
 	if aminoTag == "unsafe" {
-		opts.Unsafe = true
+		fopts.Unsafe = true
 	}
 
 	return
@@ -415,7 +540,7 @@ func (cdc *Codec) newTypeInfoUnregistered(rt reflect.Type) *TypeInfo {
 	return info
 }
 
-func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceOptions) *TypeInfo {
+func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, iopts *InterfaceOptions) *TypeInfo {
 	if rt.Kind() != reflect.Interface {
 		panic(fmt.Sprintf("expected interface type, got %v", rt))
 	}
@@ -426,11 +551,11 @@ func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceO
 	info.ZeroValue = reflect.Zero(rt)
 	info.ZeroProto = reflect.Zero(rt).Interface()
 	info.InterfaceInfo.Implementers = make(map[PrefixBytes][]*TypeInfo)
-	if opts != nil {
-		info.InterfaceInfo.InterfaceOptions = *opts
-		info.InterfaceInfo.Priority = make([]DisfixBytes, len(opts.Priority))
+	if iopts != nil {
+		info.InterfaceInfo.InterfaceOptions = *iopts
+		info.InterfaceInfo.Priority = make([]DisfixBytes, len(iopts.Priority))
 		// Construct Priority []DisfixBytes
-		for i, name := range opts.Priority {
+		for i, name := range iopts.Priority {
 			disamb, prefix := nameToDisfix(name)
 			disfix := toDisfix(disamb, prefix)
 			info.InterfaceInfo.Priority[i] = disfix
@@ -439,7 +564,7 @@ func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceO
 	return info
 }
 
-func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointerPreferred bool, name string, opts *ConcreteOptions) *TypeInfo {
+func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointerPreferred bool, name string, copts *ConcreteOptions) *TypeInfo {
 	if rt.Kind() == reflect.Interface ||
 		rt.Kind() == reflect.Ptr {
 		panic(fmt.Sprintf("expected non-interface non-pointer concrete type, got %v", rt))
@@ -451,8 +576,8 @@ func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointer
 	info.ConcreteInfo.Name = name
 	info.ConcreteInfo.Disamb = nameToDisamb(name)
 	info.ConcreteInfo.Prefix = nameToPrefix(name)
-	if opts != nil {
-		info.ConcreteOptions = *opts
+	if copts != nil {
+		info.ConcreteOptions = *copts
 	}
 	return info
 }
@@ -603,8 +728,6 @@ func nameToDisfix(name string) (db DisambBytes, pb PrefixBytes) {
 		bz = bz[1:]
 	}
 	copy(pb[:], bz[0:4])
-	// Drop the last 3 bits to make room for the Typ3.
-	pb[3] &= 0xF8
 	return
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/decoder.go b/vendor/github.com/tendermint/go-amino/decoder.go
index 36a640e8251e9df53cbd5da9874640380232f514..9464a0c8174684bc925c74f224f24b2c55fac532 100644
--- a/vendor/github.com/tendermint/go-amino/decoder.go
+++ b/vendor/github.com/tendermint/go-amino/decoder.go
@@ -45,7 +45,7 @@ func DecodeInt32(bz []byte) (i int32, n int, err error) {
 		err = errors.New("EOF decoding int32")
 		return
 	}
-	i = int32(binary.BigEndian.Uint32(bz[:size]))
+	i = int32(binary.LittleEndian.Uint32(bz[:size]))
 	n = size
 	return
 }
@@ -56,15 +56,20 @@ func DecodeInt64(bz []byte) (i int64, n int, err error) {
 		err = errors.New("EOF decoding int64")
 		return
 	}
-	i = int64(binary.BigEndian.Uint64(bz[:size]))
+	i = int64(binary.LittleEndian.Uint64(bz[:size]))
 	n = size
 	return
 }
 
 func DecodeVarint(bz []byte) (i int64, n int, err error) {
 	i, n = binary.Varint(bz)
-	if n < 0 {
-		n = 0
+	if n == 0 {
+		// buf too small
+		err = errors.New("buffer too small")
+	} else if n < 0 {
+		// value larger than 64 bits (overflow)
+		// and -n is the number of bytes read
+		n = -n
 		err = errors.New("EOF decoding varint")
 	}
 	return
@@ -110,7 +115,7 @@ func DecodeUint32(bz []byte) (u uint32, n int, err error) {
 		err = errors.New("EOF decoding uint32")
 		return
 	}
-	u = binary.BigEndian.Uint32(bz[:size])
+	u = binary.LittleEndian.Uint32(bz[:size])
 	n = size
 	return
 }
@@ -121,15 +126,20 @@ func DecodeUint64(bz []byte) (u uint64, n int, err error) {
 		err = errors.New("EOF decoding uint64")
 		return
 	}
-	u = binary.BigEndian.Uint64(bz[:size])
+	u = binary.LittleEndian.Uint64(bz[:size])
 	n = size
 	return
 }
 
 func DecodeUvarint(bz []byte) (u uint64, n int, err error) {
 	u, n = binary.Uvarint(bz)
-	if n <= 0 {
-		n = 0
+	if n == 0 {
+		// buf too small
+		err = errors.New("buffer too small")
+	} else if n < 0 {
+		// value larger than 64 bits (overflow)
+		// and -n is the number of bytes read
+		n = -n
 		err = errors.New("EOF decoding uvarint")
 	}
 	return
@@ -163,7 +173,7 @@ func DecodeFloat32(bz []byte) (f float32, n int, err error) {
 		err = errors.New("EOF decoding float32")
 		return
 	}
-	i := binary.BigEndian.Uint32(bz[:size])
+	i := binary.LittleEndian.Uint32(bz[:size])
 	f = math.Float32frombits(i)
 	n = size
 	return
@@ -176,7 +186,7 @@ func DecodeFloat64(bz []byte) (f float64, n int, err error) {
 		err = errors.New("EOF decoding float64")
 		return
 	}
-	i := binary.BigEndian.Uint64(bz[:size])
+	i := binary.LittleEndian.Uint64(bz[:size])
 	f = math.Float64frombits(i)
 	n = size
 	return
@@ -200,11 +210,11 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 			return
 		}
 		if fieldNum != 1 {
-			err = fmt.Errorf("Expected field number 1, got %v", fieldNum)
+			err = fmt.Errorf("expected field number 1, got %v", fieldNum)
 			return
 		}
 		if typ != Typ3_8Byte {
-			err = fmt.Errorf("Expected Typ3 bytes <8Bytes> for time field #1, got %X", typ)
+			err = fmt.Errorf("expected Typ3 bytes <8Bytes> for time field #1, got %v", typ)
 			return
 		}
 	}
@@ -222,11 +232,11 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 			return
 		}
 		if fieldNum != 2 {
-			err = fmt.Errorf("Expected field number 2, got %v", fieldNum)
+			err = fmt.Errorf("expected field number 2, got %v", fieldNum)
 			return
 		}
 		if typ != Typ3_4Byte {
-			err = fmt.Errorf("Expected Typ3 bytes <4Byte> for time field #2, got %X", typ)
+			err = fmt.Errorf("expected Typ3 bytes <4Byte> for time field #2, got %v", typ)
 			return
 		}
 	}
@@ -238,20 +248,9 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 	}
 	// Validation check.
 	if nsec < 0 || 999999999 < nsec {
-		err = fmt.Errorf("Invalid time, nanoseconds out of bounds %v", nsec)
+		err = fmt.Errorf("invalid time, nanoseconds out of bounds %v", nsec)
 		return
 	}
-	{ // Expect "StructTerm" Typ3 byte.
-		var typ, _n = Typ3(0x00), int(0)
-		typ, _n, err = decodeTyp3(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
-		}
-		if typ != Typ3_StructTerm {
-			err = errors.New(fmt.Sprintf("Expected StructTerm Typ3 byte for time, got %X", typ))
-			return
-		}
-	}
 	// Construct time.
 	t = time.Unix(sec, int64(nsec))
 	// Strip timezone and monotonic for deep equality.
diff --git a/vendor/github.com/tendermint/go-amino/encoder.go b/vendor/github.com/tendermint/go-amino/encoder.go
index 29d6383098f2ff8b49c9c6261f5869a7dfcf9830..05fe73c998c97d5925f7d205e28b6d9ce94d4d92 100644
--- a/vendor/github.com/tendermint/go-amino/encoder.go
+++ b/vendor/github.com/tendermint/go-amino/encoder.go
@@ -20,14 +20,14 @@ func EncodeInt16(w io.Writer, i int16) (err error) {
 
 func EncodeInt32(w io.Writer, i int32) (err error) {
 	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], uint32(i))
+	binary.LittleEndian.PutUint32(buf[:], uint32(i))
 	_, err = w.Write(buf[:])
 	return
 }
 
 func EncodeInt64(w io.Writer, i int64) (err error) {
 	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], uint64(i))
+	binary.LittleEndian.PutUint64(buf[:], uint64(i))
 	_, err = w.Write(buf[:])
 	return err
 }
@@ -62,14 +62,14 @@ func EncodeUint16(w io.Writer, u uint16) (err error) {
 
 func EncodeUint32(w io.Writer, u uint32) (err error) {
 	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], u)
+	binary.LittleEndian.PutUint32(buf[:], u)
 	_, err = w.Write(buf[:])
 	return
 }
 
 func EncodeUint64(w io.Writer, u uint64) (err error) {
 	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], u)
+	binary.LittleEndian.PutUint64(buf[:], u)
 	_, err = w.Write(buf[:])
 	return
 }
@@ -138,7 +138,6 @@ func EncodeTime(w io.Writer, t time.Time) (err error) {
 		return
 	}
 
-	err = EncodeByte(w, byte(0x04)) // StructTerm
 	return
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/json-decode.go b/vendor/github.com/tendermint/go-amino/json-decode.go
index 15b70f80946d74505cae721a4faf2a3ba742a9fd..06174bef15a9733eb75142aa9269766d4cd98d06 100644
--- a/vendor/github.com/tendermint/go-amino/json-decode.go
+++ b/vendor/github.com/tendermint/go-amino/json-decode.go
@@ -2,7 +2,6 @@ package amino
 
 import (
 	"bytes"
-	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -15,7 +14,7 @@ import (
 // cdc.decodeReflectJSON
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -23,55 +22,18 @@ func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value,
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(D) decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(D) decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), fopts: %v)\n",
+			bz, info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(D) -> err: %v\n", err)
 		}()
 	}
 
-	// Read disfix bytes if registered.
-	if info.Registered {
-		// Strip the disfix bytes after checking it.
-		var disfix DisfixBytes
-		disfix, bz, err = decodeDisfixJSON(bz)
-		if err != nil {
-			return
-		}
-		if !info.GetDisfix().EqualBytes(disfix[:]) {
-			err = fmt.Errorf("Expected disfix bytes %X but got %X", info.GetDisfix(), disfix)
-			return
-		}
-	}
-
-	err = cdc._decodeReflectJSON(bz, info, rv, opts)
-	return
-}
-
-// CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if !rv.CanAddr() {
-		panic("rv not addressable")
-	}
-	if info.Type.Kind() == reflect.Interface && rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Special case for null for either interface, pointer, slice
 	// NOTE: This doesn't match the binary implementation completely.
 	if nullBytes(bz) {
-		switch rv.Kind() {
-		case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Array:
-			rv.Set(reflect.Zero(rv.Type()))
-			return
-		}
+		rv.Set(reflect.Zero(rv.Type()))
+		return
 	}
 
 	// Dereference-and-construct pointers all the way.
@@ -84,6 +46,20 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 		rv = rv.Elem()
 	}
 
+	// Special case:
+	if rv.Type() == timeType {
+		// Amino time strips the timezone, so must end with Z.
+		if len(bz) >= 2 && bz[0] == '"' && bz[len(bz)-1] == '"' {
+			if bz[len(bz)-2] != 'Z' {
+				err = fmt.Errorf("Amino:JSON time must be UTC and end with 'Z' but got %s.", bz)
+				return
+			}
+		} else {
+			err = fmt.Errorf("Amino:JSON time must be an RFC3339Nano string, but got %s.", bz)
+			return
+		}
+	}
+
 	// Handle override if a pointer to rv implements json.Unmarshaler.
 	if rv.Addr().Type().Implements(jsonUnmarshalerType) {
 		err = rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz)
@@ -98,7 +74,7 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 		if err != nil {
 			return
 		}
-		err = cdc._decodeReflectJSON(bz, rinfo, rrv, opts)
+		err = cdc.decodeReflectJSON(bz, rinfo, rrv, fopts)
 		if err != nil {
 			return
 		}
@@ -118,37 +94,48 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 	// Complex
 
 	case reflect.Interface:
-		err = cdc.decodeReflectJSONInterface(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONInterface(bz, info, rv, fopts)
 
 	case reflect.Array:
-		err = cdc.decodeReflectJSONArray(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONArray(bz, info, rv, fopts)
 
 	case reflect.Slice:
-		err = cdc.decodeReflectJSONSlice(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONSlice(bz, info, rv, fopts)
 
 	case reflect.Struct:
-		err = cdc.decodeReflectJSONStruct(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONStruct(bz, info, rv, fopts)
 
 	case reflect.Map:
-		err = cdc.decodeReflectJSONMap(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONMap(bz, info, rv, fopts)
 
 	//----------------------------------------
 	// Signed, Unsigned
 
-	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int,
-		reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
-		err = invokeStdlibJSONUnmarshal(bz, rv, opts)
+	case reflect.Int64, reflect.Int:
+		fallthrough
+	case reflect.Uint64, reflect.Uint:
+		if bz[0] != '"' || bz[len(bz)-1] != '"' {
+			err = fmt.Errorf("invalid character -- Amino:JSON int/int64/uint/uint64 expects quoted values for javascript numeric support, got: %v.", string(bz))
+			if err != nil {
+				return
+			}
+		}
+		bz = bz[1 : len(bz)-1]
+		fallthrough
+	case reflect.Int32, reflect.Int16, reflect.Int8,
+		reflect.Uint32, reflect.Uint16, reflect.Uint8:
+		err = invokeStdlibJSONUnmarshal(bz, rv, fopts)
 
 	//----------------------------------------
 	// Misc
 
 	case reflect.Float32, reflect.Float64:
-		if !opts.Unsafe {
-			return errors.New("Amino.JSON float* support requires `amino:\"unsafe\"`.")
+		if !fopts.Unsafe {
+			return errors.New("Amino:JSON float* support requires `amino:\"unsafe\"`.")
 		}
 		fallthrough
 	case reflect.Bool, reflect.String:
-		err = invokeStdlibJSONUnmarshal(bz, rv, opts)
+		err = invokeStdlibJSONUnmarshal(bz, rv, fopts)
 
 	//----------------------------------------
 	// Default
@@ -160,7 +147,7 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 	return
 }
 
-func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, opts FieldOptions) error {
+func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, fopts FieldOptions) error {
 	if !rv.CanAddr() && rv.Kind() != reflect.Ptr {
 		panic("rv not addressable nor pointer")
 	}
@@ -178,7 +165,7 @@ func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, opts FieldOptions) e
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -203,22 +190,21 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 		rv.Set(iinfo.ZeroValue)
 	}
 
-	// Consume disambiguation / prefix info.
-	disfix, bz, err := decodeDisfixJSON(bz)
+	// Consume type wrapper info.
+	name, bz, err := decodeInterfaceJSON(bz)
 	if err != nil {
 		return
 	}
-
-	// XXX: Check disfix against interface to make sure that it actually
+	// XXX: Check name against interface to make sure that it actually
 	// matches, and return an error if it doesn't.
 
-	// NOTE: Unlike decodeReflectBinaryInterface, we already dealt with nil in _decodeReflectJSON.
-	// NOTE: We also "consumed" the disfix wrapper by replacing `bz` above.
+	// NOTE: Unlike decodeReflectBinaryInterface, we already dealt with nil in decodeReflectJSON.
+	// NOTE: We also "consumed" the interface wrapper by replacing `bz` above.
 
 	// Get concrete type info.
-	// NOTE: Unlike decodeReflectBinaryInterface, always disfix.
+	// NOTE: Unlike decodeReflectBinaryInterface, uses the full name string.
 	var cinfo *TypeInfo
-	cinfo, err = cdc.getTypeInfoFromDisfix_rlock(disfix)
+	cinfo, err = cdc.getTypeInfoFromName_rlock(name)
 	if err != nil {
 		return
 	}
@@ -227,7 +213,7 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 	var crv, irvSet = constructConcreteType(cinfo)
 
 	// Decode into the concrete type.
-	err = cdc._decodeReflectJSON(bz, cinfo, crv, opts)
+	err = cdc.decodeReflectJSON(bz, cinfo, crv, fopts)
 	if err != nil {
 		rv.Set(irvSet) // Helps with debugging
 		return
@@ -241,7 +227,7 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -290,7 +276,7 @@ func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.V
 		for i := 0; i < length; i++ {
 			erv := rv.Index(i)
 			ebz := rawSlice[i]
-			err = cdc.decodeReflectJSON(ebz, einfo, erv, opts)
+			err = cdc.decodeReflectJSON(ebz, einfo, erv, fopts)
 			if err != nil {
 				return
 			}
@@ -300,7 +286,7 @@ func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.V
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -356,7 +342,7 @@ func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.V
 		for i := 0; i < length; i++ {
 			erv := srv.Index(i)
 			ebz := rawSlice[i]
-			err = cdc.decodeReflectJSON(ebz, einfo, erv, opts)
+			err = cdc.decodeReflectJSON(ebz, einfo, erv, fopts)
 			if err != nil {
 				return
 			}
@@ -369,7 +355,7 @@ func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.V
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -416,7 +402,7 @@ func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.
 		}
 
 		// Decode into field rv.
-		err = cdc.decodeReflectJSON(valueBytes, finfo, frv, opts)
+		err = cdc.decodeReflectJSON(valueBytes, finfo, frv, fopts)
 		if err != nil {
 			return
 		}
@@ -426,7 +412,7 @@ func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -464,7 +450,7 @@ func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Val
 		vrv := reflect.New(mrv.Type().Elem()).Elem()
 
 		// Decode valueBytes into vrv.
-		err = cdc.decodeReflectJSON(valueBytes, vinfo, vrv, opts)
+		err = cdc.decodeReflectJSON(valueBytes, vinfo, vrv, fopts)
 		if err != nil {
 			return
 		}
@@ -483,45 +469,34 @@ func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Val
 // Misc.
 
 type disfixWrapper struct {
-	Disfix string          `json:"type"`
-	Data   json.RawMessage `json:"value"`
+	Name string          `json:"type"`
+	Data json.RawMessage `json:"value"`
 }
 
-// decodeDisfixJSON helps unravel the disfix and
+// decodeInterfaceJSON helps unravel the type name and
 // the stored data, which are expected in the form:
 // {
-//    "type": "XXXXXXXXXXXXXXXXX",
+//    "type": "<canonical concrete type name>",
 //    "value":  {}
 // }
-func decodeDisfixJSON(bz []byte) (df DisfixBytes, data []byte, err error) {
-	if string(bz) == "null" {
-		panic("yay")
-	}
+func decodeInterfaceJSON(bz []byte) (name string, data []byte, err error) {
 	dfw := new(disfixWrapper)
 	err = json.Unmarshal(bz, dfw)
 	if err != nil {
-		err = fmt.Errorf("Cannot parse disfix JSON wrapper: %v", err)
-		return
-	}
-	dfBytes, err := hex.DecodeString(dfw.Disfix)
-	if err != nil {
+		err = fmt.Errorf("cannot parse disfix JSON wrapper: %v", err)
 		return
 	}
 
-	// Get disfix.
-	if g, w := len(dfBytes), DisfixBytesLen; g != w {
-		err = fmt.Errorf("Disfix length got=%d want=%d data=%s", g, w, bz)
-		return
-	}
-	copy(df[:], dfBytes)
-	if (DisfixBytes{}).EqualBytes(df[:]) {
-		err = errors.New("Unexpected zero disfix in JSON")
+	// Get name.
+	if dfw.Name == "" {
+		err = errors.New("JSON encoding of interfaces require non-empty type field.")
 		return
 	}
+	name = dfw.Name
 
 	// Get data.
 	if len(dfw.Data) == 0 {
-		err = errors.New("Disfix JSON wrapper should have non-empty value field")
+		err = errors.New("interface JSON wrapper should have non-empty value field")
 		return
 	}
 	data = dfw.Data
diff --git a/vendor/github.com/tendermint/go-amino/json-encode.go b/vendor/github.com/tendermint/go-amino/json-encode.go
index 4ae11f1c207c7b9ec1e0e5931f5c9368c6af35e5..15b10f21ddcee5af80796ddc6b38de51cc53de9e 100644
--- a/vendor/github.com/tendermint/go-amino/json-encode.go
+++ b/vendor/github.com/tendermint/go-amino/json-encode.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"reflect"
+	"time"
 
 	"github.com/davecgh/go-spew/spew"
 )
@@ -18,54 +19,18 @@ import (
 // only call this one, for the disfix wrapper is only written here.
 // NOTE: Unlike encodeReflectBinary, rv may be a pointer.
 // CONTRACT: rv is valid.
-func (cdc *Codec) encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.IsValid() {
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(E) encodeReflectJSON(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(E) encodeReflectJSON(info: %v, rv: %#v (%v), fopts: %v)\n",
+			info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(E) -> err: %v\n", err)
 		}()
 	}
 
-	// Write the disfix wrapper if it is a registered concrete type.
-	if info.Registered {
-		// Part 1:
-		disfix := toDisfix(info.Disamb, info.Prefix)
-		err = writeStr(w, _fmt(`{"type":"%X","value":`, disfix))
-		if err != nil {
-			return
-		}
-		// Part 2:
-		defer func() {
-			if err != nil {
-				return
-			}
-			err = writeStr(w, `}`)
-		}()
-	}
-
-	err = cdc._encodeReflectJSON(w, info, rv, opts)
-	return
-}
-
-// NOTE: Unlike _encodeReflectBinary, rv may be a pointer.
-// CONTRACT: rv is valid.
-// CONTRACT: any disfix wrapper has already been written.
-func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if !rv.IsValid() {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _encodeReflectJSON(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Dereference value if pointer.
 	var isNilPtr bool
 	rv, _, isNilPtr = derefPointers(rv)
@@ -76,6 +41,13 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 		return
 	}
 
+	// Special case:
+	if rv.Type() == timeType {
+		// Amino time strips the timezone.
+		// NOTE: This must be done before json.Marshaler override below.
+		ct := rv.Interface().(time.Time).Round(0).UTC()
+		rv = reflect.ValueOf(ct)
+	}
 	// Handle override if rv implements json.Marshaler.
 	if rv.CanAddr() { // Try pointer first.
 		if rv.Addr().Type().Implements(jsonMarshalerType) {
@@ -100,7 +72,7 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 			return
 		}
 		// Then, encode the repr instance.
-		err = cdc._encodeReflectJSON(w, rinfo, rrv, opts)
+		err = cdc.encodeReflectJSON(w, rinfo, rrv, fopts)
 		return
 	}
 
@@ -110,29 +82,37 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 	// Complex
 
 	case reflect.Interface:
-		return cdc.encodeReflectJSONInterface(w, info, rv, opts)
+		return cdc.encodeReflectJSONInterface(w, info, rv, fopts)
 
 	case reflect.Array, reflect.Slice:
-		return cdc.encodeReflectJSONList(w, info, rv, opts)
+		return cdc.encodeReflectJSONList(w, info, rv, fopts)
 
 	case reflect.Struct:
-		return cdc.encodeReflectJSONStruct(w, info, rv, opts)
+		return cdc.encodeReflectJSONStruct(w, info, rv, fopts)
 
 	case reflect.Map:
-		return cdc.encodeReflectJSONMap(w, info, rv, opts)
+		return cdc.encodeReflectJSONMap(w, info, rv, fopts)
 
 	//----------------------------------------
 	// Signed, Unsigned
 
-	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int,
-		reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
+	case reflect.Int64, reflect.Int:
+		_, err = fmt.Fprintf(w, `"%d"`, rv.Int()) // JS can't handle int64
+		return
+
+	case reflect.Uint64, reflect.Uint:
+		_, err = fmt.Fprintf(w, `"%d"`, rv.Uint()) // JS can't handle uint64
+		return
+
+	case reflect.Int32, reflect.Int16, reflect.Int8,
+		reflect.Uint32, reflect.Uint16, reflect.Uint8:
 		return invokeStdlibJSONMarshal(w, rv.Interface())
 
 	//----------------------------------------
 	// Misc
 
 	case reflect.Float64, reflect.Float32:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			return errors.New("Amino.JSON float* support requires `amino:\"unsafe\"`.")
 		}
 		fallthrough
@@ -147,7 +127,7 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 	}
 }
 
-func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONInterface")
 		defer func() {
@@ -184,10 +164,9 @@ func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Write disfix wrapper.
+	// Write interface wrapper.
 	// Part 1:
-	disfix := toDisfix(cinfo.Disamb, cinfo.Prefix)
-	err = writeStr(w, _fmt(`{"type":"%X","value":`, disfix))
+	err = writeStr(w, _fmt(`{"type":"%s","value":`, cinfo.Name))
 	if err != nil {
 		return
 	}
@@ -204,11 +183,11 @@ func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv re
 	// Currently, go-amino JSON *always* writes disfix bytes for
 	// all registered concrete types.
 
-	err = cdc._encodeReflectJSON(w, cinfo, crv, opts)
+	err = cdc.encodeReflectJSON(w, cinfo, crv, fopts)
 	return
 }
 
-func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONList")
 		defer func() {
@@ -266,7 +245,7 @@ func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.
 			if isNil {
 				err = writeStr(w, `null`)
 			} else {
-				err = cdc.encodeReflectJSON(w, einfo, erv, opts)
+				err = cdc.encodeReflectJSON(w, einfo, erv, fopts)
 			}
 			if err != nil {
 				return
@@ -356,7 +335,7 @@ func (cdc *Codec) encodeReflectJSONStruct(w io.Writer, info *TypeInfo, rv reflec
 }
 
 // TODO: TEST
-func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONMap")
 		defer func() {
@@ -414,7 +393,7 @@ func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.V
 			if err != nil {
 				return
 			}
-			err = cdc.encodeReflectJSON(w, vinfo, vrv, opts) // pass through opts
+			err = cdc.encodeReflectJSON(w, vinfo, vrv, fopts) // pass through fopts
 		}
 		if err != nil {
 			return
diff --git a/vendor/github.com/tendermint/go-amino/reflect.go b/vendor/github.com/tendermint/go-amino/reflect.go
index 981575239847d5eb44ad51a198a7ab6250226938..10a2cea95beb2e2b0f955eac74def7b7c0ea0a9a 100644
--- a/vendor/github.com/tendermint/go-amino/reflect.go
+++ b/vendor/github.com/tendermint/go-amino/reflect.go
@@ -11,7 +11,6 @@ import (
 // Constants
 
 const printLog = false
-const RFC3339Millis = "2006-01-02T15:04:05.000Z" // forced microseconds
 
 var (
 	timeType            = reflect.TypeOf(time.Time{})
@@ -52,7 +51,9 @@ func slide(bz *[]byte, n *int, _n int) bool {
 		panic(fmt.Sprintf("impossible slide: len:%v _n:%v", len(*bz), _n))
 	}
 	*bz = (*bz)[_n:]
-	*n += _n
+	if n != nil {
+		*n += _n
+	}
 	return true
 }
 
@@ -73,14 +74,44 @@ func derefPointers(rv reflect.Value) (drv reflect.Value, isPtr bool, isNilPtr bo
 	return
 }
 
-// Returns isVoid=true iff is ultimately nil or empty after (recursive) dereferencing.
-// If isVoid=false, erv is set to the non-nil non-empty valid dereferenced value.
-func isVoid(rv reflect.Value) (erv reflect.Value, isVoid bool) {
+// Dereference pointer recursively or return zero value.
+// drv: the final non-pointer value (which is never invalid).
+// isPtr: whether rv.Kind() == reflect.Ptr.
+// isNilPtr: whether a nil pointer at any level.
+func derefPointersZero(rv reflect.Value) (drv reflect.Value, isPtr bool, isNilPtr bool) {
+	for rv.Kind() == reflect.Ptr {
+		isPtr = true
+		if rv.IsNil() {
+			isNilPtr = true
+			rt := rv.Type().Elem()
+			for rt.Kind() == reflect.Ptr {
+				rt = rt.Elem()
+			}
+			drv = reflect.New(rt).Elem()
+			return
+		}
+		rv = rv.Elem()
+	}
+	drv = rv
+	return
+}
+
+// Returns isDefaultValue=true iff is ultimately nil or empty after (recursive)
+// dereferencing. If isDefaultValue=false, erv is set to the non-nil non-empty
+// non-default dereferenced value.
+// A zero/empty struct is not considered default.
+func isDefaultValue(rv reflect.Value) (erv reflect.Value, isDefaultValue bool) {
 	rv, _, isNilPtr := derefPointers(rv)
 	if isNilPtr {
 		return rv, true
 	} else {
 		switch rv.Kind() {
+		case reflect.Bool:
+			return rv, rv.Bool() == false
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return rv, rv.Int() == 0
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return rv, rv.Uint() == 0
 		case reflect.String:
 			return rv, rv.Len() == 0
 		case reflect.Chan, reflect.Map, reflect.Slice:
@@ -118,55 +149,36 @@ func constructConcreteType(cinfo *TypeInfo) (crv, irvSet reflect.Value) {
 	return
 }
 
-// Like typeToTyp4 but include a pointer bit.
-func typeToTyp4(rt reflect.Type, opts FieldOptions) (typ Typ4) {
-
-	// Dereference pointer type.
-	var pointer = false
-	for rt.Kind() == reflect.Ptr {
-		pointer = true
-		rt = rt.Elem()
-	}
-
-	// Call actual logic.
-	typ = Typ4(typeToTyp3(rt, opts))
-
-	// Set pointer bit to 1 if pointer.
-	if pointer {
-		typ |= Typ4_Pointer
-	}
-	return
-}
-
 // CONTRACT: rt.Kind() != reflect.Ptr
 func typeToTyp3(rt reflect.Type, opts FieldOptions) Typ3 {
 	switch rt.Kind() {
 	case reflect.Interface:
-		return Typ3_Interface
+		return Typ3_ByteLength
 	case reflect.Array, reflect.Slice:
-		ert := rt.Elem()
-		switch ert.Kind() {
-		case reflect.Uint8:
-			return Typ3_ByteLength
-		default:
-			return Typ3_List
-		}
+		return Typ3_ByteLength
 	case reflect.String:
 		return Typ3_ByteLength
 	case reflect.Struct, reflect.Map:
-		return Typ3_Struct
+		return Typ3_ByteLength
 	case reflect.Int64, reflect.Uint64:
-		if opts.BinVarint {
+		if opts.BinFixed64 {
+			return Typ3_8Byte
+		} else {
+			return Typ3_Varint
+		}
+	case reflect.Int32, reflect.Uint32:
+		if opts.BinFixed32 {
+			return Typ3_4Byte
+		} else {
 			return Typ3_Varint
 		}
-		return Typ3_8Byte
-	case reflect.Float64:
-		return Typ3_8Byte
-	case reflect.Int32, reflect.Uint32, reflect.Float32:
-		return Typ3_4Byte
 	case reflect.Int16, reflect.Int8, reflect.Int,
 		reflect.Uint16, reflect.Uint8, reflect.Uint, reflect.Bool:
 		return Typ3_Varint
+	case reflect.Float64:
+		return Typ3_8Byte
+	case reflect.Float32:
+		return Typ3_4Byte
 	default:
 		panic(fmt.Sprintf("unsupported field type %v", rt))
 	}
diff --git a/vendor/github.com/tendermint/go-amino/version.go b/vendor/github.com/tendermint/go-amino/version.go
index 7f84d3a4a87bea146463abfc74b7f95bd5908460..ed348aaf42f88c0dd79af44a3013f4bc8d19221c 100644
--- a/vendor/github.com/tendermint/go-amino/version.go
+++ b/vendor/github.com/tendermint/go-amino/version.go
@@ -1,4 +1,4 @@
 package amino
 
 // Version
-const Version = "0.9.9"
\ No newline at end of file
+const Version = "0.10.1"
diff --git a/vendor/github.com/tendermint/go-crypto/doc.go b/vendor/github.com/tendermint/go-crypto/doc.go
deleted file mode 100644
index c6701bc58fe386859c9c08f9e05a429b5ea8d1be..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/go-crypto/doc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-go-crypto is a customized/convenience cryptography package
-for supporting Tendermint.
-
-It wraps select functionality of equivalent functions in the
-Go standard library, for easy usage with our libraries.
-
-Keys:
-
-All key generation functions return an instance of the PrivKey interface
-which implements methods
-
-    AssertIsPrivKeyInner()
-    Bytes() []byte
-    Sign(msg []byte) Signature
-    PubKey() PubKey
-    Equals(PrivKey) bool
-    Wrap() PrivKey
-
-From the above method we can:
-a) Retrieve the public key if needed
-
-    pubKey := key.PubKey()
-
-For example:
-    privKey, err := crypto.GenPrivKeyEd25519()
-    if err != nil {
-	...
-    }
-    pubKey := privKey.PubKey()
-    ...
-    // And then you can use the private and public key
-    doSomething(privKey, pubKey)
-
-
-We also provide hashing wrappers around algorithms:
-
-Sha256
-    sum := crypto.Sha256([]byte("This is Tendermint"))
-    fmt.Printf("%x\n", sum)
-
-Ripemd160
-    sum := crypto.Ripemd160([]byte("This is consensus"))
-    fmt.Printf("%x\n", sum)
-*/
-package crypto
-
-// TODO: Add more docs in here
diff --git a/vendor/github.com/tendermint/go-crypto/version.go b/vendor/github.com/tendermint/go-crypto/version.go
deleted file mode 100644
index aac87c4f347a08d6481c600d3d1e82e87e1c06dc..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/go-crypto/version.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package crypto
-
-const Version = "0.6.2"
diff --git a/vendor/github.com/tendermint/iavl/amino.go b/vendor/github.com/tendermint/iavl/amino.go
deleted file mode 100644
index 0e80ee6fb1c374b05318ae27622d489a3bf202ba..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/amino.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package iavl
-
-import "github.com/tendermint/go-amino"
-
-var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/iavl/chunk.go b/vendor/github.com/tendermint/iavl/chunk.go
deleted file mode 100644
index b1cbdd2264acf583466f189a6cd00566b77480ac..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/chunk.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package iavl
-
-import (
-	"sort"
-
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// Chunk is a list of ordered nodes.
-// It can be sorted, merged, exported from a tree and
-// used to generate a new tree.
-type Chunk []OrderedNodeData
-
-// OrderedNodeData is the data to recreate a leaf node,
-// along with a SortOrder to define a BFS insertion order.
-type OrderedNodeData struct {
-	SortOrder uint64
-	NodeData
-}
-
-// NewOrderedNode creates the data from a leaf node.
-func NewOrderedNode(leaf *Node, prefix uint64) OrderedNodeData {
-	return OrderedNodeData{
-		SortOrder: prefix,
-		NodeData: NodeData{
-			Key:   leaf.key,
-			Value: leaf.value,
-		},
-	}
-}
-
-// getChunkHashes returns all the "checksum" hashes for
-// the chunks that will be sent.
-func getChunkHashes(tree *Tree, depth uint) ([][]byte, [][]byte, uint, error) {
-	maxDepth := uint(tree.root.height / 2)
-	if depth > maxDepth {
-		return nil, nil, 0, errors.New("depth exceeds maximum allowed")
-	}
-
-	nodes := getNodes(tree, depth)
-	hashes := make([][]byte, len(nodes))
-	keys := make([][]byte, len(nodes))
-	for i, n := range nodes {
-		hashes[i] = n.hash
-		keys[i] = n.key
-	}
-	return hashes, keys, depth, nil
-}
-
-// GetChunkHashesWithProofs takes a tree and returns the list of chunks with
-// proofs that can be used to synchronize a tree across the network.
-func GetChunkHashesWithProofs(tree *Tree) ([][]byte, []*InnerKeyProof, uint) {
-	hashes, keys, depth, err := getChunkHashes(tree, uint(tree.root.height/2))
-	if err != nil {
-		cmn.PanicSanity(cmn.Fmt("GetChunkHashes: %s", err))
-	}
-	proofs := make([]*InnerKeyProof, len(keys))
-
-	for i, k := range keys {
-		proof, err := tree.getInnerWithProof(k)
-		if err != nil {
-			cmn.PanicSanity(cmn.Fmt("Error getting inner key proof: %s", err))
-		}
-		proofs[i] = proof
-	}
-	return hashes, proofs, depth
-}
-
-// getNodes returns an array of nodes at the given depth.
-func getNodes(tree *Tree, depth uint) []*Node {
-	nodes := make([]*Node, 0, 1<<depth)
-	tree.root.traverseDepth(tree, depth, func(node *Node) {
-		nodes = append(nodes, node)
-	})
-	return nodes
-}
-
-// call cb for every node exactly depth levels below it
-// depth first search to return in tree ordering.
-func (node *Node) traverseDepth(t *Tree, depth uint, cb func(*Node)) {
-	// base case
-	if depth == 0 {
-		cb(node)
-		return
-	}
-	if node.isLeaf() {
-		return
-	}
-
-	// otherwise, descend one more level
-	node.getLeftNode(t).traverseDepth(t, depth-1, cb)
-	node.getRightNode(t).traverseDepth(t, depth-1, cb)
-}
-
-// position to key can calculate the appropriate sort order
-// for the count-th node at a given depth, assuming a full
-// tree above this height.
-func positionToKey(depth, count uint) (key uint64) {
-	for d := depth; d > 0; d-- {
-		// lowest digit of count * 2^(d-1)
-		key += uint64((count & 1) << (d - 1))
-		count = count >> 1
-	}
-	return
-}
-
-// GetChunk finds the count-th subtree at depth and
-// generates a Chunk for that data.
-func GetChunk(tree *Tree, depth, count uint) Chunk {
-	node := getNodes(tree, depth)[count]
-	prefix := positionToKey(depth, count)
-	return getChunk(tree, node, prefix, depth)
-}
-
-// getChunk takes a node and serializes all nodes below it
-//
-// As it is part of a larger tree, prefix defines the path
-// up to this point, and depth the current depth
-// (which defines where we add to the prefix)
-//
-// TODO: make this more efficient, *Chunk as arg???
-func getChunk(t *Tree, node *Node, prefix uint64, depth uint) Chunk {
-	if node.isLeaf() {
-		return Chunk{NewOrderedNode(node, prefix)}
-	}
-	res := make(Chunk, 0, node.size)
-	if node.leftNode != nil {
-		left := getChunk(t, node.getLeftNode(t), prefix, depth+1)
-		res = append(res, left...)
-	}
-	if node.rightNode != nil {
-		offset := prefix + 1<<depth
-		right := getChunk(t, node.getRightNode(t), offset, depth+1)
-		res = append(res, right...)
-	}
-	return res
-}
-
-// Sort does an inline quicksort.
-func (c Chunk) Sort() {
-	sort.Slice(c, func(i, j int) bool {
-		return c[i].SortOrder < c[j].SortOrder
-	})
-}
-
-// MergeChunks does a merge sort of the two Chunks,
-// assuming they were already in sorted order.
-func MergeChunks(left, right Chunk) Chunk {
-	size, i, j := len(left)+len(right), 0, 0
-	slice := make([]OrderedNodeData, size)
-
-	for k := 0; k < size; k++ {
-		if i > len(left)-1 && j <= len(right)-1 {
-			slice[k] = right[j]
-			j++
-		} else if j > len(right)-1 && i <= len(left)-1 {
-			slice[k] = left[i]
-			i++
-		} else if left[i].SortOrder < right[j].SortOrder {
-			slice[k] = left[i]
-			i++
-		} else {
-			slice[k] = right[j]
-			j++
-		}
-	}
-	return Chunk(slice)
-}
-
-// CalculateRoot creates a temporary in-memory
-// iavl tree to calculate the root hash of inserting
-// all the nodes.
-func (c Chunk) CalculateRoot() []byte {
-	test := NewTree(nil, 2*len(c))
-	c.PopulateTree(test)
-	return test.Hash()
-}
-
-// PopulateTree adds all the chunks in order to the given tree.
-func (c Chunk) PopulateTree(empty *Tree) {
-	for _, data := range c {
-		empty.Set(data.Key, data.Value)
-	}
-}
diff --git a/vendor/github.com/tendermint/iavl/doc.go b/vendor/github.com/tendermint/iavl/doc.go
index d6c38c6c068e1fb2153a8b5694e89585fb5688b7..7e4891bcb2a40aa1195e35957899fe913759c494 100644
--- a/vendor/github.com/tendermint/iavl/doc.go
+++ b/vendor/github.com/tendermint/iavl/doc.go
@@ -1,7 +1,11 @@
+// Package iavl implements a versioned, snapshottable (immutable) AVL+ tree
+// for persisting key-value pairs.
+//
+//
 // Basic usage of VersionedTree.
 //
 //  import "github.com/tendermint/iavl"
-//  import "github.com/tendermint/tmlibs/db"
+//  import "github.com/tendermint/tendermint/libs/db"
 //  ...
 //
 //  tree := iavl.NewVersionedTree(db.NewMemDB(), 128)
@@ -23,12 +27,12 @@
 // Proof of existence:
 //
 //  root := tree.Hash()
-//  val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", KeyProof, nil
+//  val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", RangeProof, nil
 //  proof.Verify([]byte("bob"), val, root) // nil
 //
 // Proof of absence:
 //
-//  _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, KeyProof, nil
+//  _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, RangeProof, nil
 //  proof.Verify([]byte("tom"), nil, root) // nil
 //
 // Now we delete an old version:
diff --git a/vendor/github.com/tendermint/iavl/node.go b/vendor/github.com/tendermint/iavl/node.go
index 9955833640e3d7df8b166ce30da78cc6ecfd6713..307412c338b733e4e80e07fba320ec3b9b86d556 100644
--- a/vendor/github.com/tendermint/iavl/node.go
+++ b/vendor/github.com/tendermint/iavl/node.go
@@ -9,7 +9,8 @@ import (
 	"io"
 
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/iavl/sha256truncated"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Node represents a node in a Tree.
@@ -40,56 +41,60 @@ func NewNode(key []byte, value []byte, version int64) *Node {
 
 // MakeNode constructs an *Node from an encoded byte slice.
 //
-// The new node doesn't have its hash saved or set.  The caller must set it
+// The new node doesn't have its hash saved or set. The caller must set it
 // afterwards.
-func MakeNode(buf []byte) (node *Node, err error) {
-	node = &Node{}
+func MakeNode(buf []byte) (*Node, cmn.Error) {
 
-	// Keeps track of bytes read.
-	n := 0
-
-	// Read node header.
-	node.height, n, err = amino.DecodeInt8(buf)
-	if err != nil {
-		return nil, err
+	// Read node header (height, size, version, key).
+	height, n, cause := amino.DecodeInt8(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.height")
 	}
 	buf = buf[n:]
 
-	node.size, n, err = amino.DecodeInt64(buf)
-	if err != nil {
-		return nil, err
+	size, n, cause := amino.DecodeVarint(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.size")
 	}
 	buf = buf[n:]
 
-	node.version, n, err = amino.DecodeInt64(buf)
-	if err != nil {
-		return nil, err
+	ver, n, cause := amino.DecodeVarint(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.version")
 	}
 	buf = buf[n:]
 
-	node.key, n, err = amino.DecodeByteSlice(buf)
-	if err != nil {
-		return nil, err
+	key, n, cause := amino.DecodeByteSlice(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.key")
 	}
 	buf = buf[n:]
 
+	node := &Node{
+		height:  height,
+		size:    size,
+		version: ver,
+		key:     key,
+	}
+
 	// Read node body.
 
 	if node.isLeaf() {
-		node.value, _, err = amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		val, _, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "decoding node.value")
 		}
+		node.value = val
 	} else { // Read children.
-		leftHash, n, err := amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		leftHash, n, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "deocding node.leftHash")
 		}
 		buf = buf[n:]
 
-		rightHash, _, err := amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		rightHash, _, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "decoding node.rightHash")
 		}
 		node.leftHash = leftHash
 		node.rightHash = rightHash
@@ -99,11 +104,16 @@ func MakeNode(buf []byte) (node *Node, err error) {
 
 // String returns a string representation of the node.
 func (node *Node) String() string {
-	if len(node.hash) == 0 {
-		return "<no hash>"
-	} else {
-		return fmt.Sprintf("%x", node.hash)
-	}
+	hashstr := "<no hash>"
+	if len(node.hash) > 0 {
+		hashstr = fmt.Sprintf("%X", node.hash)
+	}
+	return fmt.Sprintf("Node{%s:%s@%d %X;%X}#%s",
+		cmn.ColoredBytes(node.key, cmn.Green, cmn.Blue),
+		cmn.ColoredBytes(node.value, cmn.Cyan, cmn.Blue),
+		node.version,
+		node.leftHash, node.rightHash,
+		hashstr)
 }
 
 // clone creates a shallow copy of a node with its hash set to nil.
@@ -139,9 +149,8 @@ func (node *Node) has(t *Tree, key []byte) (has bool) {
 	}
 	if bytes.Compare(key, node.key) < 0 {
 		return node.getLeftNode(t).has(t, key)
-	} else {
-		return node.getRightNode(t).has(t, key)
 	}
+	return node.getRightNode(t).has(t, key)
 }
 
 // Get a key under the node.
@@ -159,31 +168,28 @@ func (node *Node) get(t *Tree, key []byte) (index int64, value []byte) {
 
 	if bytes.Compare(key, node.key) < 0 {
 		return node.getLeftNode(t).get(t, key)
-	} else {
-		rightNode := node.getRightNode(t)
-		index, value = rightNode.get(t, key)
-		index += node.size - rightNode.size
-		return index, value
 	}
+	rightNode := node.getRightNode(t)
+	index, value = rightNode.get(t, key)
+	index += node.size - rightNode.size
+	return index, value
 }
 
 func (node *Node) getByIndex(t *Tree, index int64) (key []byte, value []byte) {
 	if node.isLeaf() {
 		if index == 0 {
 			return node.key, node.value
-		} else {
-			return nil, nil
-		}
-	} else {
-		// TODO: could improve this by storing the
-		// sizes as well as left/right hash.
-		leftNode := node.getLeftNode(t)
-		if index < leftNode.size {
-			return leftNode.getByIndex(t, index)
-		} else {
-			return node.getRightNode(t).getByIndex(t, index-leftNode.size)
 		}
+		return nil, nil
+	}
+	// TODO: could improve this by storing the
+	// sizes as well as left/right hash.
+	leftNode := node.getLeftNode(t)
+
+	if index < leftNode.size {
+		return leftNode.getByIndex(t, index)
 	}
+	return node.getRightNode(t).getByIndex(t, index-leftNode.size)
 }
 
 // Computes the hash of the node without computing its descendants. Must be
@@ -193,7 +199,7 @@ func (node *Node) _hash() []byte {
 		return node.hash
 	}
 
-	h := sha256truncated.New()
+	h := tmhash.New()
 	buf := new(bytes.Buffer)
 	if err := node.writeHashBytes(buf); err != nil {
 		panic(err)
@@ -211,7 +217,7 @@ func (node *Node) hashWithCount() ([]byte, int64) {
 		return node.hash, 0
 	}
 
-	h := sha256truncated.New()
+	h := tmhash.New()
 	buf := new(bytes.Buffer)
 	hashCount, err := node.writeHashBytesRecursively(buf)
 	if err != nil {
@@ -225,41 +231,54 @@ func (node *Node) hashWithCount() ([]byte, int64) {
 
 // Writes the node's hash to the given io.Writer. This function expects
 // child hashes to be already set.
-func (node *Node) writeHashBytes(w io.Writer) (err error) {
-	err = amino.EncodeInt8(w, node.height)
-	if err == nil {
-		err = amino.EncodeInt64(w, node.size)
+func (node *Node) writeHashBytes(w io.Writer) cmn.Error {
+	err := amino.EncodeInt8(w, node.height)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing height")
+	}
+	err = amino.EncodeVarint(w, node.size)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing size")
 	}
-	if err == nil {
-		err = amino.EncodeInt64(w, node.version)
+	err = amino.EncodeVarint(w, node.version)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing version")
 	}
 
 	// Key is not written for inner nodes, unlike writeBytes.
 
 	if node.isLeaf() {
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.key)
+		err = amino.EncodeByteSlice(w, node.key)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing key")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.value)
+		// Indirection needed to provide proofs without values.
+		// (e.g. proofLeafNode.ValueHash)
+		valueHash := tmhash.Sum(node.value)
+		err = amino.EncodeByteSlice(w, valueHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing value")
 		}
 	} else {
 		if node.leftHash == nil || node.rightHash == nil {
 			panic("Found an empty child hash")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.leftHash)
+		err = amino.EncodeByteSlice(w, node.leftHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing left hash")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.rightHash)
+		err = amino.EncodeByteSlice(w, node.rightHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing right hash")
 		}
 	}
-	return
+
+	return nil
 }
 
 // Writes the node's hash to the given io.Writer.
 // This function has the side-effect of calling hashWithCount.
-func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err error) {
+func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err cmn.Error) {
 	if node.leftNode != nil {
 		leftHash, leftCount := node.leftNode.hashWithCount()
 		node.leftHash = leftHash
@@ -276,40 +295,50 @@ func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err e
 }
 
 // Writes the node as a serialized byte slice to the supplied io.Writer.
-func (node *Node) writeBytes(w io.Writer) (err error) {
-	err = amino.EncodeInt8(w, node.height)
-	if err == nil {
-		err = amino.EncodeInt64(w, node.size)
+func (node *Node) writeBytes(w io.Writer) cmn.Error {
+	var cause error
+	cause = amino.EncodeInt8(w, node.height)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing height")
 	}
-	if err == nil {
-		err = amino.EncodeInt64(w, node.version)
+	cause = amino.EncodeVarint(w, node.size)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing size")
+	}
+	cause = amino.EncodeVarint(w, node.version)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing version")
 	}
 
 	// Unlike writeHashBytes, key is written for inner nodes.
-	if err == nil {
-		err = amino.EncodeByteSlice(w, node.key)
+	cause = amino.EncodeByteSlice(w, node.key)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing key")
 	}
 
 	if node.isLeaf() {
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.value)
+		cause = amino.EncodeByteSlice(w, node.value)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing value")
 		}
 	} else {
 		if node.leftHash == nil {
 			panic("node.leftHash was nil in writeBytes")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.leftHash)
+		cause = amino.EncodeByteSlice(w, node.leftHash)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing left hash")
 		}
 
 		if node.rightHash == nil {
 			panic("node.rightHash was nil in writeBytes")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.rightHash)
+		cause = amino.EncodeByteSlice(w, node.rightHash)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing right hash")
 		}
 	}
-	return
+	return nil
 }
 
 func (node *Node) set(t *Tree, key []byte, value []byte) (
@@ -358,35 +387,33 @@ func (node *Node) set(t *Tree, key []byte, value []byte) (
 
 		if updated {
 			return node, updated, orphaned
-		} else {
-			node.calcHeightAndSize(t)
-			newNode, balanceOrphaned := node.balance(t)
-			return newNode, updated, append(orphaned, balanceOrphaned...)
 		}
+		node.calcHeightAndSize(t)
+		newNode, balanceOrphaned := node.balance(t)
+		return newNode, updated, append(orphaned, balanceOrphaned...)
 	}
 }
 
-// newHash/newNode: The new hash or node to replace node after remove.
-// newKey: new leftmost leaf key for tree after successfully removing 'key' if changed.
-// value: removed value.
-func (node *Node) remove(t *Tree, key []byte) (
-	newHash []byte, newNode *Node, newKey []byte, value []byte, orphaned []*Node,
-) {
+// removes the node corresponding to the passed key and balances the tree.
+// It returns:
+// - the hash of the new node (or nil if the node is the one removed)
+// - the node that replaces the orig. node after remove
+// - new leftmost leaf key for tree after successfully removing 'key' if changed.
+// - the removed value
+// - the orphaned nodes.
+func (node *Node) remove(t *Tree, key []byte) ([]byte, *Node, []byte, []byte, []*Node) {
 	version := t.version + 1
 
 	if node.isLeaf() {
 		if bytes.Equal(key, node.key) {
 			return nil, nil, nil, node.value, []*Node{node}
 		}
-		return node.hash, node, nil, nil, orphaned
+		return node.hash, node, nil, nil, nil
 	}
 
+	// node.key < key; we go to the left to find the key:
 	if bytes.Compare(key, node.key) < 0 {
-		var newLeftHash []byte
-		var newLeftNode *Node
-
-		newLeftHash, newLeftNode, newKey, value, orphaned =
-			node.getLeftNode(t).remove(t, key)
+		newLeftHash, newLeftNode, newKey, value, orphaned := node.getLeftNode(t).remove(t, key)
 
 		if len(orphaned) == 0 {
 			return node.hash, node, nil, value, orphaned
@@ -401,30 +428,26 @@ func (node *Node) remove(t *Tree, key []byte) (
 		newNode, balanceOrphaned := newNode.balance(t)
 
 		return newNode.hash, newNode, newKey, value, append(orphaned, balanceOrphaned...)
-	} else {
-		var newRightHash []byte
-		var newRightNode *Node
-
-		newRightHash, newRightNode, newKey, value, orphaned =
-			node.getRightNode(t).remove(t, key)
-
-		if len(orphaned) == 0 {
-			return node.hash, node, nil, value, orphaned
-		} else if newRightHash == nil && newRightNode == nil { // right node held value, was removed
-			return node.leftHash, node.leftNode, nil, value, orphaned
-		}
-		orphaned = append(orphaned, node)
+	}
+	// node.key >= key; either found or look to the right:
+	newRightHash, newRightNode, newKey, value, orphaned := node.getRightNode(t).remove(t, key)
 
-		newNode := node.clone(version)
-		newNode.rightHash, newNode.rightNode = newRightHash, newRightNode
-		if newKey != nil {
-			newNode.key = newKey
-		}
-		newNode.calcHeightAndSize(t)
-		newNode, balanceOrphaned := newNode.balance(t)
+	if len(orphaned) == 0 {
+		return node.hash, node, nil, value, orphaned
+	} else if newRightHash == nil && newRightNode == nil { // right node held value, was removed
+		return node.leftHash, node.leftNode, nil, value, orphaned
+	}
+	orphaned = append(orphaned, node)
 
-		return newNode.hash, newNode, nil, value, append(orphaned, balanceOrphaned...)
+	newNode := node.clone(version)
+	newNode.rightHash, newNode.rightNode = newRightHash, newRightNode
+	if newKey != nil {
+		newNode.key = newKey
 	}
+	newNode.calcHeightAndSize(t)
+	newNode, balanceOrphaned := newNode.balance(t)
+
+	return newNode.hash, newNode, nil, value, append(orphaned, balanceOrphaned...)
 }
 
 func (node *Node) getLeftNode(t *Tree) *Node {
@@ -502,34 +525,32 @@ func (node *Node) balance(t *Tree) (newSelf *Node, orphaned []*Node) {
 			// Left Left Case
 			newNode, orphaned := node.rotateRight(t)
 			return newNode, []*Node{orphaned}
-		} else {
-			// Left Right Case
-			var leftOrphaned *Node
+		}
+		// Left Right Case
+		var leftOrphaned *Node
 
-			left := node.getLeftNode(t)
-			node.leftHash = nil
-			node.leftNode, leftOrphaned = left.rotateLeft(t)
-			newNode, rightOrphaned := node.rotateRight(t)
+		left := node.getLeftNode(t)
+		node.leftHash = nil
+		node.leftNode, leftOrphaned = left.rotateLeft(t)
+		newNode, rightOrphaned := node.rotateRight(t)
 
-			return newNode, []*Node{left, leftOrphaned, rightOrphaned}
-		}
+		return newNode, []*Node{left, leftOrphaned, rightOrphaned}
 	}
 	if balance < -1 {
 		if node.getRightNode(t).calcBalance(t) <= 0 {
 			// Right Right Case
 			newNode, orphaned := node.rotateLeft(t)
 			return newNode, []*Node{orphaned}
-		} else {
-			// Right Left Case
-			var rightOrphaned *Node
+		}
+		// Right Left Case
+		var rightOrphaned *Node
 
-			right := node.getRightNode(t)
-			node.rightHash = nil
-			node.rightNode, rightOrphaned = right.rotateRight(t)
-			newNode, leftOrphaned := node.rotateLeft(t)
+		right := node.getRightNode(t)
+		node.rightHash = nil
+		node.rightNode, rightOrphaned = right.rotateRight(t)
+		newNode, leftOrphaned := node.rotateLeft(t)
 
-			return newNode, []*Node{right, leftOrphaned, rightOrphaned}
-		}
+		return newNode, []*Node{right, leftOrphaned, rightOrphaned}
 	}
 	// Nothing changed
 	return node, []*Node{}
@@ -547,19 +568,20 @@ func (node *Node) traverseWithDepth(t *Tree, ascending bool, cb func(*Node, uint
 }
 
 func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, inclusive bool, depth uint8, cb func(*Node, uint8) bool) bool {
-	afterStart := start == nil || bytes.Compare(start, node.key) <= 0
+	afterStart := start == nil || bytes.Compare(start, node.key) < 0
+	startOrAfter := start == nil || bytes.Compare(start, node.key) <= 0
 	beforeEnd := end == nil || bytes.Compare(node.key, end) < 0
 	if inclusive {
 		beforeEnd = end == nil || bytes.Compare(node.key, end) <= 0
 	}
 
+	// Run callback per inner/leaf node.
 	stop := false
-	if afterStart && beforeEnd {
-		// IterateRange ignores this if not leaf
+	if !node.isLeaf() || (startOrAfter && beforeEnd) {
 		stop = cb(node, depth)
-	}
-	if stop {
-		return stop
+		if stop {
+			return stop
+		}
 	}
 	if node.isLeaf() {
 		return stop
diff --git a/vendor/github.com/tendermint/iavl/nodedb.go b/vendor/github.com/tendermint/iavl/nodedb.go
index 2f546cb729f02f775cda7f09c2bb8dcb68762fa3..d0d3df19c0835b5b4b16cd6f2b97dfd7472dada8 100644
--- a/vendor/github.com/tendermint/iavl/nodedb.go
+++ b/vendor/github.com/tendermint/iavl/nodedb.go
@@ -7,14 +7,14 @@ import (
 	"sort"
 	"sync"
 
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 var (
 	// All node keys are prefixed with this. This ensures no collision is
 	// possible with the other keys, and makes them easier to traverse.
 	nodePrefix = "n/"
-	nodeKeyFmt = "n/%x"
+	nodeKeyFmt = "n/%X"
 
 	// Orphans are keyed in the database by their expected lifetime.
 	// The first number represents the *last* version at which the orphan needs
@@ -22,12 +22,12 @@ var (
 	// which it is expected to exist - which starts out by being the version
 	// of the node being orphaned.
 	orphanPrefix    = "o/"
-	orphanPrefixFmt = "o/%d/"      // o/<last-version>/
-	orphanKeyFmt    = "o/%d/%d/%x" // o/<last-version>/<first-version>/<hash>
+	orphanPrefixFmt = "o/%010d/"         // o/<last-version>/
+	orphanKeyFmt    = "o/%010d/%010d/%X" // o/<last-version>/<first-version>/<hash>
 
 	// r/<version>
 	rootPrefix    = "r/"
-	rootPrefixFmt = "r/%d"
+	rootPrefixFmt = "r/%010d"
 )
 
 type nodeDB struct {
@@ -35,9 +35,7 @@ type nodeDB struct {
 	db    dbm.DB     // Persistent node storage.
 	batch dbm.Batch  // Batched writing buffer.
 
-	versionCache  map[int64][]byte // Cache of tree (root) versions.
-	latestVersion int64            // Latest root version.
-
+	latestVersion  int64
 	nodeCache      map[string]*list.Element // Node cache.
 	nodeCacheSize  int                      // Node cache size limit in elements.
 	nodeCacheQueue *list.List               // LRU queue of cache elements. Used for deletion.
@@ -47,7 +45,6 @@ func newNodeDB(db dbm.DB, cacheSize int) *nodeDB {
 	ndb := &nodeDB{
 		db:             db,
 		batch:          db.NewBatch(),
-		versionCache:   map[int64][]byte{},
 		latestVersion:  0, // initially invalid
 		nodeCache:      make(map[string]*list.Element),
 		nodeCacheSize:  cacheSize,
@@ -171,7 +168,6 @@ func (ndb *nodeDB) SaveOrphans(version int64, orphans map[string]int64) {
 	defer ndb.mtx.Unlock()
 
 	toVersion := ndb.getPreviousVersion(version)
-
 	for hash, fromVersion := range orphans {
 		debug("SAVEORPHAN %v-%v %X\n", fromVersion, toVersion, hash)
 		ndb.saveOrphan([]byte(hash), fromVersion, toVersion)
@@ -194,6 +190,7 @@ func (ndb *nodeDB) deleteOrphans(version int64) {
 	predecessor := ndb.getPreviousVersion(version)
 
 	// Traverse orphans with a lifetime ending at the version specified.
+	// TODO optimize.
 	ndb.traverseOrphansVersion(version, func(key, hash []byte) {
 		var fromVersion, toVersion int64
 
@@ -234,38 +231,36 @@ func (ndb *nodeDB) rootKey(version int64) []byte {
 
 func (ndb *nodeDB) getLatestVersion() int64 {
 	if ndb.latestVersion == 0 {
-		ndb.getVersions()
+		ndb.latestVersion = ndb.getPreviousVersion(1<<63 - 1)
 	}
 	return ndb.latestVersion
 }
 
-func (ndb *nodeDB) getVersions() map[int64][]byte {
-	if len(ndb.versionCache) == 0 {
-		ndb.traversePrefix([]byte(rootPrefix), func(k, hash []byte) {
-			var version int64
-			fmt.Sscanf(string(k), rootPrefixFmt, &version)
-			ndb.cacheVersion(version, hash)
-		})
-	}
-	return ndb.versionCache
-}
-
-func (ndb *nodeDB) cacheVersion(version int64, hash []byte) {
-	ndb.versionCache[version] = hash
-
-	if version > ndb.getLatestVersion() {
+func (ndb *nodeDB) updateLatestVersion(version int64) {
+	if ndb.latestVersion < version {
 		ndb.latestVersion = version
 	}
 }
 
 func (ndb *nodeDB) getPreviousVersion(version int64) int64 {
-	var result int64
-	for v := range ndb.getVersions() {
-		if v < version && v > result {
-			result = v
+	itr := ndb.db.ReverseIterator(
+		[]byte(fmt.Sprintf(rootPrefixFmt, version-1)),
+		[]byte(fmt.Sprintf(rootPrefixFmt, 0)),
+	)
+	defer itr.Close()
+
+	pversion := int64(-1)
+	for ; itr.Valid(); itr.Next() {
+		k := itr.Key()
+		_, err := fmt.Sscanf(string(k), rootPrefixFmt, &pversion)
+		if err != nil {
+			panic(err)
+		} else {
+			return pversion
 		}
 	}
-	return result
+
+	return 0
 }
 
 // deleteRoot deletes the root entry from disk, but not the node it points to.
@@ -276,7 +271,6 @@ func (ndb *nodeDB) deleteRoot(version int64) {
 
 	key := ndb.rootKey(version)
 	ndb.batch.Delete(key)
-	delete(ndb.versionCache, version)
 }
 
 func (ndb *nodeDB) traverseOrphans(fn func(k, v []byte)) {
@@ -373,7 +367,7 @@ func (ndb *nodeDB) saveRoot(hash []byte, version int64) error {
 
 	key := ndb.rootKey(version)
 	ndb.batch.Set(key, hash)
-	ndb.cacheVersion(version, hash)
+	ndb.updateLatestVersion(version)
 
 	return nil
 }
diff --git a/vendor/github.com/tendermint/iavl/orphaning_tree.go b/vendor/github.com/tendermint/iavl/orphaning_tree.go
index 893afeaf85f4fecfb1a3b7a93b3e71a08d74a8ed..fb7493f28a0e678e5cfa29f0a43181f824bd5ab4 100644
--- a/vendor/github.com/tendermint/iavl/orphaning_tree.go
+++ b/vendor/github.com/tendermint/iavl/orphaning_tree.go
@@ -45,6 +45,7 @@ func (tree *orphaningTree) SaveAs(version int64) {
 	if tree.root == nil {
 		// There can still be orphans, for example if the root is the node being
 		// removed.
+		debug("SAVE EMPTY TREE %v\n", version)
 		tree.ndb.SaveOrphans(version, tree.orphans)
 		tree.ndb.SaveEmptyRoot(version)
 	} else {
diff --git a/vendor/github.com/tendermint/iavl/path.go b/vendor/github.com/tendermint/iavl/path.go
deleted file mode 100644
index 7e3bb01d97b7b52f13587e33eb69ee6132f87c98..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/path.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package iavl
-
-import (
-	"bytes"
-
-	"github.com/pkg/errors"
-)
-
-// PathToKey represents an inner path to a leaf node.
-// Note that the nodes are ordered such that the last one is closest
-// to the root of the tree.
-type PathToKey struct {
-	InnerNodes []proofInnerNode `json:"inner_nodes"`
-}
-
-func (p *PathToKey) String() string {
-	str := ""
-	for i := len(p.InnerNodes) - 1; i >= 0; i-- {
-		str += p.InnerNodes[i].String() + "\n"
-	}
-	return str
-}
-
-// verify check that the leafNode's hash matches the path's LeafHash and that
-// the root is the merkle hash of all the inner nodes.
-func (p *PathToKey) verify(leafHash []byte, root []byte) error {
-	hash := leafHash
-	for _, branch := range p.InnerNodes {
-		hash = branch.Hash(hash)
-	}
-	if !bytes.Equal(root, hash) {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	return nil
-}
-
-func (p *PathToKey) isLeftmost() bool {
-	for _, node := range p.InnerNodes {
-		if len(node.Left) > 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *PathToKey) isRightmost() bool {
-	for _, node := range p.InnerNodes {
-		if len(node.Right) > 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *PathToKey) isEmpty() bool {
-	return p == nil || len(p.InnerNodes) == 0
-}
-
-func (p *PathToKey) dropRoot() *PathToKey {
-	if p.isEmpty() {
-		return p
-	}
-	return &PathToKey{
-		InnerNodes: p.InnerNodes[:len(p.InnerNodes)-1],
-	}
-}
-
-func (p *PathToKey) hasCommonRoot(p2 *PathToKey) bool {
-	if p.isEmpty() || p2.isEmpty() {
-		return false
-	}
-	leftEnd := p.InnerNodes[len(p.InnerNodes)-1]
-	rightEnd := p2.InnerNodes[len(p2.InnerNodes)-1]
-
-	return bytes.Equal(leftEnd.Left, rightEnd.Left) &&
-		bytes.Equal(leftEnd.Right, rightEnd.Right)
-}
-
-func (p *PathToKey) isLeftAdjacentTo(p2 *PathToKey) bool {
-	for p.hasCommonRoot(p2) {
-		p, p2 = p.dropRoot(), p2.dropRoot()
-	}
-	p, p2 = p.dropRoot(), p2.dropRoot()
-
-	return p.isRightmost() && p2.isLeftmost()
-}
-
-// PathWithNode is a path to a key which includes the leaf node at that key.
-type pathWithNode struct {
-	Path *PathToKey    `json:"path"`
-	Node proofLeafNode `json:"node"`
-}
-
-func (p *pathWithNode) verify(root []byte) error {
-	return p.Path.verify(p.Node.Hash(), root)
-}
-
-// verifyPaths verifies the left and right paths individually, and makes sure
-// the ordering is such that left < startKey <= endKey < right.
-func verifyPaths(left, right *pathWithNode, startKey, endKey, root []byte) error {
-	if bytes.Compare(startKey, endKey) == 1 {
-		return ErrInvalidInputs
-	}
-	if left != nil {
-		if err := left.verify(root); err != nil {
-			return err
-		}
-		if !left.Node.isLesserThan(startKey) {
-			return errors.WithStack(ErrInvalidProof)
-		}
-	}
-	if right != nil {
-		if err := right.verify(root); err != nil {
-			return err
-		}
-		if !right.Node.isGreaterThan(endKey) {
-			return errors.WithStack(ErrInvalidProof)
-		}
-	}
-	return nil
-}
-
-// Checks that all paths are adjacent to one another, ie. that there are no
-// keys missing.
-func verifyNoMissingKeys(paths []*PathToKey) error {
-	ps := make([]*PathToKey, 0, len(paths))
-	for _, p := range paths {
-		if p != nil {
-			ps = append(ps, p)
-		}
-	}
-	for i := 0; i < len(ps)-1; i++ {
-		// Always check from left to right, since paths are always in ascending order.
-		if !ps[i].isLeftAdjacentTo(ps[i+1]) {
-			return errors.Errorf("paths #%d and #%d are not adjacent", i, i+1)
-		}
-	}
-	return nil
-}
-
-// Checks that with the given left and right paths, no keys can exist in between.
-// Supports nil paths to signify out-of-range.
-func verifyKeyAbsence(left, right *pathWithNode) error {
-	if left != nil && left.Path.isRightmost() {
-		// Range starts outside of the right boundary.
-		return nil
-	} else if right != nil && right.Path.isLeftmost() {
-		// Range ends outside of the left boundary.
-		return nil
-	} else if left != nil && right != nil &&
-		left.Path.isLeftAdjacentTo(right.Path) {
-		// Range is between two existing keys.
-		return nil
-	}
-	return errors.WithStack(ErrInvalidProof)
-}
diff --git a/vendor/github.com/tendermint/iavl/proof.go b/vendor/github.com/tendermint/iavl/proof.go
index c770ea96fc6c9dee7d3b6c2da16a9400f7fb5315..a878770484fcefbc637642de23da3fc4b0df650f 100644
--- a/vendor/github.com/tendermint/iavl/proof.go
+++ b/vendor/github.com/tendermint/iavl/proof.go
@@ -4,11 +4,9 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/pkg/errors"
-
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/iavl/sha256truncated"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
@@ -25,40 +23,58 @@ var (
 	ErrNilRoot = fmt.Errorf("tree root is nil")
 )
 
+//----------------------------------------
+
 type proofInnerNode struct {
-	Height  int8
-	Size    int64
-	Version int64
-	Left    []byte
-	Right   []byte
+	Height  int8   `json:"height"`
+	Size    int64  `json:"size"`
+	Version int64  `json:"version"`
+	Left    []byte `json:"left"`
+	Right   []byte `json:"right"`
+}
+
+func (pin proofInnerNode) String() string {
+	return pin.stringIndented("")
 }
 
-func (n *proofInnerNode) String() string {
-	return fmt.Sprintf("proofInnerNode[height=%d, ver=%d %x / %x]", n.Height, n.Version, n.Left, n.Right)
+func (pin proofInnerNode) stringIndented(indent string) string {
+	return fmt.Sprintf(`proofInnerNode{
+%s  Height:  %v
+%s  Size:    %v
+%s  Version: %v
+%s  Left:    %X
+%s  Right:   %X
+%s}`,
+		indent, pin.Height,
+		indent, pin.Size,
+		indent, pin.Version,
+		indent, pin.Left,
+		indent, pin.Right,
+		indent)
 }
 
-func (branch proofInnerNode) Hash(childHash []byte) []byte {
-	hasher := sha256truncated.New()
+func (pin proofInnerNode) Hash(childHash []byte) []byte {
+	hasher := tmhash.New()
 	buf := new(bytes.Buffer)
 
-	err := amino.EncodeInt8(buf, branch.Height)
+	err := amino.EncodeInt8(buf, pin.Height)
 	if err == nil {
-		err = amino.EncodeInt64(buf, branch.Size)
+		err = amino.EncodeVarint(buf, pin.Size)
 	}
 	if err == nil {
-		err = amino.EncodeInt64(buf, branch.Version)
+		err = amino.EncodeVarint(buf, pin.Version)
 	}
 
-	if len(branch.Left) == 0 {
+	if len(pin.Left) == 0 {
 		if err == nil {
 			err = amino.EncodeByteSlice(buf, childHash)
 		}
 		if err == nil {
-			err = amino.EncodeByteSlice(buf, branch.Right)
+			err = amino.EncodeByteSlice(buf, pin.Right)
 		}
 	} else {
 		if err == nil {
-			err = amino.EncodeByteSlice(buf, branch.Left)
+			err = amino.EncodeByteSlice(buf, pin.Left)
 		}
 		if err == nil {
 			err = amino.EncodeByteSlice(buf, childHash)
@@ -67,33 +83,51 @@ func (branch proofInnerNode) Hash(childHash []byte) []byte {
 	if err != nil {
 		panic(fmt.Sprintf("Failed to hash proofInnerNode: %v", err))
 	}
-	hasher.Write(buf.Bytes())
 
+	hasher.Write(buf.Bytes())
 	return hasher.Sum(nil)
 }
 
+//----------------------------------------
+
 type proofLeafNode struct {
-	KeyBytes   cmn.HexBytes `json:"key"`
-	ValueBytes cmn.HexBytes `json:"value"`
-	Version    int64        `json:"version"`
+	Key       cmn.HexBytes `json:"key"`
+	ValueHash cmn.HexBytes `json:"value"`
+	Version   int64        `json:"version"`
 }
 
-func (leaf proofLeafNode) Hash() []byte {
-	hasher := sha256truncated.New()
+func (pln proofLeafNode) String() string {
+	return pln.stringIndented("")
+}
+
+func (pln proofLeafNode) stringIndented(indent string) string {
+	return fmt.Sprintf(`proofLeafNode{
+%s  Key:       %v
+%s  ValueHash: %X
+%s  Version:   %v
+%s}`,
+		indent, pln.Key,
+		indent, pln.ValueHash,
+		indent, pln.Version,
+		indent)
+}
+
+func (pln proofLeafNode) Hash() []byte {
+	hasher := tmhash.New()
 	buf := new(bytes.Buffer)
 
 	err := amino.EncodeInt8(buf, 0)
 	if err == nil {
-		err = amino.EncodeInt64(buf, 1)
+		err = amino.EncodeVarint(buf, 1)
 	}
 	if err == nil {
-		err = amino.EncodeInt64(buf, leaf.Version)
+		err = amino.EncodeVarint(buf, pln.Version)
 	}
 	if err == nil {
-		err = amino.EncodeByteSlice(buf, leaf.KeyBytes)
+		err = amino.EncodeByteSlice(buf, pln.Key)
 	}
 	if err == nil {
-		err = amino.EncodeByteSlice(buf, leaf.ValueBytes)
+		err = amino.EncodeByteSlice(buf, pln.ValueHash)
 	}
 	if err != nil {
 		panic(fmt.Sprintf("Failed to hash proofLeafNode: %v", err))
@@ -103,157 +137,50 @@ func (leaf proofLeafNode) Hash() []byte {
 	return hasher.Sum(nil)
 }
 
-func (leaf proofLeafNode) isLesserThan(key []byte) bool {
-	return bytes.Compare(leaf.KeyBytes, key) == -1
-}
-
-func (leaf proofLeafNode) isGreaterThan(key []byte) bool {
-	return bytes.Compare(leaf.KeyBytes, key) == 1
-}
+//----------------------------------------
 
-func (node *Node) pathToInnerKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
-	path := &PathToKey{}
-	val, err := node._pathToKey(t, key, false, path)
-	return path, val, err
+// If the key does not exist, returns the path to the next leaf left of key (w/
+// path), except when key is less than the least item, in which case it returns
+// a path to the least item.
+func (node *Node) PathToLeaf(t *Tree, key []byte) (PathToLeaf, *Node, error) {
+	path := new(PathToLeaf)
+	val, err := node.pathToLeaf(t, key, path)
+	return *path, val, err
 }
 
-func (node *Node) pathToKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
-	path := &PathToKey{}
-	val, err := node._pathToKey(t, key, true, path)
-	return path, val, err
-}
-func (node *Node) _pathToKey(t *Tree, key []byte, skipInner bool, path *PathToKey) (*Node, error) {
+// pathToLeaf is a helper which recursively constructs the PathToLeaf.
+// As an optimization the already constructed path is passed in as an argument
+// and is shared among recursive calls.
+func (node *Node) pathToLeaf(t *Tree, key []byte, path *PathToLeaf) (*Node, error) {
 	if node.height == 0 {
 		if bytes.Equal(node.key, key) {
 			return node, nil
 		}
-		return nil, errors.New("key does not exist")
-	} else if !skipInner && bytes.Equal(node.key, key) {
-		return node, nil
+		return node, cmn.NewError("key does not exist")
 	}
 
 	if bytes.Compare(key, node.key) < 0 {
-		if n, err := node.getLeftNode(t)._pathToKey(t, key, skipInner, path); err != nil {
-			return nil, err
-		} else {
-			branch := proofInnerNode{
-				Height:  node.height,
-				Size:    node.size,
-				Version: node.version,
-				Left:    nil,
-				Right:   node.getRightNode(t).hash,
-			}
-			path.InnerNodes = append(path.InnerNodes, branch)
-			return n, nil
-		}
-	}
-
-	if n, err := node.getRightNode(t)._pathToKey(t, key, skipInner, path); err != nil {
-		return nil, err
-	} else {
-		branch := proofInnerNode{
+		// left side
+		pin := proofInnerNode{
 			Height:  node.height,
 			Size:    node.size,
 			Version: node.version,
-			Left:    node.getLeftNode(t).hash,
-			Right:   nil,
-		}
-		path.InnerNodes = append(path.InnerNodes, branch)
-		return n, nil
-	}
-}
-
-func (t *Tree) constructKeyAbsentProof(key []byte, proof *KeyAbsentProof) error {
-	// Get the index of the first key greater than the requested key, if the key doesn't exist.
-	idx, val := t.Get64(key)
-	if val != nil {
-		return errors.Errorf("couldn't construct non-existence proof: key 0x%x exists", key)
-	}
-
-	var (
-		lkey, lval []byte
-		rkey, rval []byte
-	)
-	if idx > 0 {
-		lkey, lval = t.GetByIndex64(idx - 1)
-	}
-	if idx <= t.Size64()-1 {
-		rkey, rval = t.GetByIndex64(idx)
-	}
-
-	if lkey == nil && rkey == nil {
-		return errors.New("couldn't get keys required for non-existence proof")
-	}
-
-	if lkey != nil {
-		path, node, _ := t.root.pathToKey(t, lkey)
-		proof.Left = &pathWithNode{
-			Path: path,
-			Node: proofLeafNode{lkey, lval, node.version},
-		}
-	}
-	if rkey != nil {
-		path, node, _ := t.root.pathToKey(t, rkey)
-		proof.Right = &pathWithNode{
-			Path: path,
-			Node: proofLeafNode{rkey, rval, node.version},
+			Left:    nil,
+			Right:   node.getRightNode(t).hash,
 		}
-	}
-
-	return nil
-}
-
-func (t *Tree) getWithProof(key []byte) (value []byte, proof *KeyExistsProof, err error) {
-	if t.root == nil {
-		return nil, nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	path, node, err := t.root.pathToKey(t, key)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "could not construct path to key")
-	}
-
-	proof = &KeyExistsProof{
-		RootHash:  t.root.hash,
-		PathToKey: path,
-		Version:   node.version,
-	}
-	return node.value, proof, nil
-}
-
-func (t *Tree) getInnerWithProof(key []byte) (proof *InnerKeyProof, err error) {
-	if t.root == nil {
-		return nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	path, node, err := t.root.pathToInnerKey(t, key)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not construct path to key")
-	}
-
-	proof = &InnerKeyProof{
-		&KeyExistsProof{
-			RootHash:  t.root.hash,
-			PathToKey: path,
-			Version:   node.version,
-		},
-	}
-	return proof, nil
-}
-
-func (t *Tree) keyAbsentProof(key []byte) (*KeyAbsentProof, error) {
-	if t.root == nil {
-		return nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	proof := &KeyAbsentProof{
-		RootHash: t.root.hash,
-	}
-	if err := t.constructKeyAbsentProof(key, proof); err != nil {
-		return nil, errors.Wrap(err, "could not construct proof of non-existence")
-	}
-	return proof, nil
+		*path = append(*path, pin)
+		n, err := node.getLeftNode(t).pathToLeaf(t, key, path)
+		return n, err
+	}
+	// right side
+	pin := proofInnerNode{
+		Height:  node.height,
+		Size:    node.size,
+		Version: node.version,
+		Left:    node.getLeftNode(t).hash,
+		Right:   nil,
+	}
+	*path = append(*path, pin)
+	n, err := node.getRightNode(t).pathToLeaf(t, key, path)
+	return n, err
 }
diff --git a/vendor/github.com/tendermint/iavl/proof_key.go b/vendor/github.com/tendermint/iavl/proof_key.go
deleted file mode 100644
index e9c900884db27a9e41953b30743157240aab29ea..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/proof_key.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package iavl
-
-import (
-	"bytes"
-	"fmt"
-
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// KeyProof represents a proof of existence or absence of a single key.
-type KeyProof interface {
-	// Verify verfies the proof is valid. To verify absence,
-	// the value should be nil.
-	Verify(key, value, root []byte) error
-
-	// Root returns the root hash of the proof.
-	Root() []byte
-
-	// Serialize itself
-	Bytes() []byte
-}
-
-const (
-	// Used for serialization of proofs.
-	keyExistsMagicNumber = 0x50
-	keyAbsentMagicNumber = 0x51
-)
-
-// KeyExistsProof represents a proof of existence of a single key.
-type KeyExistsProof struct {
-	RootHash cmn.HexBytes `json:"root_hash"`
-	Version  int64        `json:"version"`
-
-	*PathToKey `json:"path"`
-}
-
-func (proof *KeyExistsProof) Root() []byte {
-	return proof.RootHash
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *KeyExistsProof) Verify(key []byte, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if key == nil || value == nil {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	return proof.PathToKey.verify(proofLeafNode{key, value, proof.Version}.Hash(), root)
-}
-
-// Bytes returns a go-amino binary serialization
-func (proof *KeyExistsProof) Bytes() []byte {
-	bz, err := cdc.MarshalBinary(proof)
-	if err != nil {
-		panic(fmt.Sprintf("error marshaling proof (%v): %v", proof, err))
-	}
-	return append([]byte{keyExistsMagicNumber}, bz...)
-}
-
-// readKeyExistsProof will deserialize a KeyExistsProof from bytes.
-func readKeyExistsProof(data []byte) (*KeyExistsProof, error) {
-	proof := new(KeyExistsProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyAbsentProof represents a proof of the absence of a single key.
-type KeyAbsentProof struct {
-	RootHash cmn.HexBytes `json:"root_hash"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
-}
-
-func (proof *KeyAbsentProof) Root() []byte {
-	return proof.RootHash
-}
-
-func (p *KeyAbsentProof) String() string {
-	return fmt.Sprintf("KeyAbsentProof\nroot=%s\nleft=%s%#v\nright=%s%#v\n", p.RootHash, p.Left.Path, p.Left.Node, p.Right.Path, p.Right.Node)
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *KeyAbsentProof) Verify(key, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if key == nil || value != nil {
-		return ErrInvalidInputs
-	}
-
-	if proof.Left == nil && proof.Right == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, key, key, root); err != nil {
-		return err
-	}
-
-	return verifyKeyAbsence(proof.Left, proof.Right)
-}
-
-// Bytes returns a go-wire binary serialization
-func (proof *KeyAbsentProof) Bytes() []byte {
-	bz, err := cdc.MarshalBinary(proof)
-	if err != nil {
-		panic(fmt.Sprintf("error marshaling proof (%v): %v", proof, err))
-	}
-	return append([]byte{keyAbsentMagicNumber}, bz...)
-}
-
-// readKeyAbsentProof will deserialize a KeyAbsentProof from bytes.
-func readKeyAbsentProof(data []byte) (*KeyAbsentProof, error) {
-	proof := new(KeyAbsentProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
-
-// ReadKeyProof reads a KeyProof from a byte-slice.
-func ReadKeyProof(data []byte) (KeyProof, error) {
-	if len(data) == 0 {
-		return nil, errors.New("proof bytes are empty")
-	}
-	b, val := data[0], data[1:]
-
-	switch b {
-	case keyExistsMagicNumber:
-		return readKeyExistsProof(val)
-	case keyAbsentMagicNumber:
-		return readKeyAbsentProof(val)
-	}
-	return nil, errors.New("unrecognized proof")
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// InnerKeyProof represents a proof of existence of an inner node key.
-type InnerKeyProof struct {
-	*KeyExistsProof
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *InnerKeyProof) Verify(hash []byte, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if hash == nil || value != nil {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	return proof.PathToKey.verify(hash, root)
-}
-
-// ReadKeyInnerProof will deserialize a InnerKeyProof from bytes.
-func ReadInnerKeyProof(data []byte) (*InnerKeyProof, error) {
-	proof := new(InnerKeyProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
diff --git a/vendor/github.com/tendermint/iavl/proof_path.go b/vendor/github.com/tendermint/iavl/proof_path.go
new file mode 100644
index 0000000000000000000000000000000000000000..de366f338132b4bac4c7dab9fab413119c2dd787
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/proof_path.go
@@ -0,0 +1,167 @@
+package iavl
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// pathWithLeaf is a path to a leaf node and the leaf node itself.
+type pathWithLeaf struct {
+	Path PathToLeaf    `json:"path"`
+	Leaf proofLeafNode `json:"leaf"`
+}
+
+func (pwl pathWithLeaf) String() string {
+	return pwl.StringIndented("")
+}
+
+func (pwl pathWithLeaf) StringIndented(indent string) string {
+	return fmt.Sprintf(`pathWithLeaf{
+%s  Path: %v
+%s  Leaf: %v
+%s}`,
+		indent, pwl.Path.stringIndented(indent+"  "),
+		indent, pwl.Leaf.stringIndented(indent+"  "),
+		indent)
+}
+
+// `verify` checks that the leaf node's hash + the inner nodes merkle-izes to
+// the given root. If it returns an error, it means the leafHash or the
+// PathToLeaf is incorrect.
+func (pwl pathWithLeaf) verify(root []byte) cmn.Error {
+	leafHash := pwl.Leaf.Hash()
+	return pwl.Path.verify(leafHash, root)
+}
+
+// `computeRootHash` computes the root hash with leaf node.
+// Does not verify the root hash.
+func (pwl pathWithLeaf) computeRootHash() []byte {
+	leafHash := pwl.Leaf.Hash()
+	return pwl.Path.computeRootHash(leafHash)
+}
+
+//----------------------------------------
+
+// PathToLeaf represents an inner path to a leaf node.
+// Note that the nodes are ordered such that the last one is closest
+// to the root of the tree.
+type PathToLeaf []proofInnerNode
+
+func (pl PathToLeaf) String() string {
+	return pl.stringIndented("")
+}
+
+func (pl PathToLeaf) stringIndented(indent string) string {
+	if len(pl) == 0 {
+		return "empty-PathToLeaf"
+	}
+	strs := make([]string, len(pl))
+	for i, pin := range pl {
+		if i == 20 {
+			strs[i] = fmt.Sprintf("... (%v total)", len(pl))
+			break
+		}
+		strs[i] = fmt.Sprintf("%v:%v", i, pin.stringIndented(indent+"  "))
+	}
+	return fmt.Sprintf(`PathToLeaf{
+%s  %v
+%s}`,
+		indent, strings.Join(strs, "\n"+indent+"  "),
+		indent)
+}
+
+// `verify` checks that the leaf node's hash + the inner nodes merkle-izes to
+// the given root. If it returns an error, it means the leafHash or the
+// PathToLeaf is incorrect.
+func (pl PathToLeaf) verify(leafHash []byte, root []byte) cmn.Error {
+	hash := leafHash
+	for i := len(pl) - 1; i >= 0; i-- {
+		pin := pl[i]
+		hash = pin.Hash(hash)
+	}
+	if !bytes.Equal(root, hash) {
+		return cmn.ErrorWrap(ErrInvalidProof, "")
+	}
+	return nil
+}
+
+// `computeRootHash` computes the root hash assuming some leaf hash.
+// Does not verify the root hash.
+func (pl PathToLeaf) computeRootHash(leafHash []byte) []byte {
+	hash := leafHash
+	for i := len(pl) - 1; i >= 0; i-- {
+		pin := pl[i]
+		hash = pin.Hash(hash)
+	}
+	return hash
+}
+
+func (pl PathToLeaf) isLeftmost() bool {
+	for _, node := range pl {
+		if len(node.Left) > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (pl PathToLeaf) isRightmost() bool {
+	for _, node := range pl {
+		if len(node.Right) > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (pl PathToLeaf) isEmpty() bool {
+	return pl == nil || len(pl) == 0
+}
+
+func (pl PathToLeaf) dropRoot() PathToLeaf {
+	if pl.isEmpty() {
+		return pl
+	}
+	return PathToLeaf(pl[:len(pl)-1])
+}
+
+func (pl PathToLeaf) hasCommonRoot(pl2 PathToLeaf) bool {
+	if pl.isEmpty() || pl2.isEmpty() {
+		return false
+	}
+	leftEnd := pl[len(pl)-1]
+	rightEnd := pl2[len(pl2)-1]
+
+	return bytes.Equal(leftEnd.Left, rightEnd.Left) &&
+		bytes.Equal(leftEnd.Right, rightEnd.Right)
+}
+
+func (pl PathToLeaf) isLeftAdjacentTo(pl2 PathToLeaf) bool {
+	for pl.hasCommonRoot(pl2) {
+		pl, pl2 = pl.dropRoot(), pl2.dropRoot()
+	}
+	pl, pl2 = pl.dropRoot(), pl2.dropRoot()
+
+	return pl.isRightmost() && pl2.isLeftmost()
+}
+
+// returns -1 if invalid.
+func (pl PathToLeaf) Index() (idx int64) {
+	for i, node := range pl {
+		if node.Left == nil {
+			continue
+		} else if node.Right == nil {
+			if i < len(pl)-1 {
+				idx += node.Size - pl[i+1].Size
+			} else {
+				idx += node.Size - 1
+			}
+		} else {
+			return -1
+		}
+	}
+	return idx
+}
diff --git a/vendor/github.com/tendermint/iavl/proof_range.go b/vendor/github.com/tendermint/iavl/proof_range.go
index 403740559d1830e9681c26c723688fb3e2e8482d..cc12618f97848943a5587efe66b52a77a7f7cd03 100644
--- a/vendor/github.com/tendermint/iavl/proof_range.go
+++ b/vendor/github.com/tendermint/iavl/proof_range.go
@@ -3,445 +3,490 @@ package iavl
 import (
 	"bytes"
 	"fmt"
+	"sort"
+	"strings"
 
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-// KeyInRangeProof is an interface which covers both first-in-range and last-in-range proofs.
-type KeyInRangeProof interface {
-	Verify(startKey, endKey, key, value, root []byte) error
-}
-
-// KeyFirstInRangeProof is a proof that a given key is the first in a given range.
-type KeyFirstInRangeProof struct {
-	KeyExistsProof `json:"key_proof"`
+type RangeProof struct {
+	// You don't need the right path because
+	// it can be derived from what we have.
+	LeftPath   PathToLeaf      `json:"left_path"`
+	InnerNodes []PathToLeaf    `json:"inner_nodes"`
+	Leaves     []proofLeafNode `json:"leaves"`
 
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
-}
+	// memoize
+	rootVerified bool
+	rootHash     []byte // valid iff rootVerified is true
+	treeEnd      bool   // valid iff rootVerified is true
 
-// String returns a string representation of the proof.
-func (proof *KeyFirstInRangeProof) String() string {
-	return fmt.Sprintf("%#v", proof)
 }
 
-// Verify that the first in range proof is valid.
-func (proof *KeyFirstInRangeProof) Verify(startKey, endKey, key, value []byte, root []byte) error {
-	if key != nil {
-		inputsOutOfRange := bytes.Compare(key, startKey) == -1 || bytes.Compare(key, endKey) == 1
-		if inputsOutOfRange {
-			return ErrInvalidInputs
-		}
-	}
-	if proof.Left == nil && proof.Right == nil && proof.PathToKey == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
-	}
-	if proof.PathToKey == nil {
-		// If we don't have an existing key, we effectively have a proof of absence.
-		return verifyKeyAbsence(proof.Left, proof.Right)
-	}
-
-	if err := proof.KeyExistsProof.Verify(key, value, root); err != nil {
-		return errors.Wrap(err, "failed to verify key exists proof")
-	}
-	// If the key returned is equal to our start key, and we've verified
-	// that it exists, there's nothing else to check.
-	if bytes.Equal(key, startKey) {
+// Keys returns all the keys in the RangeProof.  NOTE: The keys here may
+// include more keys than provided by tree.GetRangeWithProof or
+// VersionedTree.GetVersionedRangeWithProof.  The keys returned there are only
+// in the provided [startKey,endKey){limit} range.  The keys returned here may
+// include extra keys, such as:
+// - the key before startKey if startKey is provided and doesn't exist;
+// - the key after a queried key with tree.GetWithProof, when the key is absent.
+func (proof *RangeProof) Keys() (keys [][]byte) {
+	if proof == nil {
 		return nil
 	}
-	// If the key returned is the smallest in the tree, then it must be
-	// the smallest in the given range too.
-	if proof.PathToKey.isLeftmost() {
-		return nil
-	}
-	// The start key is in between the left path and the key returned,
-	// and the paths are adjacent. Therefore there is nothing between
-	// the key returned and the start key.
-	if proof.Left != nil && proof.Left.Path.isLeftAdjacentTo(proof.PathToKey) {
-		return nil
+	for _, leaf := range proof.Leaves {
+		keys = append(keys, leaf.Key)
 	}
-	return errors.WithStack(ErrInvalidProof)
+	return keys
 }
 
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyLastInRangeProof is a proof that a given key is the last in a given range.
-type KeyLastInRangeProof struct {
-	KeyExistsProof `json:"key_proof"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
+// String returns a string representation of the proof.
+func (proof *RangeProof) String() string {
+	if proof == nil {
+		return "<nil-RangeProof>"
+	}
+	return proof.StringIndented("")
 }
 
-// String returns a string representation of the proof.
-func (proof *KeyLastInRangeProof) String() string {
-	// TODO(cloudhead): Needs work.
-	return fmt.Sprintf("%#v", proof)
+func (proof *RangeProof) StringIndented(indent string) string {
+	istrs := make([]string, 0, len(proof.InnerNodes))
+	for _, ptl := range proof.InnerNodes {
+		istrs = append(istrs, ptl.stringIndented(indent+"    "))
+	}
+	lstrs := make([]string, 0, len(proof.Leaves))
+	for _, leaf := range proof.Leaves {
+		lstrs = append(lstrs, leaf.stringIndented(indent+"    "))
+	}
+	return fmt.Sprintf(`RangeProof{
+%s  LeftPath: %v
+%s  InnerNodes:
+%s    %v
+%s  Leaves:
+%s    %v
+%s  (rootVerified): %v
+%s  (rootHash): %X
+%s  (treeEnd): %v
+%s}`,
+		indent, proof.LeftPath.stringIndented(indent+"  "),
+		indent,
+		indent, strings.Join(istrs, "\n"+indent+"    "),
+		indent,
+		indent, strings.Join(lstrs, "\n"+indent+"    "),
+		indent, proof.rootVerified,
+		indent, proof.rootHash,
+		indent, proof.treeEnd,
+		indent)
 }
 
-// Verify that the last in range proof is valid.
-func (proof *KeyLastInRangeProof) Verify(startKey, endKey, key, value []byte, root []byte) error {
-	if key != nil && (bytes.Compare(key, startKey) == -1 || bytes.Compare(key, endKey) == 1) {
-		return ErrInvalidInputs
-	}
-	if proof.Left == nil && proof.Right == nil && proof.PathToKey == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
-	}
-	if proof.PathToKey == nil {
-		// If we don't have an existing key, we effectively have a proof of absence.
-		return verifyKeyAbsence(proof.Left, proof.Right)
+// The index of the first leaf (of the whole tree).
+// Returns -1 if the proof is nil.
+func (proof *RangeProof) LeftIndex() int64 {
+	if proof == nil {
+		return -1
 	}
+	return proof.LeftPath.Index()
+}
 
-	if err := proof.KeyExistsProof.Verify(key, value, root); err != nil {
-		return err
+// Also see LeftIndex().
+// Verify that a key has some value.
+// Does not assume that the proof itself is valid, call Verify() first.
+func (proof *RangeProof) VerifyItem(key, value []byte) error {
+	leaves := proof.Leaves
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
 	}
-	if bytes.Equal(key, endKey) {
-		return nil
+	if !proof.rootVerified {
+		return cmn.NewError("must call Verify(root) first.")
 	}
-	if proof.PathToKey.isRightmost() {
-		return nil
+	i := sort.Search(len(leaves), func(i int) bool {
+		return bytes.Compare(key, leaves[i].Key) <= 0
+	})
+	if i >= len(leaves) || !bytes.Equal(leaves[i].Key, key) {
+		return cmn.ErrorWrap(ErrInvalidProof, "leaf key not found in proof")
 	}
-	if proof.Right != nil &&
-		proof.PathToKey.isLeftAdjacentTo(proof.Right.Path) {
-		return nil
+	valueHash := tmhash.Sum(value)
+	if !bytes.Equal(leaves[i].ValueHash, valueHash) {
+		return cmn.ErrorWrap(ErrInvalidProof, "leaf value hash not same")
 	}
-
-	return errors.WithStack(ErrInvalidProof)
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyRangeProof is proof that a range of keys does or does not exist.
-type KeyRangeProof struct {
-	RootHash   cmn.HexBytes `json:"root_hash"`
-	Versions   []int64      `json:"versions"`
-	PathToKeys []*PathToKey `json:"paths"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
+	return nil
 }
 
-// Verify that a range proof is valid.
-//
-// This method expects the same parameters passed to query the range.
-func (proof *KeyRangeProof) Verify(
-	startKey, endKey []byte, limit int, keys, values [][]byte, root []byte,
-) error {
-	if len(proof.PathToKeys) != len(keys) || len(values) != len(keys) || len(proof.Versions) != len(keys) {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	if limit > 0 && len(keys) > limit {
-		return errors.WithStack(ErrInvalidInputs)
+// Verify that proof is valid absence proof for key.
+// Does not assume that the proof itself is valid.
+// For that, use Verify(root).
+func (proof *RangeProof) VerifyAbsence(key []byte) error {
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
 	}
-
-	// If startKey > endKey, reverse the keys and values, since our proofs are
-	// always in ascending order.
-	ascending := bytes.Compare(startKey, endKey) == -1
-	if !ascending {
-		startKey, endKey, keys, values = reverseKeys(startKey, endKey, keys, values)
+	if !proof.rootVerified {
+		return cmn.NewError("must call Verify(root) first.")
 	}
-
-	// If the range is empty, we just have to check the left and right paths.
-	if len(keys) == 0 {
-		if err := verifyKeyAbsence(proof.Left, proof.Right); err != nil {
-			return err
+	cmp := bytes.Compare(key, proof.Leaves[0].Key)
+	if cmp < 0 {
+		if proof.LeftPath.isLeftmost() {
+			return nil
 		}
-		return verifyPaths(proof.Left, proof.Right, startKey, endKey, root)
+		return cmn.NewError("absence not proved by left path")
+	} else if cmp == 0 {
+		return cmn.NewError("absence disproved via first item #0")
 	}
-
-	// If we hit the limit, one of the two ends doesn't have to match the
-	// limits of the query, so we adjust the range to match the limit we found.
-	if limit > 0 && len(keys) == limit {
-		if ascending {
-			endKey = keys[len(keys)-1]
-		} else {
-			startKey = keys[0]
-		}
+	if len(proof.LeftPath) == 0 {
+		return nil // proof ok
 	}
-	// Now we know Left < startKey <= endKey < Right.
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
+	if proof.LeftPath.isRightmost() {
+		return nil
 	}
 
-	if err := verifyNoMissingKeys(proof.paths()); err != nil {
-		return errors.WithStack(err)
-	}
+	// See if any of the leaves are greater than key.
+	for i := 1; i < len(proof.Leaves); i++ {
+		leaf := proof.Leaves[i]
+		cmp := bytes.Compare(key, leaf.Key)
+		if cmp < 0 {
+			return nil // proof ok
+		} else if cmp == 0 {
+			return cmn.NewError("absence disproved via item #%v", i)
+		} else {
+			if i == len(proof.Leaves)-1 {
+				// If last item, check whether
+				// it's the last item in the tree.
 
-	// If we've reached this point, it means our range isn't empty, and we have
-	// a list of keys.
-	for i, path := range proof.PathToKeys {
-		leafNode := proofLeafNode{
-			KeyBytes:   keys[i],
-			ValueBytes: values[i],
-			Version:    proof.Versions[i],
-		}
-		if err := path.verify(leafNode.Hash(), root); err != nil {
-			return errors.WithStack(err)
+			}
+			continue
 		}
 	}
 
-	// In the case of a descending range, if the left proof is nil and the
-	// limit wasn't reached, we have to verify that we're not missing any
-	// keys. Basically, if a key to the left is missing because we've
-	// reached the limit, then it's fine. But if the key count is smaller
-	// than the limit, we need a left proof to make sure no keys are
-	// missing.
-	if proof.Left == nil &&
-		!bytes.Equal(startKey, keys[0]) &&
-		!proof.PathToKeys[0].isLeftmost() {
-		return errors.WithStack(ErrInvalidProof)
+	// It's still a valid proof if our last leaf is the rightmost child.
+	if proof.treeEnd {
+		return nil // OK!
 	}
 
-	if proof.Right == nil &&
-		!bytes.Equal(endKey, keys[len(keys)-1]) &&
-		!proof.PathToKeys[len(proof.PathToKeys)-1].isRightmost() {
-		return errors.WithStack(ErrInvalidProof)
+	// It's not a valid absence proof.
+	if len(proof.Leaves) < 2 {
+		return cmn.NewError("absence not proved by right leaf (need another leaf?)")
 	}
-	return nil
+	return cmn.NewError("absence not proved by right leaf")
 }
 
-func (proof *KeyRangeProof) String() string {
-	// TODO(cloudhead): Needs work.
-	return fmt.Sprintf("%#v", proof)
+// Verify that proof is valid.
+func (proof *RangeProof) Verify(root []byte) error {
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
+	}
+	err := proof.verify(root)
+	return err
 }
 
-// Returns a list of all paths, in order, with the proof's Left and Right
-// paths preprended and appended respectively, if they exist.
-func (proof *KeyRangeProof) paths() []*PathToKey {
-	paths := proof.PathToKeys[:]
-	if proof.Left != nil {
-		paths = append([]*PathToKey{proof.Left.Path}, paths...)
+func (proof *RangeProof) verify(root []byte) error {
+	rootHash := proof.rootHash
+	if rootHash == nil {
+		derivedHash, err := proof.computeRootHash()
+		if err != nil {
+			return err
+		}
+		rootHash = derivedHash
 	}
-	if proof.Right != nil {
-		paths = append(paths, proof.Right.Path)
+	if !bytes.Equal(rootHash, root) {
+		return cmn.ErrorWrap(ErrInvalidRoot, "root hash doesn't match")
 	}
-	return paths
+	proof.rootVerified = true
+	return nil
 }
 
-///////////////////////////////////////////////////////////////////////////////
-
-func (t *Tree) getRangeWithProof(keyStart, keyEnd []byte, limit int) (
-	keys, values [][]byte, rangeProof *KeyRangeProof, err error,
-) {
-	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
+// ComputeRootHash computes the root hash with leaves.
+// Returns nil if error or proof is nil.
+// Does not verify the root hash.
+func (proof *RangeProof) ComputeRootHash() []byte {
+	if proof == nil {
+		return nil
 	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
+	rootHash, _ := proof.computeRootHash()
+	return rootHash
+}
 
-	rangeProof = &KeyRangeProof{RootHash: t.root.hash}
-	rangeStart, rangeEnd := keyStart, keyEnd
-	ascending := bytes.Compare(keyStart, keyEnd) == -1
-	if !ascending {
-		rangeStart, rangeEnd = rangeEnd, rangeStart
+func (proof *RangeProof) computeRootHash() (rootHash []byte, err error) {
+	rootHash, treeEnd, err := proof._computeRootHash()
+	if err == nil {
+		proof.rootHash = rootHash // memoize
+		proof.treeEnd = treeEnd   // memoize
 	}
+	return rootHash, err
+}
 
-	versions := []int64{}
-	limited := t.IterateRangeInclusive(rangeStart, rangeEnd, ascending, func(k, v []byte, version int64) bool {
-		keys = append(keys, k)
-		values = append(values, v)
-		versions = append(versions, version)
-		return len(keys) == limit
-	})
-
-	// Construct the paths such that they are always in ascending order.
-	rangeProof.PathToKeys = make([]*PathToKey, len(keys))
-	rangeProof.Versions = make([]int64, len(keys))
-	for i, k := range keys {
-		path, _, _ := t.root.pathToKey(t, k)
-		if ascending {
-			rangeProof.PathToKeys[i] = path
-			rangeProof.Versions[i] = versions[i]
-		} else {
-			rangeProof.PathToKeys[len(keys)-i-1] = path
-			rangeProof.Versions[len(keys)-i-1] = versions[i]
-		}
+func (proof *RangeProof) _computeRootHash() (rootHash []byte, treeEnd bool, err error) {
+	if len(proof.Leaves) == 0 {
+		return nil, false, cmn.ErrorWrap(ErrInvalidProof, "no leaves")
+	}
+	if len(proof.InnerNodes)+1 != len(proof.Leaves) {
+		return nil, false, cmn.ErrorWrap(ErrInvalidProof, "InnerNodes vs Leaves length mismatch, leaves should be 1 more.")
 	}
 
-	//
-	// Figure out which of the left or right paths we need.
-	//
-	var needsLeft, needsRight bool
+	// Start from the left path and prove each leaf.
 
-	if len(keys) == 0 {
-		needsLeft, needsRight = true, true
-	} else {
-		first, last := 0, len(keys)-1
-		if !ascending {
-			first, last = last, first
-		}
+	// shared across recursive calls
+	var leaves = proof.Leaves
+	var innersq = proof.InnerNodes
+	var COMPUTEHASH func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error)
 
-		needsLeft = !bytes.Equal(keys[first], rangeStart)
-		needsRight = !bytes.Equal(keys[last], rangeEnd)
+	// rightmost: is the root a rightmost child of the tree?
+	// treeEnd: true iff the last leaf is the last item of the tree.
+	// Returns the (possibly intermediate, possibly root) hash.
+	COMPUTEHASH = func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error) {
 
-		// When limited, we can relax the right or left side, depending on
-		// the direction of the range.
-		if limited {
-			if ascending {
-				needsRight = false
-			} else {
-				needsLeft = false
-			}
-		}
-	}
+		// Pop next leaf.
+		nleaf, rleaves := leaves[0], leaves[1:]
+		leaves = rleaves
 
-	// So far, we've created proofs of the keys which are within the provided range.
-	// Next, we need to create a proof that we haven't omitted any keys to the left
-	// or right of that range. This is relevant in two scenarios:
-	//
-	// 1. There are no keys in the range. In this case, include a proof of the key
-	//    to the left and right of that empty range.
-	// 2. The start or end key do not match the start and end of the keys returned.
-	//    In this case, include proofs of the keys immediately outside of those returned.
-	//
-	if needsLeft {
-		// Find index of first key to the left, and include proof if it isn't the
-		// leftmost key.
-		if idx, _ := t.Get64(rangeStart); idx > 0 {
-			lkey, lval := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, lkey)
-			rangeProof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{lkey, lval, node.version},
-			}
-		}
-	}
+		// Compute hash.
+		hash = (pathWithLeaf{
+			Path: path,
+			Leaf: nleaf,
+		}).computeRootHash()
 
-	// Proof that the last key is the last value before keyEnd, or that we're limited.
-	// If len(keys) == limit, it doesn't matter that a key exists to the right of the
-	// last key, since we aren't interested in it.
-	if needsRight {
-		// Find index of first key to the right, and include proof if it isn't the
-		// rightmost key.
-		if idx, _ := t.Get64(rangeEnd); idx <= t.Size64()-1 {
-			rkey, rval := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, rkey)
-			rangeProof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{rkey, rval, node.version},
-			}
+		// If we don't have any leaves left, we're done.
+		if len(leaves) == 0 {
+			rightmost = rightmost && path.isRightmost()
+			return hash, rightmost, true, nil
 		}
-	}
-
-	return keys, values, rangeProof, nil
-}
 
-func (t *Tree) getFirstInRangeWithProof(keyStart, keyEnd []byte) (
-	key, value []byte, proof *KeyFirstInRangeProof, err error,
-) {
-	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-	proof = &KeyFirstInRangeProof{}
-	proof.RootHash = t.root.hash
-	proof.Version = t.root.version
-
-	// Get the first value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, true, func(k, v []byte, _ int64) bool {
-		key, value = k, v
-		return true
-	})
+		// Prove along path (until we run out of leaves).
+		for len(path) > 0 {
+
+			// Drop the leaf-most (last-most) inner nodes from path
+			// until we encounter one with a left hash.
+			// We assume that the left side is already verified.
+			// rpath: rest of path
+			// lpath: last path item
+			rpath, lpath := path[:len(path)-1], path[len(path)-1]
+			path = rpath
+			if len(lpath.Right) == 0 {
+				continue
+			}
 
-	if len(key) > 0 {
-		proof.PathToKey, _, _ = t.root.pathToKey(t, key)
-	}
+			// Pop next inners, a PathToLeaf (e.g. []proofInnerNode).
+			inners, rinnersq := innersq[0], innersq[1:]
+			innersq = rinnersq
 
-	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
+			// Recursively verify inners against remaining leaves.
+			derivedRoot, treeEnd, done, err := COMPUTEHASH(inners, rightmost && rpath.isRightmost())
+			if err != nil {
+				return nil, treeEnd, false, cmn.ErrorWrap(err, "recursive COMPUTEHASH call")
+			}
+			if !bytes.Equal(derivedRoot, lpath.Right) {
+				return nil, treeEnd, false, cmn.ErrorWrap(ErrInvalidRoot, "intermediate root hash %X doesn't match, got %X", lpath.Right, derivedRoot)
+			}
+			if done {
+				return hash, treeEnd, true, nil
 			}
 		}
+
+		// We're not done yet (leaves left over). No error, not done either.
+		// Technically if rightmost, we know there's an error "left over leaves
+		// -- malformed proof", but we return that at the top level, below.
+		return hash, false, false, nil
 	}
 
-	if !bytes.Equal(key, keyEnd) {
-		if idx, val := t.Get64(keyEnd); idx <= t.Size64()-1 && val == nil {
-			k, v := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
-			}
-		}
+	// Verify!
+	path := proof.LeftPath
+	rootHash, treeEnd, done, err := COMPUTEHASH(path, true)
+	if err != nil {
+		return nil, treeEnd, cmn.ErrorWrap(err, "root COMPUTEHASH call")
+	} else if !done {
+		return nil, treeEnd, cmn.ErrorWrap(ErrInvalidProof, "left over leaves -- malformed proof")
 	}
 
-	return key, value, proof, nil
+	// Ok!
+	return rootHash, treeEnd, nil
 }
 
-func (t *Tree) getLastInRangeWithProof(keyStart, keyEnd []byte) (
-	key, value []byte, proof *KeyLastInRangeProof, err error,
-) {
+///////////////////////////////////////////////////////////////////////////////
+
+// keyStart is inclusive and keyEnd is exclusive.
+// Returns the range-proof and the included keys and values.
+// If keyStart or keyEnd don't exist, the leaf before keyStart
+// or after keyEnd will also be included, but not be included in values.
+// If keyEnd-1 exists, no later leaves will be included.
+// If keyStart >= keyEnd and both not nil, panics.
+// Limit is never exceeded.
+func (t *Tree) getRangeProof(keyStart, keyEnd []byte, limit int) (*RangeProof, [][]byte, [][]byte, error) {
+	if keyStart != nil && keyEnd != nil && bytes.Compare(keyStart, keyEnd) >= 0 {
+		panic("if keyStart and keyEnd are present, need keyStart < keyEnd.")
+	}
+	if limit < 0 {
+		panic("limit must be greater or equal to 0 -- 0 means no limit")
+	}
 	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
+		return nil, nil, nil, cmn.ErrorWrap(ErrNilRoot, "")
 	}
 	t.root.hashWithCount() // Ensure that all hashes are calculated.
 
-	proof = &KeyLastInRangeProof{}
-	proof.RootHash = t.root.hash
-	proof.Version = t.root.version
+	// Get the first key/value pair proof, which provides us with the left key.
+	path, left, err := t.root.PathToLeaf(t, keyStart)
+	if err != nil {
+		// Key doesn't exist, but instead we got the prev leaf (or the
+		// first or last leaf), which provides proof of absence).
+		// err = nil isn't necessary as we do not use it in the returns below
+	}
+	startOK := keyStart == nil || bytes.Compare(keyStart, left.key) <= 0
+	endOK := keyEnd == nil || bytes.Compare(left.key, keyEnd) < 0
+	// If left.key is in range, add it to key/values.
+	var keys, values [][]byte
+	if startOK && endOK {
+		keys = append(keys, left.key) // == keyStart
+		values = append(values, left.value)
+	}
+	// Either way, add to proof leaves.
+	var leaves = []proofLeafNode{{
+		Key:       left.key,
+		ValueHash: tmhash.Sum(left.value),
+		Version:   left.version,
+	}}
+
+	// 1: Special case if limit is 1.
+	// 2: Special case if keyEnd is left.key+1.
+	_stop := false
+	if limit == 1 {
+		_stop = true // case 1
+	} else if keyEnd != nil && bytes.Compare(cpIncr(left.key), keyEnd) >= 0 {
+		_stop = true // case 2
+	}
+	if _stop {
+		return &RangeProof{
+			LeftPath: path,
+			Leaves:   leaves,
+		}, keys, values, nil
+	}
+
+	// Get the key after left.key to iterate from.
+	afterLeft := cpIncr(left.key)
+
+	// Traverse starting from afterLeft, until keyEnd or the next leaf
+	// after keyEnd.
+	// nolint
+	var innersq = []PathToLeaf(nil)
+	var inners = PathToLeaf(nil)
+	var lastDepth uint8 = 0
+	var leafCount = 1 // from left above.
+	var pathCount = 0
+	// var keys, values [][]byte defined as function outs.
+
+	t.root.traverseInRange(t, afterLeft, nil, true, false, 0,
+		func(node *Node, depth uint8) (stop bool) {
+
+			// Track when we diverge from path, or when we've exhausted path,
+			// since the first innersq shouldn't include it.
+			if pathCount != -1 {
+				if len(path) <= pathCount {
+					// We're done with path counting.
+					pathCount = -1
+				} else {
+					pn := path[pathCount]
+					if pn.Height != node.height ||
+						pn.Left != nil && !bytes.Equal(pn.Left, node.leftHash) ||
+						pn.Right != nil && !bytes.Equal(pn.Right, node.rightHash) {
+
+						// We've diverged, so start appending to inners.
+						pathCount--
+					} else {
+						pathCount++
+					}
+				}
+			}
 
-	// Get the last value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, false, func(k, v []byte, _ int64) bool {
-		key, value = k, v
-		return true
-	})
+			if node.height == 0 {
+				// Leaf node.
+				// Append inners to innersq.
+				innersq = append(innersq, inners)
+				inners = PathToLeaf(nil)
+				// Append leaf to leaves.
+				leaves = append(leaves, proofLeafNode{
+					Key:       node.key,
+					ValueHash: tmhash.Sum(node.value),
+					Version:   node.version,
+				})
+				leafCount++
+				// Maybe terminate because we found enough leaves.
+				if limit > 0 && limit <= leafCount {
+					return true
+				}
+				// Terminate if we've found keyEnd or after.
+				if keyEnd != nil && bytes.Compare(node.key, keyEnd) >= 0 {
+					return true
+				}
+				// Value is in range, append to keys and values.
+				keys = append(keys, node.key)
+				values = append(values, node.value)
+				// Terminate if we've found keyEnd-1 or after.
+				// We don't want to fetch any leaves for it.
+				if keyEnd != nil && bytes.Compare(cpIncr(node.key), keyEnd) >= 0 {
+					return true
+				}
+			} else {
+				// Inner node.
+				if pathCount >= 0 {
+					// Skip redundant path items.
+				} else {
+					inners = append(inners, proofInnerNode{
+						Height:  node.height,
+						Size:    node.size,
+						Version: node.version,
+						Left:    nil, // left is nil for range proof inners
+						Right:   node.rightHash,
+					})
+				}
+			}
+			lastDepth = depth
+			return false
+		},
+	)
+
+	return &RangeProof{
+		LeftPath:   path,
+		InnerNodes: innersq,
+		Leaves:     leaves,
+	}, keys, values, nil
+}
 
-	if len(key) > 0 {
-		proof.PathToKey, _, _ = t.root.pathToKey(t, key)
-	}
+//----------------------------------------
 
-	if !bytes.Equal(key, keyEnd) {
-		if idx, _ := t.Get64(keyEnd); idx <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
+// GetWithProof gets the value under the key if it exists, or returns nil.
+// A proof of existence or absence is returned alongside the value.
+func (t *Tree) GetWithProof(key []byte) (value []byte, proof *RangeProof, err error) {
+	proof, _, values, err := t.getRangeProof(key, cpIncr(key), 2)
+	if err == nil {
+		if len(values) > 0 {
+			if !bytes.Equal(proof.Leaves[0].Key, key) {
+				return nil, proof, nil
 			}
+			return values[0], proof, nil
 		}
+		return nil, proof, nil
 	}
+	return nil, nil, cmn.ErrorWrap(err, "could not construct any proof")
+}
 
-	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
-			}
-		}
-	}
+// GetRangeWithProof gets key/value pairs within the specified range and limit.
+func (t *Tree) GetRangeWithProof(startKey []byte, endKey []byte, limit int) (keys, values [][]byte, proof *RangeProof, err error) {
+	proof, keys, values, err = t.getRangeProof(startKey, endKey, limit)
+	return
+}
 
-	return key, value, proof, nil
+// GetVersionedWithProof gets the value under the key at the specified version
+// if it exists, or returns nil.
+func (tree *VersionedTree) GetVersionedWithProof(key []byte, version int64) ([]byte, *RangeProof, error) {
+	if t, ok := tree.versions[version]; ok {
+		return t.GetWithProof(key)
+	}
+	return nil, nil, cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 }
 
-///////////////////////////////////////////////////////////////////////////////
+// GetVersionedRangeWithProof gets key/value pairs within the specified range
+// and limit.
+func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) (
+	keys, values [][]byte, proof *RangeProof, err error) {
 
-// reverseKeys reverses the keys and values and swaps start and end key
-// if startKey > endKey.
-func reverseKeys(startKey, endKey []byte, keys, values [][]byte) (
-	[]byte, []byte, [][]byte, [][]byte,
-) {
-	if bytes.Compare(startKey, endKey) == 1 {
-		startKey, endKey = endKey, startKey
-
-		ks := make([][]byte, len(keys))
-		vs := make([][]byte, len(keys))
-		for i, _ := range keys {
-			ks[len(ks)-1-i] = keys[i]
-			vs[len(vs)-1-i] = values[i]
-		}
-		keys, values = ks, vs
+	if t, ok := tree.versions[version]; ok {
+		return t.GetRangeWithProof(startKey, endKey, limit)
 	}
-	return startKey, endKey, keys, values
+	return nil, nil, nil, cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 }
diff --git a/vendor/github.com/tendermint/iavl/serialize.go b/vendor/github.com/tendermint/iavl/serialize.go
deleted file mode 100644
index 3b856478388abbc380e88a1660a04a4187f98945..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/serialize.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package iavl
-
-// NodeData groups together a key, value and depth.
-type NodeData struct {
-	Key   []byte
-	Value []byte
-	Depth uint8
-}
-
-// SerializeFunc is any implementation that can serialize
-// an iavl Node and its descendants.
-type SerializeFunc func(*Tree, *Node) []NodeData
-
-// RestoreFunc is an implementation that can restore an iavl tree from
-// NodeData.
-type RestoreFunc func(*Tree, []NodeData)
-
-// Restore will take an (empty) tree restore it
-// from the keys returned from a SerializeFunc.
-func Restore(empty *Tree, kvs []NodeData) {
-	for _, kv := range kvs {
-		empty.Set(kv.Key, kv.Value)
-	}
-	empty.Hash()
-}
-
-func RestoreUsingDepth(empty *Tree, kvs []NodeData) {
-	// Create an array of arrays of nodes. We're going to store each depth in
-	// here, forming a kind of pyramid.
-	depths := [][]*Node{}
-
-	// Go through all the leaf nodes, grouping them in pairs and creating their
-	// parents recursively.
-	for _, kv := range kvs {
-		var (
-			// Left and right nodes.
-			l     *Node = nil
-			r     *Node = NewNode(kv.Key, kv.Value, 1)
-			depth uint8 = kv.Depth
-		)
-		// Create depths as needed.
-		for len(depths) < int(depth)+1 {
-			depths = append(depths, []*Node{})
-		}
-		depths[depth] = append(depths[depth], r) // Add the leaf node to this depth.
-
-		// If the nodes at this level are uneven after adding a node to it, it
-		// means we have to wait for another node to be appended before we have
-		// a pair. If we do have a pair, go up the tree until we don't.
-		for d := depth; len(depths[d])%2 == 0; d-- {
-			nodes := depths[d] // List of nodes at this depth.
-
-			l = nodes[len(nodes)-1-1]
-			r = nodes[len(nodes)-1]
-
-			depths[d-1] = append(depths[d-1], &Node{
-				key:       leftmost(r).Key,
-				height:    maxInt8(l.height, r.height) + 1,
-				size:      l.size + r.size,
-				leftNode:  l,
-				rightNode: r,
-				version:   1,
-			})
-		}
-	}
-	empty.root = depths[0][0]
-	empty.Hash()
-}
-
-// InOrderSerialize returns all key-values in the
-// key order (as stored). May be nice to read, but
-// when recovering, it will create a different.
-func InOrderSerialize(t *Tree, root *Node) []NodeData {
-	res := make([]NodeData, 0, root.size)
-	root.traverseWithDepth(t, true, func(node *Node, depth uint8) bool {
-		if node.height == 0 {
-			kv := NodeData{Key: node.key, Value: node.value, Depth: depth}
-			res = append(res, kv)
-		}
-		return false
-	})
-	return res
-}
-
-// StableSerializeBFS serializes the tree in a breadth-first manner.
-func StableSerializeBFS(t *Tree, root *Node) []NodeData {
-	if root == nil {
-		return nil
-	}
-
-	size := root.size
-	visited := map[string][]byte{}
-	keys := make([][]byte, 0, size)
-	numKeys := -1
-
-	// Breadth-first search. At every depth, add keys in search order. Keep
-	// going as long as we find keys at that depth. When we reach a leaf, set
-	// its value in the visited map.
-	// Since we have an AVL+ tree, the inner nodes contain only keys and not
-	// values, while the leaves contain both. Note also that there are N-1 inner
-	// nodes for N keys, so one of the leaf keys is only set once we reach the leaves
-	// of the tree.
-	for depth := uint(0); len(keys) > numKeys; depth++ {
-		numKeys = len(keys)
-		root.traverseDepth(t, depth, func(node *Node) {
-			if _, ok := visited[string(node.key)]; !ok {
-				keys = append(keys, node.key)
-				visited[string(node.key)] = nil
-			}
-			if node.isLeaf() {
-				visited[string(node.key)] = node.value
-			}
-		})
-	}
-
-	nds := make([]NodeData, size)
-	for i, k := range keys {
-		nds[i] = NodeData{k, visited[string(k)], 0}
-	}
-	return nds
-}
-
-// StableSerializeFrey exports the key value pairs of the tree
-// in an order, such that when Restored from those keys, the
-// new tree would have the same structure (and thus same
-// shape) as the original tree.
-//
-// the algorithm is basically this: take the leftmost node
-// of the left half and the leftmost node of the righthalf.
-// Then go down a level...
-// each time adding leftmost node of the right side.
-// (bredth first search)
-//
-// Imagine 8 nodes in a balanced tree, split in half each time
-// 1
-// 1, 5
-// 1, 5, 3, 7
-// 1, 5, 3, 7, 2, 4, 6, 8
-func StableSerializeFrey(t *Tree, top *Node) []NodeData {
-	if top == nil {
-		return nil
-	}
-	size := top.size
-
-	// store all pending nodes for depth-first search
-	queue := make([]*Node, 0, size)
-	queue = append(queue, top)
-
-	// to store all results - started with
-	res := make([]NodeData, 0, size)
-	left := leftmost(top)
-	if left != nil {
-		res = append(res, *left)
-	}
-
-	var n *Node
-	for len(queue) > 0 {
-		// pop
-		n, queue = queue[0], queue[1:]
-
-		// l := n.getLeftNode(tree)
-		l := n.leftNode
-		if isInner(l) {
-			queue = append(queue, l)
-		}
-
-		// r := n.getRightNode(tree)
-		r := n.rightNode
-		if isInner(r) {
-			queue = append(queue, r)
-			left = leftmost(r)
-			if left != nil {
-				res = append(res, *left)
-			}
-		} else if isLeaf(r) {
-			kv := NodeData{Key: r.key, Value: r.value}
-			res = append(res, kv)
-		}
-	}
-
-	return res
-}
-
-func isInner(n *Node) bool {
-	return n != nil && !n.isLeaf()
-}
-
-func isLeaf(n *Node) bool {
-	return n != nil && n.isLeaf()
-}
-
-func leftmost(node *Node) *NodeData {
-	for isInner(node) {
-		node = node.leftNode
-	}
-	if node == nil {
-		return nil
-	}
-	return &NodeData{Key: node.key, Value: node.value}
-}
diff --git a/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go b/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go
deleted file mode 100644
index f62ff31351f846a5a7cd357e1be6e6ef14646c94..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Package sha256truncated provides a sha256 hash.Hash whose output is truncated to 20 bytes (160 bits).
-//
-// This is the default hashing algorithm used by IAVL+ trees.
-//
-//   s256 := sha256.New() // crypto/sha256
-//   s256Truncated := New() // this package
-//
-//   // Use like any other hash.Hash ...
-//   // Contract:
-//   s256Trunc.Sum(nil) == s256.Sum(nil)[:20]
-package sha256truncated
-
-import (
-	"crypto/sha256"
-	"hash"
-)
-
-const Size = 20
-
-// New returns a new hash.Hash computing the truncated to the first 20 bytes SHA256 checksum.
-func New() hash.Hash {
-	return &digest{sha256.New()}
-}
-
-func (d *digest) Sum(in []byte) []byte {
-	return d.Hash.Sum(in)[:Size]
-}
-
-func (d *digest) Reset() {
-	d.Hash.Reset()
-}
-
-func (d *digest) Size() int {
-	return Size
-}
-
-func (d *digest) BlockSize() int {
-	return d.Hash.BlockSize()
-}
-
-// digest is just a wrapper around sha256
-type digest struct {
-	hash.Hash
-}
diff --git a/vendor/github.com/tendermint/iavl/tree.go b/vendor/github.com/tendermint/iavl/tree.go
index 85cf6e21e3bed5f409feeb881ef29087f3b5f81c..c92901532c5c9a7630371c8989ef04d932f0c5d4 100644
--- a/vendor/github.com/tendermint/iavl/tree.go
+++ b/vendor/github.com/tendermint/iavl/tree.go
@@ -4,9 +4,7 @@ import (
 	"fmt"
 	"strings"
 
-	dbm "github.com/tendermint/tmlibs/db"
-
-	"github.com/pkg/errors"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 // Tree is a container for an immutable AVL+ Tree. Changes are performed by
@@ -143,39 +141,6 @@ func (t *Tree) GetByIndex64(index int64) (key []byte, value []byte) {
 	return t.root.getByIndex(t, index)
 }
 
-// GetWithProof gets the value under the key if it exists, or returns nil.
-// A proof of existence or absence is returned alongside the value.
-func (t *Tree) GetWithProof(key []byte) ([]byte, KeyProof, error) {
-	value, eproof, err := t.getWithProof(key)
-	if err == nil {
-		return value, eproof, nil
-	}
-
-	aproof, err := t.keyAbsentProof(key)
-	if err == nil {
-		return nil, aproof, nil
-	}
-	return nil, nil, errors.Wrap(err, "could not construct any proof")
-}
-
-// GetRangeWithProof gets key/value pairs within the specified range and limit. To specify a descending
-// range, swap the start and end keys.
-//
-// Returns a list of keys, a list of values and a proof.
-func (t *Tree) GetRangeWithProof(startKey []byte, endKey []byte, limit int) ([][]byte, [][]byte, *KeyRangeProof, error) {
-	return t.getRangeWithProof(startKey, endKey, limit)
-}
-
-// GetFirstInRangeWithProof gets the first key/value pair in the specified range, with a proof.
-func (t *Tree) GetFirstInRangeWithProof(startKey, endKey []byte) ([]byte, []byte, *KeyFirstInRangeProof, error) {
-	return t.getFirstInRangeWithProof(startKey, endKey)
-}
-
-// GetLastInRangeWithProof gets the last key/value pair in the specified range, with a proof.
-func (t *Tree) GetLastInRangeWithProof(startKey, endKey []byte) ([]byte, []byte, *KeyLastInRangeProof, error) {
-	return t.getLastInRangeWithProof(startKey, endKey)
-}
-
 // Remove tries to remove a key from the tree and if removed, returns its
 // value, and 'true'.
 func (t *Tree) Remove(key []byte) ([]byte, bool) {
@@ -210,9 +175,8 @@ func (t *Tree) Iterate(fn func(key []byte, value []byte) bool) (stopped bool) {
 	return t.root.traverse(t, true, func(node *Node) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
@@ -225,9 +189,8 @@ func (t *Tree) IterateRange(start, end []byte, ascending bool, fn func(key []byt
 	return t.root.traverseInRange(t, start, end, ascending, false, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
@@ -240,19 +203,18 @@ func (t *Tree) IterateRangeInclusive(start, end []byte, ascending bool, fn func(
 	return t.root.traverseInRange(t, start, end, ascending, true, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value, node.version)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
 // Clone creates a clone of the tree.
 // Used internally by VersionedTree.
-func (tree *Tree) clone() *Tree {
+func (t *Tree) clone() *Tree {
 	return &Tree{
-		root:    tree.root,
-		ndb:     tree.ndb,
-		version: tree.version,
+		root:    t.root,
+		ndb:     t.ndb,
+		version: t.version,
 	}
 }
 
diff --git a/vendor/github.com/tendermint/iavl/tree_dotgraph.go b/vendor/github.com/tendermint/iavl/tree_dotgraph.go
index 57fc21678ff545df226fe5f50fe2f3670c735db6..14294851ac1083e3f416ff95847bb399d69264e6 100644
--- a/vendor/github.com/tendermint/iavl/tree_dotgraph.go
+++ b/vendor/github.com/tendermint/iavl/tree_dotgraph.go
@@ -41,7 +41,7 @@ var defaultGraphNodeAttrs = map[string]string{
 	"shape": "circle",
 }
 
-func WriteDOTGraph(w io.Writer, tree *Tree, paths []*PathToKey) {
+func WriteDOTGraph(w io.Writer, tree *Tree, paths []PathToLeaf) {
 	ctx := &graphContext{}
 
 	tree.root.hashWithCount()
@@ -69,7 +69,7 @@ func WriteDOTGraph(w io.Writer, tree *Tree, paths []*PathToKey) {
 		}
 
 		for _, path := range paths {
-			for _, n := range path.InnerNodes {
+			for _, n := range path {
 				if bytes.Equal(n.Left, node.hash) || bytes.Equal(n.Right, node.hash) {
 					graphNode.Attrs["peripheries"] = "2"
 					graphNode.Attrs["style"] = "filled"
diff --git a/vendor/github.com/tendermint/iavl/util.go b/vendor/github.com/tendermint/iavl/util.go
index b28b4877fa32e5bbcfdb342f3a4e201a2bc4639a..96f754189518f587e854e2f93e072fb568d88091 100644
--- a/vendor/github.com/tendermint/iavl/util.go
+++ b/vendor/github.com/tendermint/iavl/util.go
@@ -1,27 +1,45 @@
 package iavl
 
 import (
+	"bytes"
 	"fmt"
+	"sort"
 )
 
-func printNode(node *Node, indent int) {
+// PrintTree prints the whole tree in an indented form.
+func PrintTree(tree *Tree) {
+	ndb, root := tree.ndb, tree.root
+	printNode(ndb, root, 0)
+}
+
+func printNode(ndb *nodeDB, node *Node, indent int) {
 	indentPrefix := ""
 	for i := 0; i < indent; i++ {
 		indentPrefix += "    "
 	}
 
+	if node == nil {
+		fmt.Printf("%s<nil>\n", indentPrefix)
+		return
+	}
 	if node.rightNode != nil {
-		printNode(node.rightNode, indent+1)
+		printNode(ndb, node.rightNode, indent+1)
 	} else if node.rightHash != nil {
-		fmt.Printf("%s    %X\n", indentPrefix, node.rightHash)
+		rightNode := ndb.GetNode(node.rightHash)
+		printNode(ndb, rightNode, indent+1)
 	}
 
-	fmt.Printf("%s%v:%v\n", indentPrefix, node.key, node.height)
+	hash := node._hash()
+	fmt.Printf("%sh:%X\n", indentPrefix, hash)
+	if node.isLeaf() {
+		fmt.Printf("%s%X:%X (%v)\n", indentPrefix, node.key, node.value, node.height)
+	}
 
 	if node.leftNode != nil {
-		printNode(node.leftNode, indent+1)
+		printNode(ndb, node.leftNode, indent+1)
 	} else if node.leftHash != nil {
-		fmt.Printf("%s    %X\n", indentPrefix, node.leftHash)
+		leftNode := ndb.GetNode(node.leftHash)
+		printNode(ndb, leftNode, indent+1)
 	}
 
 }
@@ -32,3 +50,57 @@ func maxInt8(a, b int8) int8 {
 	}
 	return b
 }
+
+func cp(bz []byte) (ret []byte) {
+	ret = make([]byte, len(bz))
+	copy(ret, bz)
+	return ret
+}
+
+// Returns a slice of the same length (big endian)
+// except incremented by one.
+// Appends 0x00 if bz is all 0xFF.
+// CONTRACT: len(bz) > 0
+func cpIncr(bz []byte) (ret []byte) {
+	ret = cp(bz)
+	for i := len(bz) - 1; i >= 0; i-- {
+		if ret[i] < byte(0xFF) {
+			ret[i]++
+			return
+		}
+		ret[i] = byte(0x00)
+		if i == 0 {
+			return append(ret, 0x00)
+			// Overflow
+			return nil
+		}
+	}
+	return []byte{0x00}
+}
+
+type byteslices [][]byte
+
+func (bz byteslices) Len() int {
+	return len(bz)
+}
+
+func (bz byteslices) Less(i, j int) bool {
+	switch bytes.Compare(bz[i], bz[j]) {
+	case -1:
+		return true
+	case 0, 1:
+		return false
+	default:
+		panic("should not happen")
+	}
+}
+
+func (bz byteslices) Swap(i, j int) {
+	bz[j], bz[i] = bz[i], bz[j]
+}
+
+func sortByteSlices(src [][]byte) [][]byte {
+	bzz := byteslices(src)
+	sort.Sort(bzz)
+	return bzz
+}
diff --git a/vendor/github.com/tendermint/iavl/version.go b/vendor/github.com/tendermint/iavl/version.go
index 679f0cc3aced629dae35b2c2db88e261dd1adb0c..9efd1114579c05336323ebbad17751f39f57479e 100644
--- a/vendor/github.com/tendermint/iavl/version.go
+++ b/vendor/github.com/tendermint/iavl/version.go
@@ -1,3 +1,4 @@
 package iavl
 
-const Version = "0.8.0-dev"
+// Version of iavl.
+const Version = "0.9.2"
diff --git a/vendor/github.com/tendermint/iavl/versioned_tree.go b/vendor/github.com/tendermint/iavl/versioned_tree.go
index 9fef5a15476a89cec4a310012567900921aaa7a6..7d8108ea85caf2cc2a4d8bd58938ed01f2e67c5d 100644
--- a/vendor/github.com/tendermint/iavl/versioned_tree.go
+++ b/vendor/github.com/tendermint/iavl/versioned_tree.go
@@ -4,10 +4,11 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/pkg/errors"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
+// ErrVersionDoesNotExist is returned if a requested version does not exist.
 var ErrVersionDoesNotExist = fmt.Errorf("version does not exist")
 
 // VersionedTree is a persistent tree which keeps track of versions.
@@ -160,7 +161,7 @@ func (tree *VersionedTree) SaveVersion() ([]byte, int64, error) {
 			tree.orphaningTree = newOrphaningTree(tree.versions[version].clone())
 			return existingHash, version, nil
 		}
-		return nil, version, errors.Errorf("version %d was already saved to different hash %X (existing hash %X)",
+		return nil, version, fmt.Errorf("version %d was already saved to different hash %X (existing hash %X)",
 			version, newHash, existingHash)
 	}
 
@@ -178,13 +179,13 @@ func (tree *VersionedTree) SaveVersion() ([]byte, int64, error) {
 // longer be accessed.
 func (tree *VersionedTree) DeleteVersion(version int64) error {
 	if version == 0 {
-		return errors.New("version must be greater than 0")
+		return cmn.NewError("version must be greater than 0")
 	}
 	if version == tree.version {
-		return errors.Errorf("cannot delete latest saved version (%d)", version)
+		return cmn.NewError("cannot delete latest saved version (%d)", version)
 	}
 	if _, ok := tree.versions[version]; !ok {
-		return errors.WithStack(ErrVersionDoesNotExist)
+		return cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 	}
 
 	tree.ndb.DeleteVersion(version)
@@ -194,42 +195,3 @@ func (tree *VersionedTree) DeleteVersion(version int64) error {
 
 	return nil
 }
-
-// GetVersionedWithProof gets the value under the key at the specified version
-// if it exists, or returns nil.  A proof of existence or absence is returned
-// alongside the value.
-func (tree *VersionedTree) GetVersionedWithProof(key []byte, version int64) ([]byte, KeyProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetWithProof(key)
-	}
-	return nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedRangeWithProof gets key/value pairs within the specified range
-// and limit. To specify a descending range, swap the start and end keys.
-//
-// Returns a list of keys, a list of values and a proof.
-func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) ([][]byte, [][]byte, *KeyRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetRangeWithProof(startKey, endKey, limit)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedFirstInRangeWithProof gets the first key/value pair in the
-// specified range, with a proof.
-func (tree *VersionedTree) GetVersionedFirstInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyFirstInRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetFirstInRangeWithProof(startKey, endKey)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedLastInRangeWithProof gets the last key/value pair in the
-// specified range, with a proof.
-func (tree *VersionedTree) GetVersionedLastInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyLastInRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetLastInRangeWithProof(startKey, endKey)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
diff --git a/vendor/github.com/tendermint/iavl/wire.go b/vendor/github.com/tendermint/iavl/wire.go
new file mode 100644
index 0000000000000000000000000000000000000000..8549ae4ab3ec9dd18f0bea9d820a77cc12085f20
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/wire.go
@@ -0,0 +1,17 @@
+package iavl
+
+import (
+	"github.com/tendermint/go-amino"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	// NOTE: It's important that there be no conflicts here,
+	// as that would change the canonical representations.
+	RegisterWire(cdc)
+}
+
+func RegisterWire(cdc *amino.Codec) {
+	// TODO
+}
diff --git a/vendor/github.com/tendermint/abci/client/client.go b/vendor/github.com/tendermint/tendermint/abci/client/client.go
similarity index 97%
rename from vendor/github.com/tendermint/abci/client/client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/client.go
index ad0e5a7abf55ad504ad47d5f43d38b2f9833b44f..558588107e5ac324eb1e251a976d56b4aef1d820 100644
--- a/vendor/github.com/tendermint/abci/client/client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/client.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/abci/client/grpc_client.go b/vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
similarity index 98%
rename from vendor/github.com/tendermint/abci/client/grpc_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
index 0f405a9c411faf1b0e064190e3f8207bced57c79..502ee0fcd8d54b81158ad7e39c09ac5e0b2e7276 100644
--- a/vendor/github.com/tendermint/abci/client/grpc_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
@@ -9,8 +9,8 @@ import (
 	context "golang.org/x/net/context"
 	grpc "google.golang.org/grpc"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var _ Client = (*grpcClient)(nil)
diff --git a/vendor/github.com/tendermint/abci/client/local_client.go b/vendor/github.com/tendermint/tendermint/abci/client/local_client.go
similarity index 98%
rename from vendor/github.com/tendermint/abci/client/local_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/local_client.go
index 64bf5fe08da5ac9431d1d8db9d68fe068934637c..3d1f8d8e4162722e45d24590f6dc9e17dac29d35 100644
--- a/vendor/github.com/tendermint/abci/client/local_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/local_client.go
@@ -3,8 +3,8 @@ package abcicli
 import (
 	"sync"
 
-	types "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	types "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var _ Client = (*localClient)(nil)
diff --git a/vendor/github.com/tendermint/abci/client/socket_client.go b/vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
similarity index 96%
rename from vendor/github.com/tendermint/abci/client/socket_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
index 5c010168542791f23f3d119a46d3b1cf6b6317e9..affea1a9ec893272198e976cb6adb3824ae1b84d 100644
--- a/vendor/github.com/tendermint/abci/client/socket_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
@@ -10,8 +10,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const reqQueueSize = 256 // TODO make configurable
@@ -357,6 +357,13 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
 }
 
 func (cli *socketClient) flushQueue() {
+	// mark all in-flight messages as resolved (they will get cli.Error())
+	for req := cli.reqSent.Front(); req != nil; req = req.Next() {
+		reqres := req.Value.(*ReqRes)
+		reqres.Done()
+	}
+
+	// mark all queued messages as resolved
 LOOP:
 	for {
 		select {
diff --git a/vendor/github.com/tendermint/abci/example/code/code.go b/vendor/github.com/tendermint/tendermint/abci/example/code/code.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/example/code/code.go
rename to vendor/github.com/tendermint/tendermint/abci/example/code/code.go
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/README.md b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/README.md
similarity index 100%
rename from vendor/github.com/tendermint/abci/example/kvstore/README.md
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/README.md
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/helpers.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
similarity index 90%
rename from vendor/github.com/tendermint/abci/example/kvstore/helpers.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
index 63bc31a634d9483e53997309060c221868380277..0e69fab9f6cf47fd5bb7b4f98d0cb9918ca0e62b 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/helpers.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
@@ -1,8 +1,8 @@
 package kvstore
 
 import (
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // RandVal creates one random validator, with a key derived
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
similarity index 93%
rename from vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
index 4ccbc56b0d2d1b6a0d8965c73101bc1df9018101..0f72b44eafa106b329f91ed290044f0b3f8896dc 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
@@ -6,10 +6,10 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/tendermint/abci/example/code"
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
similarity index 95%
rename from vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
index 02f7ce74a2b14a480b44a7ede1d92622eed1cf29..12ccbab782be0094fa6bb60526539a54d5c1a4d3 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
@@ -7,11 +7,11 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/tendermint/abci/example/code"
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/abci/types/application.go b/vendor/github.com/tendermint/tendermint/abci/types/application.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/application.go
rename to vendor/github.com/tendermint/tendermint/abci/types/application.go
diff --git a/vendor/github.com/tendermint/abci/types/messages.go b/vendor/github.com/tendermint/tendermint/abci/types/messages.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/messages.go
rename to vendor/github.com/tendermint/tendermint/abci/types/messages.go
diff --git a/vendor/github.com/tendermint/abci/types/pubkey.go b/vendor/github.com/tendermint/tendermint/abci/types/pubkey.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/pubkey.go
rename to vendor/github.com/tendermint/tendermint/abci/types/pubkey.go
diff --git a/vendor/github.com/tendermint/abci/types/result.go b/vendor/github.com/tendermint/tendermint/abci/types/result.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/result.go
rename to vendor/github.com/tendermint/tendermint/abci/types/result.go
diff --git a/vendor/github.com/tendermint/abci/types/types.pb.go b/vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
similarity index 99%
rename from vendor/github.com/tendermint/abci/types/types.pb.go
rename to vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
index a6b806fe6ac9d0e118c759c16673a8fcf36bd0d9..8135db50faf8bf3d093e63c5b29be9dc9ce862e1 100644
--- a/vendor/github.com/tendermint/abci/types/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
@@ -50,7 +50,7 @@ import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
-import common "github.com/tendermint/tmlibs/common"
+import common "github.com/tendermint/tendermint/libs/common"
 
 import context "golang.org/x/net/context"
 import grpc "google.golang.org/grpc"
diff --git a/vendor/github.com/tendermint/abci/types/types.proto b/vendor/github.com/tendermint/tendermint/abci/types/types.proto
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/types.proto
rename to vendor/github.com/tendermint/tendermint/abci/types/types.proto
diff --git a/vendor/github.com/tendermint/abci/types/util.go b/vendor/github.com/tendermint/tendermint/abci/types/util.go
similarity index 96%
rename from vendor/github.com/tendermint/abci/types/util.go
rename to vendor/github.com/tendermint/tendermint/abci/types/util.go
index 0924ab5ff3465b393202707288d7b101bbd848af..458024c581bdef48e824036e68d63c65686468a1 100644
--- a/vendor/github.com/tendermint/abci/types/util.go
+++ b/vendor/github.com/tendermint/tendermint/abci/types/util.go
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"sort"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //------------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/pool.go b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
index 8b964e81ad4a98961042ab50bef23e2e51dd3502..e379d846a79508680e6665a56d9fc22980f65385 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/pool.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
@@ -8,9 +8,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	flow "github.com/tendermint/tmlibs/flowrate"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
index 33dfdd288eb74d77faa685463ab0742cf300d31b..449a42ff09e6e17ab0a191d59373be6ec6cf0aff 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
@@ -5,12 +5,13 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/tendermint/go-amino"
+	amino "github.com/tendermint/go-amino"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const (
@@ -174,7 +175,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
 
 // Receive implements Reactor by handling 4 types of messages (look below).
 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		bcR.Switch.StopPeerForError(src, err)
@@ -342,17 +343,11 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
 }
 
-// DecodeMessage decodes BlockchainMessage.
-// TODO: ensure that bz is completely read.
-func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
-	if err != nil {
-		err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
-	}
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/store.go b/vendor/github.com/tendermint/tendermint/blockchain/store.go
index e7608b2ccae6cd21e9d7111fce7d9df74c43e0c0..f02d4facbcfca728bce2333ef6a92c5ae8b3b69a 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/store.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/store.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/wire.go b/vendor/github.com/tendermint/tendermint/blockchain/wire.go
index 55b4e60ae38f9c4d8a8b83ec0ccad1b21e6bf77d..70b50565d4cf134b6d809c7618b1c8e47803af6e 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/wire.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/wire.go
@@ -2,7 +2,7 @@ package blockchain
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/config/config.go b/vendor/github.com/tendermint/tendermint/config/config.go
index 5ba568f2634f09df1603ad29a008e9c2aa269c8f..2df8eb8e80556872faf59c123064485837baa048 100644
--- a/vendor/github.com/tendermint/tendermint/config/config.go
+++ b/vendor/github.com/tendermint/tendermint/config/config.go
@@ -45,34 +45,37 @@ type Config struct {
 	BaseConfig `mapstructure:",squash"`
 
 	// Options for services
-	RPC       *RPCConfig       `mapstructure:"rpc"`
-	P2P       *P2PConfig       `mapstructure:"p2p"`
-	Mempool   *MempoolConfig   `mapstructure:"mempool"`
-	Consensus *ConsensusConfig `mapstructure:"consensus"`
-	TxIndex   *TxIndexConfig   `mapstructure:"tx_index"`
+	RPC             *RPCConfig             `mapstructure:"rpc"`
+	P2P             *P2PConfig             `mapstructure:"p2p"`
+	Mempool         *MempoolConfig         `mapstructure:"mempool"`
+	Consensus       *ConsensusConfig       `mapstructure:"consensus"`
+	TxIndex         *TxIndexConfig         `mapstructure:"tx_index"`
+	Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
 }
 
 // DefaultConfig returns a default configuration for a Tendermint node
 func DefaultConfig() *Config {
 	return &Config{
-		BaseConfig: DefaultBaseConfig(),
-		RPC:        DefaultRPCConfig(),
-		P2P:        DefaultP2PConfig(),
-		Mempool:    DefaultMempoolConfig(),
-		Consensus:  DefaultConsensusConfig(),
-		TxIndex:    DefaultTxIndexConfig(),
+		BaseConfig:      DefaultBaseConfig(),
+		RPC:             DefaultRPCConfig(),
+		P2P:             DefaultP2PConfig(),
+		Mempool:         DefaultMempoolConfig(),
+		Consensus:       DefaultConsensusConfig(),
+		TxIndex:         DefaultTxIndexConfig(),
+		Instrumentation: DefaultInstrumentationConfig(),
 	}
 }
 
 // TestConfig returns a configuration that can be used for testing
 func TestConfig() *Config {
 	return &Config{
-		BaseConfig: TestBaseConfig(),
-		RPC:        TestRPCConfig(),
-		P2P:        TestP2PConfig(),
-		Mempool:    TestMempoolConfig(),
-		Consensus:  TestConsensusConfig(),
-		TxIndex:    TestTxIndexConfig(),
+		BaseConfig:      TestBaseConfig(),
+		RPC:             TestRPCConfig(),
+		P2P:             TestP2PConfig(),
+		Mempool:         TestMempoolConfig(),
+		Consensus:       TestConsensusConfig(),
+		TxIndex:         TestTxIndexConfig(),
+		Instrumentation: TestInstrumentationConfig(),
 	}
 }
 
@@ -221,16 +224,36 @@ type RPCConfig struct {
 	// NOTE: This server only supports /broadcast_tx_commit
 	GRPCListenAddress string `mapstructure:"grpc_laddr"`
 
+	// Maximum number of simultaneous connections.
+	// Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`
+
 	// Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
 	Unsafe bool `mapstructure:"unsafe"`
+
+	// Maximum number of simultaneous connections (including WebSocket).
+	// Does not include gRPC connections. See grpc_max_open_connections
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
 }
 
 // DefaultRPCConfig returns a default configuration for the RPC server
 func DefaultRPCConfig() *RPCConfig {
 	return &RPCConfig{
-		ListenAddress:     "tcp://0.0.0.0:26657",
-		GRPCListenAddress: "",
-		Unsafe:            false,
+		ListenAddress: "tcp://0.0.0.0:26657",
+
+		GRPCListenAddress:      "",
+		GRPCMaxOpenConnections: 900, // no ipv4
+
+		Unsafe: false,
+		// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
+		// 1024 - 50 - 50 = 924 = ~900
+		MaxOpenConnections: 900,
 	}
 }
 
@@ -253,6 +276,9 @@ type P2PConfig struct {
 	// Address to listen for incoming connections
 	ListenAddress string `mapstructure:"laddr"`
 
+	// Address to advertise to peers for them to dial
+	ExternalAddress string `mapstructure:"external_address"`
+
 	// Comma separated list of seed nodes to connect to
 	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`
@@ -261,8 +287,8 @@ type P2PConfig struct {
 	// Do not add private peers to this list if you don't want them advertised
 	PersistentPeers string `mapstructure:"persistent_peers"`
 
-	// Skip UPNP port forwarding
-	SkipUPNP bool `mapstructure:"skip_upnp"`
+	// UPNP port forwarding
+	UPNP bool `mapstructure:"upnp"`
 
 	// Path to address book
 	AddrBook string `mapstructure:"addr_book_file"`
@@ -317,6 +343,8 @@ type P2PConfig struct {
 func DefaultP2PConfig() *P2PConfig {
 	return &P2PConfig{
 		ListenAddress:           "tcp://0.0.0.0:26656",
+		ExternalAddress:         "",
+		UPNP:                    false,
 		AddrBook:                defaultAddrBookPath,
 		AddrBookStrict:          true,
 		MaxNumPeers:             50,
@@ -339,7 +367,6 @@ func DefaultP2PConfig() *P2PConfig {
 func TestP2PConfig() *P2PConfig {
 	cfg := DefaultP2PConfig()
 	cfg.ListenAddress = "tcp://0.0.0.0:36656"
-	cfg.SkipUPNP = true
 	cfg.FlushThrottleTimeout = 10
 	cfg.AllowDuplicateIP = true
 	return cfg
@@ -411,7 +438,7 @@ func (cfg *MempoolConfig) WalDir() string {
 //-----------------------------------------------------------------------------
 // ConsensusConfig
 
-// ConsensusConfig defines the confuguration for the Tendermint consensus service,
+// ConsensusConfig defines the configuration for the Tendermint consensus service,
 // including timeouts and details about the WAL and the block structure.
 type ConsensusConfig struct {
 	RootDir string `mapstructure:"home"`
@@ -430,10 +457,6 @@ type ConsensusConfig struct {
 	// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 	SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
 
-	// BlockSize
-	MaxBlockSizeTxs   int `mapstructure:"max_block_size_txs"`
-	MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
-
 	// EmptyBlocks mode and possible interval between empty blocks in seconds
 	CreateEmptyBlocks         bool `mapstructure:"create_empty_blocks"`
 	CreateEmptyBlocksInterval int  `mapstructure:"create_empty_blocks_interval"`
@@ -455,8 +478,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
 		TimeoutPrecommitDelta:       500,
 		TimeoutCommit:               1000,
 		SkipTimeoutCommit:           false,
-		MaxBlockSizeTxs:             10000,
-		MaxBlockSizeBytes:           1, // TODO
 		CreateEmptyBlocks:           true,
 		CreateEmptyBlocksInterval:   0,
 		PeerGossipSleepDuration:     100,
@@ -536,14 +557,14 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
 //-----------------------------------------------------------------------------
 // TxIndexConfig
 
-// TxIndexConfig defines the confuguration for the transaction
+// TxIndexConfig defines the configuration for the transaction
 // indexer, including tags to index.
 type TxIndexConfig struct {
 	// What indexer to use for transactions
 	//
 	// Options:
-	//   1) "null" (default)
-	//   2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+	//   1) "null"
+	//   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
 	Indexer string `mapstructure:"indexer"`
 
 	// Comma-separated list of tags to index (by default the only tag is tx hash)
@@ -573,6 +594,42 @@ func TestTxIndexConfig() *TxIndexConfig {
 	return DefaultTxIndexConfig()
 }
 
+//-----------------------------------------------------------------------------
+// InstrumentationConfig
+
+// InstrumentationConfig defines the configuration for metrics reporting.
+type InstrumentationConfig struct {
+	// When true, Prometheus metrics are served under /metrics on
+	// PrometheusListenAddr.
+	// Check out the documentation for the list of available metrics.
+	Prometheus bool `mapstructure:"prometheus"`
+
+	// Address to listen for Prometheus collector(s) connections.
+	PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
+
+	// Maximum number of simultaneous connections.
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
+}
+
+// DefaultInstrumentationConfig returns a default configuration for metrics
+// reporting.
+func DefaultInstrumentationConfig() *InstrumentationConfig {
+	return &InstrumentationConfig{
+		Prometheus:           false,
+		PrometheusListenAddr: ":26660",
+		MaxOpenConnections:   3,
+	}
+}
+
+// TestInstrumentationConfig returns a default configuration for metrics
+// reporting.
+func TestInstrumentationConfig() *InstrumentationConfig {
+	return DefaultInstrumentationConfig()
+}
+
 //-----------------------------------------------------------------------------
 // Utils
 
diff --git a/vendor/github.com/tendermint/tendermint/config/toml.go b/vendor/github.com/tendermint/tendermint/config/toml.go
index 7ed3e971db4e1815572c5cf97049e95925c005f7..858d9b31dc6e5014d440e16b337f255bc751cbcc 100644
--- a/vendor/github.com/tendermint/tendermint/config/toml.go
+++ b/vendor/github.com/tendermint/tendermint/config/toml.go
@@ -6,7 +6,7 @@ import (
 	"path/filepath"
 	"text/template"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var configTemplate *template.Template
@@ -119,15 +119,35 @@ laddr = "{{ .RPC.ListenAddress }}"
 # NOTE: This server only supports /broadcast_tx_commit
 grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
 
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
+
 # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
 unsafe = {{ .RPC.Unsafe }}
 
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .RPC.MaxOpenConnections }}
+
 ##### peer to peer configuration options #####
 [p2p]
 
 # Address to listen for incoming connections
 laddr = "{{ .P2P.ListenAddress }}"
 
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address.
+external_address = "{{ .P2P.ExternalAddress }}"
+
 # Comma separated list of seed nodes to connect to
 seeds = "{{ .P2P.Seeds }}"
 
@@ -135,6 +155,9 @@ seeds = "{{ .P2P.Seeds }}"
 # Do not add private peers to this list if you don't want them advertised
 persistent_peers = "{{ .P2P.PersistentPeers }}"
 
+# UPNP port forwarding
+upnp = {{ .P2P.UPNP }}
+
 # Path to address book
 addr_book_file = "{{ js .P2P.AddrBook }}"
 
@@ -199,10 +222,6 @@ timeout_commit = {{ .Consensus.TimeoutCommit }}
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
 
-# BlockSize
-max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
-max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
-
 # EmptyBlocks mode and possible interval between empty blocks in seconds
 create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
 create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
@@ -232,6 +251,23 @@ index_tags = "{{ .TxIndex.IndexTags }}"
 # desirable (see the comment above). IndexTags has a precedence over
 # IndexAllTags (i.e. when given both, IndexTags will be indexed).
 index_all_tags = {{ .TxIndex.IndexAllTags }}
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = {{ .Instrumentation.Prometheus }}
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
+
+# Maximum number of simultaneous connections.
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
 `
 
 /****** these are for test settings ***********/
@@ -287,10 +323,10 @@ var testGenesis = `{
   "validators": [
     {
       "pub_key": {
-        "type": "AC26791624DE60",
+        "type": "tendermint/PubKeyEd25519",
         "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
       },
-      "power": 10,
+      "power": "10",
       "name": ""
     }
   ],
@@ -298,16 +334,16 @@ var testGenesis = `{
 }`
 
 var testPrivValidator = `{
-  "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
+  "address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
   "pub_key": {
-    "type": "AC26791624DE60",
+    "type": "tendermint/PubKeyEd25519",
     "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
   },
   "priv_key": {
-    "type": "954568A3288910",
+    "type": "tendermint/PrivKeyEd25519",
     "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
   },
-  "last_height": 0,
-  "last_round": 0,
+  "last_height": "0",
+  "last_round": "0",
   "last_step": 0
 }`
diff --git a/vendor/github.com/tendermint/tendermint/consensus/README.md b/vendor/github.com/tendermint/tendermint/consensus/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1111317d5cef48233be07feb5640ef6cdc48de63
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/consensus/README.md
@@ -0,0 +1 @@
+See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.
diff --git a/vendor/github.com/tendermint/tendermint/consensus/metrics.go b/vendor/github.com/tendermint/tendermint/consensus/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..253880e84e4f35e42cefb9a36dcd5bfb8321abaf
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/consensus/metrics.go
@@ -0,0 +1,133 @@
+package consensus
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Height of the chain.
+	Height metrics.Gauge
+
+	// Number of rounds.
+	Rounds metrics.Gauge
+
+	// Number of validators.
+	Validators metrics.Gauge
+	// Total power of all validators.
+	ValidatorsPower metrics.Gauge
+	// Number of validators who did not sign.
+	MissingValidators metrics.Gauge
+	// Total power of the missing validators.
+	MissingValidatorsPower metrics.Gauge
+	// Number of validators who tried to double sign.
+	ByzantineValidators metrics.Gauge
+	// Total power of the byzantine validators.
+	ByzantineValidatorsPower metrics.Gauge
+
+	// Time between this and the last block.
+	BlockIntervalSeconds metrics.Histogram
+
+	// Number of transactions.
+	NumTxs metrics.Gauge
+	// Size of the block.
+	BlockSizeBytes metrics.Gauge
+	// Total number of transactions.
+	TotalTxs metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics build using Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "height",
+			Help:      "Height of the chain.",
+		}, []string{}),
+		Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "rounds",
+			Help:      "Number of rounds.",
+		}, []string{}),
+
+		Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "validators",
+			Help:      "Number of validators.",
+		}, []string{}),
+		ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "validators_power",
+			Help:      "Total power of all validators.",
+		}, []string{}),
+		MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "missing_validators",
+			Help:      "Number of validators who did not sign.",
+		}, []string{}),
+		MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "missing_validators_power",
+			Help:      "Total power of the missing validators.",
+		}, []string{}),
+		ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "byzantine_validators",
+			Help:      "Number of validators who tried to double sign.",
+		}, []string{}),
+		ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "byzantine_validators_power",
+			Help:      "Total power of the byzantine validators.",
+		}, []string{}),
+
+		BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+			Subsystem: "consensus",
+			Name:      "block_interval_seconds",
+			Help:      "Time between this and the last block.",
+			Buckets:   []float64{1, 2.5, 5, 10, 60},
+		}, []string{}),
+
+		NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "num_txs",
+			Help:      "Number of transactions.",
+		}, []string{}),
+		BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "block_size_bytes",
+			Help:      "Size of the block.",
+		}, []string{}),
+		TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "total_txs",
+			Help:      "Total number of transactions.",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Height: discard.NewGauge(),
+
+		Rounds: discard.NewGauge(),
+
+		Validators:               discard.NewGauge(),
+		ValidatorsPower:          discard.NewGauge(),
+		MissingValidators:        discard.NewGauge(),
+		MissingValidatorsPower:   discard.NewGauge(),
+		ByzantineValidators:      discard.NewGauge(),
+		ByzantineValidatorsPower: discard.NewGauge(),
+
+		BlockIntervalSeconds: discard.NewHistogram(),
+
+		NumTxs:         discard.NewGauge(),
+		BlockSizeBytes: discard.NewGauge(),
+		TotalTxs:       discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/consensus/reactor.go b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
index 2034ad3441e274ce79d145b8a471794bd1d2b92d..3eb1d73aa639e4393e2fc1b730740a8669682c48 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
@@ -9,11 +9,11 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 
 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	tmevents "github.com/tendermint/tendermint/libs/events"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -80,6 +80,9 @@ func (conR *ConsensusReactor) OnStop() {
 	conR.BaseReactor.OnStop()
 	conR.unsubscribeFromBroadcastEvents()
 	conR.conS.Stop()
+	if !conR.FastSync() {
+		conR.conS.Wait()
+	}
 }
 
 // SwitchToConsensus switches from fast_sync mode to consensus mode.
@@ -183,7 +186,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		return
 	}
 
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		conR.Switch.StopPeerForError(src, err)
@@ -1306,11 +1309,9 @@ func RegisterConsensusMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
 }
 
-// DecodeMessage decodes the given bytes into a ConsensusMessage.
-func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
+func decodeMsg(bz []byte) (msg ConsensusMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay.go b/vendor/github.com/tendermint/tendermint/consensus/replay.go
index 13ec9e403f96d50c8a7c3dcae31599011713858d..dd940998fabdf4753c637650e52e5187f7ee5702 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay.go
@@ -10,11 +10,11 @@ import (
 	//"strings"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	//auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	//auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
@@ -273,7 +273,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 			ChainId:         h.genDoc.ChainID,
 			ConsensusParams: csParams,
 			Validators:      validators,
-			AppStateBytes:   h.genDoc.AppStateJSON,
+			AppStateBytes:   h.genDoc.AppState,
 		}
 		res, err := proxyApp.Consensus().InitChainSync(req)
 		if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
index 57204b01a942b92c468bb51d55f2f97fe3b576ed..0c0b0dcb169299d09768ce580ea0a80d5cf09833 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
@@ -16,9 +16,9 @@ import (
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/state.go b/vendor/github.com/tendermint/tendermint/consensus/state.go
index 3834b1515ccdb616264b9caec7f2656adf007fe7..e4b360e080d59f1296d63bad53cdbf3c7fca917c 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/state.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	fail "github.com/ebuchman/fail-test"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
@@ -115,10 +115,24 @@ type ConsensusState struct {
 	// synchronous pubsub between consensus state and reactor.
 	// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
 	evsw tmevents.EventSwitch
+
+	// for reporting metrics
+	metrics *Metrics
 }
 
+// CSOption sets an optional parameter on the ConsensusState.
+type CSOption func(*ConsensusState)
+
 // NewConsensusState returns a new ConsensusState.
-func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool) *ConsensusState {
+func NewConsensusState(
+	config *cfg.ConsensusConfig,
+	state sm.State,
+	blockExec *sm.BlockExecutor,
+	blockStore sm.BlockStore,
+	mempool sm.Mempool,
+	evpool sm.EvidencePool,
+	options ...CSOption,
+) *ConsensusState {
 	cs := &ConsensusState{
 		config:           config,
 		blockExec:        blockExec,
@@ -132,6 +146,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
 		wal:              nilWAL{},
 		evpool:           evpool,
 		evsw:             tmevents.NewEventSwitch(),
+		metrics:          NopMetrics(),
 	}
 	// set function defaults (may be overwritten before calling Start)
 	cs.decideProposal = cs.defaultDecideProposal
@@ -143,6 +158,9 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
 	// We do that upon Start().
 	cs.reconstructLastCommit(state)
 	cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs)
+	for _, option := range options {
+		option(cs)
+	}
 	return cs
 }
 
@@ -161,6 +179,11 @@ func (cs *ConsensusState) SetEventBus(b *types.EventBus) {
 	cs.blockExec.SetEventBus(b)
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) CSOption {
+	return func(cs *ConsensusState) { cs.metrics = metrics }
+}
+
 // String returns a string.
 func (cs *ConsensusState) String() string {
 	// better not to access shared variables
@@ -291,16 +314,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
 
 // OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
 func (cs *ConsensusState) OnStop() {
-	cs.BaseService.OnStop()
-
 	cs.evsw.Stop()
-
 	cs.timeoutTicker.Stop()
-
-	// Make BaseService.Wait() wait until cs.wal.Wait()
-	if cs.IsRunning() {
-		cs.wal.Wait()
-	}
 }
 
 // Wait waits for the the main routine to return.
@@ -387,6 +402,7 @@ func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *t
 // internal functions for managing the state
 
 func (cs *ConsensusState) updateHeight(height int64) {
+	cs.metrics.Height.Set(float64(height))
 	cs.Height = height
 }
 
@@ -579,6 +595,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
 
 			// close wal now that we're done writing to it
 			cs.wal.Stop()
+			cs.wal.Wait()
 
 			close(cs.done)
 			return
@@ -600,7 +617,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		err = cs.setProposal(msg.Proposal)
 	case *BlockPartMessage:
 		// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
-		_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
+		_, err = cs.addProposalBlockPart(msg, peerID)
 		if err != nil && msg.Round != cs.Round {
 			cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
 			err = nil
@@ -722,6 +739,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
 	cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping
 
 	cs.eventBus.PublishEventNewRound(cs.RoundStateEvent())
+	cs.metrics.Rounds.Set(float64(round))
 
 	// Wait for txs to be available in the mempool
 	// before we enterPropose in round 0. If the last block changed the app hash,
@@ -907,7 +925,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
 	}
 
 	// Mempool validated transactions
-	txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
+	txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs)
 	block, parts := cs.state.MakeBlock(cs.Height, txs, commit)
 	evidence := cs.evpool.PendingEvidence()
 	block.AddEvidence(evidence)
@@ -1280,6 +1298,9 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
 
 	fail.Fail() // XXX
 
+	// must be called before we update state
+	cs.recordMetrics(height, block)
+
 	// NewHeightStep!
 	cs.updateToState(stateCopy)
 
@@ -1295,6 +1316,44 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
 	// * cs.StartTime is set to when we will start round0.
 }
 
+func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
+	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
+	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
+	missingValidators := 0
+	missingValidatorsPower := int64(0)
+	for i, val := range cs.Validators.Validators {
+		var vote *types.Vote
+		if i < len(block.LastCommit.Precommits) {
+			vote = block.LastCommit.Precommits[i]
+		}
+		if vote == nil {
+			missingValidators++
+			missingValidatorsPower += val.VotingPower
+		}
+	}
+	cs.metrics.MissingValidators.Set(float64(missingValidators))
+	cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower))
+	cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence)))
+	byzantineValidatorsPower := int64(0)
+	for _, ev := range block.Evidence.Evidence {
+		if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil {
+			byzantineValidatorsPower += val.VotingPower
+		}
+	}
+	cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower))
+
+	if height > 1 {
+		lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
+		cs.metrics.BlockIntervalSeconds.Observe(
+			block.Time.Sub(lastBlockMeta.Header.Time).Seconds(),
+		)
+	}
+
+	cs.metrics.NumTxs.Set(float64(block.NumTxs))
+	cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
+	cs.metrics.TotalTxs.Set(float64(block.TotalTxs))
+}
+
 //-----------------------------------------------------------------------------
 
 func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
@@ -1333,17 +1392,22 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
 
 // NOTE: block is not necessarily valid.
 // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
-func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
+func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
+	height, round, part := msg.Height, msg.Round, msg.Part
+
 	// Blocks might be reused, so round mismatch is OK
 	if cs.Height != height {
-		cs.Logger.Debug("Received block part from wrong height", "height", height)
+		cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round)
 		return false, nil
 	}
 
 	// We're not expecting a block part.
 	if cs.ProposalBlockParts == nil {
-		cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
-		return false, nil // TODO: bad peer? Return error?
+		// NOTE: this can happen when we've gone to a higher round and
+		// then receive parts from the previous round - not necessarily a bad peer.
+		cs.Logger.Info("Received a block part when we're not expecting any",
+			"height", height, "round", round, "index", part.Index, "peer", peerID)
+		return false, nil
 	}
 
 	added, err = cs.ProposalBlockParts.AddPart(part)
@@ -1377,7 +1441,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (
 			// procedure at this point.
 		}
 
-		if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
+		if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
 			// Move onto the next step
 			cs.enterPrevote(height, cs.Round)
 		} else if cs.Step == cstypes.RoundStepCommit {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/ticker.go b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
index b37b7c4957de3ba821c41810152f5a3480ddf4d1..a1e2174c369bdcfceb512adc841fb873e3a4eb66 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/ticker.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
@@ -3,8 +3,8 @@ package consensus
 import (
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
index 3c9867940b7a9cf69025f8fa4e684cfee822bd35..70a38668ff91fe043f3238baf5d58e3d92dc8861 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type RoundVoteSet struct {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go b/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
index dcb6c8e020756e5eae5f04d7739d770710506ad6..7a5d69b8eb9e080b00a90f90596d7f436ad758fc 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go b/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
index 14da1f149e223eb2208b9defd0dad84511662644..cca560ccf83d39280043069a14026782dfd265fb 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/wire.go b/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
index bd5c4497d06fdf1a1ae942e6c55bbc7dbeac6632..6342d7ebabeffb8e934c59ed0d6778f23ff5af18 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
@@ -2,7 +2,7 @@ package types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/consensus/version.go b/vendor/github.com/tendermint/tendermint/consensus/version.go
index 2c137bf7fe861cb5b5256d958f34e5d215c457da..5c74a16db37ecd86d99f92db0de41cd41286f376 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/version.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/version.go
@@ -1,7 +1,7 @@
 package consensus
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // kind of arbitrary
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal.go b/vendor/github.com/tendermint/tendermint/consensus/wal.go
index 3d9bf8afcb6c77a233d2759d8c96258dec8caf80..8c4c10bc7b04305383547d685de5379ff69af648 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal.go
@@ -12,8 +12,8 @@ import (
 
 	amino "github.com/tendermint/go-amino"
 	"github.com/tendermint/tendermint/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
index f61af15f575a6f249ec40202867fe09b260e8955..f3a365809cec0ebe1a00c07a8b4b460d2c25871f 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
@@ -10,24 +10,24 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	"github.com/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 // WALWithNBlocks generates a consensus WAL. It does this by spining up a
 // stripped down version of node (proxy app, event bus, consensus state) with a
 // persistent kvstore application and special consensus wal instance
 // (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL
-// content.
+// content. If the node fails to produce given numBlocks, it returns an error.
 func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	config := getConfig()
 
@@ -89,15 +89,15 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	if err := consensusState.Start(); err != nil {
 		return nil, errors.Wrap(err, "failed to start consensus state")
 	}
-	defer consensusState.Stop()
 
 	select {
 	case <-numBlocksWritten:
+		consensusState.Stop()
 		wr.Flush()
 		return b.Bytes(), nil
 	case <-time.After(1 * time.Minute):
-		wr.Flush()
-		return b.Bytes(), fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
+		consensusState.Stop()
+		return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
 	}
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wire.go b/vendor/github.com/tendermint/tendermint/consensus/wire.go
index 81223c6895fbcf0aa1811f5ba20e6d5e332c0763..5f231c0c7256e73f747088745ca7c8c829b5b1d4 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wire.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wire.go
@@ -2,7 +2,7 @@ package consensus
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md b/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd7c1039fcc772a11038aa2a8b4d4b3948dfb6bb
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md
@@ -0,0 +1,154 @@
+# Changelog
+
+## 0.9.0
+
+BREAKING CHANGES
+
+- `priv.PubKey()` no longer returns an error. Any applicable errors (such as when fetching the public key from a hardware wallet) should be checked and returned when constructing the private key.
+
+## 0.8.0
+
+**TBD**
+
+## 0.7.0
+
+**May 30th, 2018**
+
+BREAKING CHANGES
+
+No breaking changes compared to 0.6.2, but making up for the version bump that
+should have happened in 0.6.1.
+
+We also bring in the `tmlibs/merkle` package with breaking changes:
+
+- change the hash function from RIPEMD160 to tmhash (first 20-bytes of SHA256)
+- remove unused funcs and unexport SimpleMap
+
+FEATURES
+
+- [xchacha20poly1305] New authenticated encryption module
+- [merkle] Moved in from tmlibs
+- [merkle/tmhash] New hash function: the first 20-bytes of SHA256
+
+IMPROVEMENTS
+
+- Remove some dead code
+- Use constant-time compare for signatures
+
+BUG FIXES
+
+- Fix MixEntropy weakness
+- Fix PrivKeyEd25519.Generate()
+
+## 0.6.2 (April 9, 2018)
+
+IMPROVEMENTS
+
+- Update for latest go-amino
+
+## 0.6.1 (March 26, 2018)
+
+BREAKING CHANGES
+
+- Encoding uses MarshalBinaryBare rather than MarshalBinary (which auto-length-prefixes) for pub/priv/sig.
+
+## 0.6.0 (March 2, 2018)
+
+BREAKING CHANGES
+
+- Update Amino names from "com.tendermint/..." to "tendermint/"
+
+## 0.5.0 (March 2, 2018)
+
+BREAKING CHANGES
+
+- nano: moved to `_nano` now while we're having build issues
+- bcrypt: moved to `keys/bcrypt`
+- hd: moved to `keys/hd`; `BTC` added to some function names; other function cleanup
+- keys/cryptostore: moved to `keys`, renamed to `keybase`, and completely refactored
+- keys: moved BIP39 related code to `keys/words`
+
+FEATURE
+
+- `Address` is a type alias for `cmn.HexBytes`
+
+BUG FIX
+
+- PrivKey comparisons done in constant time
+
+## 0.4.1 (October 27, 2017)
+
+This release removes support for bcrypt as it was merged too soon without an upgrade plan
+for existing keys.
+
+REVERTS THE FOLLOWING COMMITS:
+
+- Parameterize and lower bcrypt cost - dfc4cdd2d71513e4a9922d679c74f36357c4c862
+- Upgrade keys to use bcrypt with salts (#38)  - 8e7f0e7701f92206679ad093d013b9b162427631
+
+## 0.4.0 (October 27, 2017)
+
+BREAKING CHANGES:
+
+- `keys`: use bcrypt plus salt
+
+FEATURES:
+
+- add support for signing via Ledger Nano
+
+IMPROVEMENTS:
+
+- linting and comments
+
+## 0.3.0 (September 22, 2017)
+
+BREAKING CHANGES:
+
+- Remove `cmd` and `keys/tx` packages altogether: move it to the cosmos-sdk
+- `cryptostore.Generator` takes a secret 
+- Remove `String()` from `Signature` interface
+
+FEATURES:
+
+- `keys`: add CRC16 error correcting code
+
+IMPROVEMENTS:
+
+- Allow no passwords on keys for development convenience
+
+
+## 0.2.1 (June 21, 2017)
+
+- Improve keys command
+  - No password prompts in non-interactive mode (echo 'foobar' | keys new foo)
+  - Added support for seed phrases
+    - Seed phrase now returned on `keys new`
+    - Add `keys restore` to restore private key from key phrase
+    - Checksum to verify typos in the seed phrase (rather than just a useless key)
+  - Add `keys delete` to remove a key if needed
+
+## 0.2.0 (May 18, 2017)
+
+BREAKING CHANGES:
+
+- [hd] The following functions no longer take a `coin string` as argument: `ComputeAddress`, `AddrFromPubKeyBytes`, `ComputeAddressForPrivKey`, `ComputeWIF`, `WIFFromPrivKeyBytes`
+- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below):
+  - interfaces are renamed `XxxInner`, and are not for use outside the package, though they must be exposed for sake of serialization.
+  - `Xxx` is now a struct that wraps the corresponding `XxxInner` interface
+
+FEATURES:
+
+- `github.com/tendermint/go-keys -> github.com/tendermint/go-crypto/keys` - command and lib for generating and managing encrypted keys
+- [hd] New function `WIFFromPrivKeyBytes(privKeyBytes []byte, compress bool) string`
+- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below):
+  - Expose a new method `Unwrap() XxxInner` on the `Xxx` struct which returns the corresponding `XxxInner` interface
+  - Expose a new method `Wrap() Xxx` on the `XxxInner` interface which returns the corresponding `Xxx` struct
+
+IMPROVEMENTS:
+
+- Update to use new `tmlibs` repository
+
+## 0.1.0 (April 14, 2017)
+
+Initial release
+
diff --git a/vendor/github.com/tendermint/tendermint/crypto/README.md b/vendor/github.com/tendermint/tendermint/crypto/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..32afde699d1112443dd386dbafb071a22aa6b0bd
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/README.md
@@ -0,0 +1,25 @@
+# crypto
+
+crypto is the cryptographic package adapted for Tendermint's uses
+
+## Importing it
+`import "github.com/tendermint/tendermint/crypto"`
+
+## Binary encoding
+
+For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md).
+
+## JSON Encoding
+
+crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
+
+```go
+Example Amino:JSON encodings:
+
+crypto.PrivKeyEd25519     - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
+crypto.SignatureEd25519   - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="}
+crypto.PubKeyEd25519      - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
+crypto.PrivKeySecp256k1   - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="}
+crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="}
+crypto.PubKeySecp256k1    - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"}
+```
diff --git a/vendor/github.com/tendermint/go-crypto/amino.go b/vendor/github.com/tendermint/tendermint/crypto/amino.go
similarity index 85%
rename from vendor/github.com/tendermint/go-crypto/amino.go
rename to vendor/github.com/tendermint/tendermint/crypto/amino.go
index 89636895ad5c2247325075baf7b850add469458a..6a8703fc99fd0081225a5d1ed83e6e38e8f8795a 100644
--- a/vendor/github.com/tendermint/go-crypto/amino.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/amino.go
@@ -15,6 +15,7 @@ func init() {
 	RegisterAmino(cdc)
 }
 
+// RegisterAmino registers all crypto related types in the given (amino) codec.
 func RegisterAmino(cdc *amino.Codec) {
 	cdc.RegisterInterface((*PubKey)(nil), nil)
 	cdc.RegisterConcrete(PubKeyEd25519{},
@@ -30,7 +31,7 @@ func RegisterAmino(cdc *amino.Codec) {
 
 	cdc.RegisterInterface((*Signature)(nil), nil)
 	cdc.RegisterConcrete(SignatureEd25519{},
-		"tendermint/SignatureKeyEd25519", nil)
+		"tendermint/SignatureEd25519", nil)
 	cdc.RegisterConcrete(SignatureSecp256k1{},
-		"tendermint/SignatureKeySecp256k1", nil)
+		"tendermint/SignatureSecp256k1", nil)
 }
diff --git a/vendor/github.com/tendermint/go-crypto/armor.go b/vendor/github.com/tendermint/tendermint/crypto/armor.go
similarity index 76%
rename from vendor/github.com/tendermint/go-crypto/armor.go
rename to vendor/github.com/tendermint/tendermint/crypto/armor.go
index 5f199df43fd45beb4cca20d10d374f170f5386cc..4146048ad19738bf002140bddb64487239c98572 100644
--- a/vendor/github.com/tendermint/go-crypto/armor.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/armor.go
@@ -2,9 +2,9 @@ package crypto
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 
-	. "github.com/tendermint/tmlibs/common"
 	"golang.org/x/crypto/openpgp/armor"
 )
 
@@ -12,15 +12,15 @@ func EncodeArmor(blockType string, headers map[string]string, data []byte) strin
 	buf := new(bytes.Buffer)
 	w, err := armor.Encode(buf, blockType, headers)
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	_, err = w.Write(data)
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	err = w.Close()
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	return buf.String()
 }
diff --git a/vendor/github.com/tendermint/tendermint/crypto/doc.go b/vendor/github.com/tendermint/tendermint/crypto/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..544e0df36708de3c25c83425db756724beca26e0
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/doc.go
@@ -0,0 +1,45 @@
+// crypto is a customized/convenience cryptography package for supporting
+// Tendermint.
+
+// It wraps select functionality of equivalent functions in the
+// Go standard library, for easy usage with our libraries.
+
+// Keys:
+
+// All key generation functions return an instance of the PrivKey interface
+// which implements methods
+
+//     AssertIsPrivKeyInner()
+//     Bytes() []byte
+//     Sign(msg []byte) Signature
+//     PubKey() PubKey
+//     Equals(PrivKey) bool
+//     Wrap() PrivKey
+
+// From the above method we can:
+// a) Retrieve the public key if needed
+
+//     pubKey := key.PubKey()
+
+// For example:
+//     privKey, err := crypto.GenPrivKeyEd25519()
+//     if err != nil {
+// 	...
+//     }
+//     pubKey := privKey.PubKey()
+//     ...
+//     // And then you can use the private and public key
+//     doSomething(privKey, pubKey)
+
+// We also provide hashing wrappers around algorithms:
+
+// Sha256
+//     sum := crypto.Sha256([]byte("This is Tendermint"))
+//     fmt.Printf("%x\n", sum)
+
+// Ripemd160
+//     sum := crypto.Ripemd160([]byte("This is consensus"))
+//     fmt.Printf("%x\n", sum)
+package crypto
+
+// TODO: Add more docs in here
diff --git a/vendor/github.com/tendermint/go-crypto/hash.go b/vendor/github.com/tendermint/tendermint/crypto/hash.go
similarity index 100%
rename from vendor/github.com/tendermint/go-crypto/hash.go
rename to vendor/github.com/tendermint/tendermint/crypto/hash.go
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/README.md b/vendor/github.com/tendermint/tendermint/crypto/merkle/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/merkle/README.md
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/README.md
diff --git a/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..865c302170d7d0a7535940360271d342c6b43e30
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go
@@ -0,0 +1,31 @@
+/*
+Package merkle computes a deterministic minimal height Merkle tree hash.
+If the number of items is not a power of two, some leaves
+will be at different levels. Tries to keep both sides of
+the tree the same size, but the left may be one greater.
+
+Use this for short deterministic trees, such as the validator list.
+For larger datasets, use IAVLTree.
+
+Be aware that the current implementation by itself does not prevent
+second pre-image attacks. Hence, use this library with caution.
+Otherwise you might run into similar issues as, e.g., in early Bitcoin:
+https://bitcointalk.org/?topic=102395
+
+                        *
+                       / \
+                     /     \
+                   /         \
+                 /             \
+                *               *
+               / \             / \
+              /   \           /   \
+             /     \         /     \
+            *       *       *       h6
+           / \     / \     / \
+          h0  h1  h2  h3  h4  h5
+
+TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.
+
+*/
+package merkle
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
similarity index 53%
rename from vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
index cd38de7617732e1a4b30fb38800dc9e4cffbcdf6..ba4b9309af0926eb97ad6fbd1ab20904ddc0f927 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
@@ -1,47 +1,48 @@
 package merkle
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
-	"golang.org/x/crypto/ripemd160"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-type SimpleMap struct {
+// Merkle tree from a map.
+// Leaves are `hash(key) | hash(value)`.
+// Leaves are sorted before Merkle hashing.
+type simpleMap struct {
 	kvs    cmn.KVPairs
 	sorted bool
 }
 
-func NewSimpleMap() *SimpleMap {
-	return &SimpleMap{
+func newSimpleMap() *simpleMap {
+	return &simpleMap{
 		kvs:    nil,
 		sorted: false,
 	}
 }
 
-func (sm *SimpleMap) Set(key string, value Hasher) {
+// Set hashes the key and value and appends it to the kv pairs.
+func (sm *simpleMap) Set(key string, value Hasher) {
 	sm.sorted = false
 
-	// Hash the key to blind it... why not?
-	khash := SimpleHashFromBytes([]byte(key))
-
-	// And the value is hashed too, so you can
+	// The value is hashed, so you can
 	// check for equality with a cached value (say)
 	// and make a determination to fetch or not.
 	vhash := value.Hash()
 
 	sm.kvs = append(sm.kvs, cmn.KVPair{
-		Key:   khash,
+		Key:   []byte(key),
 		Value: vhash,
 	})
 }
 
-// Merkle root hash of items sorted by key
+// Hash Merkle root hash of items sorted by key
 // (UNSTABLE: and by value too if duplicate key).
-func (sm *SimpleMap) Hash() []byte {
+func (sm *simpleMap) Hash() []byte {
 	sm.Sort()
 	return hashKVPairs(sm.kvs)
 }
 
-func (sm *SimpleMap) Sort() {
+func (sm *simpleMap) Sort() {
 	if sm.sorted {
 		return
 	}
@@ -50,7 +51,8 @@ func (sm *SimpleMap) Sort() {
 }
 
 // Returns a copy of sorted KVPairs.
-func (sm *SimpleMap) KVPairs() cmn.KVPairs {
+// NOTE these contain the hashed key and value.
+func (sm *simpleMap) KVPairs() cmn.KVPairs {
 	sm.Sort()
 	kvs := make(cmn.KVPairs, len(sm.kvs))
 	copy(kvs, sm.kvs)
@@ -60,10 +62,12 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs {
 //----------------------------------------
 
 // A local extension to KVPair that can be hashed.
+// Key and value are length prefixed and concatenated,
+// then hashed.
 type KVPair cmn.KVPair
 
 func (kv KVPair) Hash() []byte {
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	err := encodeByteSlice(hasher, kv.Key)
 	if err != nil {
 		panic(err)
@@ -76,9 +80,9 @@ func (kv KVPair) Hash() []byte {
 }
 
 func hashKVPairs(kvs cmn.KVPairs) []byte {
-	kvsH := make([]Hasher, 0, len(kvs))
-	for _, kvp := range kvs {
-		kvsH = append(kvsH, KVPair(kvp))
+	kvsH := make([]Hasher, len(kvs))
+	for i, kvp := range kvs {
+		kvsH[i] = KVPair(kvp)
 	}
 	return SimpleHashFromHashers(kvsH)
 }
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
similarity index 79%
rename from vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
index ca6ccf37266f2baaa002a4c9f6b4d25585b85a50..2541b6d3843518d73240fca74e704c96e0e3907b 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
@@ -5,10 +5,12 @@ import (
 	"fmt"
 )
 
+// SimpleProof represents a simple merkle proof.
 type SimpleProof struct {
 	Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
 }
 
+// SimpleProofsFromHashers computes inclusion proof for given items.
 // proofs[0] is the proof for items[0].
 func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) {
 	trails, rootSPN := trailsFromHashers(items)
@@ -22,8 +24,11 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP
 	return
 }
 
-func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*SimpleProof) {
-	sm := NewSimpleMap()
+// SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values
+// in the underlying key-value pairs.
+// The keys are sorted before the proofs are computed.
+func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) {
+	sm := newSimpleMap()
 	for k, v := range m {
 		sm.Set(k, v)
 	}
@@ -33,7 +38,15 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*Simple
 	for _, kvp := range kvs {
 		kvsH = append(kvsH, KVPair(kvp))
 	}
-	return SimpleProofsFromHashers(kvsH)
+
+	rootHash, proofList := SimpleProofsFromHashers(kvsH)
+	proofs = make(map[string]*SimpleProof)
+	keys = make([]string, len(proofList))
+	for i, kvp := range kvs {
+		proofs[string(kvp.Key)] = proofList[i]
+		keys[i] = string(kvp.Key)
+	}
+	return
 }
 
 // Verify that leafHash is a leaf hash of the simple-merkle-tree
@@ -43,10 +56,13 @@ func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []
 	return computedHash != nil && bytes.Equal(computedHash, rootHash)
 }
 
+// String implements the stringer interface for SimpleProof.
+// It is a wrapper around StringIndented.
 func (sp *SimpleProof) String() string {
 	return sp.StringIndented("")
 }
 
+// StringIndented generates a canonical string representation of a SimpleProof.
 func (sp *SimpleProof) StringIndented(indent string) string {
 	return fmt.Sprintf(`SimpleProof{
 %s  Aunts: %X
@@ -90,7 +106,7 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][
 	}
 }
 
-// Helper structure to construct merkle proof.
+// SimpleProofNode is a helper structure to construct merkle proof.
 // The node and the tree is thrown away afterwards.
 // Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
 // node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
@@ -102,8 +118,8 @@ type SimpleProofNode struct {
 	Right  *SimpleProofNode // Right sibling (only one of Left,Right is set)
 }
 
-// Starting from a leaf SimpleProofNode, FlattenAunts() will return
-// the inner hashes for the item corresponding to the leaf.
+// FlattenAunts will return the inner hashes for the item corresponding to the leaf,
+// starting from a leaf SimpleProofNode.
 func (spn *SimpleProofNode) FlattenAunts() [][]byte {
 	// Nonrecursive impl.
 	innerHashes := [][]byte{}
diff --git a/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go
new file mode 100644
index 0000000000000000000000000000000000000000..46a0759099190b022f1e8aa5a69e51183cd6c0c0
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go
@@ -0,0 +1,58 @@
+package merkle
+
+import (
+	"github.com/tendermint/tendermint/crypto/tmhash"
+)
+
+// SimpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right).
+func SimpleHashFromTwoHashes(left, right []byte) []byte {
+	var hasher = tmhash.New()
+	err := encodeByteSlice(hasher, left)
+	if err != nil {
+		panic(err)
+	}
+	err = encodeByteSlice(hasher, right)
+	if err != nil {
+		panic(err)
+	}
+	return hasher.Sum(nil)
+}
+
+// SimpleHashFromHashers computes a Merkle tree from items that can be hashed.
+func SimpleHashFromHashers(items []Hasher) []byte {
+	hashes := make([][]byte, len(items))
+	for i, item := range items {
+		hash := item.Hash()
+		hashes[i] = hash
+	}
+	return simpleHashFromHashes(hashes)
+}
+
+// SimpleHashFromMap computes a Merkle tree from sorted map.
+// Like calling SimpleHashFromHashers with
+// `item = []byte(Hash(key) | Hash(value))`,
+// sorted by `item`.
+func SimpleHashFromMap(m map[string]Hasher) []byte {
+	sm := newSimpleMap()
+	for k, v := range m {
+		sm.Set(k, v)
+	}
+	return sm.Hash()
+}
+
+//----------------------------------------------------------------
+
+// Expects hashes!
+func simpleHashFromHashes(hashes [][]byte) []byte {
+	// Recursive impl.
+	switch len(hashes) {
+	case 0:
+		return nil
+	case 1:
+		return hashes[0]
+	default:
+		left := simpleHashFromHashes(hashes[:(len(hashes)+1)/2])
+		right := simpleHashFromHashes(hashes[(len(hashes)+1)/2:])
+		return SimpleHashFromTwoHashes(left, right)
+	}
+}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/types.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
similarity index 72%
rename from vendor/github.com/tendermint/tmlibs/merkle/types.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
index a0c491a7eceeb41c6296d5bc9e78e1f4a968f166..2fcb3f39d8bd07bfb326ad8cb2747468721b483f 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/types.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
@@ -1,10 +1,12 @@
 package merkle
 
 import (
-	"encoding/binary"
 	"io"
+
+	amino "github.com/tendermint/go-amino"
 )
 
+// Tree is a Merkle tree interface.
 type Tree interface {
 	Size() (size int)
 	Height() (height int8)
@@ -23,25 +25,14 @@ type Tree interface {
 	IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool)
 }
 
+// Hasher represents a hashable piece of data which can be hashed in the Tree.
 type Hasher interface {
 	Hash() []byte
 }
 
 //-----------------------------------------------------------------------
-// NOTE: these are duplicated from go-amino so we dont need go-amino as a dep
 
+// Uvarint length prefixed byteslice
 func encodeByteSlice(w io.Writer, bz []byte) (err error) {
-	err = encodeUvarint(w, uint64(len(bz)))
-	if err != nil {
-		return
-	}
-	_, err = w.Write(bz)
-	return
-}
-
-func encodeUvarint(w io.Writer, i uint64) (err error) {
-	var buf [10]byte
-	n := binary.PutUvarint(buf[:], i)
-	_, err = w.Write(buf[0:n])
-	return
+	return amino.EncodeByteSlice(w, bz)
 }
diff --git a/vendor/github.com/tendermint/go-crypto/priv_key.go b/vendor/github.com/tendermint/tendermint/crypto/priv_key.go
similarity index 84%
rename from vendor/github.com/tendermint/go-crypto/priv_key.go
rename to vendor/github.com/tendermint/tendermint/crypto/priv_key.go
index 61d373f60c6fa737769a97683b203d68d31b8775..dbfe64c33443191a21af12d76aa57372fcba1eed 100644
--- a/vendor/github.com/tendermint/go-crypto/priv_key.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/priv_key.go
@@ -6,7 +6,6 @@ import (
 	secp256k1 "github.com/btcsuite/btcd/btcec"
 	"github.com/tendermint/ed25519"
 	"github.com/tendermint/ed25519/extra25519"
-	. "github.com/tendermint/tmlibs/common"
 )
 
 func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
@@ -18,7 +17,7 @@ func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
 
 type PrivKey interface {
 	Bytes() []byte
-	Sign(msg []byte) Signature
+	Sign(msg []byte) (Signature, error)
 	PubKey() PubKey
 	Equals(PrivKey) bool
 }
@@ -31,17 +30,13 @@ var _ PrivKey = PrivKeyEd25519{}
 type PrivKeyEd25519 [64]byte
 
 func (privKey PrivKeyEd25519) Bytes() []byte {
-	bz, err := cdc.MarshalBinaryBare(privKey)
-	if err != nil {
-		panic(err)
-	}
-	return bz
+	return cdc.MustMarshalBinaryBare(privKey)
 }
 
-func (privKey PrivKeyEd25519) Sign(msg []byte) Signature {
+func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) {
 	privKeyBytes := [64]byte(privKey)
 	signatureBytes := ed25519.Sign(&privKeyBytes, msg)
-	return SignatureEd25519(*signatureBytes)
+	return SignatureEd25519(*signatureBytes), nil
 }
 
 func (privKey PrivKeyEd25519) PubKey() PubKey {
@@ -67,12 +62,6 @@ func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte {
 	return keyCurve25519
 }
 
-/*
-func (privKey PrivKeyEd25519) String() string {
-	return Fmt("PrivKeyEd25519{*****}")
-}
-*/
-
 // Deterministically generates new priv-key bytes from key.
 func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
 	bz, err := cdc.MarshalBinaryBare(struct {
@@ -83,9 +72,10 @@ func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
 		panic(err)
 	}
 	newBytes := Sha256(bz)
-	var newKey [64]byte
-	copy(newKey[:], newBytes)
-	return PrivKeyEd25519(newKey)
+	newKey := new([64]byte)
+	copy(newKey[:32], newBytes)
+	ed25519.MakePublicKey(newKey)
+	return PrivKeyEd25519(*newKey)
 }
 
 func GenPrivKeyEd25519() PrivKeyEd25519 {
@@ -113,20 +103,16 @@ var _ PrivKey = PrivKeySecp256k1{}
 type PrivKeySecp256k1 [32]byte
 
 func (privKey PrivKeySecp256k1) Bytes() []byte {
-	bz, err := cdc.MarshalBinaryBare(privKey)
-	if err != nil {
-		panic(err)
-	}
-	return bz
+	return cdc.MustMarshalBinaryBare(privKey)
 }
 
-func (privKey PrivKeySecp256k1) Sign(msg []byte) Signature {
+func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) {
 	priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
 	sig__, err := priv__.Sign(Sha256(msg))
 	if err != nil {
-		PanicSanity(err)
+		return nil, err
 	}
-	return SignatureSecp256k1(sig__.Serialize())
+	return SignatureSecp256k1(sig__.Serialize()), nil
 }
 
 func (privKey PrivKeySecp256k1) PubKey() PubKey {
@@ -146,12 +132,6 @@ func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool {
 	}
 }
 
-/*
-func (privKey PrivKeySecp256k1) String() string {
-	return Fmt("PrivKeySecp256k1{*****}")
-}
-*/
-
 /*
 // Deterministically generates new priv-key bytes from key.
 func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 {
diff --git a/vendor/github.com/tendermint/go-crypto/pub_key.go b/vendor/github.com/tendermint/tendermint/crypto/pub_key.go
similarity index 83%
rename from vendor/github.com/tendermint/go-crypto/pub_key.go
rename to vendor/github.com/tendermint/tendermint/crypto/pub_key.go
index 9be64acdf49d16ea2cf5e997bd68e1b9f0d6dc9b..588c54113ded19b5b5cec5c4d2e724c14fac975b 100644
--- a/vendor/github.com/tendermint/go-crypto/pub_key.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/pub_key.go
@@ -5,11 +5,16 @@ import (
 	"crypto/sha256"
 	"fmt"
 
+	"golang.org/x/crypto/ripemd160"
+
 	secp256k1 "github.com/btcsuite/btcd/btcec"
+
 	"github.com/tendermint/ed25519"
 	"github.com/tendermint/ed25519/extra25519"
-	cmn "github.com/tendermint/tmlibs/common"
-	"golang.org/x/crypto/ripemd160"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )
 
 // An address is a []byte, but hex-encoded even in JSON.
@@ -35,14 +40,14 @@ type PubKey interface {
 
 var _ PubKey = PubKeyEd25519{}
 
+const PubKeyEd25519Size = 32
+
 // Implements PubKeyInner
-type PubKeyEd25519 [32]byte
+type PubKeyEd25519 [PubKeyEd25519Size]byte
 
+// Address is the SHA256-20 of the raw pubkey bytes.
 func (pubKey PubKeyEd25519) Address() Address {
-	// append type byte
-	hasher := ripemd160.New()
-	hasher.Write(pubKey.Bytes()) // does not error
-	return Address(hasher.Sum(nil))
+	return Address(tmhash.Sum(pubKey[:]))
 }
 
 func (pubKey PubKeyEd25519) Bytes() []byte {
@@ -59,15 +64,15 @@ func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {
 	if !ok {
 		return false
 	}
-	pubKeyBytes := [32]byte(pubKey)
-	sigBytes := [64]byte(sig)
+	pubKeyBytes := [PubKeyEd25519Size]byte(pubKey)
+	sigBytes := [SignatureEd25519Size]byte(sig)
 	return ed25519.Verify(&pubKeyBytes, msg, &sigBytes)
 }
 
 // For use with golang/crypto/nacl/box
 // If error, returns nil.
-func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {
-	keyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)
+func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
+	keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey)
 	ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)
 	if !ok {
 		return nil
@@ -91,10 +96,12 @@ func (pubKey PubKeyEd25519) Equals(other PubKey) bool {
 
 var _ PubKey = PubKeySecp256k1{}
 
+const PubKeySecp256k1Size = 33
+
 // Implements PubKey.
 // Compressed pubkey (just the x-cord),
 // prefixed with 0x02 or 0x03, depending on the y-cord.
-type PubKeySecp256k1 [33]byte
+type PubKeySecp256k1 [PubKeySecp256k1Size]byte
 
 // Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey))
 func (pubKey PubKeySecp256k1) Address() Address {
diff --git a/vendor/github.com/tendermint/go-crypto/random.go b/vendor/github.com/tendermint/tendermint/crypto/random.go
similarity index 89%
rename from vendor/github.com/tendermint/go-crypto/random.go
rename to vendor/github.com/tendermint/tendermint/crypto/random.go
index 46754219d4c0d13d4e4b71528a10c8f44f6b3dd6..5c5057d301d14ea185c009faebef7795c0005e4c 100644
--- a/vendor/github.com/tendermint/go-crypto/random.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/random.go
@@ -4,11 +4,12 @@ import (
 	"crypto/aes"
 	"crypto/cipher"
 	crand "crypto/rand"
+	"crypto/sha256"
 	"encoding/hex"
 	"io"
 	"sync"
 
-	. "github.com/tendermint/tmlibs/common"
+	. "github.com/tendermint/tendermint/libs/common"
 )
 
 var gRandInfo *randInfo
@@ -72,8 +73,12 @@ type randInfo struct {
 func (ri *randInfo) MixEntropy(seedBytes []byte) {
 	ri.mtx.Lock()
 	defer ri.mtx.Unlock()
-	// Make new ri.seedBytes
-	hashBytes := Sha256(seedBytes)
+	// Make new ri.seedBytes using passed seedBytes and current ri.seedBytes:
+	// ri.seedBytes = sha256( seedBytes || ri.seedBytes )
+	h := sha256.New()
+	h.Write(seedBytes)
+	h.Write(ri.seedBytes[:])
+	hashBytes := h.Sum(nil)
 	hashBytes32 := [32]byte{}
 	copy(hashBytes32[:], hashBytes)
 	ri.seedBytes = xorBytes32(ri.seedBytes, hashBytes32)
diff --git a/vendor/github.com/tendermint/go-crypto/signature.go b/vendor/github.com/tendermint/tendermint/crypto/signature.go
similarity index 79%
rename from vendor/github.com/tendermint/go-crypto/signature.go
rename to vendor/github.com/tendermint/tendermint/crypto/signature.go
index cfe9271379e1e64f926f74cf10f5b9f765bb3703..ae447da64a1a7642db00cb555313c989d574b59f 100644
--- a/vendor/github.com/tendermint/go-crypto/signature.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/signature.go
@@ -1,10 +1,11 @@
 package crypto
 
 import (
-	"bytes"
 	"fmt"
 
-	. "github.com/tendermint/tmlibs/common"
+	"crypto/subtle"
+
+	. "github.com/tendermint/tendermint/libs/common"
 )
 
 func SignatureFromBytes(pubKeyBytes []byte) (pubKey Signature, err error) {
@@ -24,8 +25,10 @@ type Signature interface {
 
 var _ Signature = SignatureEd25519{}
 
+const SignatureEd25519Size = 64
+
 // Implements Signature
-type SignatureEd25519 [64]byte
+type SignatureEd25519 [SignatureEd25519Size]byte
 
 func (sig SignatureEd25519) Bytes() []byte {
 	bz, err := cdc.MarshalBinaryBare(sig)
@@ -41,7 +44,7 @@ func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", Fing
 
 func (sig SignatureEd25519) Equals(other Signature) bool {
 	if otherEd, ok := other.(SignatureEd25519); ok {
-		return bytes.Equal(sig[:], otherEd[:])
+		return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1
 	} else {
 		return false
 	}
@@ -74,8 +77,14 @@ func (sig SignatureSecp256k1) String() string { return fmt.Sprintf("/%X.../", Fi
 
 func (sig SignatureSecp256k1) Equals(other Signature) bool {
 	if otherSecp, ok := other.(SignatureSecp256k1); ok {
-		return bytes.Equal(sig[:], otherSecp[:])
+		return subtle.ConstantTimeCompare(sig[:], otherSecp[:]) == 1
 	} else {
 		return false
 	}
 }
+
+func SignatureSecp256k1FromBytes(data []byte) Signature {
+	sig := make(SignatureSecp256k1, len(data))
+	copy(sig[:], data)
+	return sig
+}
diff --git a/vendor/github.com/tendermint/go-crypto/symmetric.go b/vendor/github.com/tendermint/tendermint/crypto/symmetric.go
similarity index 97%
rename from vendor/github.com/tendermint/go-crypto/symmetric.go
rename to vendor/github.com/tendermint/tendermint/crypto/symmetric.go
index d4ac9b55b0728c57e8e05c757cd1d6e2ac186c0b..62379c15fb42e00d579a621d1210b8939c8bc618 100644
--- a/vendor/github.com/tendermint/go-crypto/symmetric.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/symmetric.go
@@ -3,7 +3,7 @@ package crypto
 import (
 	"errors"
 
-	. "github.com/tendermint/tmlibs/common"
+	. "github.com/tendermint/tendermint/libs/common"
 	"golang.org/x/crypto/nacl/secretbox"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go b/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b29d8680cf2defed41c28724884243f97d56fb5
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go
@@ -0,0 +1,48 @@
+package tmhash
+
+import (
+	"crypto/sha256"
+	"hash"
+)
+
+const (
+	Size      = 20
+	BlockSize = sha256.BlockSize
+)
+
+type sha256trunc struct {
+	sha256 hash.Hash
+}
+
+func (h sha256trunc) Write(p []byte) (n int, err error) {
+	return h.sha256.Write(p)
+}
+func (h sha256trunc) Sum(b []byte) []byte {
+	shasum := h.sha256.Sum(b)
+	return shasum[:Size]
+}
+
+func (h sha256trunc) Reset() {
+	h.sha256.Reset()
+}
+
+func (h sha256trunc) Size() int {
+	return Size
+}
+
+func (h sha256trunc) BlockSize() int {
+	return h.sha256.BlockSize()
+}
+
+// New returns a new hash.Hash.
+func New() hash.Hash {
+	return sha256trunc{
+		sha256: sha256.New(),
+	}
+}
+
+// Sum returns the first 20 bytes of SHA256 of the bz.
+func Sum(bz []byte) []byte {
+	hash := sha256.Sum256(bz)
+	return hash[:Size]
+}
diff --git a/vendor/github.com/tendermint/tendermint/crypto/version.go b/vendor/github.com/tendermint/tendermint/crypto/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..77c0bed8a20000b11524660da0ccfe66d19adaee
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/version.go
@@ -0,0 +1,3 @@
+package crypto
+
+const Version = "0.9.0-dev"
diff --git a/vendor/github.com/tendermint/tendermint/evidence/pool.go b/vendor/github.com/tendermint/tendermint/evidence/pool.go
index 4bad355f71a8eb4fd1403e81013821e9a37649a7..247629b6be4e61743ef35c6ce7aedbb013996a2a 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/pool.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/pool.go
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"sync"
 
-	clist "github.com/tendermint/tmlibs/clist"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	clist "github.com/tendermint/tendermint/libs/clist"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/evidence/reactor.go b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
index 5159572e3730ae41251c0786b5211e680ed637aa..bf11ac105572f0174e7d1a9da4d51f455acfda93 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
@@ -5,10 +5,10 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/tendermint/go-amino"
-	clist "github.com/tendermint/tmlibs/clist"
-	"github.com/tendermint/tmlibs/log"
+	amino "github.com/tendermint/go-amino"
 
+	clist "github.com/tendermint/tendermint/libs/clist"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 )
@@ -73,7 +73,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 // Receive implements Reactor.
 // It adds any received evidence to the evpool.
 func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		evR.Switch.StopPeerForError(src, err)
@@ -204,11 +204,9 @@ func RegisterEvidenceMessages(cdc *amino.Codec) {
 		"tendermint/evidence/EvidenceListMessage", nil)
 }
 
-// DecodeMessage decodes a byte-array into a EvidenceMessage.
-func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
+func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/evidence/store.go b/vendor/github.com/tendermint/tendermint/evidence/store.go
index 6af5d75d8d0e621194829d2b217045a8105a5aae..20b37bdb27a9c2d7f0336dffff3da758e5feeaae 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/store.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/store.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 /*
diff --git a/vendor/github.com/tendermint/tendermint/evidence/wire.go b/vendor/github.com/tendermint/tendermint/evidence/wire.go
index 842e0707a8eda58b2a581d8244312672a3cec881..fb3a177cc1f49d1be214d64c2f5e3675ceea6927 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/wire.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/wire.go
@@ -2,7 +2,7 @@ package evidence
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/README.md b/vendor/github.com/tendermint/tendermint/libs/autofile/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/autofile/README.md
rename to vendor/github.com/tendermint/tendermint/libs/autofile/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go b/vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
similarity index 97%
rename from vendor/github.com/tendermint/tmlibs/autofile/autofile.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
index 790be5224b3ce69e5fbe73d144fd5c18d68a5a85..313da67890d826b785ff30c6c94847818f2fc32d 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go
+++ b/vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
@@ -5,7 +5,7 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 /* AutoFile usage
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/group.go b/vendor/github.com/tendermint/tendermint/libs/autofile/group.go
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/autofile/group.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/group.go
index 1ae5450324788d425f198bbef8bc7d68e1396230..b4368ed9e0acafb3612bdd618dbdc5c8d5defc05 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/group.go
+++ b/vendor/github.com/tendermint/tendermint/libs/autofile/group.go
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/sighup_watcher.go b/vendor/github.com/tendermint/tendermint/libs/autofile/sighup_watcher.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/autofile/sighup_watcher.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/sighup_watcher.go
diff --git a/vendor/github.com/tendermint/tmlibs/clist/clist.go b/vendor/github.com/tendermint/tendermint/libs/clist/clist.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/clist/clist.go
rename to vendor/github.com/tendermint/tendermint/libs/clist/clist.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/LICENSE b/vendor/github.com/tendermint/tendermint/libs/common/LICENSE
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/LICENSE
rename to vendor/github.com/tendermint/tendermint/libs/common/LICENSE
diff --git a/vendor/github.com/tendermint/tmlibs/common/async.go b/vendor/github.com/tendermint/tendermint/libs/common/async.go
similarity index 95%
rename from vendor/github.com/tendermint/tmlibs/common/async.go
rename to vendor/github.com/tendermint/tendermint/libs/common/async.go
index 7be09a3c1ba30791b21e5b4e000881de0b507698..e3293ab4c8b38afed6df5f6e29a603936ab83629 100644
--- a/vendor/github.com/tendermint/tmlibs/common/async.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/async.go
@@ -76,17 +76,15 @@ func (trs *TaskResultSet) Reap() *TaskResultSet {
 func (trs *TaskResultSet) Wait() *TaskResultSet {
 	for i := 0; i < len(trs.results); i++ {
 		var trch = trs.chz[i]
-		select {
-		case result, ok := <-trch:
-			if ok {
-				// Write result.
-				trs.results[i] = taskResultOK{
-					TaskResult: result,
-					OK:         true,
-				}
-			} else {
-				// We already wrote it.
+		result, ok := <-trch
+		if ok {
+			// Write result.
+			trs.results[i] = taskResultOK{
+				TaskResult: result,
+				OK:         true,
 			}
+		} else {
+			// We already wrote it.
 		}
 	}
 	return trs
diff --git a/vendor/github.com/tendermint/tmlibs/common/bit_array.go b/vendor/github.com/tendermint/tendermint/libs/common/bit_array.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/bit_array.go
rename to vendor/github.com/tendermint/tendermint/libs/common/bit_array.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/bytes.go b/vendor/github.com/tendermint/tendermint/libs/common/bytes.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/bytes.go
rename to vendor/github.com/tendermint/tendermint/libs/common/bytes.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/byteslice.go b/vendor/github.com/tendermint/tendermint/libs/common/byteslice.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/byteslice.go
rename to vendor/github.com/tendermint/tendermint/libs/common/byteslice.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/cmap.go b/vendor/github.com/tendermint/tendermint/libs/common/cmap.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/cmap.go
rename to vendor/github.com/tendermint/tendermint/libs/common/cmap.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/colors.go b/vendor/github.com/tendermint/tendermint/libs/common/colors.go
similarity index 87%
rename from vendor/github.com/tendermint/tmlibs/common/colors.go
rename to vendor/github.com/tendermint/tendermint/libs/common/colors.go
index 85e592248e4b5ca86e237b35a2a89efad9424722..049ce7a50563c1d5a26ac84d7e7ccd63468875b9 100644
--- a/vendor/github.com/tendermint/tmlibs/common/colors.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/colors.go
@@ -81,3 +81,15 @@ func Cyan(args ...interface{}) string {
 func White(args ...interface{}) string {
 	return treatAll(ANSIFgWhite, args...)
 }
+
+func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string {
+	s := ""
+	for _, b := range data {
+		if 0x21 <= b && b < 0x7F {
+			s += textColor(string(b))
+		} else {
+			s += bytesColor(Fmt("%02X", b))
+		}
+	}
+	return s
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/date.go b/vendor/github.com/tendermint/tendermint/libs/common/date.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/date.go
rename to vendor/github.com/tendermint/tendermint/libs/common/date.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/errors.go b/vendor/github.com/tendermint/tendermint/libs/common/errors.go
similarity index 53%
rename from vendor/github.com/tendermint/tmlibs/common/errors.go
rename to vendor/github.com/tendermint/tendermint/libs/common/errors.go
index 5992b23466d55c72d2be895962dd1fcf58a9ae3b..5c31b8968ddd17dda6b5860ac959a6e97c0e2195 100644
--- a/vendor/github.com/tendermint/tmlibs/common/errors.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/errors.go
@@ -6,106 +6,81 @@ import (
 )
 
 //----------------------------------------
-// Convenience methods
+// Convenience method.
 
-// ErrorWrap will just call .TraceFrom(), or create a new *cmnError.
 func ErrorWrap(cause interface{}, format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
 	if causeCmnError, ok := cause.(*cmnError); ok {
-		return causeCmnError.TraceFrom(1, msg)
+		msg := Fmt(format, args...)
+		return causeCmnError.Stacktrace().Trace(1, msg)
+	} else if cause == nil {
+		return newCmnError(FmtError{format, args}).Stacktrace()
+	} else {
+		// NOTE: causeCmnError is a typed nil here.
+		msg := Fmt(format, args...)
+		return newCmnError(cause).Stacktrace().Trace(1, msg)
 	}
-	// NOTE: cause may be nil.
-	// NOTE: do not use causeCmnError here, not the same as nil.
-	return newError(msg, cause, cause).Stacktrace()
 }
 
 //----------------------------------------
 // Error & cmnError
 
 /*
-Usage:
+
+Usage with arbitrary error data:
 
 ```go
 	// Error construction
-	var someT = errors.New("Some err type")
-	var err1 error = NewErrorWithT(someT, "my message")
+	type MyError struct{}
+	var err1 error = NewErrorWithData(MyError{}, "my message")
 	...
 	// Wrapping
 	var err2 error  = ErrorWrap(err1, "another message")
 	if (err1 != err2) { panic("should be the same")
 	...
 	// Error handling
-	switch err2.T() {
-		case someT: ...
+	switch err2.Data().(type){
+		case MyError: ...
 	    default: ...
 	}
 ```
-
 */
 type Error interface {
 	Error() string
-	Message() string
 	Stacktrace() Error
-	Trace(format string, args ...interface{}) Error
-	TraceFrom(offset int, format string, args ...interface{}) Error
-	Cause() interface{}
-	WithT(t interface{}) Error
-	T() interface{}
-	Format(s fmt.State, verb rune)
+	Trace(offset int, format string, args ...interface{}) Error
+	Data() interface{}
 }
 
-// New Error with no cause where the type is the format string of the message..
+// New Error with formatted message.
+// The Error's Data will be a FmtError type.
 func NewError(format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return newError(msg, nil, format)
-
+	err := FmtError{format, args}
+	return newCmnError(err)
 }
 
-// New Error with specified type and message.
-func NewErrorWithT(t interface{}, format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return newError(msg, nil, t)
-}
-
-// NOTE: The name of a function "NewErrorWithCause()" implies that you are
-// creating a new Error, yet, if the cause is an Error, creating a new Error to
-// hold a ref to the old Error is probably *not* what you want to do.
-// So, use ErrorWrap(cause, format, a...) instead, which returns the same error
-// if cause is an Error.
-// IF you must set an Error as the cause of an Error,
-// then you can use the WithCauser interface to do so manually.
-// e.g. (error).(tmlibs.WithCauser).WithCause(causeError)
-
-type WithCauser interface {
-	WithCause(cause interface{}) Error
+// New Error with specified data.
+func NewErrorWithData(data interface{}) Error {
+	return newCmnError(data)
 }
 
 type cmnError struct {
-	msg        string         // first msg which also appears in msg
-	cause      interface{}    // underlying cause (or panic object)
-	t          interface{}    // for switching on error
+	data       interface{}    // associated data
 	msgtraces  []msgtraceItem // all messages traced
 	stacktrace []uintptr      // first stack trace
 }
 
-var _ WithCauser = &cmnError{}
 var _ Error = &cmnError{}
 
 // NOTE: do not expose.
-func newError(msg string, cause interface{}, t interface{}) *cmnError {
+func newCmnError(data interface{}) *cmnError {
 	return &cmnError{
-		msg:        msg,
-		cause:      cause,
-		t:          t,
+		data:       data,
 		msgtraces:  nil,
 		stacktrace: nil,
 	}
 }
 
-func (err *cmnError) Message() string {
-	return err.msg
-}
-
+// Implements error.
 func (err *cmnError) Error() string {
 	return fmt.Sprintf("%v", err)
 }
@@ -121,42 +96,17 @@ func (err *cmnError) Stacktrace() Error {
 }
 
 // Add tracing information with msg.
-func (err *cmnError) Trace(format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return err.doTrace(msg, 0)
-}
-
-// Same as Trace, but traces the line `offset` calls out.
-// If n == 0, the behavior is identical to Trace().
-func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error {
+// Set n=0 unless wrapped with some function, then n > 0.
+func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error {
 	msg := Fmt(format, args...)
 	return err.doTrace(msg, offset)
 }
 
-// Return last known cause.
-// NOTE: The meaning of "cause" is left for the caller to define.
-// There exists no "canonical" definition of "cause".
-// Instead of blaming, try to handle it, or organize it.
-func (err *cmnError) Cause() interface{} {
-	return err.cause
-}
-
-// Overwrites the Error's cause.
-func (err *cmnError) WithCause(cause interface{}) Error {
-	err.cause = cause
-	return err
-}
-
-// Overwrites the Error's type.
-func (err *cmnError) WithT(t interface{}) Error {
-	err.t = t
-	return err
-}
-
-// Return the "type" of this message, primarily for switching
-// to handle this Error.
-func (err *cmnError) T() interface{} {
-	return err.t
+// Return the "data" of this error.
+// Data could be used for error handling/switching,
+// or for holding general error/debug information.
+func (err *cmnError) Data() interface{} {
+	return err.data
 }
 
 func (err *cmnError) doTrace(msg string, n int) Error {
@@ -177,12 +127,8 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
 	default:
 		if s.Flag('#') {
 			s.Write([]byte("--= Error =--\n"))
-			// Write msg.
-			s.Write([]byte(fmt.Sprintf("Message: %s\n", err.msg)))
-			// Write cause.
-			s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause)))
-			// Write type.
-			s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t)))
+			// Write data.
+			s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data)))
 			// Write msg trace items.
 			s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
 			for i, msgtrace := range err.msgtraces {
@@ -200,11 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
 			s.Write([]byte("--= /Error =--\n"))
 		} else {
 			// Write msg.
-			if err.cause != nil {
-				s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc?
-			} else {
-				s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc?
-			}
+			s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
 		}
 	}
 }
@@ -232,6 +174,45 @@ func (mti msgtraceItem) String() string {
 	)
 }
 
+//----------------------------------------
+// fmt error
+
+/*
+
+FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError))
+Theoretically it could be used to switch on the format string.
+
+```go
+	// Error construction
+	var err1 error = NewError("invalid username %v", "BOB")
+	var err2 error = NewError("another kind of error")
+	...
+	// Error handling
+	switch err1.Data().(cmn.FmtError).Format() {
+		case "invalid username %v": ...
+		case "another kind of error": ...
+	    default: ...
+	}
+```
+*/
+type FmtError struct {
+	format string
+	args   []interface{}
+}
+
+func (fe FmtError) Error() string {
+	return fmt.Sprintf(fe.format, fe.args...)
+}
+
+func (fe FmtError) String() string {
+	return fmt.Sprintf("FmtError{format:%v,args:%v}",
+		fe.format, fe.args)
+}
+
+func (fe FmtError) Format() string {
+	return fe.format
+}
+
 //----------------------------------------
 // Panic wrappers
 // XXX DEPRECATED
diff --git a/vendor/github.com/tendermint/tmlibs/common/heap.go b/vendor/github.com/tendermint/tendermint/libs/common/heap.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/heap.go
rename to vendor/github.com/tendermint/tendermint/libs/common/heap.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/int.go b/vendor/github.com/tendermint/tendermint/libs/common/int.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/int.go
rename to vendor/github.com/tendermint/tendermint/libs/common/int.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/io.go b/vendor/github.com/tendermint/tendermint/libs/common/io.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/io.go
rename to vendor/github.com/tendermint/tendermint/libs/common/io.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/kvpair.go b/vendor/github.com/tendermint/tendermint/libs/common/kvpair.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/kvpair.go
rename to vendor/github.com/tendermint/tendermint/libs/common/kvpair.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/math.go b/vendor/github.com/tendermint/tendermint/libs/common/math.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/math.go
rename to vendor/github.com/tendermint/tendermint/libs/common/math.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/net.go b/vendor/github.com/tendermint/tendermint/libs/common/net.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/net.go
rename to vendor/github.com/tendermint/tendermint/libs/common/net.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/nil.go b/vendor/github.com/tendermint/tendermint/libs/common/nil.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/nil.go
rename to vendor/github.com/tendermint/tendermint/libs/common/nil.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/os.go b/vendor/github.com/tendermint/tendermint/libs/common/os.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/os.go
rename to vendor/github.com/tendermint/tendermint/libs/common/os.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/random.go b/vendor/github.com/tendermint/tendermint/libs/common/random.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/random.go
rename to vendor/github.com/tendermint/tendermint/libs/common/random.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go b/vendor/github.com/tendermint/tendermint/libs/common/repeat_timer.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/repeat_timer.go
rename to vendor/github.com/tendermint/tendermint/libs/common/repeat_timer.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/service.go b/vendor/github.com/tendermint/tendermint/libs/common/service.go
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/common/service.go
rename to vendor/github.com/tendermint/tendermint/libs/common/service.go
index 2f90fa4f940b84624b1aab25002a3a3aa3cbd347..b6f166e77cf9b46536f332dc4e5d3c8cd99bd893 100644
--- a/vendor/github.com/tendermint/tmlibs/common/service.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/service.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"sync/atomic"
 
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tmlibs/common/string.go b/vendor/github.com/tendermint/tendermint/libs/common/string.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/string.go
rename to vendor/github.com/tendermint/tendermint/libs/common/string.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/throttle_timer.go b/vendor/github.com/tendermint/tendermint/libs/common/throttle_timer.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/throttle_timer.go
rename to vendor/github.com/tendermint/tendermint/libs/common/throttle_timer.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/types.pb.go b/vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
similarity index 68%
rename from vendor/github.com/tendermint/tmlibs/common/types.pb.go
rename to vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
index 047b7aee26d1ecc2984257454510f7e4d386618c..f6645602a9f940d4b40f51d4dd0baf38e87777d1 100644
--- a/vendor/github.com/tendermint/tmlibs/common/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
@@ -1,4 +1,4 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: common/types.proto
 
 /*
@@ -14,10 +14,9 @@ It has these top-level messages:
 //nolint: gas
 package common
 
-import proto "github.com/gogo/protobuf/proto"
+import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -28,7 +27,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 // Define these here for compatibility but use tmlibs/common.KVPair.
 type KVPair struct {
@@ -39,7 +38,7 @@ type KVPair struct {
 func (m *KVPair) Reset()                    { *m = KVPair{} }
 func (m *KVPair) String() string            { return proto.CompactTextString(m) }
 func (*KVPair) ProtoMessage()               {}
-func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
 
 func (m *KVPair) GetKey() []byte {
 	if m != nil {
@@ -58,13 +57,13 @@ func (m *KVPair) GetValue() []byte {
 // Define these here for compatibility but use tmlibs/common.KI64Pair.
 type KI64Pair struct {
 	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Value int64  `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+	Value int64  `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
 }
 
 func (m *KI64Pair) Reset()                    { *m = KI64Pair{} }
 func (m *KI64Pair) String() string            { return proto.CompactTextString(m) }
 func (*KI64Pair) ProtoMessage()               {}
-func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
 
 func (m *KI64Pair) GetKey() []byte {
 	if m != nil {
@@ -85,17 +84,15 @@ func init() {
 	proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair")
 }
 
-func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) }
+func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) }
 
-var fileDescriptorTypes = []byte{
-	// 137 bytes of a gzipped FileDescriptorProto
+var fileDescriptor0 = []byte{
+	// 107 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd,
 	0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
-	0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7,
-	0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68,
-	0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e,
-	0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12,
-	0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99,
-	0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
-	0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00,
+	0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31,
+	0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac,
+	0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb,
+	0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00,
+	0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/tendermint/tmlibs/common/types.proto b/vendor/github.com/tendermint/tendermint/libs/common/types.proto
similarity index 56%
rename from vendor/github.com/tendermint/tmlibs/common/types.proto
rename to vendor/github.com/tendermint/tendermint/libs/common/types.proto
index 94abcccc3be5e31cf2d350cd47d48ebffd6fe6c9..8406fcfddb0841b697bd1b8cc97ebb20c8272b33 100644
--- a/vendor/github.com/tendermint/tmlibs/common/types.proto
+++ b/vendor/github.com/tendermint/tendermint/libs/common/types.proto
@@ -1,13 +1,6 @@
 syntax = "proto3";
 package common;
 
-// For more information on gogo.proto, see:
-// https://github.com/gogo/protobuf/blob/master/extensions.md
-// NOTE: Try really hard not to use custom types,
-// it's often complicated, broken, nor not worth it.
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-
 //----------------------------------------
 // Abstract types
 
diff --git a/vendor/github.com/tendermint/tmlibs/common/word.go b/vendor/github.com/tendermint/tendermint/libs/common/word.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/word.go
rename to vendor/github.com/tendermint/tendermint/libs/common/word.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/LICENSE.md b/vendor/github.com/tendermint/tendermint/libs/db/LICENSE.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/LICENSE.md
rename to vendor/github.com/tendermint/tendermint/libs/db/LICENSE.md
diff --git a/vendor/github.com/tendermint/tmlibs/db/README.md b/vendor/github.com/tendermint/tendermint/libs/db/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/README.md
rename to vendor/github.com/tendermint/tendermint/libs/db/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go b/vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
similarity index 88%
rename from vendor/github.com/tendermint/tmlibs/db/c_level_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
index e3e6c1d5d0b882871ebeecd09f1657ae7dea903f..30746126196a56034f9f14089e867b506e9ef941 100644
--- a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
@@ -190,7 +190,8 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator {
 }
 
 func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	itr := db.db.NewIterator(db.ro)
+	return newCLevelDBIterator(itr, start, end, true)
 }
 
 var _ Iterator = (*cLevelDBIterator)(nil)
@@ -204,12 +205,25 @@ type cLevelDBIterator struct {
 
 func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
 	if isReverse {
-		panic("not implemented yet") // XXX
-	}
-	if start != nil {
-		source.Seek(start)
+		if start == nil {
+			source.SeekToLast()
+		} else {
+			source.Seek(start)
+			if source.Valid() {
+				soakey := source.Key() // start or after key
+				if bytes.Compare(start, soakey) < 0 {
+					source.Prev()
+				}
+			} else {
+				source.SeekToLast()
+			}
+		}
 	} else {
-		source.SeekToFirst()
+		if start == nil {
+			source.SeekToFirst()
+		} else {
+			source.Seek(start)
+		}
 	}
 	return &cLevelDBIterator{
 		source:    source,
@@ -243,9 +257,16 @@ func (itr cLevelDBIterator) Valid() bool {
 	// If key is end or past it, invalid.
 	var end = itr.end
 	var key = itr.source.Key()
-	if end != nil && bytes.Compare(end, key) <= 0 {
-		itr.isInvalid = true
-		return false
+	if itr.isReverse {
+		if end != nil && bytes.Compare(key, end) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if end != nil && bytes.Compare(end, key) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
 	}
 
 	// It's valid.
@@ -267,7 +288,11 @@ func (itr cLevelDBIterator) Value() []byte {
 func (itr cLevelDBIterator) Next() {
 	itr.assertNoError()
 	itr.assertIsValid()
-	itr.source.Next()
+	if itr.isReverse {
+		itr.source.Prev()
+	} else {
+		itr.source.Next()
+	}
 }
 
 func (itr cLevelDBIterator) Close() {
diff --git a/vendor/github.com/tendermint/tmlibs/db/db.go b/vendor/github.com/tendermint/tendermint/libs/db/db.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/db.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/debug_db.go b/vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
similarity index 68%
rename from vendor/github.com/tendermint/tmlibs/db/debug_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
index 7666ed9fd70e93751b8b6ec3924c0fe02d9ba55c..bb361a266f2c8654ecdfb155ada8cfce41306624 100644
--- a/vendor/github.com/tendermint/tmlibs/db/debug_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
@@ -4,13 +4,9 @@ import (
 	"fmt"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-func _fmt(f string, az ...interface{}) string {
-	return fmt.Sprintf(f, az...)
-}
-
 //----------------------------------------
 // debugDB
 
@@ -33,7 +29,9 @@ func (ddb debugDB) Mutex() *sync.Mutex { return nil }
 // Implements DB.
 func (ddb debugDB) Get(key []byte) (value []byte) {
 	defer func() {
-		fmt.Printf("%v.Get(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Blue(_fmt("%X", value)))
+		fmt.Printf("%v.Get(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue),
+			cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	}()
 	value = ddb.db.Get(key)
 	return
@@ -42,68 +40,85 @@ func (ddb debugDB) Get(key []byte) (value []byte) {
 // Implements DB.
 func (ddb debugDB) Has(key []byte) (has bool) {
 	defer func() {
-		fmt.Printf("%v.Has(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), has)
+		fmt.Printf("%v.Has(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has)
 	}()
 	return ddb.db.Has(key)
 }
 
 // Implements DB.
 func (ddb debugDB) Set(key []byte, value []byte) {
-	fmt.Printf("%v.Set(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.Set(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.Set(key, value)
 }
 
 // Implements DB.
 func (ddb debugDB) SetSync(key []byte, value []byte) {
-	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.SetSync(key, value)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) SetNoLock(key []byte, value []byte) {
-	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.(atomicSetDeleter).SetNoLock(key, value)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
-	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.(atomicSetDeleter).SetNoLockSync(key, value)
 }
 
 // Implements DB.
 func (ddb debugDB) Delete(key []byte) {
-	fmt.Printf("%v.Delete(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.Delete(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.Delete(key)
 }
 
 // Implements DB.
 func (ddb debugDB) DeleteSync(key []byte) {
-	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.DeleteSync(key)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) DeleteNoLock(key []byte) {
-	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.(atomicSetDeleter).DeleteNoLock(key)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) DeleteNoLockSync(key []byte) {
-	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.(atomicSetDeleter).DeleteNoLockSync(key)
 }
 
 // Implements DB.
 func (ddb debugDB) Iterator(start, end []byte) Iterator {
-	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
+	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
 	return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
 }
 
 // Implements DB.
 func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
-	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
+	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
 	return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
 }
 
@@ -173,15 +188,17 @@ func (ditr debugIterator) Next() {
 
 // Implements Iterator.
 func (ditr debugIterator) Key() (key []byte) {
-	fmt.Printf("%v.itr.Key() %v\n", ditr.label, cmn.Cyan(_fmt("%X", key)))
 	key = ditr.itr.Key()
+	fmt.Printf("%v.itr.Key() %v\n", ditr.label,
+		cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue))
 	return
 }
 
 // Implements Iterator.
 func (ditr debugIterator) Value() (value []byte) {
-	fmt.Printf("%v.itr.Value() %v\n", ditr.label, cmn.Blue(_fmt("%X", value)))
 	value = ditr.itr.Value()
+	fmt.Printf("%v.itr.Value() %v\n", ditr.label,
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	return
 }
 
@@ -209,13 +226,16 @@ func NewDebugBatch(label string, bch Batch) debugBatch {
 
 // Implements Batch.
 func (dbch debugBatch) Set(key, value []byte) {
-	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	dbch.bch.Set(key, value)
 }
 
 // Implements Batch.
 func (dbch debugBatch) Delete(key []byte) {
-	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	dbch.bch.Delete(key)
 }
 
diff --git a/vendor/github.com/tendermint/tmlibs/db/fsdb.go b/vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
similarity index 90%
rename from vendor/github.com/tendermint/tmlibs/db/fsdb.go
rename to vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
index 578c1785a37e0d97982f149114cfb93232a62d47..fc861deccba29cf54c77f8c47d1758806828bc99 100644
--- a/vendor/github.com/tendermint/tmlibs/db/fsdb.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
@@ -10,7 +10,7 @@ import (
 	"sync"
 
 	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
@@ -151,21 +151,29 @@ func (db *FSDB) Mutex() *sync.Mutex {
 }
 
 func (db *FSDB) Iterator(start, end []byte) Iterator {
+	return db.MakeIterator(start, end, false)
+}
+
+func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
 
 	// We need a copy of all of the keys.
 	// Not the best, but probably not a bottleneck depending.
-	keys, err := list(db.dir, start, end)
+	keys, err := list(db.dir, start, end, isReversed)
 	if err != nil {
 		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
 	}
-	sort.Strings(keys)
+	if isReversed {
+		sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+	} else {
+		sort.Strings(keys)
+	}
 	return newMemDBIterator(db, keys, start, end)
 }
 
 func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	return db.MakeIterator(start, end, true)
 }
 
 func (db *FSDB) nameToPath(name []byte) string {
@@ -213,7 +221,7 @@ func remove(path string) error {
 
 // List keys in a directory, stripping of escape sequences and dir portions.
 // CONTRACT: returns os errors directly without wrapping.
-func list(dirPath string, start, end []byte) ([]string, error) {
+func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) {
 	dir, err := os.Open(dirPath)
 	if err != nil {
 		return nil, err
@@ -231,7 +239,7 @@ func list(dirPath string, start, end []byte) ([]string, error) {
 			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
 		}
 		key := unescapeKey([]byte(n))
-		if IsKeyInDomain(key, start, end, false) {
+		if IsKeyInDomain(key, start, end, isReversed) {
 			keys = append(keys, string(key))
 		}
 	}
diff --git a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go b/vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
similarity index 88%
rename from vendor/github.com/tendermint/tmlibs/db/go_level_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
index 9ff162e3809ef4cd298e9433e28ad540b753a051..349e447b2bf2045db646fe8ff62403c20b70079d 100644
--- a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
@@ -10,7 +10,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func init() {
@@ -193,7 +193,8 @@ func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
 
 // Implements DB.
 func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	itr := db.db.NewIterator(nil, nil)
+	return newGoLevelDBIterator(itr, start, end, true)
 }
 
 type goLevelDBIterator struct {
@@ -208,9 +209,26 @@ var _ Iterator = (*goLevelDBIterator)(nil)
 
 func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
 	if isReverse {
-		panic("not implemented yet") // XXX
+		if start == nil {
+			source.Last()
+		} else {
+			valid := source.Seek(start)
+			if valid {
+				soakey := source.Key() // start or after key
+				if bytes.Compare(start, soakey) < 0 {
+					source.Prev()
+				}
+			} else {
+				source.Last()
+			}
+		}
+	} else {
+		if start == nil {
+			source.First()
+		} else {
+			source.Seek(start)
+		}
 	}
-	source.Seek(start)
 	return &goLevelDBIterator{
 		source:    source,
 		start:     start,
@@ -245,9 +263,17 @@ func (itr *goLevelDBIterator) Valid() bool {
 	// If key is end or past it, invalid.
 	var end = itr.end
 	var key = itr.source.Key()
-	if end != nil && bytes.Compare(end, key) <= 0 {
-		itr.isInvalid = true
-		return false
+
+	if itr.isReverse {
+		if end != nil && bytes.Compare(key, end) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if end != nil && bytes.Compare(end, key) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
 	}
 
 	// Valid
@@ -276,7 +302,11 @@ func (itr *goLevelDBIterator) Value() []byte {
 func (itr *goLevelDBIterator) Next() {
 	itr.assertNoError()
 	itr.assertIsValid()
-	itr.source.Next()
+	if itr.isReverse {
+		itr.source.Prev()
+	} else {
+		itr.source.Next()
+	}
 }
 
 // Implements Iterator.
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_batch.go b/vendor/github.com/tendermint/tendermint/libs/db/mem_batch.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/mem_batch.go
rename to vendor/github.com/tendermint/tendermint/libs/db/mem_batch.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_db.go b/vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
similarity index 98%
rename from vendor/github.com/tendermint/tmlibs/db/mem_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
index 1521f87acec88dff3d07d134fd2574546a45efd4..580123017cddd613286240db2824f562b1ef5c7d 100644
--- a/vendor/github.com/tendermint/tmlibs/db/mem_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
@@ -114,7 +114,7 @@ func (db *MemDB) Close() {
 	// database, we don't have a destination
 	// to flush contents to nor do we want
 	// any data loss on invoking Close()
-	// See the discussion in https://github.com/tendermint/tmlibs/pull/56
+	// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
 }
 
 // Implements DB.
diff --git a/vendor/github.com/tendermint/tmlibs/db/prefix_db.go b/vendor/github.com/tendermint/tendermint/libs/db/prefix_db.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/prefix_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/prefix_db.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/types.go b/vendor/github.com/tendermint/tendermint/libs/db/types.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/types.go
rename to vendor/github.com/tendermint/tendermint/libs/db/types.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/util.go b/vendor/github.com/tendermint/tendermint/libs/db/util.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/util.go
rename to vendor/github.com/tendermint/tendermint/libs/db/util.go
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/Makefile b/vendor/github.com/tendermint/tendermint/libs/events/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..696aafff1c28c2a8288acc59dc24487cf976ded2
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/events/Makefile
@@ -0,0 +1,9 @@
+.PHONY: docs
+REPO:=github.com/tendermint/tendermint/libs/events
+
+docs:
+	@go get github.com/davecheney/godoc2md
+	godoc2md $(REPO) > README.md
+
+test:
+	go test -v ./...
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/README.md b/vendor/github.com/tendermint/tendermint/libs/events/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..14aa498ff8715c21373add990f02f5e4c25cc827
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/events/README.md
@@ -0,0 +1,175 @@
+
+
+# events
+`import "github.com/tendermint/tendermint/libs/events"`
+
+* [Overview](#pkg-overview)
+* [Index](#pkg-index)
+
+## <a name="pkg-overview">Overview</a>
+Pub-Sub in go with event caching
+
+
+
+
+## <a name="pkg-index">Index</a>
+* [type EventCache](#EventCache)
+  * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache)
+  * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent)
+  * [func (evc *EventCache) Flush()](#EventCache.Flush)
+* [type EventCallback](#EventCallback)
+* [type EventData](#EventData)
+* [type EventSwitch](#EventSwitch)
+  * [func NewEventSwitch() EventSwitch](#NewEventSwitch)
+* [type Eventable](#Eventable)
+* [type Fireable](#Fireable)
+
+
+#### <a name="pkg-files">Package files</a>
+[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) 
+
+
+
+
+
+
+## <a name="EventCache">type</a> [EventCache](/src/target/event_cache.go?s=116:179#L5)
+``` go
+type EventCache struct {
+    // contains filtered or unexported fields
+}
+```
+An EventCache buffers events for a Fireable
+All events are cached. Filtering happens on Flush
+
+
+
+
+
+
+
+### <a name="NewEventCache">func</a> [NewEventCache](/src/target/event_cache.go?s=239:284#L11)
+``` go
+func NewEventCache(evsw Fireable) *EventCache
+```
+Create a new EventCache with an EventSwitch as backend
+
+
+
+
+
+### <a name="EventCache.FireEvent">func</a> (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24)
+``` go
+func (evc *EventCache) FireEvent(event string, data EventData)
+```
+Cache an event to be fired upon finality.
+
+
+
+
+### <a name="EventCache.Flush">func</a> (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31)
+``` go
+func (evc *EventCache) Flush()
+```
+Fire events by running evsw.FireEvent on all cached events. Blocks.
+Clears cached events
+
+
+
+
+## <a name="EventCallback">type</a> [EventCallback](/src/target/events.go?s=4201:4240#L185)
+``` go
+type EventCallback func(data EventData)
+```
+
+
+
+
+
+
+
+
+
+## <a name="EventData">type</a> [EventData](/src/target/events.go?s=243:294#L14)
+``` go
+type EventData interface {
+}
+```
+Generic event data can be typed and registered with tendermint/go-amino
+via concrete implementation of this interface
+
+
+
+
+
+
+
+
+
+
+## <a name="EventSwitch">type</a> [EventSwitch](/src/target/events.go?s=560:771#L29)
+``` go
+type EventSwitch interface {
+    cmn.Service
+    Fireable
+
+    AddListenerForEvent(listenerID, event string, cb EventCallback)
+    RemoveListenerForEvent(event string, listenerID string)
+    RemoveListener(listenerID string)
+}
+```
+
+
+
+
+
+
+### <a name="NewEventSwitch">func</a> [NewEventSwitch](/src/target/events.go?s=917:950#L46)
+``` go
+func NewEventSwitch() EventSwitch
+```
+
+
+
+
+## <a name="Eventable">type</a> [Eventable](/src/target/events.go?s=378:440#L20)
+``` go
+type Eventable interface {
+    SetEventSwitch(evsw EventSwitch)
+}
+```
+reactors and other modules should export
+this interface to become eventable
+
+
+
+
+
+
+
+
+
+
+## <a name="Fireable">type</a> [Fireable](/src/target/events.go?s=490:558#L25)
+``` go
+type Fireable interface {
+    FireEvent(event string, data EventData)
+}
+```
+an event switch or cache implements fireable
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/events.go b/vendor/github.com/tendermint/tendermint/libs/events/events.go
index 075f9b42b16accfedf4d975757d8372d80be4216..9c7f0fd05255c1d86b98877757597048b61a6d25 100644
--- a/vendor/github.com/tendermint/tendermint/libs/events/events.go
+++ b/vendor/github.com/tendermint/tendermint/libs/events/events.go
@@ -6,7 +6,7 @@ package events
 import (
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Generic event data can be typed and registered with tendermint/go-amino
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/README.md b/vendor/github.com/tendermint/tendermint/libs/flowrate/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/README.md
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/flowrate.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/flowrate.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/flowrate.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/flowrate.go
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/io.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/io.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/io.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/io.go
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/util.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/util.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/util.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/util.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/filter.go b/vendor/github.com/tendermint/tendermint/libs/log/filter.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/filter.go
rename to vendor/github.com/tendermint/tendermint/libs/log/filter.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/logger.go b/vendor/github.com/tendermint/tendermint/libs/log/logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/nop_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/nop_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/nop_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/nop_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/testing_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/testing_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/testing_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/testing_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tm_json_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tm_json_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tm_json_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tm_json_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tm_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tm_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tm_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tm_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tmfmt_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tmfmt_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tmfmt_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tmfmt_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tracing_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tracing_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tracing_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tracing_logger.go
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go b/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
index 776e0653bcb780d5000d0aa1fa3dfc2c108d696e..4c0d97e2ffc5f87a849a8a0dcc41f995729b3ef2 100644
--- a/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
@@ -16,7 +16,7 @@ import (
 	"errors"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type operation int
@@ -163,6 +163,8 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -190,6 +192,8 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query)
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -211,6 +215,8 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -229,6 +235,8 @@ func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagM
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..aef42b2dff643b1eba904a0cde9454d7faa29392
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile
@@ -0,0 +1,11 @@
+gen_query_parser:
+	go get -u -v github.com/pointlander/peg
+	peg -inline -switch query.peg
+
+fuzzy_test:
+	go get -u -v github.com/dvyukov/go-fuzz/go-fuzz
+	go get -u -v github.com/dvyukov/go-fuzz/go-fuzz-build
+	go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test
+	go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output
+
+.PHONY: gen_query_parser fuzzy_test
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg
new file mode 100644
index 0000000000000000000000000000000000000000..739892e4f704db4221676f0b84da3ca63889bb44
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg
@@ -0,0 +1,33 @@
+package query
+
+type QueryParser Peg {
+}
+
+e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !.
+
+condition <- tag ' '* (le ' '* (number / time / date)
+                      / ge ' '* (number / time / date)
+                      / l ' '* (number / time / date)
+                      / g ' '* (number / time / date)
+                      / equal ' '* (number / time / date / value)
+                      / contains ' '* value
+                      )
+
+tag <- < (![ \t\n\r\\()"'=><] .)+ >
+value <- < '\'' (!["'] .)* '\''>
+number <- < ('0'
+           / [1-9] digit* ('.' digit*)?) >
+digit <- [0-9]
+time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') >
+date <- "DATE " < year '-' month '-' day >
+year <- ('1' / '2') digit digit digit
+month <- ('0' / '1') digit
+day <- ('0' / '1' / '2' / '3') digit
+and <- "AND"
+
+equal <- "="
+contains <- "CONTAINS"
+le <- "<="
+ge <- ">="
+l <- "<"
+g <- ">"
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
index c86e4a47fb41be45336dfc7717d5a3703dfaee01..c1cc60aa9045cebf5aca5d30a890840af7d4aa93 100644
--- a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
@@ -1,6 +1,8 @@
 // nolint
 package query
 
+//go:generate peg -inline -switch query.peg
+
 import (
 	"fmt"
 	"math"
diff --git a/vendor/github.com/tendermint/tendermint/mempool/mempool.go b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
index 935dfaac70b9a7a60b2ae92b623954fba2985183..06852c9af3884ec5c0c2582f2d9539daaf3829a5 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
@@ -10,11 +10,11 @@ import (
 
 	"github.com/pkg/errors"
 
-	abci "github.com/tendermint/abci/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	"github.com/tendermint/tmlibs/clist"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	"github.com/tendermint/tendermint/libs/clist"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/proxy"
@@ -57,6 +57,11 @@ var (
 	ErrMempoolIsFull = errors.New("Mempool is full")
 )
 
+// TxID is the hex encoded hash of the bytes as a types.Tx.
+func TxID(tx []byte) string {
+	return fmt.Sprintf("%X", types.Tx(tx).Hash())
+}
+
 // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
 // round. Transaction validity is checked using the CheckTx abci message before the transaction is
 // added to the pool. The Mempool uses a concurrent list structure for storing transactions that
@@ -83,10 +88,20 @@ type Mempool struct {
 	wal *auto.AutoFile
 
 	logger log.Logger
+
+	metrics *Metrics
 }
 
+// MempoolOption sets an optional parameter on the Mempool.
+type MempoolOption func(*Mempool)
+
 // NewMempool returns a new Mempool with the given configuration and connection to an application.
-func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64) *Mempool {
+func NewMempool(
+	config *cfg.MempoolConfig,
+	proxyAppConn proxy.AppConnMempool,
+	height int64,
+	options ...MempoolOption,
+) *Mempool {
 	mempool := &Mempool{
 		config:        config,
 		proxyAppConn:  proxyAppConn,
@@ -97,6 +112,7 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
 		recheckCursor: nil,
 		recheckEnd:    nil,
 		logger:        log.NewNopLogger(),
+		metrics:       NopMetrics(),
 	}
 	if config.CacheSize > 0 {
 		mempool.cache = newMapTxCache(config.CacheSize)
@@ -104,6 +120,9 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
 		mempool.cache = nopTxCache{}
 	}
 	proxyAppConn.SetResponseCallback(mempool.resCb)
+	for _, option := range options {
+		option(mempool)
+	}
 	return mempool
 }
 
@@ -119,6 +138,11 @@ func (mem *Mempool) SetLogger(l log.Logger) {
 	mem.logger = l
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) MempoolOption {
+	return func(mem *Mempool) { mem.metrics = metrics }
+}
+
 // CloseWAL closes and discards the underlying WAL file.
 // Any further writes will not be relayed to disk.
 func (mem *Mempool) CloseWAL() bool {
@@ -254,6 +278,7 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
 	} else {
 		mem.resCbRecheck(req, res)
 	}
+	mem.metrics.Size.Set(float64(mem.Size()))
 }
 
 func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
@@ -268,11 +293,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
 				tx:      tx,
 			}
 			mem.txs.PushBack(memTx)
-			mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
+			mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size())
 			mem.notifyTxsAvailable()
 		} else {
 			// ignore bad transaction
-			mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
+			mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r)
 
 			// remove from cache (it might be good later)
 			mem.cache.Remove(tx)
@@ -397,6 +422,7 @@ func (mem *Mempool) Update(height int64, txs types.Txs) error {
 		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
 		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 	}
+	mem.metrics.Size.Set(float64(mem.Size()))
 	return nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/mempool/metrics.go b/vendor/github.com/tendermint/tendermint/mempool/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..f381678cb314d6c367598a354f16fdcb2a6d0be1
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/mempool/metrics.go
@@ -0,0 +1,34 @@
+package mempool
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+// see MetricsProvider for descriptions.
+type Metrics struct {
+	// Size of the mempool.
+	Size metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics build using Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "mempool",
+			Name:      "size",
+			Help:      "Size of the mempool (number of uncommitted transactions).",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Size: discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/mempool/reactor.go b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
index 5d1f4e79304b8eb0a92a11502032a73976c9d80a..96988be78c8ebbe4a2e86b4984b06f7871ed059e 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
@@ -5,10 +5,10 @@ import (
 	"reflect"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
 	amino "github.com/tendermint/go-amino"
-	"github.com/tendermint/tmlibs/clist"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/clist"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
@@ -78,7 +78,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 // Receive implements Reactor.
 // It adds any received transactions to the mempool.
 func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		memR.Switch.StopPeerForError(src, err)
@@ -90,7 +90,7 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	case *TxMessage:
 		err := memR.Mempool.CheckTx(msg.Tx, nil)
 		if err != nil {
-			memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err)
+			memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err)
 		}
 		// broadcasting happens from go routines per peer
 	default:
@@ -174,11 +174,9 @@ func RegisterMempoolMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
 }
 
-// DecodeMessage decodes a byte-array into a MempoolMessage.
-func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
+func decodeMsg(bz []byte) (msg MempoolMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/node/id.go b/vendor/github.com/tendermint/tendermint/node/id.go
index fa391f946e4e30131b16186f5e5f83f2e75e04cb..5100597c6233ffe30aead79f6a37548992d60efe 100644
--- a/vendor/github.com/tendermint/tendermint/node/id.go
+++ b/vendor/github.com/tendermint/tendermint/node/id.go
@@ -3,7 +3,7 @@ package node
 import (
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 type NodeID struct {
diff --git a/vendor/github.com/tendermint/tendermint/node/node.go b/vendor/github.com/tendermint/tendermint/node/node.go
index efeb17ee0945f07c66f846c89b3c65472c295c47..faf33d88a0e83862a942b93affabf3f89a1273d3 100644
--- a/vendor/github.com/tendermint/tendermint/node/node.go
+++ b/vendor/github.com/tendermint/tendermint/node/node.go
@@ -2,21 +2,25 @@ package node
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"net"
 	"net/http"
 
-	abci "github.com/tendermint/abci/types"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
 	amino "github.com/tendermint/go-amino"
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	cs "github.com/tendermint/tendermint/consensus"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/evidence"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
@@ -81,10 +85,25 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
 		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
 		DefaultGenesisDocProviderFunc(config),
 		DefaultDBProvider,
+		DefaultMetricsProvider,
 		logger,
 	)
 }
 
+// MetricsProvider returns a consensus, p2p and mempool Metrics.
+type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics)
+
+// DefaultMetricsProvider returns consensus, p2p and mempool Metrics build
+// using Prometheus client library.
+func DefaultMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
+	return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics()
+}
+
+// NopMetricsProvider returns consensus, p2p and mempool Metrics as no-op.
+func NopMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
+	return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics()
+}
+
 //------------------------------------------------------------------------------
 
 // Node is the highest level interface to a full Tendermint node.
@@ -114,6 +133,7 @@ type Node struct {
 	rpcListeners     []net.Listener         // rpc servers
 	txIndexer        txindex.TxIndexer
 	indexerService   *txindex.IndexerService
+	prometheusSrv    *http.Server
 }
 
 // NewNode returns a new, ready to go, Tendermint Node.
@@ -122,6 +142,7 @@ func NewNode(config *cfg.Config,
 	clientCreator proxy.ClientCreator,
 	genesisDocProvider GenesisDocProvider,
 	dbProvider DBProvider,
+	metricsProvider MetricsProvider,
 	logger log.Logger) (*Node, error) {
 
 	// Get BlockStore
@@ -208,11 +229,28 @@ func NewNode(config *cfg.Config,
 		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
 	}
 
+	// metrics
+	var (
+		csMetrics    *cs.Metrics
+		p2pMetrics   *p2p.Metrics
+		memplMetrics *mempl.Metrics
+	)
+	if config.Instrumentation.Prometheus {
+		csMetrics, p2pMetrics, memplMetrics = metricsProvider()
+	} else {
+		csMetrics, p2pMetrics, memplMetrics = NopMetricsProvider()
+	}
+
 	// Make MempoolReactor
 	mempoolLogger := logger.With("module", "mempool")
-	mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight)
-	mempool.InitWAL() // no need to have the mempool wal during tests
+	mempool := mempl.NewMempool(
+		config.Mempool,
+		proxyApp.Mempool(),
+		state.LastBlockHeight,
+		mempl.WithMetrics(memplMetrics),
+	)
 	mempool.SetLogger(mempoolLogger)
+	mempool.InitWAL() // no need to have the mempool wal during tests
 	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
 	mempoolReactor.SetLogger(mempoolLogger)
 
@@ -241,8 +279,15 @@ func NewNode(config *cfg.Config,
 	bcReactor.SetLogger(logger.With("module", "blockchain"))
 
 	// Make ConsensusReactor
-	consensusState := cs.NewConsensusState(config.Consensus, state.Copy(),
-		blockExec, blockStore, mempool, evidencePool)
+	consensusState := cs.NewConsensusState(
+		config.Consensus,
+		state.Copy(),
+		blockExec,
+		blockStore,
+		mempool,
+		evidencePool,
+		cs.WithMetrics(csMetrics),
+	)
 	consensusState.SetLogger(consensusLogger)
 	if privValidator != nil {
 		consensusState.SetPrivValidator(privValidator)
@@ -252,7 +297,7 @@ func NewNode(config *cfg.Config,
 
 	p2pLogger := logger.With("module", "p2p")
 
-	sw := p2p.NewSwitch(config.P2P)
+	sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics))
 	sw.SetLogger(p2pLogger)
 	sw.AddReactor("MEMPOOL", mempoolReactor)
 	sw.AddReactor("BLOCKCHAIN", bcReactor)
@@ -382,8 +427,11 @@ func (n *Node) OnStart() error {
 	}
 
 	// Create & add listener
-	protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress)
-	l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
+	l := p2p.NewDefaultListener(
+		n.config.P2P.ListenAddress,
+		n.config.P2P.ExternalAddress,
+		n.config.P2P.UPNP,
+		n.Logger.With("module", "p2p"))
 	n.sw.AddListener(l)
 
 	// Generate node PrivKey
@@ -411,6 +459,10 @@ func (n *Node) OnStart() error {
 		n.rpcListeners = listeners
 	}
 
+	if n.config.Instrumentation.Prometheus {
+		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
+	}
+
 	// Start the switch (the P2P server).
 	err = n.sw.Start()
 	if err != nil {
@@ -434,9 +486,16 @@ func (n *Node) OnStop() {
 	n.BaseService.OnStop()
 
 	n.Logger.Info("Stopping Node")
+
+	// first stop the non-reactor services
+	n.eventBus.Stop()
+	n.indexerService.Stop()
+
+	// now stop the reactors
 	// TODO: gracefully disconnect from peers.
 	n.sw.Stop()
 
+	// finally stop the listeners / external services
 	for _, l := range n.rpcListeners {
 		n.Logger.Info("Closing rpc listener", "listener", l)
 		if err := l.Close(); err != nil {
@@ -444,14 +503,18 @@ func (n *Node) OnStop() {
 		}
 	}
 
-	n.eventBus.Stop()
-	n.indexerService.Stop()
-
 	if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
 		if err := pvsc.Stop(); err != nil {
 			n.Logger.Error("Error stopping priv validator socket client", "err", err)
 		}
 	}
+
+	if n.prometheusSrv != nil {
+		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
+			// Error from closing listeners, or context timeout:
+			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
+		}
+	}
 }
 
 // RunForever waits for an interrupt signal and stops the node.
@@ -507,7 +570,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
 		mux.HandleFunc("/websocket", wm.WebsocketHandler)
 		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
-		listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
+		listener, err := rpcserver.StartHTTPServer(
+			listenAddr,
+			mux,
+			rpcLogger,
+			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
+		)
 		if err != nil {
 			return nil, err
 		}
@@ -517,7 +585,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 	// we expose a simplified api over grpc for convenience to app devs
 	grpcListenAddr := n.config.RPC.GRPCListenAddress
 	if grpcListenAddr != "" {
-		listener, err := grpccore.StartGRPCServer(grpcListenAddr)
+		listener, err := grpccore.StartGRPCServer(
+			grpcListenAddr,
+			grpccore.Config{
+				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
+			},
+		)
 		if err != nil {
 			return nil, err
 		}
@@ -527,6 +600,27 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 	return listeners, nil
 }
 
+// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
+// collectors on addr.
+func (n *Node) startPrometheusServer(addr string) *http.Server {
+	srv := &http.Server{
+		Addr: addr,
+		Handler: promhttp.InstrumentMetricHandler(
+			prometheus.DefaultRegisterer, promhttp.HandlerFor(
+				prometheus.DefaultGatherer,
+				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
+			),
+		),
+	}
+	go func() {
+		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
+			// Error starting or closing listener:
+			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
+		}
+	}()
+	return srv
+}
+
 // Switch returns the Node's Switch.
 func (n *Node) Switch() *p2p.Switch {
 	return n.sw
@@ -615,7 +709,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
 	}
 
 	p2pListener := n.sw.Listeners()[0]
-	p2pHost := p2pListener.ExternalAddress().IP.String()
+	p2pHost := p2pListener.ExternalAddressHost()
 	p2pPort := p2pListener.ExternalAddress().Port
 	nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
 
diff --git a/vendor/github.com/tendermint/tendermint/node/wire.go b/vendor/github.com/tendermint/tendermint/node/wire.go
index a0d7677df3d1b184f5e3e29b46e3c65e7e945365..8b3ae895021cf65c2588e0ecb38142df5d898ca4 100644
--- a/vendor/github.com/tendermint/tendermint/node/wire.go
+++ b/vendor/github.com/tendermint/tendermint/node/wire.go
@@ -2,7 +2,7 @@ package node
 
 import (
 	amino "github.com/tendermint/go-amino"
-	crypto "github.com/tendermint/go-crypto"
+	crypto "github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/README.md b/vendor/github.com/tendermint/tendermint/p2p/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..819a5056b4a6868c018c7bce21f3381ad7baf6f0
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/README.md
@@ -0,0 +1,11 @@
+# p2p
+
+The p2p package provides an abstraction around peer-to-peer communication.
+
+Docs:
+
+- [Connection](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/connection.md) for details on how connections and multiplexing work
+- [Peer](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange
+- [Node](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/node.md) for details about different types of nodes and how they should work
+- [Pex](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/reactors/pex/pex.md) for details on peer discovery and exchange
+- [Config](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/config.md) for details on some config option
diff --git a/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
index 83c8efa4b5966854607ceb15e08b51c06dac9739..da1296da0d821c2aa15d92cb5ce575ce0fa3d7e8 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"github.com/tendermint/tendermint/p2p/conn"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type Reactor interface {
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
index 5c7f19cf7cef480aa112387790958748d8de94c6..9672e01174fa189c4040973e650f899d75cde286 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
@@ -12,14 +12,13 @@ import (
 	"time"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-	flow "github.com/tendermint/tmlibs/flowrate"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
-	maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below.
-	maxPacketMsgOverheadSize       = 14   // NOTE: See connection_test for derivation.
+	defaultMaxPacketMsgPayloadSize = 1024
 
 	numBatchPacketMsgs = 10
 	minReadBufferSize  = 1024
@@ -96,6 +95,8 @@ type MConnection struct {
 	chStatsTimer *cmn.RepeatTimer // update channel stats periodically
 
 	created time.Time // time of creation
+
+	_maxPacketMsgSize int
 }
 
 // MConnConfig is a MConnection configuration.
@@ -116,16 +117,12 @@ type MConnConfig struct {
 	PongTimeout time.Duration `mapstructure:"pong_timeout"`
 }
 
-func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
-	return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
-}
-
 // DefaultMConnConfig returns the default config.
 func DefaultMConnConfig() MConnConfig {
 	return MConnConfig{
 		SendRate:                defaultSendRate,
 		RecvRate:                defaultRecvRate,
-		MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
+		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
 		FlushThrottle:           defaultFlushThrottle,
 		PingInterval:            defaultPingInterval,
 		PongTimeout:             defaultPongTimeout,
@@ -175,6 +172,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
 
 	mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
 
+	// maxPacketMsgSize() is a bit heavy, so call just once
+	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
+
 	return mconn
 }
 
@@ -399,7 +399,7 @@ func (c *MConnection) sendSomePacketMsgs() bool {
 	// Block until .sendMonitor says we can write.
 	// Once we're ready we send more than we asked for,
 	// but amortized it should even out.
-	c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
+	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
 
 	// Now send some PacketMsgs.
 	for i := 0; i < numBatchPacketMsgs; i++ {
@@ -457,7 +457,7 @@ func (c *MConnection) recvRoutine() {
 FOR_LOOP:
 	for {
 		// Block until .recvMonitor says we can read.
-		c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
+		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
 
 		// Peek into bufConnReader for debugging
 		/*
@@ -477,7 +477,7 @@ FOR_LOOP:
 		var packet Packet
 		var _n int64
 		var err error
-		_n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize()))
+		_n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
 		c.recvMonitor.Update(int(_n))
 		if err != nil {
 			if c.IsRunning() {
@@ -550,6 +550,16 @@ func (c *MConnection) stopPongTimer() {
 	}
 }
 
+// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead
+// of amino encoding.
+func (c *MConnection) maxPacketMsgSize() int {
+	return len(cdc.MustMarshalBinary(PacketMsg{
+		ChannelID: 0x01,
+		EOF:       1,
+		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
+	})) + 10 // leave room for changes in amino
+}
+
 type ConnectionStatus struct {
 	Duration    time.Duration
 	SendMonitor flow.Status
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
index 2a507f8821023c5d721bfc0de76dbc38a78bd174..a2cbe008d6bb1626cc1a43b588246750c055c436 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
@@ -20,8 +20,8 @@ import (
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/ripemd160"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // 4 + 1024 == 1028 total frame size
@@ -267,7 +267,11 @@ func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
 }
 
 func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
-	signature = locPrivKey.Sign(challenge[:])
+	signature, err := locPrivKey.Sign(challenge[:])
+	// TODO(ismail): let signChallenge return an error instead
+	if err != nil {
+		panic(err)
+	}
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go b/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
index 02d67f6fb6b4ba461b251ef01293ec60b1c5087e..3182fde38e3b6468eb67baefbc4adf502a32c404 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
@@ -2,7 +2,7 @@ package conn
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc *amino.Codec = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/fuzz.go b/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
index 8d00ba40d6bdf344163ad7484290828f1f2c8784..80e4fed6ad8157fb8935014f7f8d201f527fc528 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/config"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // FuzzedConnection wraps any net.Conn and depending on the mode either delays
diff --git a/vendor/github.com/tendermint/tendermint/p2p/key.go b/vendor/github.com/tendermint/tendermint/p2p/key.go
index 73103ebd42242113062bc3cc216e7cc0b175043e..9548d34f0071888600645b33178819c4fe4da3c9 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/key.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/key.go
@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"io/ioutil"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // ID is a hex-encoded crypto.Address
diff --git a/vendor/github.com/tendermint/tendermint/p2p/listener.go b/vendor/github.com/tendermint/tendermint/p2p/listener.go
index e698765cd3d6cc15639d55f1cef3241dc508c900..3509ec69cf82f59137d2a1ae2fad2d5b6b0368b4 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/listener.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/listener.go
@@ -4,22 +4,30 @@ import (
 	"fmt"
 	"net"
 	"strconv"
+	"strings"
 	"time"
 
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p/upnp"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
+// Listener is a network listener for stream-oriented protocols, providing
+// convenient methods to get listener's internal and external addresses.
+// Clients are supposed to read incoming connections from a channel, returned
+// by Connections() method.
 type Listener interface {
 	Connections() <-chan net.Conn
 	InternalAddress() *NetAddress
 	ExternalAddress() *NetAddress
+	ExternalAddressHost() string
 	String() string
 	Stop() error
 }
 
-// Implements Listener
+// DefaultListener is a cmn.Service, running net.Listener underneath.
+// Optionally, UPnP is used upon calling NewDefaultListener to resolve external
+// address.
 type DefaultListener struct {
 	cmn.BaseService
 
@@ -29,6 +37,8 @@ type DefaultListener struct {
 	connections chan net.Conn
 }
 
+var _ Listener = (*DefaultListener)(nil)
+
 const (
 	numBufferedConnections = 10
 	defaultExternalPort    = 8770
@@ -47,9 +57,16 @@ func splitHostPort(addr string) (host string, port int) {
 	return host, port
 }
 
-// skipUPNP: If true, does not try getUPNPExternalAddress()
-func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener {
-	// Local listen IP & port
+// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying
+// to determine external address using UPnP.
+func NewDefaultListener(
+	fullListenAddrString string,
+	externalAddrString string,
+	useUPnP bool,
+	logger log.Logger) Listener {
+
+	// Split protocol, address, and port.
+	protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString)
 	lAddrIP, lAddrPort := splitHostPort(lAddr)
 
 	// Create listener
@@ -77,17 +94,28 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 		panic(err)
 	}
 
-	// Determine external address...
+	inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0"
+
+	// Determine external address.
 	var extAddr *NetAddress
-	if !skipUPNP {
-		// If the lAddrIP is INADDR_ANY, try UPnP
-		if lAddrIP == "" || lAddrIP == "0.0.0.0" {
-			extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger)
+
+	if externalAddrString != "" {
+		var err error
+		extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString)
+		if err != nil {
+			panic(fmt.Sprintf("Error in ExternalAddress: %v", err))
 		}
 	}
-	// Otherwise just use the local address...
+
+	// If the lAddrIP is INADDR_ANY, try UPnP.
+	if extAddr == nil && useUPnP && inAddrAny {
+		extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger)
+	}
+
+	// Otherwise just use the local address.
 	if extAddr == nil {
-		extAddr = getNaiveExternalAddress(listenerPort, false, logger)
+		defaultToIPv4 := inAddrAny
+		extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger)
 	}
 	if extAddr == nil {
 		panic("Could not determine external address!")
@@ -107,6 +135,8 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 	return dl
 }
 
+// OnStart implements cmn.Service by spinning a goroutine, listening for new
+// connections.
 func (l *DefaultListener) OnStart() error {
 	if err := l.BaseService.OnStart(); err != nil {
 		return err
@@ -115,6 +145,7 @@ func (l *DefaultListener) OnStart() error {
 	return nil
 }
 
+// OnStop implements cmn.Service by closing the listener.
 func (l *DefaultListener) OnStop() {
 	l.BaseService.OnStop()
 	l.listener.Close() // nolint: errcheck
@@ -145,24 +176,33 @@ func (l *DefaultListener) listenRoutine() {
 	}
 }
 
-// A channel of inbound connections.
+// Connections returns a channel of inbound connections.
 // It gets closed when the listener closes.
 func (l *DefaultListener) Connections() <-chan net.Conn {
 	return l.connections
 }
 
+// InternalAddress returns the internal NetAddress (address used for
+// listening).
 func (l *DefaultListener) InternalAddress() *NetAddress {
 	return l.intAddr
 }
 
+// ExternalAddress returns the external NetAddress (publicly available,
+// determined using either UPnP or local resolver).
 func (l *DefaultListener) ExternalAddress() *NetAddress {
 	return l.extAddr
 }
 
-// NOTE: The returned listener is already Accept()'ing.
-// So it's not suitable to pass into http.Serve().
-func (l *DefaultListener) NetListener() net.Listener {
-	return l.listener
+// ExternalAddressHost returns the external NetAddress IP string. If an IP is
+// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]").
+func (l *DefaultListener) ExternalAddressHost() string {
+	ip := l.ExternalAddress().IP
+	if isIpv6(ip) {
+		// Means it's ipv6, so format it with brackets
+		return "[" + ip.String() + "]"
+	}
+	return ip.String()
 }
 
 func (l *DefaultListener) String() string {
@@ -201,8 +241,20 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
 	return NewNetAddressIPPort(ext, uint16(externalPort))
 }
 
+func isIpv6(ip net.IP) bool {
+	v4 := ip.To4()
+	if v4 != nil {
+		return false
+	}
+
+	ipString := ip.String()
+
+	// Extra check just to be sure it's IPv6
+	return (strings.Contains(ipString, ":") && !strings.Contains(ipString, "."))
+}
+
 // TODO: use syscalls: see issue #712
-func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress {
+func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress {
 	addrs, err := net.InterfaceAddrs()
 	if err != nil {
 		panic(cmn.Fmt("Could not fetch interface addresses: %v", err))
@@ -213,14 +265,20 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *
 		if !ok {
 			continue
 		}
-		v4 := ipnet.IP.To4()
-		if v4 == nil || (!settleForLocal && v4[0] == 127) {
+		if defaultToIPv4 || !isIpv6(ipnet.IP) {
+			v4 := ipnet.IP.To4()
+			if v4 == nil || (!settleForLocal && v4[0] == 127) {
+				// loopback
+				continue
+			}
+		} else if !settleForLocal && ipnet.IP.IsLoopback() {
+			// IPv6, check for loopback
 			continue
-		} // loopback
+		}
 		return NewNetAddressIPPort(ipnet.IP, uint16(port))
 	}
 
 	// try again, but settle for local
 	logger.Info("Node may not be connected to internet. Settling for local address")
-	return getNaiveExternalAddress(port, true, logger)
+	return getNaiveExternalAddress(defaultToIPv4, port, true, logger)
 }
diff --git a/vendor/github.com/tendermint/tendermint/p2p/metrics.go b/vendor/github.com/tendermint/tendermint/p2p/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..ab876ee7c62d4e573c65518fb93cf76042a42481
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/metrics.go
@@ -0,0 +1,33 @@
+package p2p
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Number of peers.
+	Peers metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "p2p",
+			Name:      "peers",
+			Help:      "Number of peers.",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Peers: discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
index 3e0d99d69f67c037aaf02f5601aee0e516aba6aa..ebac8cc82d5c578cf6b7bc48cc0d46262bb6e6d4 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // NetAddress defines information about a peer on the network
diff --git a/vendor/github.com/tendermint/tendermint/p2p/node_info.go b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
index 60383bc5e2a850ee58215d9ad48ad50950fb52f3..5e8160a3b015f59a9f7b13fa081808beaafc931f 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/node_info.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"fmt"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"strings"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer.go b/vendor/github.com/tendermint/tendermint/p2p/peer.go
index da69fe74f5e9aef8c7a7e558b40598a177d9dd6a..5c615275bd4707704332a56c257616f4786fd3d0 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer.go
@@ -6,9 +6,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/config"
 	tmconn "github.com/tendermint/tendermint/p2p/conn"
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
index e048cf4e30bb25c2ed8cf062fa42cd3c2a933e27..25785615630ae4783f28119e1f3702964ac7a99b 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
@@ -55,8 +55,8 @@ func (ps *PeerSet) Add(peer Peer) error {
 	return nil
 }
 
-// Has returns true iff the PeerSet contains
-// the peer referred to by this peerKey.
+// Has returns true if the set contains the peer referred to by this
+// peerKey, otherwise false.
 func (ps *PeerSet) Has(peerKey ID) bool {
 	ps.mtx.Lock()
 	_, ok := ps.lookup[peerKey]
@@ -64,8 +64,8 @@ func (ps *PeerSet) Has(peerKey ID) bool {
 	return ok
 }
 
-// HasIP returns true if the PeerSet contains the peer referred to by this IP
-// address.
+// HasIP returns true if the set contains the peer referred to by this IP
+// address, otherwise false.
 func (ps *PeerSet) HasIP(peerIP net.IP) bool {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
@@ -85,7 +85,8 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool {
 	return false
 }
 
-// Get looks up a peer by the provided peerKey.
+// Get looks up a peer by the provided peerKey. Returns nil if peer is not
+// found.
 func (ps *PeerSet) Get(peerKey ID) Peer {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
index dc51761fefaf9258279984280fdb9d1d03c39b73..421aa135ab0345bfa8d7be6abe5ef43abad30b85 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
@@ -12,9 +12,9 @@ import (
 	"sync"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
+	crypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/file.go b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
index 38142dd9d921311f4fe66d9fab965d1161734cd1..3237e12537e296d5ddbf55d79a6369c54ec2535e 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"os"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 /* Loading & Saving */
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
index 27ed422c500dcea0c992e85717a1f4bdccda1ef6..e90665a3738eab21c2bf41aec091f4eb38a056f8 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
@@ -8,7 +8,7 @@ import (
 	"time"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/p2p/conn"
@@ -77,10 +77,10 @@ type PEXReactor struct {
 	attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
 }
 
-func (pexR *PEXReactor) minReceiveRequestInterval() time.Duration {
+func (r *PEXReactor) minReceiveRequestInterval() time.Duration {
 	// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
 	// peers too quickly from others and they'll think we're bad!
-	return pexR.ensurePeersPeriod / 3
+	return r.ensurePeersPeriod / 3
 }
 
 // PEXReactorConfig holds reactor specific configuration data.
@@ -206,7 +206,7 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
 
 // Receive implements Reactor by handling incoming PEX messages.
 func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		r.Switch.StopPeerForError(src, err)
@@ -287,7 +287,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
 		return
 	}
 	r.requestsSent.Set(id, struct{}{})
-	p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{}))
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{}))
 }
 
 // ReceiveAddrs adds the given addrs to the addrbook if theres an open
@@ -324,7 +324,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
 
 // SendAddrs sends addrs to the peer.
 func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
-	p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs}))
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs}))
 }
 
 // SetEnsurePeersPeriod sets period to ensure peers connected.
@@ -628,7 +628,9 @@ func (r *PEXReactor) crawlPeers() {
 		}
 		// Ask for more addresses
 		peer := r.Switch.Peers().Get(pi.Addr.ID)
-		r.RequestAddrs(peer)
+		if peer != nil {
+			r.RequestAddrs(peer)
+		}
 	}
 }
 
@@ -668,13 +670,11 @@ func RegisterPexMessage(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil)
 }
 
-// DecodeMessage implements interface registered above.
-func DecodeMessage(bz []byte) (msg PexMessage, err error) {
+func decodeMsg(bz []byte) (msg PexMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
-	err = cdc.UnmarshalBinary(bz, &msg)
+	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/switch.go b/vendor/github.com/tendermint/tendermint/p2p/switch.go
index f1ceee5c6eb5c9421ff32b0919141489bf0a88e6..d1e2ef23b8a23e44285f602a364aab5c002d0a87 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/switch.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/switch.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
@@ -73,10 +73,15 @@ type Switch struct {
 	mConfig conn.MConnConfig
 
 	rng *cmn.Rand // seed for randomizing dial times and orders
+
+	metrics *Metrics
 }
 
+// SwitchOption sets an optional parameter on the Switch.
+type SwitchOption func(*Switch)
+
 // NewSwitch creates a new Switch with the given config.
-func NewSwitch(cfg *config.P2PConfig) *Switch {
+func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch {
 	sw := &Switch{
 		config:       cfg,
 		reactors:     make(map[string]Reactor),
@@ -85,6 +90,7 @@ func NewSwitch(cfg *config.P2PConfig) *Switch {
 		peers:        NewPeerSet(),
 		dialing:      cmn.NewCMap(),
 		reconnecting: cmn.NewCMap(),
+		metrics:      NopMetrics(),
 	}
 
 	// Ensure we have a completely undeterministic PRNG.
@@ -99,9 +105,19 @@ func NewSwitch(cfg *config.P2PConfig) *Switch {
 	sw.mConfig = mConfig
 
 	sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
+
+	for _, option := range options {
+		option(sw)
+	}
+
 	return sw
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) SwitchOption {
+	return func(sw *Switch) { sw.metrics = metrics }
+}
+
 //---------------------------------------------------------------------
 // Switch setup
 
@@ -279,6 +295,7 @@ func (sw *Switch) StopPeerGracefully(peer Peer) {
 
 func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 	sw.peers.Remove(peer)
+	sw.metrics.Peers.Add(float64(-1))
 	peer.Stop()
 	for _, reactor := range sw.reactors {
 		reactor.RemovePeer(peer, reason)
@@ -623,6 +640,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	if err := sw.peers.Add(peer); err != nil {
 		return err
 	}
+	sw.metrics.Peers.Add(float64(1))
 
 	sw.Logger.Info("Added peer", "peer", peer)
 	return nil
diff --git a/vendor/github.com/tendermint/tendermint/p2p/test_util.go b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
index 0d2ba6c5e5aa061ae1f1e897e0112510233c6629..467532f0f504193b45956393dae3a9962fd86e40 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/test_util.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"net"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
diff --git a/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go b/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
index 55479415f08c8d6589acd1a4c268d539b2dfc2b6..2de5e7905055875bb55c64cfa36f9644a531a90f 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
@@ -5,8 +5,8 @@ import (
 	"net"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 type UPNPCapabilities struct {
diff --git a/vendor/github.com/tendermint/tendermint/p2p/wire.go b/vendor/github.com/tendermint/tendermint/p2p/wire.go
index a90ac851cc304637900c88e250abdce8fa56fc1d..b7ae412536657389a8388734bd562cb7f613a5a0 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/wire.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/wire.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/privval/priv_validator.go b/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
index 2bb5ef323d09c9aa074a4e58ea9353855b97222d..1e85bf7b3c1adc9222c9f3c3419c83e1a5ae5267 100644
--- a/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
+++ b/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // TODO: type ?
@@ -91,6 +91,10 @@ func LoadFilePV(filePath string) *FilePV {
 		cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err))
 	}
 
+	// overwrite pubkey and address for convenience
+	pv.PubKey = pv.PrivKey.PubKey()
+	pv.Address = pv.PubKey.Address()
+
 	pv.filePath = filePath
 	return pv
 }
@@ -222,7 +226,10 @@ func (pv *FilePV) signVote(chainID string, vote *types.Vote) error {
 	}
 
 	// It passed the checks. Sign the vote
-	sig := pv.PrivKey.Sign(signBytes)
+	sig, err := pv.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	pv.saveSigned(height, round, step, signBytes, sig)
 	vote.Signature = sig
 	return nil
@@ -258,7 +265,10 @@ func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error {
 	}
 
 	// It passed the checks. Sign the proposal
-	sig := pv.PrivKey.Sign(signBytes)
+	sig, err := pv.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	pv.saveSigned(height, round, step, signBytes, sig)
 	proposal.Signature = sig
 	return nil
@@ -281,7 +291,11 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8,
 func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
 	pv.mtx.Lock()
 	defer pv.mtx.Unlock()
-	heartbeat.Signature = pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+	sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+	if err != nil {
+		return err
+	}
+	heartbeat.Signature = sig
 	return nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/privval/socket.go b/vendor/github.com/tendermint/tendermint/privval/socket.go
index 9f59a8152e90f5463cb43b4f341ed390e95e4cc8..1e8a3807b454303116bfabd9707773396038fa15 100644
--- a/vendor/github.com/tendermint/tendermint/privval/socket.go
+++ b/vendor/github.com/tendermint/tendermint/privval/socket.go
@@ -8,9 +8,9 @@ import (
 	"time"
 
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	p2pconn "github.com/tendermint/tendermint/p2p/conn"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/privval/wire.go b/vendor/github.com/tendermint/tendermint/privval/wire.go
index 688910834eb9e0bef89bfd4b24bc9cf48a401bf6..c42ba40d6612c7c3d945c4783a8b5b03e6f2448e 100644
--- a/vendor/github.com/tendermint/tendermint/privval/wire.go
+++ b/vendor/github.com/tendermint/tendermint/privval/wire.go
@@ -2,7 +2,7 @@ package privval
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
index 2319fed82a7278c3d422004d0ba717a553ee893e..2f792671ec90fc3666c00cea2150dc286f3efcc3 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
@@ -1,8 +1,8 @@
 package proxy
 
 import (
-	abcicli "github.com/tendermint/abci/client"
-	"github.com/tendermint/abci/types"
+	abcicli "github.com/tendermint/tendermint/abci/client"
+	"github.com/tendermint/tendermint/abci/types"
 )
 
 //----------------------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/proxy/client.go b/vendor/github.com/tendermint/tendermint/proxy/client.go
index 6c987368a5c841a01044a472a0cd91e9be848cc9..87f4e716dfae9fc4da105ffbdc7c4803f8021efe 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/client.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/client.go
@@ -5,9 +5,9 @@ import (
 
 	"github.com/pkg/errors"
 
-	abcicli "github.com/tendermint/abci/client"
-	"github.com/tendermint/abci/example/kvstore"
-	"github.com/tendermint/abci/types"
+	abcicli "github.com/tendermint/tendermint/abci/client"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/abci/types"
 )
 
 // NewABCIClient returns newly connected client
diff --git a/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
index 5d89ef1953f51e7a37de2850029690a234d874c9..279fa42eedd9c872e93b43ea009aec0fa26f2b76 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
@@ -3,7 +3,7 @@ package proxy
 import (
 	"github.com/pkg/errors"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/README.md b/vendor/github.com/tendermint/tendermint/rpc/core/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9547079b2975e9b6c28c521380d9d914cfd756ee
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/README.md
@@ -0,0 +1,20 @@
+# Tendermint RPC
+
+## Generate markdown for [Slate](https://github.com/tendermint/slate)
+
+We are using [Slate](https://github.com/tendermint/slate) to power our RPC
+documentation. For generating markdown use:
+
+```shell
+go get github.com/davecheney/godoc2md
+
+godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$'
+```
+
+For more information see the [CI script for building the Slate docs](/scripts/slate.sh)
+
+## Pagination
+
+Requests that return multiple items will be paginated to 30 items by default.
+You can specify further pages with the ?page parameter. You can also set a
+custom page size up to 100 with the ?per_page parameter.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
index 067108c44ff7502c5df4c1c4b33cb6a2b16824e2..a5eede3fc88700114e8c2c91d12744059f20066f 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
@@ -1,10 +1,10 @@
 package core
 
 import (
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/version"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Query the application for some information.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go b/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
index a5ad5b4cb53603a314542b830faa9c7d62245802..0e8873152fe3f7789db2803c0219b92e4d60cb3c 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
@@ -6,7 +6,7 @@ import (
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Get block headers for minHeight <= height <= maxHeight.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
index dad64b6b6e1a2e23c0806454a024c53ea04c085f..c026cd91f49caa2456ca2e4113d37ebf1e4ee40c 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
@@ -87,7 +87,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //           {
 //             "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //             "pub_key": {
-//               "type": "AC26791624DE60",
+//               "type": "tendermint/PubKeyEd25519",
 //               "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //             },
 //             "voting_power": 10,
@@ -97,7 +97,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //         "proposer": {
 //           "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //           "pub_key": {
-//             "type": "AC26791624DE60",
+//             "type": "tendermint/PubKeyEd25519",
 //             "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //           },
 //           "voting_power": 10,
@@ -133,7 +133,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //           {
 //             "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //             "pub_key": {
-//               "type": "AC26791624DE60",
+//               "type": "tendermint/PubKeyEd25519",
 //               "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //             },
 //             "voting_power": 10,
@@ -143,7 +143,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //         "proposer": {
 //           "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //           "pub_key": {
-//             "type": "AC26791624DE60",
+//             "type": "tendermint/PubKeyEd25519",
 //             "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //           },
 //           "voting_power": 10,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt b/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt
new file mode 100644
index 0000000000000000000000000000000000000000..896d0c271f97a709d4749a4d74a6b9b39e90c670
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt
@@ -0,0 +1,8 @@
+{{with .PDoc}}
+{{comment_md .Doc}}
+{{example_html $ ""}}
+
+{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}})
+{{comment_md .Doc}}{{end}}
+{{end}}
+---
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
index 515ada87c87344aa53838ad372fc54f978426fe9..ecc41ce12b2e14d8b224ea97edf884877e9e7be7 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
@@ -7,10 +7,10 @@ import (
 
 	"github.com/pkg/errors"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
index 7a042362fb6e1ac8bcaacf9630414ee4b36a8941..128b3e9a7392809eeb661ba9bce349d1ac783937 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
@@ -3,15 +3,15 @@ package core
 import (
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/consensus"
+	crypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/txindex"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/status.go b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
index 044c12899b991724e6183c514f38c779c2f608bb..63e62b2c7e6c66040bead4599a4ac7c0a6a5373b 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/status.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
@@ -7,7 +7,7 @@ import (
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Get Tendermint status including node info, pubkey, latest block
@@ -50,12 +50,12 @@ import (
 //      "latest_app_hash": "0000000000000000",
 //      "latest_block_height": 231,
 //      "latest_block_time": "2018-04-27T23:18:08.459766485-04:00",
-//      "syncing": false
+//      "catching_up": false
 //    },
 //    "validator_info": {
 //      "address": "5875562FF0FFDECC895C20E32FC14988952E99E7",
 //      "pub_key": {
-//        "type": "AC26791624DE60",
+//        "type": "tendermint/PubKeyEd25519",
 //        "value": "PpDJRUrLG2RgFqYYjawfn/AcAgacSXpLFrmfYYQnuzE="
 //      },
 //      "voting_power": 10
@@ -92,7 +92,7 @@ func Status() (*ctypes.ResultStatus, error) {
 			LatestAppHash:     latestAppHash,
 			LatestBlockHeight: latestHeight,
 			LatestBlockTime:   latestBlockTime,
-			Syncing:           consensusReactor.FastSync(),
+			CatchingUp:        consensusReactor.FastSync(),
 		},
 		ValidatorInfo: ctypes.ValidatorInfo{
 			Address:     pubKey.Address(),
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
index 2fa7825fd66a91b3cd59f23d709e4446044ed2e6..f53d82f140f60dcc21fa35f05658f028e8df5e9a 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
@@ -3,7 +3,7 @@ package core
 import (
 	"fmt"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
index 5b001d7d3cbd12913d89749a75953a41b42a5959..4fec416eda1e7c45e590ce285694fdc823264ef6 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
@@ -5,9 +5,9 @@ import (
 	"strings"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/state"
@@ -65,7 +65,7 @@ type SyncInfo struct {
 	LatestAppHash     cmn.HexBytes `json:"latest_app_hash"`
 	LatestBlockHeight int64        `json:"latest_block_height"`
 	LatestBlockTime   time.Time    `json:"latest_block_time"`
-	Syncing           bool         `json:"syncing"`
+	CatchingUp        bool         `json:"catching_up"`
 }
 
 // Info about the node's validator
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go b/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
index 6648364b10f09c7040740647d94270f43e4d74c2..d3a31dc352412943680ea183c7cdecfef2ba6c08 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
@@ -2,7 +2,7 @@ package core_types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
index c0a9200462bfcb612b9dc3763f262820e861864e..0b840e3e95ef480ca93716a6c648046c1f1befbe 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
@@ -3,7 +3,7 @@ package core_grpc
 import (
 	"context"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	core "github.com/tendermint/tendermint/rpc/core"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
index 80d736f5796d5110b862eaeab0cafee334329824..c88989685a3112d6a35b8a562ea921cc91e203dd 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
@@ -6,13 +6,21 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/net/netutil"
 	"google.golang.org/grpc"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-// Start the grpcServer in a go routine
-func StartGRPCServer(protoAddr string) (net.Listener, error) {
+// Config is an gRPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+// StartGRPCServer starts a new gRPC BroadcastAPIServer, listening on
+// protoAddr, in a goroutine. Returns a listener and an error, if it fails to
+// parse an address.
+func StartGRPCServer(protoAddr string, config Config) (net.Listener, error) {
 	parts := strings.SplitN(protoAddr, "://", 2)
 	if len(parts) != 2 {
 		return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr)
@@ -22,6 +30,9 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
 	if err != nil {
 		return nil, err
 	}
+	if config.MaxOpenConnections > 0 {
+		ln = netutil.LimitListener(ln, config.MaxOpenConnections)
+	}
 
 	grpcServer := grpc.NewServer()
 	RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
@@ -30,7 +41,8 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
 	return ln, nil
 }
 
-// Start the client by dialing the server
+// StartGRPCClient dials the gRPC server using protoAddr and returns a new
+// BroadcastAPIClient.
 func StartGRPCClient(protoAddr string) BroadcastAPIClient {
 	conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
 	if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh b/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2c4629c8e1ce3b3cac64f86c2303656240db0756
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
+
+protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
index cf7a5ec7146149cb42835986379b4539191850c3..be16b711a12912c7be7bc9df06413ab525a951f3 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
@@ -19,7 +19,7 @@ package core_grpc
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import types "github.com/tendermint/abci/types"
+import types "github.com/tendermint/tendermint/abci/types"
 
 import (
 	"context"
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto
new file mode 100644
index 0000000000000000000000000000000000000000..d7980d5e05688dad329533e418a069bdeda366d1
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+package core_grpc;
+
+import "github.com/tendermint/tendermint/abci/types/types.proto";
+
+//----------------------------------------
+// Message types
+
+//----------------------------------------
+// Request types
+
+message RequestPing {
+}
+
+message RequestBroadcastTx {
+  bytes tx = 1;
+}
+
+//----------------------------------------
+// Response types
+
+message ResponsePing{
+}
+
+message ResponseBroadcastTx{
+  types.ResponseCheckTx check_tx = 1;
+  types.ResponseDeliverTx deliver_tx = 2;
+}
+
+//----------------------------------------
+// Service Definition
+
+service BroadcastAPI {
+  rpc Ping(RequestPing) returns (ResponsePing) ;
+  rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ;
+}
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go b/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
index 2bc438593705184b5ba2a94e602a350ab69fe1cc..b96b9123cfbf84029fe97903e4158326eaa09557 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
@@ -98,6 +98,6 @@ Each route is available as a GET request, as a JSONRPCv2 POST request, and via J
 # Examples
 
 * [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go)
-* [tm-monitor](https://github.com/tendermint/tools/blob/master/tm-monitor/rpc.go)
+* [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go)
 */
 package rpc
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
index 6cc03012a3d1dc6020a42bad000ef7cbb3cea99b..3ec5f81e3f80775b123ec53bd5d827dab2036009 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
@@ -18,9 +18,9 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	types "github.com/tendermint/tendermint/rpc/lib/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 // RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions.
@@ -294,7 +294,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 			continue
 		}
 
-		v, err, ok := nonJSONToArg(cdc, argType, arg)
+		v, err, ok := nonJSONStringToArg(cdc, argType, arg)
 		if err != nil {
 			return nil, err
 		}
@@ -303,7 +303,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 			continue
 		}
 
-		values[i], err = _jsonStringToArg(cdc, argType, arg)
+		values[i], err = jsonStringToArg(cdc, argType, arg)
 		if err != nil {
 			return nil, err
 		}
@@ -312,26 +312,64 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 	return values, nil
 }
 
-func _jsonStringToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error) {
-	v := reflect.New(ty)
-	err := cdc.UnmarshalJSON([]byte(arg), v.Interface())
+func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error) {
+	rv := reflect.New(rt)
+	err := cdc.UnmarshalJSON([]byte(arg), rv.Interface())
 	if err != nil {
-		return v, err
+		return rv, err
 	}
-	v = v.Elem()
-	return v, nil
+	rv = rv.Elem()
+	return rv, nil
 }
 
-func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error, bool) {
+func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) {
+	if rt.Kind() == reflect.Ptr {
+		rv_, err, ok := nonJSONStringToArg(cdc, rt.Elem(), arg)
+		if err != nil {
+			return reflect.Value{}, err, false
+		} else if ok {
+			rv := reflect.New(rt.Elem())
+			rv.Elem().Set(rv_)
+			return rv, nil, true
+		} else {
+			return reflect.Value{}, nil, false
+		}
+	} else {
+		return _nonJSONStringToArg(cdc, rt, arg)
+	}
+}
+
+// NOTE: rt.Kind() isn't a pointer.
+func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) {
+	isIntString := RE_INT.Match([]byte(arg))
 	isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`)
 	isHexString := strings.HasPrefix(strings.ToLower(arg), "0x")
-	expectingString := ty.Kind() == reflect.String
-	expectingByteSlice := ty.Kind() == reflect.Slice && ty.Elem().Kind() == reflect.Uint8
+
+	var expectingString, expectingByteSlice, expectingInt bool
+	switch rt.Kind() {
+	case reflect.Int, reflect.Uint, reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
+		expectingInt = true
+	case reflect.String:
+		expectingString = true
+	case reflect.Slice:
+		expectingByteSlice = rt.Elem().Kind() == reflect.Uint8
+	}
+
+	if isIntString && expectingInt {
+		qarg := `"` + arg + `"`
+		// jsonStringToArg
+		rv, err := jsonStringToArg(cdc, rt, qarg)
+		if err != nil {
+			return rv, err, false
+		} else {
+			return rv, nil, true
+		}
+	}
 
 	if isHexString {
 		if !expectingString && !expectingByteSlice {
 			err := errors.Errorf("Got a hex string arg, but expected '%s'",
-				ty.Kind().String())
+				rt.Kind().String())
 			return reflect.ValueOf(nil), err, false
 		}
 
@@ -340,7 +378,7 @@ func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value,
 		if err != nil {
 			return reflect.ValueOf(nil), err, false
 		}
-		if ty.Kind() == reflect.String {
+		if rt.Kind() == reflect.String {
 			return reflect.ValueOf(string(value)), nil, true
 		}
 		return reflect.ValueOf([]byte(value)), nil, true
@@ -406,7 +444,13 @@ type wsConnection struct {
 // description of how to configure ping period and pong wait time. NOTE: if the
 // write buffer is full, pongs may be dropped, which may cause clients to
 // disconnect. see https://github.com/gorilla/websocket/issues/97
-func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection {
+func NewWSConnection(
+	baseConn *websocket.Conn,
+	funcMap map[string]*RPCFunc,
+	cdc *amino.Codec,
+	options ...func(*wsConnection),
+) *wsConnection {
+	baseConn.SetReadLimit(maxBodyBytes)
 	wsc := &wsConnection{
 		remoteAddr:        baseConn.RemoteAddr().String(),
 		baseConn:          baseConn,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
index 56506067893058644636d597abd0622bb5cef87c..3c948c0ba69154c62adfc1c922a0a7277d2e6cb7 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
@@ -15,6 +15,7 @@ var (
 	dotAtom = atom + `(?:\.` + atom + `)*`
 	domain  = `[A-Z0-9.-]+\.[A-Z]{2,4}`
 
+	RE_INT     = regexp.MustCompile(`^-?[0-9]+$`)
 	RE_HEX     = regexp.MustCompile(`^(?i)[a-f0-9]+$`)
 	RE_EMAIL   = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`)
 	RE_ADDRESS = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`)
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
index 3f54c61efe338b0196f31e8cb3459e69a978d1f8..5d816ef22afce1a8d762f70e78bc3efb84e8ff5a 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
@@ -12,16 +12,38 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"golang.org/x/net/netutil"
 
 	types "github.com/tendermint/tendermint/rpc/lib/types"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
-func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) {
+// Config is an RPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+const (
+	// maxBodyBytes controls the maximum number of bytes the
+	// server will read parsing the request body.
+	maxBodyBytes = int64(1000000) // 1MB
+)
+
+// StartHTTPServer starts an HTTP server on listenAddr with the given handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPServer(
+	listenAddr string,
+	handler http.Handler,
+	logger log.Logger,
+	config Config,
+) (listener net.Listener, err error) {
 	var proto, addr string
 	parts := strings.SplitN(listenAddr, "://", 2)
 	if len(parts) != 2 {
-		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
+		return nil, errors.Errorf(
+			"Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
+			listenAddr,
+		)
 	}
 	proto, addr = parts[0], parts[1]
 
@@ -30,35 +52,60 @@ func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger)
 	if err != nil {
 		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
 	}
+	if config.MaxOpenConnections > 0 {
+		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
+	}
 
 	go func() {
 		err := http.Serve(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 		)
 		logger.Error("RPC HTTP server stopped", "err", err)
 	}()
 	return listener, nil
 }
 
-func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) {
+// StartHTTPAndTLSServer starts an HTTPS server on listenAddr with the given
+// handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPAndTLSServer(
+	listenAddr string,
+	handler http.Handler,
+	certFile, keyFile string,
+	logger log.Logger,
+	config Config,
+) (listener net.Listener, err error) {
 	var proto, addr string
 	parts := strings.SplitN(listenAddr, "://", 2)
 	if len(parts) != 2 {
-		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
+		return nil, errors.Errorf(
+			"Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
+			listenAddr,
+		)
 	}
 	proto, addr = parts[0], parts[1]
 
-	logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile))
+	logger.Info(
+		fmt.Sprintf(
+			"Starting RPC HTTPS server on %s (cert: %q, key: %q)",
+			listenAddr,
+			certFile,
+			keyFile,
+		),
+	)
 	listener, err = net.Listen(proto, addr)
 	if err != nil {
 		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
 	}
+	if config.MaxOpenConnections > 0 {
+		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
+	}
 
 	go func() {
 		err := http.ServeTLS(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 			certFile,
 			keyFile,
 		)
@@ -67,7 +114,11 @@ func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, ke
 	return listener, nil
 }
 
-func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RPCResponse) {
+func WriteRPCResponseHTTPError(
+	w http.ResponseWriter,
+	httpCode int,
+	res types.RPCResponse,
+) {
 	jsonBytes, err := json.MarshalIndent(res, "", "  ")
 	if err != nil {
 		panic(err)
@@ -117,7 +168,10 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
 					WriteRPCResponseHTTP(rww, res)
 				} else {
 					// For the rest,
-					logger.Error("Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()))
+					logger.Error(
+						"Panic in RPC HTTP handler", "err", e, "stack",
+						string(debug.Stack()),
+					)
 					rww.WriteHeader(http.StatusInternalServerError)
 					WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error)))
 				}
@@ -154,3 +208,13 @@ func (w *ResponseWriterWrapper) WriteHeader(status int) {
 func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return w.ResponseWriter.(http.Hijacker).Hijack()
 }
+
+type maxBytesHandler struct {
+	h http.Handler
+	n int64
+}
+
+func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	r.Body = http.MaxBytesReader(w, r.Body, h.n)
+	h.h.ServeHTTP(w, r)
+}
diff --git a/vendor/github.com/tendermint/tendermint/state/errors.go b/vendor/github.com/tendermint/tendermint/state/errors.go
index afb5737d7345e620c9ad21a3125b42b58cfa6083..d40c7e1413d3c5a536719cb3c2b05c1b90b0df51 100644
--- a/vendor/github.com/tendermint/tendermint/state/errors.go
+++ b/vendor/github.com/tendermint/tendermint/state/errors.go
@@ -1,7 +1,7 @@
 package state
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type (
diff --git a/vendor/github.com/tendermint/tendermint/state/execution.go b/vendor/github.com/tendermint/tendermint/state/execution.go
index e6b94429e5d013a661a4d7a885498b94c0b7e896..601abec9e4e621e41b75f17a6dbaa443f37c66ee 100644
--- a/vendor/github.com/tendermint/tendermint/state/execution.go
+++ b/vendor/github.com/tendermint/tendermint/state/execution.go
@@ -4,11 +4,11 @@ import (
 	"fmt"
 
 	fail "github.com/ebuchman/fail-test"
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
 )
 
 //-----------------------------------------------------------------------------
@@ -278,20 +278,24 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat
 
 	// these are tendermint types now
 	for _, valUpdate := range updates {
+		if valUpdate.VotingPower < 0 {
+			return fmt.Errorf("Voting power can't be negative %v", valUpdate)
+		}
+
 		address := valUpdate.Address
 		_, val := currentSet.GetByAddress(address)
-		if val == nil {
-			// add val
-			added := currentSet.Add(valUpdate)
-			if !added {
-				return fmt.Errorf("Failed to add new validator %v", valUpdate)
-			}
-		} else if valUpdate.VotingPower == 0 {
+		if valUpdate.VotingPower == 0 {
 			// remove val
 			_, removed := currentSet.Remove(address)
 			if !removed {
 				return fmt.Errorf("Failed to remove validator %X", address)
 			}
+		} else if val == nil {
+			// add val
+			added := currentSet.Add(valUpdate)
+			if !added {
+				return fmt.Errorf("Failed to add new validator %v", valUpdate)
+			}
 		} else {
 			// update val
 			updated := currentSet.Update(valUpdate)
diff --git a/vendor/github.com/tendermint/tendermint/state/services.go b/vendor/github.com/tendermint/tendermint/state/services.go
index bef286b20e15c02601aec8f917916e1ab0595d35..bf0b1a6f441078a88ef005b2864162a0f07ce5b1 100644
--- a/vendor/github.com/tendermint/tendermint/state/services.go
+++ b/vendor/github.com/tendermint/tendermint/state/services.go
@@ -1,7 +1,7 @@
 package state
 
 import (
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/state/store.go b/vendor/github.com/tendermint/tendermint/state/store.go
index 2164d699df05a3bf75a67f36c16fac3faa7a8d2a..9e94e36faab74a8e1ac2642749f3db82327f65a3 100644
--- a/vendor/github.com/tendermint/tendermint/state/store.go
+++ b/vendor/github.com/tendermint/tendermint/state/store.go
@@ -3,10 +3,10 @@ package state
 import (
 	"fmt"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
 )
 
 //------------------------------------------------------------------------
@@ -175,8 +175,13 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) {
 	if valInfo.ValidatorSet == nil {
 		valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged)
 		if valInfo2 == nil {
-			cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as
-                        last changed from height %d`, valInfo.LastHeightChanged, height))
+			panic(
+				fmt.Sprintf(
+					"Couldn't find validators at height %d as last changed from height %d",
+					valInfo.LastHeightChanged,
+					height,
+				),
+			)
 		}
 		valInfo = valInfo2
 	}
@@ -239,11 +244,17 @@ func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error)
 	}
 
 	if paramsInfo.ConsensusParams == empty {
-		paramsInfo = loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged)
-		if paramsInfo == nil {
-			cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as
-                        last changed from height %d`, paramsInfo.LastHeightChanged, height))
+		paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged)
+		if paramsInfo2 == nil {
+			panic(
+				fmt.Sprintf(
+					"Couldn't find consensus params at height %d as last changed from height %d",
+					paramsInfo.LastHeightChanged,
+					height,
+				),
+			)
 		}
+		paramsInfo = paramsInfo2
 	}
 
 	return paramsInfo.ConsensusParams, nil
diff --git a/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go b/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
index 264be1fd893ddb0aaa1c5a397e52fc538b63fa8a..088252f5e6aec5cae60db5ea0c9538bc8ff7082d 100644
--- a/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
+++ b/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
@@ -3,7 +3,7 @@ package txindex
 import (
 	"context"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
index 718a55d15133c53ceceefc5a11e1a45b9a3c47fc..707325929aa5e0fc1b20491cec0c01906ba822b2 100644
--- a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
+++ b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	"github.com/tendermint/tendermint/libs/pubsub/query"
 	"github.com/tendermint/tendermint/state/txindex"
diff --git a/vendor/github.com/tendermint/tendermint/state/validation.go b/vendor/github.com/tendermint/tendermint/state/validation.go
index 84a4cc82454dceac8457d2eb683045c68c2a8a64..c36339203222014e3bcdb9f2fbc0ff53821be7eb 100644
--- a/vendor/github.com/tendermint/tendermint/state/validation.go
+++ b/vendor/github.com/tendermint/tendermint/state/validation.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 //-----------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/state/wire.go b/vendor/github.com/tendermint/tendermint/state/wire.go
index 3e8b544d98763f37324af4d2a265d1503a806f97..af743c7b85940a3f0f15032f6c1030f175961a72 100644
--- a/vendor/github.com/tendermint/tendermint/state/wire.go
+++ b/vendor/github.com/tendermint/tendermint/state/wire.go
@@ -2,7 +2,7 @@ package state
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/abci/LICENSE b/vendor/github.com/tendermint/tendermint/tools/build/LICENSE
similarity index 93%
rename from vendor/github.com/tendermint/abci/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/build/LICENSE
index 57951bb8ae4760d0aac73eb76051f31d416e22f5..bb66bb3507e96f784d5f0e8d3d824fede11dcb5e 100644
--- a/vendor/github.com/tendermint/abci/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/build/LICENSE
@@ -1,11 +1,9 @@
-Tendermint ABCI
-Copyright (C) 2015 Tendermint
-
-
+Tendermint Core
+License: Apache2.0
 
                                  Apache License
                            Version 2.0, January 2004
-                        https://www.apache.org/licenses/
+                        http://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -180,11 +178,24 @@ Copyright (C) 2015 Tendermint
 
    END OF TERMS AND CONDITIONS
 
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 All in Bits, Inc
+
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       https://www.apache.org/licenses/LICENSE-2.0
+       http://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright
new file mode 100644
index 0000000000000000000000000000000000000000..fe449650cd82bdcb821d5d303f55adaa38eb1b55
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: basecoind
+Source: https://github.com/cosmos/cosmos-sdk
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright
new file mode 100644
index 0000000000000000000000000000000000000000..6d1bab01bd01968638c84e9adb71555426e157b3
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: ethermint
+Source: https://github.com/tendermint/ethermint
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright
new file mode 100644
index 0000000000000000000000000000000000000000..ffc230134a4bf89dfd635646bc6acb770faa1335
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: gaia
+Source: https://github.com/cosmos/gaia
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright
new file mode 100644
index 0000000000000000000000000000000000000000..15ee960dd671b928a29a32d1c3538aa7842e3894
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: tendermint
+Source: https://github.com/tendermint/tendermint
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tmlibs/LICENSE b/vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
index 06bc5e1c6d20044c9c598fa6b4ee2faf6f07a51a..64a33ddf1a4455dceb070463508e23b500bc8936 100644
--- a/vendor/github.com/tendermint/tmlibs/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
@@ -1,4 +1,3 @@
-Tendermint Libraries
 Copyright (C) 2017 Tendermint
 
 
diff --git a/vendor/github.com/tendermint/go-crypto/LICENSE b/vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
similarity index 93%
rename from vendor/github.com/tendermint/go-crypto/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
index 3beb77b13b05df12584aeb8c9c3a86df806421c8..f489139675ee621b374d23e48f555adc5c0b64d4 100644
--- a/vendor/github.com/tendermint/go-crypto/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
@@ -1,11 +1,9 @@
-Tendermint Go-Crypto
-Copyright (C) 2015 Tendermint
-
-
+Tendermint Bench
+Copyright 2017 Tendermint
 
                                  Apache License
                            Version 2.0, January 2004
-                        https://www.apache.org/licenses/
+                        http://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -180,11 +178,24 @@ Copyright (C) 2015 Tendermint
 
    END OF TERMS AND CONDITIONS
 
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       https://www.apache.org/licenses/LICENSE-2.0
+       http://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE b/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..20728d3180c4374e819e969750015e7588a15a21
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE
@@ -0,0 +1,204 @@
+Tendermint Monitor
+Copyright 2017 Tendermint
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/tendermint/tendermint/types/block.go b/vendor/github.com/tendermint/tendermint/types/block.go
index 3004672c89972ccf71286b6c880405f6cf44cf05..e23fd71d9d5e310a61e33086a929335275571f58 100644
--- a/vendor/github.com/tendermint/tendermint/types/block.go
+++ b/vendor/github.com/tendermint/tendermint/types/block.go
@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
-	"golang.org/x/crypto/ripemd160"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Block defines the atomic unit of a Tendermint blockchain.
@@ -107,6 +107,7 @@ func (b *Block) Hash() cmn.HexBytes {
 
 // MakePartSet returns a PartSet containing parts of a serialized block.
 // This is the form in which the block is gossipped to peers.
+// CONTRACT: partSize is greater than zero.
 func (b *Block) MakePartSet(partSize int) *PartSet {
 	if b == nil {
 		return nil
@@ -135,6 +136,15 @@ func (b *Block) HashesTo(hash []byte) bool {
 	return bytes.Equal(b.Hash(), hash)
 }
 
+// Size returns size of the block in bytes.
+func (b *Block) Size() int {
+	bz, err := cdc.MarshalBinaryBare(b)
+	if err != nil {
+		return 0
+	}
+	return len(bz)
+}
+
 // String returns a string representation of the block
 func (b *Block) String() string {
 	return b.StringIndented("")
@@ -199,7 +209,7 @@ type Header struct {
 // Hash returns the hash of the header.
 // Returns nil if ValidatorHash is missing,
 // since a Header is not valid unless there is
-// a ValidaotrsHash (corresponding to the validator set).
+// a ValidatorsHash (corresponding to the validator set).
 func (h *Header) Hash() cmn.HexBytes {
 	if h == nil || len(h.ValidatorsHash) == 0 {
 		return nil
@@ -383,6 +393,9 @@ func (commit *Commit) ValidateBasic() error {
 
 // Hash returns the hash of the commit
 func (commit *Commit) Hash() cmn.HexBytes {
+	if commit == nil {
+		return nil
+	}
 	if commit.hash == nil {
 		bs := make([]merkle.Hasher, len(commit.Precommits))
 		for i, precommit := range commit.Precommits {
@@ -455,7 +468,7 @@ func (data *Data) StringIndented(indent string) string {
 			txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs))
 			break
 		}
-		txStrings[i] = fmt.Sprintf("Tx:%v", tx)
+		txStrings[i] = fmt.Sprintf("%X (%d bytes)", tx.Hash(), len(tx))
 	}
 	return fmt.Sprintf(`Data{
 %s  %v
@@ -495,7 +508,7 @@ func (data *EvidenceData) StringIndented(indent string) string {
 		}
 		evStrings[i] = fmt.Sprintf("Evidence:%v", ev)
 	}
-	return fmt.Sprintf(`Data{
+	return fmt.Sprintf(`EvidenceData{
 %s  %v
 %s}#%v`,
 		indent, strings.Join(evStrings, "\n"+indent+"  "),
@@ -543,7 +556,7 @@ type hasher struct {
 }
 
 func (h hasher) Hash() []byte {
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) {
 		bz, err := cdc.MarshalBinaryBare(h.item)
 		if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/types/canonical_json.go b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
index 95ade9c67bba1cb96598e2e6fc0fab4746986653..189a8a7a2fec6f93455e5b8e23aa636659d94001 100644
--- a/vendor/github.com/tendermint/tendermint/types/canonical_json.go
+++ b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
@@ -3,14 +3,13 @@ package types
 import (
 	"time"
 
-	"github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Canonical json is amino's json for structs with fields in alphabetical order
 
 // TimeFormat is used for generating the sigs
-const TimeFormat = amino.RFC3339Millis
+const TimeFormat = "2006-01-02T15:04:05.000Z"
 
 type CanonicalJSONBlockID struct {
 	Hash        cmn.HexBytes               `json:"hash,omitempty"`
diff --git a/vendor/github.com/tendermint/tendermint/types/event_buffer.go b/vendor/github.com/tendermint/tendermint/types/event_buffer.go
deleted file mode 100644
index 18b41014e113ffc56d4d5101bbc1080c988c5f65..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/tendermint/types/event_buffer.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package types
-
-// Interface assertions
-var _ TxEventPublisher = (*TxEventBuffer)(nil)
-
-// TxEventBuffer is a buffer of events, which uses a slice to temporarily store
-// events.
-type TxEventBuffer struct {
-	next     TxEventPublisher
-	capacity int
-	events   []EventDataTx
-}
-
-// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given
-// capacity.
-func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer {
-	return &TxEventBuffer{
-		next:     next,
-		capacity: capacity,
-		events:   make([]EventDataTx, 0, capacity),
-	}
-}
-
-// Len returns the number of events cached.
-func (b TxEventBuffer) Len() int {
-	return len(b.events)
-}
-
-// PublishEventTx buffers an event to be fired upon finality.
-func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error {
-	b.events = append(b.events, e)
-	return nil
-}
-
-// Flush publishes events by running next.PublishWithTags on all cached events.
-// Blocks. Clears cached events.
-func (b *TxEventBuffer) Flush() error {
-	for _, e := range b.events {
-		err := b.next.PublishEventTx(e)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Clear out the elements and set the length to 0
-	// but maintain the underlying slice's capacity.
-	// See Issue https://github.com/tendermint/tendermint/issues/1189
-	b.events = b.events[:0]
-	return nil
-}
diff --git a/vendor/github.com/tendermint/tendermint/types/event_bus.go b/vendor/github.com/tendermint/tendermint/types/event_bus.go
index cb4b17d5161121a7304ae6f0b922d542e449d8da..b4965feee67b10fb895ee1998bd9b730a7834302 100644
--- a/vendor/github.com/tendermint/tendermint/types/event_bus.go
+++ b/vendor/github.com/tendermint/tendermint/types/event_bus.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const defaultCapacity = 0
@@ -49,7 +49,7 @@ func (b *EventBus) OnStart() error {
 }
 
 func (b *EventBus) OnStop() {
-	b.pubsub.OnStop()
+	b.pubsub.Stop()
 }
 
 func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
diff --git a/vendor/github.com/tendermint/tendermint/types/events.go b/vendor/github.com/tendermint/tendermint/types/events.go
index 2b87297cdcee4ac9928c6ef787d6459d83d24d90..891c6a9024c871ee7b898e78f3f8c2491c47da1b 100644
--- a/vendor/github.com/tendermint/tendermint/types/events.go
+++ b/vendor/github.com/tendermint/tendermint/types/events.go
@@ -10,22 +10,17 @@ import (
 
 // Reserved event types
 const (
-	EventBond              = "Bond"
 	EventCompleteProposal  = "CompleteProposal"
-	EventDupeout           = "Dupeout"
-	EventFork              = "Fork"
 	EventLock              = "Lock"
 	EventNewBlock          = "NewBlock"
 	EventNewBlockHeader    = "NewBlockHeader"
 	EventNewRound          = "NewRound"
 	EventNewRoundStep      = "NewRoundStep"
 	EventPolka             = "Polka"
-	EventRebond            = "Rebond"
 	EventRelock            = "Relock"
 	EventTimeoutPropose    = "TimeoutPropose"
 	EventTimeoutWait       = "TimeoutWait"
 	EventTx                = "Tx"
-	EventUnbond            = "Unbond"
 	EventUnlock            = "Unlock"
 	EventVote              = "Vote"
 	EventProposalHeartbeat = "ProposalHeartbeat"
@@ -113,11 +108,6 @@ const (
 )
 
 var (
-	EventQueryBond              = QueryForEvent(EventBond)
-	EventQueryUnbond            = QueryForEvent(EventUnbond)
-	EventQueryRebond            = QueryForEvent(EventRebond)
-	EventQueryDupeout           = QueryForEvent(EventDupeout)
-	EventQueryFork              = QueryForEvent(EventFork)
 	EventQueryNewBlock          = QueryForEvent(EventNewBlock)
 	EventQueryNewBlockHeader    = QueryForEvent(EventNewBlockHeader)
 	EventQueryNewRound          = QueryForEvent(EventNewRound)
diff --git a/vendor/github.com/tendermint/tendermint/types/evidence.go b/vendor/github.com/tendermint/tendermint/types/evidence.go
index 10907869e0f0c0d793c8d20067dc245cb3fa4db6..6313f43a51059da15af989cc233958d0f3ea8a42 100644
--- a/vendor/github.com/tendermint/tendermint/types/evidence.go
+++ b/vendor/github.com/tendermint/tendermint/types/evidence.go
@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/tmlibs/merkle"
+	amino "github.com/tendermint/go-amino"
+
+	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/merkle"
 )
 
 // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid.
@@ -180,7 +181,7 @@ type EvidenceList []Evidence
 // Hash returns the simple merkle root hash of the EvidenceList.
 func (evl EvidenceList) Hash() []byte {
 	// Recursive impl.
-	// Copied from tmlibs/merkle to avoid allocations
+	// Copied from crypto/merkle to avoid allocations
 	switch len(evl) {
 	case 0:
 		return nil
diff --git a/vendor/github.com/tendermint/tendermint/types/genesis.go b/vendor/github.com/tendermint/tendermint/types/genesis.go
index aee8e07670f97d2b4451fcfc0678401cfeae97fc..220ee0e0efcb02ec7dbaf006775491d0cc3052cd 100644
--- a/vendor/github.com/tendermint/tendermint/types/genesis.go
+++ b/vendor/github.com/tendermint/tendermint/types/genesis.go
@@ -5,8 +5,8 @@ import (
 	"io/ioutil"
 	"time"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //------------------------------------------------------------
@@ -26,17 +26,7 @@ type GenesisDoc struct {
 	ConsensusParams *ConsensusParams   `json:"consensus_params,omitempty"`
 	Validators      []GenesisValidator `json:"validators"`
 	AppHash         cmn.HexBytes       `json:"app_hash"`
-	AppStateJSON    json.RawMessage    `json:"app_state,omitempty"`
-	AppOptions      json.RawMessage    `json:"app_options,omitempty"` // DEPRECATED
-}
-
-// AppState returns raw application state.
-// TODO: replace with AppState field during next breaking release (0.18)
-func (genDoc *GenesisDoc) AppState() json.RawMessage {
-	if len(genDoc.AppOptions) > 0 {
-		return genDoc.AppOptions
-	}
-	return genDoc.AppStateJSON
+	AppState        json.RawMessage    `json:"app_state,omitempty"`
 }
 
 // SaveAs is a utility method for saving GenensisDoc as a JSON file.
diff --git a/vendor/github.com/tendermint/tendermint/types/heartbeat.go b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
index 097dd22db7a93b110432c3f2b7070cec55f070da..cebe2864cf430211f5cf86043485cc9a492bf5bd 100644
--- a/vendor/github.com/tendermint/tendermint/types/heartbeat.go
+++ b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
@@ -3,8 +3,8 @@ package types
 import (
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Heartbeat is a simple vote-like structure so validators can
diff --git a/vendor/github.com/tendermint/tendermint/types/params.go b/vendor/github.com/tendermint/tendermint/types/params.go
index 2df092d62ad5d3dd857fc45b6470c330c714f8c1..3056c82a03839e704fd0d20c615c37bdcfd4bf0c 100644
--- a/vendor/github.com/tendermint/tendermint/types/params.go
+++ b/vendor/github.com/tendermint/tendermint/types/params.go
@@ -1,12 +1,13 @@
 package types
 
 import (
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
+	// MaxBlockSizeBytes is the maximum permitted size of the blocks.
 	MaxBlockSizeBytes = 104857600 // 100MB
 )
 
@@ -56,7 +57,7 @@ func DefaultConsensusParams() *ConsensusParams {
 func DefaultBlockSize() BlockSize {
 	return BlockSize{
 		MaxBytes: 22020096, // 21MB
-		MaxTxs:   100000,
+		MaxTxs:   10000,
 		MaxGas:   -1,
 	}
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/part_set.go b/vendor/github.com/tendermint/tendermint/types/part_set.go
index 18cfe802c564f5a4f30da782ce664d32aff20f08..f6d7f6b6e2b2cc17b5465c017550d5f45950696a 100644
--- a/vendor/github.com/tendermint/tendermint/types/part_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/part_set.go
@@ -7,10 +7,9 @@ import (
 	"io"
 	"sync"
 
-	"golang.org/x/crypto/ripemd160"
-
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
@@ -31,7 +30,7 @@ func (part *Part) Hash() []byte {
 	if part.hash != nil {
 		return part.hash
 	}
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	hasher.Write(part.Bytes) // nolint: errcheck, gas
 	part.hash = hasher.Sum(nil)
 	return part.hash
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator.go b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
index 8759d3f99f70d501c11686665b3ce4d244c38bcb..85db65a41ed89367ef316b9e1b382f4a93477b5a 100644
--- a/vendor/github.com/tendermint/tendermint/types/priv_validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 // PrivValidator defines the functionality of a local Tendermint validator
@@ -63,7 +63,10 @@ func (pv *MockPV) GetPubKey() crypto.PubKey {
 // Implements PrivValidator.
 func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
 	signBytes := vote.SignBytes(chainID)
-	sig := pv.privKey.Sign(signBytes)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	vote.Signature = sig
 	return nil
 }
@@ -71,14 +74,20 @@ func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
 // Implements PrivValidator.
 func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error {
 	signBytes := proposal.SignBytes(chainID)
-	sig := pv.privKey.Sign(signBytes)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	proposal.Signature = sig
 	return nil
 }
 
 // signHeartbeat signs the heartbeat without any checking.
 func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
-	sig := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+	sig, err := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+	if err != nil {
+		return err
+	}
 	heartbeat.Signature = sig
 	return nil
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/proposal.go b/vendor/github.com/tendermint/tendermint/types/proposal.go
index 95008897b8b10d29b637063662bc66ca546f07c4..52ce8756e8f591fb474e5c46dc4c8727191df4f0 100644
--- a/vendor/github.com/tendermint/tendermint/types/proposal.go
+++ b/vendor/github.com/tendermint/tendermint/types/proposal.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/types/protobuf.go b/vendor/github.com/tendermint/tendermint/types/protobuf.go
index eb684ae7d4d290d0f6e39c213e2df69ae7caf9e5..4fe4482536e1cea0686d64b91d0e4892ac0d1ab0 100644
--- a/vendor/github.com/tendermint/tendermint/types/protobuf.go
+++ b/vendor/github.com/tendermint/tendermint/types/protobuf.go
@@ -6,8 +6,8 @@ import (
 	"reflect"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
 )
 
 //-------------------------------------------------------
@@ -58,7 +58,7 @@ func (tm2pb) Validator(val *Validator) abci.Validator {
 }
 
 // XXX: panics on nil or unknown pubkey type
-// TODO: add cases when new pubkey types are added to go-crypto
+// TODO: add cases when new pubkey types are added to crypto
 func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
 	switch pk := pubKey.(type) {
 	case crypto.PubKeyEd25519:
@@ -78,7 +78,7 @@ func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
 
 // XXX: panics on nil or unknown pubkey type
 func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator {
-	validators := make([]abci.Validator, len(vals.Validators))
+	validators := make([]abci.Validator, vals.Size())
 	for i, val := range vals.Validators {
 		validators[i] = TM2PB.Validator(val)
 	}
@@ -153,7 +153,7 @@ var PB2TM = pb2tm{}
 type pb2tm struct{}
 
 func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
-	// TODO: define these in go-crypto and use them
+	// TODO: define these in crypto and use them
 	sizeEd := 32
 	sizeSecp := 33
 	switch pubKey.Type {
diff --git a/vendor/github.com/tendermint/tendermint/types/results.go b/vendor/github.com/tendermint/tendermint/types/results.go
index 326cee48d517002be1c55ca4eb5c20f6f17f5165..17d5891c3c61182d3316222cb2fd57a92e305fe9 100644
--- a/vendor/github.com/tendermint/tendermint/types/results.go
+++ b/vendor/github.com/tendermint/tendermint/types/results.go
@@ -1,9 +1,9 @@
 package types
 
 import (
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
@@ -24,15 +24,16 @@ func (a ABCIResult) Hash() []byte {
 // ABCIResults wraps the deliver tx results to return a proof
 type ABCIResults []ABCIResult
 
-// NewResults creates ABCIResults from ResponseDeliverTx
-func NewResults(del []*abci.ResponseDeliverTx) ABCIResults {
-	res := make(ABCIResults, len(del))
-	for i, d := range del {
+// NewResults creates ABCIResults from the list of ResponseDeliverTx.
+func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults {
+	res := make(ABCIResults, len(responses))
+	for i, d := range responses {
 		res[i] = NewResultFromResponse(d)
 	}
 	return res
 }
 
+// NewResultFromResponse creates ABCIResult from ResponseDeliverTx.
 func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
 	return ABCIResult{
 		Code: response.Code,
@@ -51,6 +52,8 @@ func (a ABCIResults) Bytes() []byte {
 
 // Hash returns a merkle hash of all results
 func (a ABCIResults) Hash() []byte {
+	// NOTE: we copy the impl of the merkle tree for txs -
+	// we should be consistent and either do it for both or not.
 	return merkle.SimpleHashFromHashers(a.toHashers())
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/types/tx.go b/vendor/github.com/tendermint/tendermint/types/tx.go
index e7247693a9def8b20647a5f9e841927ddb19a920..489f0b232c2f4f5f7ed6425d7b86f25c76361f22 100644
--- a/vendor/github.com/tendermint/tendermint/types/tx.go
+++ b/vendor/github.com/tendermint/tendermint/types/tx.go
@@ -5,20 +5,20 @@ import (
 	"errors"
 	"fmt"
 
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Tx is an arbitrary byte array.
 // NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
-// Alternatively, it may make sense to add types here and let
-// []byte be type 0x1 so we can have versioned txs if need be in the future.
+// Might we want types here ?
 type Tx []byte
 
-// Hash computes the RIPEMD160 hash of the wire encoded transaction.
+// Hash computes the TMHASH hash of the wire encoded transaction.
 func (tx Tx) Hash() []byte {
-	return aminoHasher(tx).Hash()
+	return tmhash.Sum(tx)
 }
 
 // String returns the hex-encoded transaction as a string.
@@ -32,7 +32,7 @@ type Txs []Tx
 // Hash returns the simple Merkle root hash of the transactions.
 func (txs Txs) Hash() []byte {
 	// Recursive impl.
-	// Copied from tmlibs/merkle to avoid allocations
+	// Copied from tendermint/crypto/merkle to avoid allocations
 	switch len(txs) {
 	case 0:
 		return nil
diff --git a/vendor/github.com/tendermint/tendermint/types/validator.go b/vendor/github.com/tendermint/tendermint/types/validator.go
index 46dc61d07e866a77f6bd3670587d66258fbedc11..e43acf09d61c78ed3963cfe1adc4369ff2f0321d 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator.go
@@ -4,8 +4,8 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Volatile state for each Validator
diff --git a/vendor/github.com/tendermint/tendermint/types/validator_set.go b/vendor/github.com/tendermint/tendermint/types/validator_set.go
index f2fac292975d66b363f45e1d4b943a62411fe1ee..60fc2d83b598e001da76a1bd326df6415df46313 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator_set.go
@@ -7,8 +7,8 @@ import (
 	"sort"
 	"strings"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // ValidatorSet represent a set of *Validator at a given height.
@@ -39,14 +39,15 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
 		Validators: validators,
 	}
 
-	if vals != nil {
+	if len(vals) > 0 {
 		vs.IncrementAccum(1)
 	}
 
 	return vs
 }
 
-// incrementAccum and update the proposer
+// IncrementAccum increments accum of each validator and updates the
+// proposer. Panics if validator set is empty.
 func (valSet *ValidatorSet) IncrementAccum(times int) {
 	// Add VotingPower * times to each validator and order into heap.
 	validatorsHeap := cmn.NewHeap()
diff --git a/vendor/github.com/tendermint/tendermint/types/vote.go b/vendor/github.com/tendermint/tendermint/types/vote.go
index e4ead612a9fc8fcf6145d703021e4d2fdad75ec4..ed4ebd73e51c85f3d1806c27ce2e3d7e871681fa 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote.go
@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/types/vote_set.go b/vendor/github.com/tendermint/tendermint/types/vote_set.go
index a60d95dafd15af3ef41bc2ecad962456e1e1e685..c516810534863522cc01d4a9751f4e28d65d9930 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote_set.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/pkg/errors"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // UNSTABLE
diff --git a/vendor/github.com/tendermint/tendermint/types/wire.go b/vendor/github.com/tendermint/tendermint/types/wire.go
index bd5c4497d06fdf1a1ae942e6c55bbc7dbeac6632..6342d7ebabeffb8e934c59ed0d6778f23ff5af18 100644
--- a/vendor/github.com/tendermint/tendermint/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/types/wire.go
@@ -2,7 +2,7 @@ package types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/version/version.go b/vendor/github.com/tendermint/tendermint/version/version.go
index df553115a1f60eb9546aef1d40da5152773f0851..165f258292150c53a088b692bf86761b2d56f0cf 100644
--- a/vendor/github.com/tendermint/tendermint/version/version.go
+++ b/vendor/github.com/tendermint/tendermint/version/version.go
@@ -3,14 +3,14 @@ package version
 // Version components
 const (
 	Maj = "0"
-	Min = "21"
-	Fix = "0"
+	Min = "22"
+	Fix = "4"
 )
 
 var (
 	// Version is the current version of Tendermint
 	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.21.0"
+	Version = "0.22.4"
 
 	// GitCommit is the current HEAD set using ldflags.
 	GitCommit string
diff --git a/vendor/github.com/tendermint/tmlibs/common/array.go b/vendor/github.com/tendermint/tmlibs/common/array.go
deleted file mode 100644
index adedc42bee59b606617f0076903cda97c2f05d64..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/tmlibs/common/array.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package common
-
-func Arr(items ...interface{}) []interface{} {
-	return items
-}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go b/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
deleted file mode 100644
index 9bdf52cb2708b15be5465eb717f79b02d1cdc188..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Computes a deterministic minimal height merkle tree hash.
-If the number of items is not a power of two, some leaves
-will be at different levels. Tries to keep both sides of
-the tree the same size, but the left may be one greater.
-
-Use this for short deterministic trees, such as the validator list.
-For larger datasets, use IAVLTree.
-
-                        *
-                       / \
-                     /     \
-                   /         \
-                 /             \
-                *               *
-               / \             / \
-              /   \           /   \
-             /     \         /     \
-            *       *       *       h6
-           / \     / \     / \
-          h0  h1  h2  h3  h4  h5
-
-*/
-
-package merkle
-
-import (
-	"golang.org/x/crypto/ripemd160"
-)
-
-func SimpleHashFromTwoHashes(left []byte, right []byte) []byte {
-	var hasher = ripemd160.New()
-	err := encodeByteSlice(hasher, left)
-	if err != nil {
-		panic(err)
-	}
-	err = encodeByteSlice(hasher, right)
-	if err != nil {
-		panic(err)
-	}
-	return hasher.Sum(nil)
-}
-
-func SimpleHashFromHashes(hashes [][]byte) []byte {
-	// Recursive impl.
-	switch len(hashes) {
-	case 0:
-		return nil
-	case 1:
-		return hashes[0]
-	default:
-		left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2])
-		right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:])
-		return SimpleHashFromTwoHashes(left, right)
-	}
-}
-
-// NOTE: Do not implement this, use SimpleHashFromByteslices instead.
-// type Byteser interface { Bytes() []byte }
-// func SimpleHashFromBytesers(items []Byteser) []byte { ... }
-
-func SimpleHashFromByteslices(bzs [][]byte) []byte {
-	hashes := make([][]byte, len(bzs))
-	for i, bz := range bzs {
-		hashes[i] = SimpleHashFromBytes(bz)
-	}
-	return SimpleHashFromHashes(hashes)
-}
-
-func SimpleHashFromBytes(bz []byte) []byte {
-	hasher := ripemd160.New()
-	hasher.Write(bz)
-	return hasher.Sum(nil)
-}
-
-func SimpleHashFromHashers(items []Hasher) []byte {
-	hashes := make([][]byte, len(items))
-	for i, item := range items {
-		hash := item.Hash()
-		hashes[i] = hash
-	}
-	return SimpleHashFromHashes(hashes)
-}
-
-func SimpleHashFromMap(m map[string]Hasher) []byte {
-	sm := NewSimpleMap()
-	for k, v := range m {
-		sm.Set(k, v)
-	}
-	return sm.Hash()
-}
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index bdf5652b01ce679fb0bdf6b6cf9e4dd82371ba2d..f4d9b5ece3ef0fbf7ca709bfccdcdcb6165cf5ea 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -52,9 +52,31 @@ const (
 	noDialOnMiss = false
 )
 
+// shouldTraceGetConn reports whether getClientConn should call any
+// ClientTrace.GetConn hook associated with the http.Request.
+//
+// This complexity is needed to avoid double calls of the GetConn hook
+// during the back-and-forth between net/http and x/net/http2 (when the
+// net/http.Transport is upgraded to also speak http2), as well as support
+// the case where x/net/http2 is being used directly.
+func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
+	// If our Transport wasn't made via ConfigureTransport, always
+	// trace the GetConn hook if provided, because that means the
+	// http2 package is being used directly and it's the one
+	// dialing, as opposed to net/http.
+	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
+		return true
+	}
+	// Otherwise, only use the GetConn hook if this connection has
+	// been used previously for other requests. For fresh
+	// connections, the net/http package does the dialing.
+	return !st.freshConn
+}
+
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
 	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
+		traceGetConn(req, addr)
 		const singleUse = true
 		cc, err := p.t.dialClientConn(addr, singleUse)
 		if err != nil {
@@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 	}
 	p.mu.Lock()
 	for _, cc := range p.conns[addr] {
-		if cc.CanTakeNewRequest() {
+		if st := cc.idleState(); st.canTakeNewRequest {
+			if p.shouldTraceGetConn(st) {
+				traceGetConn(req, addr)
+			}
 			p.mu.Unlock()
 			return cc, nil
 		}
@@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 		p.mu.Unlock()
 		return nil, ErrNoCachedConn
 	}
+	traceGetConn(req, addr)
 	call := p.getStartDialLocked(addr)
 	p.mu.Unlock()
 	<-call.done
diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go
new file mode 100644
index 0000000000000000000000000000000000000000..e38ea29033c481f7eedaf943d475d23783cdb641
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go111.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package http2
+
+func traceHasWroteHeaderField(trace *clientTrace) bool {
+	return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *clientTrace, k, v string) {
+	if trace != nil && trace.WroteHeaderField != nil {
+		trace.WroteHeaderField(k, []string{v})
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
index 47b7fae081a3593849e5f4c04eaf843302a272ce..d957b7bc52dd18fca1dd7995024b2e2034636e5f 100644
--- a/vendor/golang.org/x/net/http2/go17.go
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -18,6 +18,8 @@ type contextContext interface {
 	context.Context
 }
 
+var errCanceled = context.Canceled
+
 func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
 	ctx, cancel = context.WithCancel(context.Background())
 	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
@@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration {
 
 func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
 
+func traceGetConn(req *http.Request, hostPort string) {
+	trace := httptrace.ContextClientTrace(req.Context())
+	if trace == nil || trace.GetConn == nil {
+		return
+	}
+	trace.GetConn(hostPort)
+}
+
 func traceGotConn(req *http.Request, cc *ClientConn) {
 	trace := httptrace.ContextClientTrace(req.Context())
 	if trace == nil || trace.GotConn == nil {
@@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace {
 func (cc *ClientConn) Ping(ctx context.Context) error {
 	return cc.ping(ctx)
 }
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+	return cc.shutdown(ctx)
+}
diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go
new file mode 100644
index 0000000000000000000000000000000000000000..d036b013fca9a693d4c7cceae271b5c95a159041
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go111.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package http2
+
+func traceHasWroteHeaderField(trace *clientTrace) bool { return false }
+
+func traceWroteHeaderField(trace *clientTrace, k, v string) {}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
index 140434a791a3a105469a240340e8d20f3a2b36aa..7ffb25046a05a4f66187480cca18f1c0c544b04c 100644
--- a/vendor/golang.org/x/net/http2/not_go17.go
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -8,6 +8,7 @@ package http2
 
 import (
 	"crypto/tls"
+	"errors"
 	"net"
 	"net/http"
 	"time"
@@ -18,6 +19,8 @@ type contextContext interface {
 	Err() error
 }
 
+var errCanceled = errors.New("canceled")
+
 type fakeContext struct{}
 
 func (fakeContext) Done() <-chan struct{} { return nil }
@@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) {
 type clientTrace struct{}
 
 func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGetConn(*http.Request, string)      {}
 func traceGotConn(*http.Request, *ClientConn) {}
 func traceFirstResponseByte(*clientTrace)     {}
 func traceWroteHeaders(*clientTrace)          {}
@@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error {
 	return cc.ping(ctx)
 }
 
+func (cc *ClientConn) Shutdown(ctx contextContext) error {
+	return cc.shutdown(ctx)
+}
+
 func (t *Transport) idleConnTimeout() time.Duration { return 0 }
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 793899169820b63f6b64b3c67d75c86866dccd96..e111019d5f76ca763812dd15a16300a1bb4ea1b7 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1721,6 +1721,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 			// processing this frame.
 			return nil
 		}
+		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+		// this state, it MUST respond with a stream error (Section 5.4.2) of
+		// type STREAM_CLOSED.
+		if st.state == stateHalfClosedRemote {
+			return streamError(id, ErrCodeStreamClosed)
+		}
 		return st.processTrailerHeaders(f)
 	}
 
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index d23a226251c26cf4bd95e80a5089ac322bbdce39..300b02fe7cede8e32382effa2491041680283db5 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -159,6 +159,7 @@ type ClientConn struct {
 	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
 	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
 	inflow          flow       // peer's conn-level flow control
+	closing         bool
 	closed          bool
 	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
 	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
@@ -630,12 +631,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
 	return cc.canTakeNewRequestLocked()
 }
 
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type clientConnIdleState struct {
+	canTakeNewRequest bool
+	freshConn         bool // whether it's unused by any previous request
+}
+
+func (cc *ClientConn) idleState() clientConnIdleState {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.idleStateLocked()
+}
+
+func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
 	if cc.singleUse && cc.nextStreamID > 1 {
-		return false
+		return
 	}
-	return cc.goAway == nil && !cc.closed &&
+	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
 		int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
+	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
+	return
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	st := cc.idleStateLocked()
+	return st.canTakeNewRequest
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -665,6 +686,88 @@ func (cc *ClientConn) closeIfIdle() {
 	cc.tconn.Close()
 }
 
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully close the client connection, waiting for running streams to complete.
+// Public implementation is in go17.go and not_go17.go
+func (cc *ClientConn) shutdown(ctx contextContext) error {
+	if err := cc.sendGoAway(); err != nil {
+		return err
+	}
+	// Wait for all in-flight streams to complete or connection to close
+	done := make(chan error, 1)
+	cancelled := false // guarded by cc.mu
+	go func() {
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		for {
+			if len(cc.streams) == 0 || cc.closed {
+				cc.closed = true
+				done <- cc.tconn.Close()
+				break
+			}
+			if cancelled {
+				break
+			}
+			cc.cond.Wait()
+		}
+	}()
+	shutdownEnterWaitStateHook()
+	select {
+	case err := <-done:
+		return err
+	case <-ctx.Done():
+		cc.mu.Lock()
+		// Free the goroutine above
+		cancelled = true
+		cc.cond.Broadcast()
+		cc.mu.Unlock()
+		return ctx.Err()
+	}
+}
+
+func (cc *ClientConn) sendGoAway() error {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if cc.closing {
+		// GOAWAY sent already
+		return nil
+	}
+	// Send a graceful shutdown frame to server
+	maxStreamID := cc.nextStreamID
+	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		return err
+	}
+	// Prevent new requests
+	cc.closing = true
+	return nil
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+	cc.mu.Lock()
+	defer cc.cond.Broadcast()
+	defer cc.mu.Unlock()
+	err := errors.New("http2: client connection force closed via ClientConn.Close")
+	for id, cs := range cc.streams {
+		select {
+		case cs.resc <- resAndError{err: err}:
+		default:
+		}
+		cs.bufPipe.CloseWithError(err)
+		delete(cc.streams, id)
+	}
+	cc.closed = true
+	return cc.tconn.Close()
+}
+
 const maxAllocFrameSize = 512 << 10
 
 // frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -747,7 +850,7 @@ func checkConnHeaders(req *http.Request) error {
 	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
 		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
 	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
 		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
 	}
 	return nil
@@ -1291,9 +1394,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 		return nil, errRequestHeaderListSize
 	}
 
+	trace := requestTrace(req)
+	traceHeaders := traceHasWroteHeaderField(trace)
+
 	// Header list size is ok. Write the headers.
 	enumerateHeaders(func(name, value string) {
-		cc.writeHeader(strings.ToLower(name), value)
+		name = strings.ToLower(name)
+		cc.writeHeader(name, value)
+		if traceHeaders {
+			traceWroteHeaderField(trace, name, value)
+		}
 	})
 
 	return cc.hbuf.Bytes(), nil
diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go
new file mode 100644
index 0000000000000000000000000000000000000000..cee46e331ff3ebd8d443920e3b89c01c19e78452
--- /dev/null
+++ b/vendor/golang.org/x/net/netutil/listen.go
@@ -0,0 +1,74 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netutil provides network utility functions, complementing the more
+// common ones in the net package.
+package netutil // import "golang.org/x/net/netutil"
+
+import (
+	"net"
+	"sync"
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+	return &limitListener{
+		Listener: l,
+		sem:      make(chan struct{}, n),
+		done:     make(chan struct{}),
+	}
+}
+
+type limitListener struct {
+	net.Listener
+	sem       chan struct{}
+	closeOnce sync.Once     // ensures the done chan is only closed once
+	done      chan struct{} // no values sent; closed when Close is called
+}
+
+// acquire acquires the limiting semaphore. Returns true if successfully
+// accquired, false if the listener is closed and the semaphore is not
+// acquired.
+func (l *limitListener) acquire() bool {
+	select {
+	case <-l.done:
+		return false
+	case l.sem <- struct{}{}:
+		return true
+	}
+}
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+	acquired := l.acquire()
+	// If the semaphore isn't acquired because the listener was closed, expect
+	// that this call to accept won't block, but immediately return an error.
+	c, err := l.Listener.Accept()
+	if err != nil {
+		if acquired {
+			l.release()
+		}
+		return nil, err
+	}
+	return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+func (l *limitListener) Close() error {
+	err := l.Listener.Close()
+	l.closeOnce.Do(func() { close(l.done) })
+	return err
+}
+
+type limitListenerConn struct {
+	net.Conn
+	releaseOnce sync.Once
+	release     func()
+}
+
+func (l *limitListenerConn) Close() error {
+	err := l.Conn.Close()
+	l.releaseOnce.Do(l.release)
+	return err
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 53fb8518237d9d985d730f55c7292f57d9c1f03c..33c8b5f0db728bc684b99f5bdb765b58a5f2b087 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -206,7 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_LINK:
 		pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa))
@@ -286,7 +286,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 		Close(nfd)
 		return 0, nil, ECONNABORTED
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -306,7 +306,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 		rsa.Addr.Family = AF_UNIX
 		rsa.Addr.Len = SizeofSockaddrUnix
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
@@ -356,7 +356,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	recvflags = int(msg.Flags)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index b5072de28535390886951b0f0ad7451a3d278d76..e34abe29df8865867b59dd320f390171b57a7eca 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -87,7 +87,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index ba9df4ac1264958bafc6286e33523f6208c39a40..5561a3eb764727cd9a667213df27211fb9457c87 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -89,7 +89,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 9908030cbe3209ac1a005c559c16b4ada932e55d..690c2c87f0232f394766754dd837dec28df6dc37 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -489,6 +489,47 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
 }
 
+// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets
+// using the RFCOMM protocol.
+//
+// Server example:
+//
+//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+//      _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
+//      	Channel: 1,
+//      	Addr:    [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
+//      })
+//      _ = Listen(fd, 1)
+//      nfd, sa, _ := Accept(fd)
+//      fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
+//      Read(nfd, buf)
+//
+// Client example:
+//
+//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+//      _ = Connect(fd, &SockaddrRFCOMM{
+//      	Channel: 1,
+//      	Addr:    [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
+//      })
+//      Write(fd, []byte(`hello`))
+type SockaddrRFCOMM struct {
+	// Addr represents a bluetooth address, byte ordering is little-endian.
+	Addr [6]uint8
+
+	// Channel is a designated bluetooth channel, only 1-30 are available for use.
+	// Since Linux 2.6.7 and further zero value is the first available channel.
+	Channel uint8
+
+	raw RawSockaddrRFCOMM
+}
+
+func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
+	sa.raw.Family = AF_BLUETOOTH
+	sa.raw.Channel = sa.Channel
+	sa.raw.Bdaddr = sa.Addr
+	return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil
+}
+
 // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
 // The RxID and TxID fields are used for transport protocol addressing in
 // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
@@ -651,7 +692,7 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_NETLINK:
 		pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
@@ -728,6 +769,30 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
 			Port: pp.Port,
 		}
 		return sa, nil
+	case AF_BLUETOOTH:
+		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
+		if err != nil {
+			return nil, err
+		}
+		// only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections
+		switch proto {
+		case BTPROTO_L2CAP:
+			pp := (*RawSockaddrL2)(unsafe.Pointer(rsa))
+			sa := &SockaddrL2{
+				PSM:      pp.Psm,
+				CID:      pp.Cid,
+				Addr:     pp.Bdaddr,
+				AddrType: pp.Bdaddr_type,
+			}
+			return sa, nil
+		case BTPROTO_RFCOMM:
+			pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa))
+			sa := &SockaddrRFCOMM{
+				Channel: pp.Channel,
+				Addr:    pp.Bdaddr,
+			}
+			return sa, nil
+		}
 	}
 	return nil, EAFNOSUPPORT
 }
@@ -739,7 +804,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	if err != nil {
 		return
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -757,7 +822,7 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -771,7 +836,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	if err = getsockname(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
@@ -960,7 +1025,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	recvflags = int(msg.Flags)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 820ef77af279cf50ce083b115aa280b2de1ac7d9..a05337d540964fe4b736f4b7c6625c1c19cbe402 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -112,7 +112,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	if err = getsockname(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 // GetsockoptString returns the string value of the socket option opt for the
@@ -360,7 +360,7 @@ func Futimes(fd int, tv []Timeval) error {
 	return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_UNIX:
 		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
@@ -411,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	if nfd == -1 {
 		return
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -448,7 +448,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	oobn = int(msg.Accrightslen)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index b835bad0fe47357da17e1cbe4a8e2563b15e84f7..95b2180aebe3ed7cd62937171375cc31f8921048 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -219,7 +219,7 @@ func Getpeername(fd int) (sa Sockaddr, err error) {
 	if err = getpeername(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 func GetsockoptByte(fd, level, opt int) (value byte, err error) {
@@ -291,7 +291,7 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
 		return
 	}
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 0ae2aa842254a224da1bdbeedfe3e2dec75d7156..7cc1bfd129f2a7273380b52fc53b98c8546d1dda 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -1474,8 +1474,13 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+func faccessat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func EpollCreate(size int) (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
-	fd = int(r0)
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func faccessat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index fa16c165e1179915e75410d1ad0e06ce24d60674..c3dcb381861bea68784057233ec25affa60ec3d9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -1474,8 +1474,13 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+func faccessat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func EpollCreate(size int) (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
-	fd = int(r0)
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func faccessat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index e89bc6b3664cede7872b813e2d4deed84803f36a..4c250033fa76b019ac847e61013558b3f2234d08 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -248,6 +248,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -401,6 +408,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index d95372baecf91d1aebd836108711ea8687a9fcb7..2e4d709b4fd74fe6bd38d1cd1b52e5826618e7f4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -250,6 +250,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -405,6 +412,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 77875ba01b67b70afdcf47f491af94fbe17899ce..bf38e5e2cd81d6c35bcd4ca7da8316d599472d65 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -404,6 +411,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 5a9df694a6f769df7b9d5717bc81bc80c6944b19..972c1b872363187b9f7a13f430f5bbb73b755865 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index dcb239de813da64813f9c44137f227e80e2f3170..783e70e87ca40cde6ffe6c6cfe99afe3db52d9ce 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -249,6 +249,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -402,6 +409,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 9cf85f7218a3d797a3615ae81b24aae666d5c5c9..5c6ea719dabc6e54ef95cc3ab9930072d4061c06 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 6fd66e7510db53f75677683b00e1cfe883a8799e..93effc8eca8e506f480f0cd18b6dec917b072886 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index faa5b3ef18077f1d3358191b8d84317628eb0109..cc5ca242ebffdc83cfe8c5580c4dc89bdda8bc56 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -249,6 +249,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -402,6 +409,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index ad4c452460c0c0e281e19ce1b7d67bedeb3fc47e..712f640295bcca5056b24d0af15e9ed961c3e1ba 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -252,6 +252,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -407,6 +414,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 1fdb2f21626dfe42579ccb8b16c61f2b4257c1ec..1be45320abea8d88c3fb314e02f97e9a40c24065 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -252,6 +252,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -407,6 +414,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index d32079d1aaa097c5c6e779e55715f74f34ef9c0d..932b655fed6f89479b6e98011312f9d2408fca9a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -250,6 +250,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -405,6 +412,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index b4e424788d5180868c15f81151a6a229685ee593..7d2a6794db84634d6821622a77c34261c2c09512 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -94,16 +94,29 @@ const (
 	FILE_APPEND_DATA      = 0x00000004
 	FILE_WRITE_ATTRIBUTES = 0x00000100
 
-	FILE_SHARE_READ              = 0x00000001
-	FILE_SHARE_WRITE             = 0x00000002
-	FILE_SHARE_DELETE            = 0x00000004
-	FILE_ATTRIBUTE_READONLY      = 0x00000001
-	FILE_ATTRIBUTE_HIDDEN        = 0x00000002
-	FILE_ATTRIBUTE_SYSTEM        = 0x00000004
-	FILE_ATTRIBUTE_DIRECTORY     = 0x00000010
-	FILE_ATTRIBUTE_ARCHIVE       = 0x00000020
-	FILE_ATTRIBUTE_NORMAL        = 0x00000080
-	FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+	FILE_SHARE_READ   = 0x00000001
+	FILE_SHARE_WRITE  = 0x00000002
+	FILE_SHARE_DELETE = 0x00000004
+
+	FILE_ATTRIBUTE_READONLY              = 0x00000001
+	FILE_ATTRIBUTE_HIDDEN                = 0x00000002
+	FILE_ATTRIBUTE_SYSTEM                = 0x00000004
+	FILE_ATTRIBUTE_DIRECTORY             = 0x00000010
+	FILE_ATTRIBUTE_ARCHIVE               = 0x00000020
+	FILE_ATTRIBUTE_DEVICE                = 0x00000040
+	FILE_ATTRIBUTE_NORMAL                = 0x00000080
+	FILE_ATTRIBUTE_TEMPORARY             = 0x00000100
+	FILE_ATTRIBUTE_SPARSE_FILE           = 0x00000200
+	FILE_ATTRIBUTE_REPARSE_POINT         = 0x00000400
+	FILE_ATTRIBUTE_COMPRESSED            = 0x00000800
+	FILE_ATTRIBUTE_OFFLINE               = 0x00001000
+	FILE_ATTRIBUTE_NOT_CONTENT_INDEXED   = 0x00002000
+	FILE_ATTRIBUTE_ENCRYPTED             = 0x00004000
+	FILE_ATTRIBUTE_INTEGRITY_STREAM      = 0x00008000
+	FILE_ATTRIBUTE_VIRTUAL               = 0x00010000
+	FILE_ATTRIBUTE_NO_SCRUB_DATA         = 0x00020000
+	FILE_ATTRIBUTE_RECALL_ON_OPEN        = 0x00040000
+	FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
 
 	INVALID_FILE_ATTRIBUTES = 0xffffffff