From 00fedc6de2a25446b63f532448306436a762dd2a Mon Sep 17 00:00:00 2001
From: Silas Davis <silas@monax.io>
Date: Fri, 20 Jul 2018 12:39:33 +0100
Subject: [PATCH] Upgrade tendermint in vendor

Signed-off-by: Silas Davis <silas@monax.io>
---
 Gopkg.lock                                    | 135 ++--
 Gopkg.toml                                    |  23 +-
 README.md                                     |   2 +-
 .../github.com/btcsuite/btcd/btcec/pubkey.go  |  13 +-
 .../go-kit/kit/metrics/discard/discard.go     |  40 +
 vendor/github.com/go-kit/kit/metrics/doc.go   |  97 +++
 .../kit/metrics/internal/lv/labelvalues.go    |  14 +
 .../go-kit/kit/metrics/internal/lv/space.go   | 145 ++++
 .../github.com/go-kit/kit/metrics/metrics.go  |  25 +
 .../kit/metrics/prometheus/prometheus.go      | 165 ++++
 vendor/github.com/go-kit/kit/metrics/timer.go |  36 +
 .../client_golang/prometheus/collector.go     |  39 +-
 .../client_golang/prometheus/doc.go           |  12 +-
 .../prometheus/promhttp/delegator.go          | 199 +++++
 .../prometheus/promhttp/delegator_1_8.go      | 181 +++++
 .../prometheus/promhttp/delegator_pre_1_8.go  |  44 +
 .../client_golang/prometheus/promhttp/http.go | 311 +++++++
 .../prometheus/promhttp/instrument_client.go  |  97 +++
 .../promhttp/instrument_client_1_8.go         | 144 ++++
 .../prometheus/promhttp/instrument_server.go  | 447 +++++++++++
 .../client_golang/prometheus/registry.go      | 120 ++-
 .../prometheus/client_model/go/metrics.pb.go  | 381 +++++++--
 .../prometheus/procfs/mountstats.go           |  51 +-
 .../syndtr/goleveldb/leveldb/db_compaction.go |  40 +-
 .../github.com/tendermint/go-amino/amino.go   | 195 +++--
 .../tendermint/go-amino/binary-decode.go      | 643 ++++++++-------
 .../tendermint/go-amino/binary-encode.go      | 272 ++++---
 .../github.com/tendermint/go-amino/codec.go   | 207 ++++-
 .../github.com/tendermint/go-amino/decoder.go |  51 +-
 .../github.com/tendermint/go-amino/encoder.go |   9 +-
 .../tendermint/go-amino/json-decode.go        | 167 ++--
 .../tendermint/go-amino/json-encode.go        |  91 +--
 .../github.com/tendermint/go-amino/reflect.go |  92 ++-
 .../github.com/tendermint/go-amino/version.go |   2 +-
 vendor/github.com/tendermint/go-crypto/doc.go |  48 --
 .../tendermint/go-crypto/version.go           |   3 -
 vendor/github.com/tendermint/iavl/amino.go    |   5 -
 vendor/github.com/tendermint/iavl/chunk.go    | 185 -----
 vendor/github.com/tendermint/iavl/doc.go      |  10 +-
 vendor/github.com/tendermint/iavl/node.go     | 314 ++++----
 vendor/github.com/tendermint/iavl/nodedb.go   |  60 +-
 .../tendermint/iavl/orphaning_tree.go         |   1 +
 vendor/github.com/tendermint/iavl/path.go     | 156 ----
 vendor/github.com/tendermint/iavl/proof.go    | 263 +++---
 .../github.com/tendermint/iavl/proof_key.go   | 161 ----
 .../github.com/tendermint/iavl/proof_path.go  | 167 ++++
 .../github.com/tendermint/iavl/proof_range.go | 757 ++++++++++--------
 .../github.com/tendermint/iavl/serialize.go   | 200 -----
 .../iavl/sha256truncated/sha256truncated.go   |  44 -
 vendor/github.com/tendermint/iavl/tree.go     |  54 +-
 .../tendermint/iavl/tree_dotgraph.go          |   4 +-
 vendor/github.com/tendermint/iavl/util.go     |  84 +-
 vendor/github.com/tendermint/iavl/version.go  |   3 +-
 .../tendermint/iavl/versioned_tree.go         |  52 +-
 vendor/github.com/tendermint/iavl/wire.go     |  17 +
 .../{ => tendermint}/abci/client/client.go    |   4 +-
 .../abci/client/grpc_client.go                |   4 +-
 .../abci/client/local_client.go               |   4 +-
 .../abci/client/socket_client.go              |  11 +-
 .../abci/example/code/code.go                 |   0
 .../abci/example/kvstore/README.md            |   0
 .../abci/example/kvstore/helpers.go           |   4 +-
 .../abci/example/kvstore/kvstore.go           |   8 +-
 .../example/kvstore/persistent_kvstore.go     |  10 +-
 .../abci/types/application.go                 |   0
 .../{ => tendermint}/abci/types/messages.go   |   0
 .../{ => tendermint}/abci/types/pubkey.go     |   0
 .../{ => tendermint}/abci/types/result.go     |   0
 .../{ => tendermint}/abci/types/types.pb.go   |   2 +-
 .../{ => tendermint}/abci/types/types.proto   |   0
 .../{ => tendermint}/abci/types/util.go       |   2 +-
 .../tendermint/tendermint/blockchain/pool.go  |   6 +-
 .../tendermint/blockchain/reactor.go          |  19 +-
 .../tendermint/tendermint/blockchain/store.go |   4 +-
 .../tendermint/tendermint/blockchain/wire.go  |   2 +-
 .../tendermint/tendermint/config/config.go    | 123 ++-
 .../tendermint/tendermint/config/toml.go      |  60 +-
 .../tendermint/tendermint/consensus/README.md |   1 +
 .../tendermint/consensus/metrics.go           | 133 +++
 .../tendermint/consensus/reactor.go           |  15 +-
 .../tendermint/tendermint/consensus/replay.go |  12 +-
 .../tendermint/consensus/replay_file.go       |   6 +-
 .../tendermint/tendermint/consensus/state.go  | 100 ++-
 .../tendermint/tendermint/consensus/ticker.go |   4 +-
 .../consensus/types/height_vote_set.go        |   2 +-
 .../consensus/types/peer_round_state.go       |   2 +-
 .../tendermint/consensus/types/round_state.go |   2 +-
 .../tendermint/consensus/types/wire.go        |   2 +-
 .../tendermint/consensus/version.go           |   2 +-
 .../tendermint/tendermint/consensus/wal.go    |   4 +-
 .../tendermint/consensus/wal_generator.go     |  18 +-
 .../tendermint/tendermint/consensus/wire.go   |   2 +-
 .../tendermint/tendermint/crypto/CHANGELOG.md | 154 ++++
 .../tendermint/tendermint/crypto/README.md    |  25 +
 .../{go-crypto => tendermint/crypto}/amino.go |   5 +-
 .../{go-crypto => tendermint/crypto}/armor.go |   8 +-
 .../tendermint/tendermint/crypto/doc.go       |  45 ++
 .../{go-crypto => tendermint/crypto}/hash.go  |   0
 .../crypto}/merkle/README.md                  |   0
 .../tendermint/crypto/merkle/doc.go           |  31 +
 .../crypto}/merkle/simple_map.go              |  42 +-
 .../crypto}/merkle/simple_proof.go            |  28 +-
 .../tendermint/crypto/merkle/simple_tree.go   |  58 ++
 .../crypto}/merkle/types.go                   |  21 +-
 .../crypto}/priv_key.go                       |  44 +-
 .../crypto}/pub_key.go                        |  31 +-
 .../crypto}/random.go                         |  11 +-
 .../crypto}/signature.go                      |  19 +-
 .../crypto}/symmetric.go                      |   2 +-
 .../tendermint/crypto/tmhash/hash.go          |  48 ++
 .../tendermint/tendermint/crypto/version.go   |   3 +
 .../tendermint/tendermint/evidence/pool.go    |   6 +-
 .../tendermint/tendermint/evidence/reactor.go |  14 +-
 .../tendermint/tendermint/evidence/store.go   |   2 +-
 .../tendermint/tendermint/evidence/wire.go    |   2 +-
 .../libs}/autofile/README.md                  |   0
 .../libs}/autofile/autofile.go                |   2 +-
 .../libs}/autofile/group.go                   |   2 +-
 .../libs}/autofile/sighup_watcher.go          |   0
 .../libs}/clist/clist.go                      |   0
 .../libs}/common/LICENSE                      |   0
 .../libs}/common/async.go                     |  18 +-
 .../libs}/common/bit_array.go                 |   0
 .../libs}/common/bytes.go                     |   0
 .../libs}/common/byteslice.go                 |   0
 .../libs}/common/cmap.go                      |   0
 .../libs}/common/colors.go                    |  12 +
 .../libs}/common/date.go                      |   0
 .../libs}/common/errors.go                    | 173 ++--
 .../libs}/common/heap.go                      |   0
 .../{tmlibs => tendermint/libs}/common/int.go |   0
 .../{tmlibs => tendermint/libs}/common/io.go  |   0
 .../libs}/common/kvpair.go                    |   0
 .../libs}/common/math.go                      |   0
 .../{tmlibs => tendermint/libs}/common/net.go |   0
 .../{tmlibs => tendermint/libs}/common/nil.go |   0
 .../{tmlibs => tendermint/libs}/common/os.go  |   0
 .../libs}/common/random.go                    |   0
 .../libs}/common/repeat_timer.go              |   0
 .../libs}/common/service.go                   |   2 +-
 .../libs}/common/string.go                    |   0
 .../libs}/common/throttle_timer.go            |   0
 .../libs}/common/types.pb.go                  |  31 +-
 .../libs}/common/types.proto                  |   7 -
 .../libs}/common/word.go                      |   0
 .../{tmlibs => tendermint/libs}/db/LICENSE.md |   0
 .../{tmlibs => tendermint/libs}/db/README.md  |   0
 .../libs}/db/c_level_db.go                    |  45 +-
 .../{tmlibs => tendermint/libs}/db/db.go      |   0
 .../libs}/db/debug_db.go                      |  62 +-
 .../{tmlibs => tendermint/libs}/db/fsdb.go    |  20 +-
 .../libs}/db/go_level_db.go                   |  46 +-
 .../libs}/db/mem_batch.go                     |   0
 .../{tmlibs => tendermint/libs}/db/mem_db.go  |   2 +-
 .../libs}/db/prefix_db.go                     |   0
 .../{tmlibs => tendermint/libs}/db/types.go   |   0
 .../{tmlibs => tendermint/libs}/db/util.go    |   0
 .../tendermint/libs/events/Makefile           |   9 +
 .../tendermint/libs/events/README.md          | 175 ++++
 .../tendermint/libs/events/events.go          |   2 +-
 .../libs}/flowrate/README.md                  |   0
 .../libs}/flowrate/flowrate.go                |   0
 .../libs}/flowrate/io.go                      |   0
 .../libs}/flowrate/util.go                    |   0
 .../{tmlibs => tendermint/libs}/log/filter.go |   0
 .../{tmlibs => tendermint/libs}/log/logger.go |   0
 .../libs}/log/nop_logger.go                   |   0
 .../libs}/log/testing_logger.go               |   0
 .../libs}/log/tm_json_logger.go               |   0
 .../libs}/log/tm_logger.go                    |   0
 .../libs}/log/tmfmt_logger.go                 |   0
 .../libs}/log/tracing_logger.go               |   0
 .../tendermint/libs/pubsub/pubsub.go          |  10 +-
 .../tendermint/libs/pubsub/query/Makefile     |  11 +
 .../tendermint/libs/pubsub/query/query.peg    |  33 +
 .../tendermint/libs/pubsub/query/query.peg.go |   2 +
 .../tendermint/tendermint/mempool/mempool.go  |  42 +-
 .../tendermint/tendermint/mempool/metrics.go  |  34 +
 .../tendermint/tendermint/mempool/reactor.go  |  16 +-
 .../tendermint/tendermint/node/id.go          |   2 +-
 .../tendermint/tendermint/node/node.go        | 130 ++-
 .../tendermint/tendermint/node/wire.go        |   2 +-
 .../tendermint/tendermint/p2p/README.md       |  11 +
 .../tendermint/tendermint/p2p/base_reactor.go |   2 +-
 .../tendermint/p2p/conn/connection.go         |  36 +-
 .../tendermint/p2p/conn/secret_connection.go  |  10 +-
 .../tendermint/tendermint/p2p/conn/wire.go    |   2 +-
 .../tendermint/tendermint/p2p/fuzz.go         |   2 +-
 .../tendermint/tendermint/p2p/key.go          |   4 +-
 .../tendermint/tendermint/p2p/listener.go     | 104 ++-
 .../tendermint/tendermint/p2p/metrics.go      |  33 +
 .../tendermint/tendermint/p2p/netaddress.go   |   2 +-
 .../tendermint/tendermint/p2p/node_info.go    |   2 +-
 .../tendermint/tendermint/p2p/peer.go         |   6 +-
 .../tendermint/tendermint/p2p/peer_set.go     |  11 +-
 .../tendermint/tendermint/p2p/pex/addrbook.go |   4 +-
 .../tendermint/tendermint/p2p/pex/file.go     |   2 +-
 .../tendermint/p2p/pex/pex_reactor.go         |  24 +-
 .../tendermint/tendermint/p2p/switch.go       |  22 +-
 .../tendermint/tendermint/p2p/test_util.go    |   6 +-
 .../tendermint/tendermint/p2p/upnp/probe.go   |   4 +-
 .../tendermint/tendermint/p2p/wire.go         |   2 +-
 .../tendermint/privval/priv_validator.go      |  24 +-
 .../tendermint/tendermint/privval/socket.go   |   6 +-
 .../tendermint/tendermint/privval/wire.go     |   2 +-
 .../tendermint/tendermint/proxy/app_conn.go   |   4 +-
 .../tendermint/tendermint/proxy/client.go     |   6 +-
 .../tendermint/proxy/multi_app_conn.go        |   2 +-
 .../tendermint/tendermint/rpc/core/README.md  |  20 +
 .../tendermint/tendermint/rpc/core/abci.go    |   4 +-
 .../tendermint/tendermint/rpc/core/blocks.go  |   2 +-
 .../tendermint/rpc/core/consensus.go          |   8 +-
 .../tendermint/rpc/core/doc_template.txt      |   8 +
 .../tendermint/tendermint/rpc/core/mempool.go |   4 +-
 .../tendermint/tendermint/rpc/core/pipe.go    |   6 +-
 .../tendermint/tendermint/rpc/core/status.go  |   8 +-
 .../tendermint/tendermint/rpc/core/tx.go      |   2 +-
 .../tendermint/rpc/core/types/responses.go    |   8 +-
 .../tendermint/rpc/core/types/wire.go         |   2 +-
 .../tendermint/tendermint/rpc/grpc/api.go     |   2 +-
 .../tendermint/rpc/grpc/client_server.go      |  20 +-
 .../tendermint/tendermint/rpc/grpc/compile.sh |   3 +
 .../tendermint/rpc/grpc/types.pb.go           |   2 +-
 .../tendermint/rpc/grpc/types.proto           |  36 +
 .../tendermint/tendermint/rpc/lib/doc.go      |   2 +-
 .../tendermint/rpc/lib/server/handlers.go     |  76 +-
 .../tendermint/rpc/lib/server/http_params.go  |   1 +
 .../tendermint/rpc/lib/server/http_server.go  |  84 +-
 .../tendermint/tendermint/state/errors.go     |   2 +-
 .../tendermint/tendermint/state/execution.go  |  24 +-
 .../tendermint/tendermint/state/services.go   |   2 +-
 .../tendermint/tendermint/state/store.go      |  29 +-
 .../state/txindex/indexer_service.go          |   2 +-
 .../tendermint/state/txindex/kv/kv.go         |   4 +-
 .../tendermint/tendermint/state/validation.go |   2 +-
 .../tendermint/tendermint/state/wire.go       |   2 +-
 .../{abci => tendermint/tools/build}/LICENSE  |  23 +-
 .../tools/build/basecoind/DEBIAN/copyright    |  21 +
 .../tools/build/ethermint/DEBIAN/copyright    |  21 +
 .../tools/build/gaia/DEBIAN/copyright         |  21 +
 .../tools/build/tendermint/DEBIAN/copyright   |  21 +
 .../tools/mintnet-kubernetes}/LICENSE         |   1 -
 .../tools/tm-bench}/LICENSE                   |  23 +-
 .../tendermint/tools/tm-monitor/LICENSE       | 204 +++++
 .../tendermint/tendermint/types/block.go      |  27 +-
 .../tendermint/types/canonical_json.go        |   5 +-
 .../tendermint/types/event_buffer.go          |  50 --
 .../tendermint/tendermint/types/event_bus.go  |   6 +-
 .../tendermint/tendermint/types/events.go     |  10 -
 .../tendermint/tendermint/types/evidence.go   |   9 +-
 .../tendermint/tendermint/types/genesis.go    |  16 +-
 .../tendermint/tendermint/types/heartbeat.go  |   4 +-
 .../tendermint/tendermint/types/params.go     |   9 +-
 .../tendermint/tendermint/types/part_set.go   |   9 +-
 .../tendermint/types/priv_validator.go        |  17 +-
 .../tendermint/tendermint/types/proposal.go   |   2 +-
 .../tendermint/tendermint/types/protobuf.go   |  10 +-
 .../tendermint/tendermint/types/results.go    |  17 +-
 .../tendermint/tendermint/types/tx.go         |  16 +-
 .../tendermint/tendermint/types/validator.go  |   4 +-
 .../tendermint/types/validator_set.go         |   9 +-
 .../tendermint/tendermint/types/vote.go       |   4 +-
 .../tendermint/tendermint/types/vote_set.go   |   2 +-
 .../tendermint/tendermint/types/wire.go       |   2 +-
 .../tendermint/tendermint/version/version.go  |   6 +-
 .../tendermint/tmlibs/common/array.go         |   5 -
 .../tendermint/tmlibs/merkle/simple_tree.go   |  91 ---
 .../x/net/http2/client_conn_pool.go           |  28 +-
 vendor/golang.org/x/net/http2/go111.go        |  17 +
 vendor/golang.org/x/net/http2/go17.go         |  15 +
 vendor/golang.org/x/net/http2/not_go111.go    |  11 +
 vendor/golang.org/x/net/http2/not_go17.go     |   8 +
 vendor/golang.org/x/net/http2/server.go       |   7 +
 vendor/golang.org/x/net/http2/transport.go    | 120 ++-
 vendor/golang.org/x/net/netutil/listen.go     |  74 ++
 vendor/golang.org/x/sys/unix/syscall_bsd.go   |   8 +-
 .../x/sys/unix/syscall_dragonfly.go           |   2 +-
 .../golang.org/x/sys/unix/syscall_freebsd.go  |   2 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go |  75 +-
 .../golang.org/x/sys/unix/syscall_solaris.go  |   8 +-
 vendor/golang.org/x/sys/unix/syscall_unix.go  |   4 +-
 .../x/sys/unix/zsyscall_linux_ppc64.go        |  24 +-
 .../x/sys/unix/zsyscall_linux_ppc64le.go      |  24 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go |   8 +
 .../x/sys/unix/ztypes_linux_amd64.go          |   8 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go |   8 +
 .../x/sys/unix/ztypes_linux_arm64.go          |   8 +
 .../x/sys/unix/ztypes_linux_mips.go           |   8 +
 .../x/sys/unix/ztypes_linux_mips64.go         |   8 +
 .../x/sys/unix/ztypes_linux_mips64le.go       |   8 +
 .../x/sys/unix/ztypes_linux_mipsle.go         |   8 +
 .../x/sys/unix/ztypes_linux_ppc64.go          |   8 +
 .../x/sys/unix/ztypes_linux_ppc64le.go        |   8 +
 .../x/sys/unix/ztypes_linux_s390x.go          |   8 +
 .../golang.org/x/sys/windows/types_windows.go |  33 +-
 295 files changed, 7558 insertions(+), 3488 deletions(-)
 create mode 100644 vendor/github.com/go-kit/kit/metrics/discard/discard.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/doc.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/metrics.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go
 create mode 100644 vendor/github.com/go-kit/kit/metrics/timer.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
 delete mode 100644 vendor/github.com/tendermint/go-crypto/doc.go
 delete mode 100644 vendor/github.com/tendermint/go-crypto/version.go
 delete mode 100644 vendor/github.com/tendermint/iavl/amino.go
 delete mode 100644 vendor/github.com/tendermint/iavl/chunk.go
 delete mode 100644 vendor/github.com/tendermint/iavl/path.go
 delete mode 100644 vendor/github.com/tendermint/iavl/proof_key.go
 create mode 100644 vendor/github.com/tendermint/iavl/proof_path.go
 delete mode 100644 vendor/github.com/tendermint/iavl/serialize.go
 delete mode 100644 vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go
 create mode 100644 vendor/github.com/tendermint/iavl/wire.go
 rename vendor/github.com/tendermint/{ => tendermint}/abci/client/client.go (97%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/client/grpc_client.go (98%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/client/local_client.go (98%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/client/socket_client.go (96%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/example/code/code.go (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/example/kvstore/README.md (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/example/kvstore/helpers.go (90%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/example/kvstore/kvstore.go (93%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/example/kvstore/persistent_kvstore.go (95%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/application.go (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/messages.go (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/pubkey.go (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/result.go (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/types.pb.go (99%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/types.proto (100%)
 rename vendor/github.com/tendermint/{ => tendermint}/abci/types/util.go (96%)
 create mode 100644 vendor/github.com/tendermint/tendermint/consensus/README.md
 create mode 100644 vendor/github.com/tendermint/tendermint/consensus/metrics.go
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/README.md
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/amino.go (85%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/armor.go (76%)
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/doc.go
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/hash.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/crypto}/merkle/README.md (100%)
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go
 rename vendor/github.com/tendermint/{tmlibs => tendermint/crypto}/merkle/simple_map.go (53%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/crypto}/merkle/simple_proof.go (79%)
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go
 rename vendor/github.com/tendermint/{tmlibs => tendermint/crypto}/merkle/types.go (72%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/priv_key.go (84%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/pub_key.go (83%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/random.go (89%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/signature.go (79%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/crypto}/symmetric.go (97%)
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go
 create mode 100644 vendor/github.com/tendermint/tendermint/crypto/version.go
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/autofile/README.md (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/autofile/autofile.go (97%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/autofile/group.go (99%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/autofile/sighup_watcher.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/clist/clist.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/LICENSE (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/async.go (95%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/bit_array.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/bytes.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/byteslice.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/cmap.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/colors.go (87%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/date.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/errors.go (53%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/heap.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/int.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/io.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/kvpair.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/math.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/net.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/nil.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/os.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/random.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/repeat_timer.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/service.go (99%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/string.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/throttle_timer.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/types.pb.go (68%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/types.proto (56%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/common/word.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/LICENSE.md (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/README.md (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/c_level_db.go (88%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/db.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/debug_db.go (68%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/fsdb.go (90%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/go_level_db.go (88%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/mem_batch.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/mem_db.go (98%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/prefix_db.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/types.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/db/util.go (100%)
 create mode 100644 vendor/github.com/tendermint/tendermint/libs/events/Makefile
 create mode 100644 vendor/github.com/tendermint/tendermint/libs/events/README.md
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/flowrate/README.md (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/flowrate/flowrate.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/flowrate/io.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/flowrate/util.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/filter.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/nop_logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/testing_logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/tm_json_logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/tm_logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/tmfmt_logger.go (100%)
 rename vendor/github.com/tendermint/{tmlibs => tendermint/libs}/log/tracing_logger.go (100%)
 create mode 100644 vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile
 create mode 100644 vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg
 create mode 100644 vendor/github.com/tendermint/tendermint/mempool/metrics.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/README.md
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/metrics.go
 create mode 100644 vendor/github.com/tendermint/tendermint/rpc/core/README.md
 create mode 100644 vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt
 create mode 100644 vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh
 create mode 100644 vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto
 rename vendor/github.com/tendermint/{abci => tendermint/tools/build}/LICENSE (93%)
 create mode 100644 vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright
 create mode 100644 vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright
 create mode 100644 vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright
 create mode 100644 vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright
 rename vendor/github.com/tendermint/{tmlibs => tendermint/tools/mintnet-kubernetes}/LICENSE (99%)
 rename vendor/github.com/tendermint/{go-crypto => tendermint/tools/tm-bench}/LICENSE (93%)
 create mode 100644 vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE
 delete mode 100644 vendor/github.com/tendermint/tendermint/types/event_buffer.go
 delete mode 100644 vendor/github.com/tendermint/tmlibs/common/array.go
 delete mode 100644 vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
 create mode 100644 vendor/golang.org/x/net/http2/go111.go
 create mode 100644 vendor/golang.org/x/net/http2/not_go111.go
 create mode 100644 vendor/golang.org/x/net/netutil/listen.go

diff --git a/Gopkg.lock b/Gopkg.lock
index 30b89e8a..6eb70eac 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -27,11 +27,11 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:8bd66090a4fc1d422f0cb065a3023bf98f07f44920c3f87da5c3107f990434e4"
+  digest = "1:ad87504ef74b1c36880f9287126dbc8dc4146d86acf902dd776ac6064cc75396"
   name = "github.com/btcsuite/btcd"
   packages = ["btcec"]
   pruneopts = "NUT"
-  revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"
+  revision = "f673a4b563b57b9a95832545c878669a7fa801d9"
 
 [[projects]]
   branch = "master"
@@ -66,7 +66,6 @@
   version = "v1.1.0"
 
 [[projects]]
-  branch = "master"
   digest = "1:294576320c5093015ca9130ed928c84b2fdc8b9db6d136505a513795f3a64e3e"
   name = "github.com/ebuchman/fail-test"
   packages = ["."]
@@ -74,12 +73,16 @@
   revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
 
 [[projects]]
-  digest = "1:5a6f43bb19bea1e7894542a80c6e95bab099c380dd90abebd76d3938e8c66567"
+  digest = "1:2c03593aec9fa7d6969dc7295c636b46369ec2dd45dcf4acfb298281d62528ff"
   name = "github.com/go-kit/kit"
   packages = [
     "log",
     "log/level",
     "log/term",
+    "metrics",
+    "metrics/discard",
+    "metrics/internal/lv",
+    "metrics/prometheus",
   ]
   pruneopts = "NUT"
   revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
@@ -113,8 +116,8 @@
     "types",
   ]
   pruneopts = "UT"
-  revision = "7d68e886eac4f7e34d0d82241a6273d6c304c5cf"
-  version = "v1.1.0"
+  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
+  version = "v1.1.1"
 
 [[projects]]
   digest = "1:713cc7628304d027a7e9edcb52da888a8912d6405250a8d9c8eff6f41dd54398"
@@ -228,20 +231,22 @@
   version = "v1.0.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:22deec4bab258f99bf1e4394c51d976e04ddf9a52f55b0e2912d9cdd503dff16"
+  digest = "1:76463f8d9f141bb3673ccece5fb0d1d0f9588395e94d01253667db733f865b18"
   name = "github.com/prometheus/client_golang"
-  packages = ["prometheus"]
+  packages = [
+    "prometheus",
+    "prometheus/promhttp",
+  ]
   pruneopts = "NUT"
-  revision = "d6a9817c4afc94d51115e4a30d449056a3fbf547"
+  revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
 
 [[projects]]
   branch = "master"
-  digest = "1:53a76eb11bdc815fcf0c757a9648fda0ab6887da13f07587181ff2223b67956c"
+  digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   pruneopts = "NUT"
-  revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
+  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
 
 [[projects]]
   branch = "master"
@@ -257,7 +262,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:b161534d0cce842f1e1eaf087e2c5f28191f0bac0c3070fc9a7e1b6269238d88"
+  digest = "1:37e418257b05a9e9fabbf836df2d8f3613313e80a909da6b9597b759ebca61cd"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -266,7 +271,7 @@
     "xfs",
   ]
   pruneopts = "NUT"
-  revision = "40f013a808ec4fa79def444a1a56de4d1727efcb"
+  revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
 
 [[projects]]
   branch = "master"
@@ -297,7 +302,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:bd62f27525a36697564991b8e6071ff56afa99d3235261924a0212db5ce780bd"
+  digest = "1:922191411ad8f61bcd8018ac127589bb489712c1d1a0ab2497aca4b16de417d2"
   name = "github.com/syndtr/goleveldb"
   packages = [
     "leveldb",
@@ -314,20 +319,7 @@
     "leveldb/util",
   ]
   pruneopts = "NUT"
-  revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697"
-
-[[projects]]
-  digest = "1:b8a05190f9aa37b0ab5b29b480797306b221709ffcb4a0ed9946cc7110849c07"
-  name = "github.com/tendermint/abci"
-  packages = [
-    "client",
-    "example/code",
-    "example/kvstore",
-    "types",
-  ]
-  pruneopts = "UT"
-  revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540"
-  version = "v0.12.0"
+  revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445"
 
 [[projects]]
   branch = "master"
@@ -342,42 +334,44 @@
   revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
 
 [[projects]]
-  digest = "1:0457834dfbe7dfd39c9e2d99147bde666094819e85728eb235a122943660256c"
+  digest = "1:0fbb744c7842baaec7f5b2e0eb9814668bf34c4eb3c4fe6fc9ac856a937a5eff"
   name = "github.com/tendermint/go-amino"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "ed62928576cfcaf887209dc96142cd79cdfff389"
-  version = "0.9.9"
-
-[[projects]]
-  digest = "1:9b0716c497677d6404499b2931cbfac90943d20302b90128b2979b5e1fcb09f5"
-  name = "github.com/tendermint/go-crypto"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
-  version = "v0.6.2"
+  revision = "2106ca61d91029c931fd54968c2bb02dc96b1412"
+  version = "0.10.1"
 
 [[projects]]
-  digest = "1:91de33917aa98c06cb0f32867908071dd6992fff80347c37fc592165051957a0"
+  digest = "1:5a2904670bdaf13e71cd3a726795b2d0ee65a0862d499a1440799bd62618efac"
   name = "github.com/tendermint/iavl"
-  packages = [
-    ".",
-    "sha256truncated",
-  ]
+  packages = ["."]
   pruneopts = "NUT"
-  revision = "c9206995e8f948e99927f5084a88a7e94ca256da"
-  version = "v0.8.0-rc0"
+  revision = "35f66e53d9b01e83b30de68b931f54b2477a94c9"
+  version = "v0.9.2"
 
 [[projects]]
-  digest = "1:0bde6563ae9ebe48770735120ee223a8761e8ec23fde4f36995bbbb4ea69d1ca"
+  digest = "1:46ac5207f9074a8ca246a3fce7aa576f5c5f43a30c8f3ef833e247777fdb8a4f"
   name = "github.com/tendermint/tendermint"
   packages = [
+    "abci/client",
+    "abci/example/code",
+    "abci/example/kvstore",
+    "abci/types",
     "blockchain",
     "config",
     "consensus",
     "consensus/types",
+    "crypto",
+    "crypto/merkle",
+    "crypto/tmhash",
     "evidence",
+    "libs/autofile",
+    "libs/clist",
+    "libs/common",
+    "libs/db",
     "libs/events",
+    "libs/flowrate",
+    "libs/log",
     "libs/pubsub",
     "libs/pubsub/query",
     "mempool",
@@ -401,25 +395,9 @@
     "types",
     "version",
   ]
-  pruneopts = "NUT"
-  revision = "46369a1ab76f274ab47179c4176221842b8207b4"
-  version = "v0.21.0"
-
-[[projects]]
-  digest = "1:1e17077c52b3c68e5ec1059ed078210e8b1030eb30ad23a69b924e7f30bd0132"
-  name = "github.com/tendermint/tmlibs"
-  packages = [
-    "autofile",
-    "clist",
-    "common",
-    "db",
-    "flowrate",
-    "log",
-    "merkle",
-  ]
   pruneopts = "UT"
-  revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38"
-  version = "v0.8.4"
+  revision = "c64a3c74c870d725ba1356f75b4afadf0928c297"
+  version = "v0.22.4"
 
 [[projects]]
   branch = "master"
@@ -458,11 +436,10 @@
     "ssh/terminal",
   ]
   pruneopts = "NUT"
-  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
+  revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9"
 
 [[projects]]
-  branch = "master"
-  digest = "1:cdc8cc9377bcb05ff97816bf398120c0a242f502b5eb2bb27d1d650b523e67e1"
+  digest = "1:15dbe437d38eb2103f6b55348758958a6f85a400ecc16fcb53b3f271d38cd8ea"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -471,14 +448,15 @@
     "http2/hpack",
     "idna",
     "internal/timeseries",
+    "netutil",
     "trace",
   ]
   pruneopts = "NUT"
-  revision = "ed29d75add3d7c4bf7ca65aac0c6df3d1420216f"
+  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
 
 [[projects]]
   branch = "master"
-  digest = "1:569e1719852f85b01939e6040b3eee417913c9e6d0368d3d1460d0af4eb702bc"
+  digest = "1:48419582c83b5715e244977ca617a3ff596dc6808368e3c1dcaf1b3ad2218e53"
   name = "golang.org/x/sys"
   packages = [
     "cpu",
@@ -486,7 +464,7 @@
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "151529c776cdc58ddbe7963ba9af779f3577b419"
+  revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4"
 
 [[projects]]
   digest = "1:a0f29009397dc27c9dc8440f0945d49e5cbb9b72d0b0fc745474d9bfdea2d9f8"
@@ -517,7 +495,7 @@
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "NUT"
-  revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"
+  revision = "fedd2861243fd1a8152376292b921b394c7bef7e"
 
 [[projects]]
   digest = "1:f778941d5c2e46da5e0f5d553d3e80bf70eb40d2e80bb4c649b625b9133f3d5f"
@@ -586,24 +564,25 @@
     "github.com/streadway/simpleuuid",
     "github.com/stretchr/testify/assert",
     "github.com/stretchr/testify/require",
-    "github.com/tendermint/abci/types",
     "github.com/tendermint/go-amino",
-    "github.com/tendermint/go-crypto",
     "github.com/tendermint/iavl",
+    "github.com/tendermint/tendermint/abci/types",
     "github.com/tendermint/tendermint/blockchain",
     "github.com/tendermint/tendermint/config",
     "github.com/tendermint/tendermint/consensus",
     "github.com/tendermint/tendermint/consensus/types",
+    "github.com/tendermint/tendermint/crypto",
+    "github.com/tendermint/tendermint/libs/common",
+    "github.com/tendermint/tendermint/libs/db",
+    "github.com/tendermint/tendermint/libs/log",
     "github.com/tendermint/tendermint/libs/pubsub",
+    "github.com/tendermint/tendermint/mempool",
     "github.com/tendermint/tendermint/node",
     "github.com/tendermint/tendermint/p2p",
     "github.com/tendermint/tendermint/proxy",
     "github.com/tendermint/tendermint/rpc/core/types",
     "github.com/tendermint/tendermint/state",
     "github.com/tendermint/tendermint/types",
-    "github.com/tendermint/tmlibs/common",
-    "github.com/tendermint/tmlibs/db",
-    "github.com/tendermint/tmlibs/log",
     "github.com/tmthrgd/go-hex",
     "golang.org/x/crypto/ed25519",
     "golang.org/x/crypto/ripemd160",
diff --git a/Gopkg.toml b/Gopkg.toml
index d96822fe..4d10b7e1 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -7,34 +7,21 @@
     name = "github.com/gogo/protobuf"
     non-go = false
   [[prune.project]]
-    name = "github.com/tendermint/abci"
+    name = "github.com/tendermint/tendermint"
     non-go = false
   [[prune.project]]
     name = "github.com/tendermint/tmlibs"
     non-go = false
 
-# From Tendermint
-[[constraint]]
+# overriding here because of IAVL
+[[override]]
   name = "github.com/tendermint/tendermint"
-  version = "=0.21.0"
-
-[[constraint]]
-  name = "github.com/tendermint/go-amino"
-  version = "=0.9.9"
+  version = "=0.22.4"
 
 [[constraint]]
   name = "github.com/tendermint/iavl"
-  version = "=0.8.0-rc0"
-
-[[constraint]]
-  name = "github.com/prometheus/client_golang"
-  branch = "master"
-
-[[override]]
-  name = "github.com/tendermint/tmlibs"
-  version = "~0.8.4"
+  version = "=0.9.2"
 
-# We don't care which version Tendermint wants
 [[override]]
   name = "github.com/gogo/protobuf"
   version = "~1.1.0"
diff --git a/README.md b/README.md
index 285449c1..2cd557c7 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ Hyperledger Burrow is a permissioned Ethereum smart-contract blockchain node. It
 Hyperledger Burrow is a permissioned blockchain node that executes smart contract code following the Ethereum specification. Burrow is built for a multi-chain universe with application specific optimization in mind. Burrow as a node is constructed out of three main components: the consensus engine, the permissioned Ethereum virtual machine and the rpc gateway. More specifically Burrow consists of the following:
 
 - **Consensus Engine:** Transactions are ordered and finalised with the Byzantine fault-tolerant Tendermint protocol.  The Tendermint protocol provides high transaction throughput over a set of known validators and prevents the blockchain from forking.
-- **Application Blockchain Interface (ABCI):** The smart contract application interfaces with the consensus engine over the [ABCI](https://github.com/tendermint/abci). The ABCI allows for the consensus engine to remain agnostic from the smart contract application.
+- **Application Blockchain Interface (ABCI):** The smart contract application interfaces with the consensus engine over the [ABCI](https://github.com/tendermint/tendermint/tree/master/abci). The ABCI allows for the consensus engine to remain agnostic from the smart contract application.
 - **Smart Contract Application:** Transactions are validated and applied to the application state in the order that the consensus engine has finalised them. The application state consists of all accounts, the validator set and the name registry. Accounts in Burrow have permissions and either contain smart contract code or correspond to a public-private key pair. A transaction that calls on the smart contract code in a given account will activate the execution of that account’s code in a permissioned virtual machine.
 - **Permissioned Ethereum Virtual Machine:** This virtual machine is built to observe the Ethereum operation code specification and additionally asserts the correct permissions have been granted. Permissioning is enforced through secure native functions and underlies all smart contract code. An arbitrary but finite amount of gas is handed out for every execution to ensure a finite execution duration - “You don’t need money to play, when you have permission to play”.
 - **Application Binary Interface (ABI):** Transactions need to be formulated in a binary format that can be processed by the blockchain node. Current tooling provides functionality to compile, deploy and link solidity smart contracts and formulate transactions to call smart contracts on the chain.
diff --git a/vendor/github.com/btcsuite/btcd/btcec/pubkey.go b/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
index b7491771..cf498075 100644
--- a/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
+++ b/vendor/github.com/btcsuite/btcd/btcec/pubkey.go
@@ -32,8 +32,9 @@ func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, erro
 	x3 := new(big.Int).Mul(x, x)
 	x3.Mul(x3, x)
 	x3.Add(x3, curve.Params().B)
+	x3.Mod(x3, curve.Params().P)
 
-	// now calculate sqrt mod p of x2 + B
+	// Now calculate sqrt mod p of x^3 + B
 	// This code used to do a full sqrt based on tonelli/shanks,
 	// but this was replaced by the algorithms referenced in
 	// https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
@@ -42,9 +43,19 @@ func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, erro
 	if ybit != isOdd(y) {
 		y.Sub(curve.Params().P, y)
 	}
+
+	// Check that y is a square root of x^3 + B.
+	y2 := new(big.Int).Mul(y, y)
+	y2.Mod(y2, curve.Params().P)
+	if y2.Cmp(x3) != 0 {
+		return nil, fmt.Errorf("invalid square root")
+	}
+
+	// Verify that y-coord has expected parity.
 	if ybit != isOdd(y) {
 		return nil, fmt.Errorf("ybit doesn't match oddness")
 	}
+
 	return y, nil
 }
 
diff --git a/vendor/github.com/go-kit/kit/metrics/discard/discard.go b/vendor/github.com/go-kit/kit/metrics/discard/discard.go
new file mode 100644
index 00000000..a0d3b149
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/discard/discard.go
@@ -0,0 +1,40 @@
+// Package discard provides a no-op metrics backend.
+package discard
+
+import "github.com/go-kit/kit/metrics"
+
+type counter struct{}
+
+// NewCounter returns a new no-op counter.
+func NewCounter() metrics.Counter { return counter{} }
+
+// With implements Counter.
+func (c counter) With(labelValues ...string) metrics.Counter { return c }
+
+// Add implements Counter.
+func (c counter) Add(delta float64) {}
+
+type gauge struct{}
+
+// NewGauge returns a new no-op gauge.
+func NewGauge() metrics.Gauge { return gauge{} }
+
+// With implements Gauge.
+func (g gauge) With(labelValues ...string) metrics.Gauge { return g }
+
+// Set implements Gauge.
+func (g gauge) Set(value float64) {}
+
+// Add implements metrics.Gauge.
+func (g gauge) Add(delta float64) {}
+
+type histogram struct{}
+
+// NewHistogram returns a new no-op histogram.
+func NewHistogram() metrics.Histogram { return histogram{} }
+
+// With implements Histogram.
+func (h histogram) With(labelValues ...string) metrics.Histogram { return h }
+
+// Observe implements histogram.
+func (h histogram) Observe(value float64) {}
diff --git a/vendor/github.com/go-kit/kit/metrics/doc.go b/vendor/github.com/go-kit/kit/metrics/doc.go
new file mode 100644
index 00000000..25cda4f7
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/doc.go
@@ -0,0 +1,97 @@
+// Package metrics provides a framework for application instrumentation. It's
+// primarily designed to help you get started with good and robust
+// instrumentation, and to help you migrate from a less-capable system like
+// Graphite to a more-capable system like Prometheus. If your organization has
+// already standardized on an instrumentation system like Prometheus, and has no
+// plans to change, it may make sense to use that system's instrumentation
+// library directly.
+//
+// This package provides three core metric abstractions (Counter, Gauge, and
+// Histogram) and implementations for almost all common instrumentation
+// backends. Each metric has an observation method (Add, Set, or Observe,
+// respectively) used to record values, and a With method to "scope" the
+// observation by various parameters. For example, you might have a Histogram to
+// record request durations, parameterized by the method that's being called.
+//
+//    var requestDuration metrics.Histogram
+//    // ...
+//    requestDuration.With("method", "MyMethod").Observe(time.Since(begin))
+//
+// This allows a single high-level metrics object (requestDuration) to work with
+// many code paths somewhat dynamically. The concept of With is fully supported
+// in some backends like Prometheus, and not supported in other backends like
+// Graphite. So, With may be a no-op, depending on the concrete implementation
+// you choose. Please check the implementation to know for sure. For
+// implementations that don't provide With, it's necessary to fully parameterize
+// each metric in the metric name, e.g.
+//
+//    // Statsd
+//    c := statsd.NewCounter("request_duration_MyMethod_200")
+//    c.Add(1)
+//
+//    // Prometheus
+//    c := prometheus.NewCounter(stdprometheus.CounterOpts{
+//        Name: "request_duration",
+//        ...
+//    }, []string{"method", "status_code"})
+//    c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1)
+//
+// Usage
+//
+// Metrics are dependencies, and should be passed to the components that need
+// them in the same way you'd construct and pass a database handle, or reference
+// to another component. Metrics should *not* be created in the global scope.
+// Instead, instantiate metrics in your func main, using whichever concrete
+// implementation is appropriate for your organization.
+//
+//    latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
+//        Namespace: "myteam",
+//        Subsystem: "foosvc",
+//        Name:      "request_latency_seconds",
+//        Help:      "Incoming request latency in seconds.",
+//    }, []string{"method", "status_code"})
+//
+// Write your components to take the metrics they will use as parameters to
+// their constructors. Use the interface types, not the concrete types. That is,
+//
+//    // NewAPI takes metrics.Histogram, not *prometheus.Summary
+//    func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API {
+//        // ...
+//    }
+//
+//    func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) {
+//        begin := time.Now()
+//        // ...
+//        a.latency.Observe(time.Since(begin).Seconds())
+//    }
+//
+// Finally, pass the metrics as dependencies when building your object graph.
+// This should happen in func main, not in the global scope.
+//
+//    api := NewAPI(store, logger, latency)
+//    http.ListenAndServe("/", api)
+//
+// Note that metrics are "write-only" interfaces.
+//
+// Implementation details
+//
+// All metrics are safe for concurrent use. Considerable design influence has
+// been taken from https://github.com/codahale/metrics and
+// https://prometheus.io.
+//
+// Each telemetry system has different semantics for label values, push vs.
+// pull, support for histograms, etc. These properties influence the design of
+// their respective packages. This table attempts to summarize the key points of
+// distinction.
+//
+//    SYSTEM      DIM  COUNTERS               GAUGES                 HISTOGRAMS
+//    dogstatsd   n    batch, push-aggregate  batch, push-aggregate  native, batch, push-each
+//    statsd      1    batch, push-aggregate  batch, push-aggregate  native, batch, push-each
+//    graphite    1    batch, push-aggregate  batch, push-aggregate  synthetic, batch, push-aggregate
+//    expvar      1    atomic                 atomic                 synthetic, batch, in-place expose
+//    influx      n    custom                 custom                 custom
+//    prometheus  n    native                 native                 native
+//    pcp         1    native                 native                 native
+//    cloudwatch  n    batch push-aggregate   batch push-aggregate   synthetic, batch, push-aggregate
+//
+package metrics
diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
new file mode 100644
index 00000000..8bb1ba09
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
@@ -0,0 +1,14 @@
+package lv
+
+// LabelValues is a type alias that provides validation on its With method.
+// Metrics may include it as a member to help them satisfy With semantics and
+// save some code duplication.
+type LabelValues []string
+
+// With validates the input, and returns a new aggregate labelValues.
+func (lvs LabelValues) With(labelValues ...string) LabelValues {
+	if len(labelValues)%2 != 0 {
+		labelValues = append(labelValues, "unknown")
+	}
+	return append(lvs, labelValues...)
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
new file mode 100644
index 00000000..672c9007
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
@@ -0,0 +1,145 @@
+package lv
+
+import "sync"
+
+// NewSpace returns an N-dimensional vector space.
+func NewSpace() *Space {
+	return &Space{}
+}
+
+// Space represents an N-dimensional vector space. Each name and unique label
+// value pair establishes a new dimension and point within that dimension. Order
+// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1].
+type Space struct {
+	mtx   sync.RWMutex
+	nodes map[string]*node
+}
+
+// Observe locates the time series identified by the name and label values in
+// the vector space, and appends the value to the list of observations.
+func (s *Space) Observe(name string, lvs LabelValues, value float64) {
+	s.nodeFor(name).observe(lvs, value)
+}
+
+// Add locates the time series identified by the name and label values in
+// the vector space, and appends the delta to the last value in the list of
+// observations.
+func (s *Space) Add(name string, lvs LabelValues, delta float64) {
+	s.nodeFor(name).add(lvs, delta)
+}
+
+// Walk traverses the vector space and invokes fn for each non-empty time series
+// which is encountered. Return false to abort the traversal.
+func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+	for name, node := range s.nodes {
+		f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) }
+		if !node.walk(LabelValues{}, f) {
+			return
+		}
+	}
+}
+
+// Reset empties the current space and returns a new Space with the old
+// contents. Reset a Space to get an immutable copy suitable for walking.
+func (s *Space) Reset() *Space {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	n := NewSpace()
+	n.nodes, s.nodes = s.nodes, n.nodes
+	return n
+}
+
+func (s *Space) nodeFor(name string) *node {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	if s.nodes == nil {
+		s.nodes = map[string]*node{}
+	}
+	n, ok := s.nodes[name]
+	if !ok {
+		n = &node{}
+		s.nodes[name] = n
+	}
+	return n
+}
+
+// node exists at a specific point in the N-dimensional vector space of all
+// possible label values. The node collects observations and has child nodes
+// with greater specificity.
+type node struct {
+	mtx          sync.RWMutex
+	observations []float64
+	children     map[pair]*node
+}
+
+type pair struct{ label, value string }
+
+func (n *node) observe(lvs LabelValues, value float64) {
+	n.mtx.Lock()
+	defer n.mtx.Unlock()
+	if len(lvs) == 0 {
+		n.observations = append(n.observations, value)
+		return
+	}
+	if len(lvs) < 2 {
+		panic("too few LabelValues; programmer error!")
+	}
+	head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
+	if n.children == nil {
+		n.children = map[pair]*node{}
+	}
+	child, ok := n.children[head]
+	if !ok {
+		child = &node{}
+		n.children[head] = child
+	}
+	child.observe(tail, value)
+}
+
+func (n *node) add(lvs LabelValues, delta float64) {
+	n.mtx.Lock()
+	defer n.mtx.Unlock()
+	if len(lvs) == 0 {
+		var value float64
+		if len(n.observations) > 0 {
+			value = last(n.observations) + delta
+		} else {
+			value = delta
+		}
+		n.observations = append(n.observations, value)
+		return
+	}
+	if len(lvs) < 2 {
+		panic("too few LabelValues; programmer error!")
+	}
+	head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
+	if n.children == nil {
+		n.children = map[pair]*node{}
+	}
+	child, ok := n.children[head]
+	if !ok {
+		child = &node{}
+		n.children[head] = child
+	}
+	child.add(tail, delta)
+}
+
+func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool {
+	n.mtx.RLock()
+	defer n.mtx.RUnlock()
+	if len(n.observations) > 0 && !fn(lvs, n.observations) {
+		return false
+	}
+	for p, child := range n.children {
+		if !child.walk(append(lvs, p.label, p.value), fn) {
+			return false
+		}
+	}
+	return true
+}
+
+func last(a []float64) float64 {
+	return a[len(a)-1]
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go
new file mode 100644
index 00000000..a7ba1b1f
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/metrics.go
@@ -0,0 +1,25 @@
+package metrics
+
+// Counter describes a metric that accumulates values monotonically.
+// An example of a counter is the number of received HTTP requests.
+type Counter interface {
+	With(labelValues ...string) Counter
+	Add(delta float64)
+}
+
+// Gauge describes a metric that takes specific values over time.
+// An example of a gauge is the current depth of a job queue.
+type Gauge interface {
+	With(labelValues ...string) Gauge
+	Set(value float64)
+	Add(delta float64)
+}
+
+// Histogram describes a metric that takes repeated observations of the same
+// kind of thing, and produces a statistical summary of those observations,
+// typically expressed as quantiles or buckets. An example of a histogram is
+// HTTP request latencies.
+type Histogram interface {
+	With(labelValues ...string) Histogram
+	Observe(value float64)
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go
new file mode 100644
index 00000000..7a364c31
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go
@@ -0,0 +1,165 @@
+// Package prometheus provides Prometheus implementations for metrics.
+// Individual metrics are mapped to their Prometheus counterparts, and
+// (depending on the constructor used) may be automatically registered in the
+// global Prometheus metrics registry.
+package prometheus
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/internal/lv"
+)
+
+// Counter implements Counter, via a Prometheus CounterVec.
+type Counter struct {
+	cv  *prometheus.CounterVec
+	lvs lv.LabelValues
+}
+
+// NewCounterFrom constructs and registers a Prometheus CounterVec,
+// and returns a usable Counter object.
+func NewCounterFrom(opts prometheus.CounterOpts, labelNames []string) *Counter {
+	cv := prometheus.NewCounterVec(opts, labelNames)
+	prometheus.MustRegister(cv)
+	return NewCounter(cv)
+}
+
+// NewCounter wraps the CounterVec and returns a usable Counter object.
+func NewCounter(cv *prometheus.CounterVec) *Counter {
+	return &Counter{
+		cv: cv,
+	}
+}
+
+// With implements Counter.
+func (c *Counter) With(labelValues ...string) metrics.Counter {
+	return &Counter{
+		cv:  c.cv,
+		lvs: c.lvs.With(labelValues...),
+	}
+}
+
+// Add implements Counter.
+func (c *Counter) Add(delta float64) {
+	c.cv.With(makeLabels(c.lvs...)).Add(delta)
+}
+
+// Gauge implements Gauge, via a Prometheus GaugeVec.
+type Gauge struct {
+	gv  *prometheus.GaugeVec
+	lvs lv.LabelValues
+}
+
+// NewGaugeFrom construts and registers a Prometheus GaugeVec,
+// and returns a usable Gauge object.
+func NewGaugeFrom(opts prometheus.GaugeOpts, labelNames []string) *Gauge {
+	gv := prometheus.NewGaugeVec(opts, labelNames)
+	prometheus.MustRegister(gv)
+	return NewGauge(gv)
+}
+
+// NewGauge wraps the GaugeVec and returns a usable Gauge object.
+func NewGauge(gv *prometheus.GaugeVec) *Gauge {
+	return &Gauge{
+		gv: gv,
+	}
+}
+
+// With implements Gauge.
+func (g *Gauge) With(labelValues ...string) metrics.Gauge {
+	return &Gauge{
+		gv:  g.gv,
+		lvs: g.lvs.With(labelValues...),
+	}
+}
+
+// Set implements Gauge.
+func (g *Gauge) Set(value float64) {
+	g.gv.With(makeLabels(g.lvs...)).Set(value)
+}
+
+// Add is supported by Prometheus GaugeVecs.
+func (g *Gauge) Add(delta float64) {
+	g.gv.With(makeLabels(g.lvs...)).Add(delta)
+}
+
+// Summary implements Histogram, via a Prometheus SummaryVec. The difference
+// between a Summary and a Histogram is that Summaries don't require predefined
+// quantile buckets, but cannot be statistically aggregated.
+type Summary struct {
+	sv  *prometheus.SummaryVec
+	lvs lv.LabelValues
+}
+
+// NewSummaryFrom constructs and registers a Prometheus SummaryVec,
+// and returns a usable Summary object.
+func NewSummaryFrom(opts prometheus.SummaryOpts, labelNames []string) *Summary {
+	sv := prometheus.NewSummaryVec(opts, labelNames)
+	prometheus.MustRegister(sv)
+	return NewSummary(sv)
+}
+
+// NewSummary wraps the SummaryVec and returns a usable Summary object.
+func NewSummary(sv *prometheus.SummaryVec) *Summary {
+	return &Summary{
+		sv: sv,
+	}
+}
+
+// With implements Histogram.
+func (s *Summary) With(labelValues ...string) metrics.Histogram {
+	return &Summary{
+		sv:  s.sv,
+		lvs: s.lvs.With(labelValues...),
+	}
+}
+
+// Observe implements Histogram.
+func (s *Summary) Observe(value float64) {
+	s.sv.With(makeLabels(s.lvs...)).Observe(value)
+}
+
+// Histogram implements Histogram via a Prometheus HistogramVec. The difference
+// between a Histogram and a Summary is that Histograms require predefined
+// quantile buckets, and can be statistically aggregated.
+type Histogram struct {
+	hv  *prometheus.HistogramVec
+	lvs lv.LabelValues
+}
+
+// NewHistogramFrom constructs and registers a Prometheus HistogramVec,
+// and returns a usable Histogram object.
+func NewHistogramFrom(opts prometheus.HistogramOpts, labelNames []string) *Histogram {
+	hv := prometheus.NewHistogramVec(opts, labelNames)
+	prometheus.MustRegister(hv)
+	return NewHistogram(hv)
+}
+
+// NewHistogram wraps the HistogramVec and returns a usable Histogram object.
+func NewHistogram(hv *prometheus.HistogramVec) *Histogram {
+	return &Histogram{
+		hv: hv,
+	}
+}
+
+// With implements Histogram.
+func (h *Histogram) With(labelValues ...string) metrics.Histogram {
+	return &Histogram{
+		hv:  h.hv,
+		lvs: h.lvs.With(labelValues...),
+	}
+}
+
+// Observe implements Histogram.
+func (h *Histogram) Observe(value float64) {
+	h.hv.With(makeLabels(h.lvs...)).Observe(value)
+}
+
+func makeLabels(labelValues ...string) prometheus.Labels {
+	labels := prometheus.Labels{}
+	for i := 0; i < len(labelValues); i += 2 {
+		labels[labelValues[i]] = labelValues[i+1]
+	}
+	return labels
+}
diff --git a/vendor/github.com/go-kit/kit/metrics/timer.go b/vendor/github.com/go-kit/kit/metrics/timer.go
new file mode 100644
index 00000000..e12d9cd5
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/metrics/timer.go
@@ -0,0 +1,36 @@
+package metrics
+
+import "time"
+
+// Timer acts as a stopwatch, sending observations to a wrapped histogram.
+// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)).
+type Timer struct {
+	h Histogram
+	t time.Time
+	u time.Duration
+}
+
+// NewTimer wraps the given histogram and records the current time.
+func NewTimer(h Histogram) *Timer {
+	return &Timer{
+		h: h,
+		t: time.Now(),
+		u: time.Second,
+	}
+}
+
+// ObserveDuration captures the number of seconds since the timer was
+// constructed, and forwards that observation to the histogram.
+func (t *Timer) ObserveDuration() {
+	d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
+	if d < 0 {
+		d = 0
+	}
+	t.h.Observe(d)
+}
+
+// Unit sets the unit of the float64 emitted by the timer.
+// By default, the timer emits seconds.
+func (t *Timer) Unit(u time.Duration) {
+	t.u = u
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 623d3d83..3c9bae24 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -29,24 +29,35 @@ type Collector interface {
 	// collected by this Collector to the provided channel and returns once
 	// the last descriptor has been sent. The sent descriptors fulfill the
 	// consistency and uniqueness requirements described in the Desc
-	// documentation. (It is valid if one and the same Collector sends
-	// duplicate descriptors. Those duplicates are simply ignored. However,
-	// two different Collectors must not send duplicate descriptors.) This
-	// method idempotently sends the same descriptors throughout the
-	// lifetime of the Collector. If a Collector encounters an error while
-	// executing this method, it must send an invalid descriptor (created
-	// with NewInvalidDesc) to signal the error to the registry.
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
 	Describe(chan<- *Desc)
 	// Collect is called by the Prometheus registry when collecting
 	// metrics. The implementation sends each collected metric via the
 	// provided channel and returns once the last metric has been sent. The
-	// descriptor of each sent metric is one of those returned by
-	// Describe. Returned metrics that share the same descriptor must differ
-	// in their variable label values. This method may be called
-	// concurrently and must therefore be implemented in a concurrency safe
-	// way. Blocking occurs at the expense of total performance of rendering
-	// all registered metrics. Ideally, Collector implementations support
-	// concurrent readers.
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
 	Collect(chan<- Metric)
 }
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 83c3657d..5d9525de 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -121,7 +121,17 @@
 // NewConstSummary (and their respective Must… versions). That will happen in
 // the Collect method. The Describe method has to return separate Desc
 // instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
 //
 // The Collector example illustrates the use case. You can also look at the
 // source code of the processCollector (mirroring process metrics), the
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 00000000..9c1c66dc
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+const (
+	closeNotifier = 1 << iota
+	flusher
+	hijacker
+	readerFrom
+	pusher
+)
+
+type delegator interface {
+	http.ResponseWriter
+
+	Status() int
+	Written() int64
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	handler, method    string
+	status             int
+	written            int64
+	wroteHeader        bool
+	observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+	return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+	return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+	if r.observeWriteHeader != nil {
+		r.observeWriteHeader(code)
+	}
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
+	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d *flusherDelegator) Flush() {
+	d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+	if !d.wroteHeader {
+		d.WriteHeader(http.StatusOK)
+	}
+	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+	d.written += n
+	return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+	// TODO(beorn7): Code generation would help here.
+	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+		return d
+	}
+	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+		return &closeNotifierDelegator{d}
+	}
+	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+		return &flusherDelegator{d}
+	}
+	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+		return struct {
+			*responseWriterDelegator
+			http.Flusher
+			http.CloseNotifier
+		}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+		return &hijackerDelegator{d}
+	}
+	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+		}{d, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+		return readerFromDelegator{d}
+	}
+	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+		}{d, &readerFromDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 00000000..75a905e2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return &pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, &pusherDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 00000000..8bb9b8b6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 00000000..01357374
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,311 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+	buf := bufPool.Get()
+	if buf == nil {
+		return &bytes.Buffer{}
+	}
+	return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+	buf.Reset()
+	bufPool.Put(buf)
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	var inFlightSem chan struct{}
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+
+	h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(w, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			case HTTPErrorOnError:
+				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
+				case HTTPErrorOnError:
+					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+			opts.ErrorLog.Println("error while sending encoded metrics:", err)
+		}
+		// TODO(beorn7): Consider streaming serving of metrics.
+	})
+
+	if opts.Timeout <= 0 {
+		return h
+	}
+	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+		"Exceeded configured timeout of %v.\n",
+		opts.Timeout,
+	))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+	cnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "promhttp_metric_handler_requests_total",
+			Help: "Total number of scrapes by HTTP status code.",
+		},
+		[]string{"code"},
+	)
+	// Initialize the most likely HTTP status codes.
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+	cnt.WithLabelValues("503")
+	if err := reg.Register(cnt); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "promhttp_metric_handler_requests_in_flight",
+		Help: "Current number of scrapes being served.",
+	})
+	if err := reg.Register(gge); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			gge = are.ExistingCollector.(prometheus.Gauge)
+		} else {
+			panic(err)
+		}
+	}
+
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Serve an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError HandlerErrorHandling = iota
+	// Ignore errors and try to serve as many metrics as possible.  However,
+	// if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. It is recommended to at least
+	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
+	// errors completely.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling provided ErrorLog
+	// is not nil.
+	ErrorHandling HandlerErrorHandling
+	// If DisableCompression is true, the handler will never compress the
+	// response, even if requested by the client.
+	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 ServiceUnavailable and a suitable Message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP requests as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested.  It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+	if compressionDisabled {
+		return writer, ""
+	}
+	header := request.Header.Get(acceptEncodingHeader)
+	parts := strings.Split(header, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return gzip.NewWriter(writer), "gzip"
+		}
+	}
+	return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 00000000..86fd5644
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec.  The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 00000000..a034d1ec
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis. All hooks are optional: nil hooks
+// are skipped by InstrumentRoundTripperTrace.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// The trace is installed on the request's own context, so any deadline,
+// cancellation, or previously installed ClientTrace is preserved.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		// BUG FIX: this previously used context.Background(), which silently
+		// dropped the request's deadline/cancellation and any caller-installed
+		// httptrace. Request.Context never returns nil in the stdlib, but keep
+		// a defensive fallback (it also keeps the "context" import in use).
+		ctx := r.Context()
+		if ctx == nil {
+			ctx = context.Background()
+		}
+		r = r.WithContext(httptrace.WithClientTrace(ctx, trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 00000000..9db24380
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. It is an
+// arbitrary value that is vanishingly unlikely to occur as a real const-label
+// value. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		g.Inc()
+		// defer ensures the gauge is decremented even if the wrapped
+		// handler panics.
+		defer g.Dec()
+		next.ServeHTTP(w, r)
+	})
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	// Only wrap the ResponseWriter in a delegator when the status code is
+	// actually needed as a label value.
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			now := time.Now()
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		next.ServeHTTP(w, r)
+		// The status argument (0) is ignored by labels() when code is false.
+		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+	})
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec.  The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(counter)
+
+	// Only pay for the delegator wrapper when the status code is needed.
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			counter.With(labels(code, method, r.Method, d.Status())).Inc()
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		// The status argument (0) is ignored by labels() when code is false.
+		counter.With(labels(code, method, r.Method, 0)).Inc()
+	})
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		// The delegator invokes this callback when the handler writes the
+		// response headers, passing the status code being written.
+		d := newDelegator(w, func(status int) {
+			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+		})
+		next.ServeHTTP(d, r)
+	})
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	// Only pay for the delegator wrapper when the status code is needed.
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			size := computeApproximateRequestSize(r)
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		size := computeApproximateRequestSize(r)
+		// The status argument (0) is ignored by labels() when code is false.
+		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+	})
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+	// NOTE(review): unlike its siblings, this function returns http.Handler
+	// rather than http.HandlerFunc — an upstream inconsistency, kept as-is
+	// for vendor fidelity.
+	code, method := checkLabels(obs)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		d := newDelegator(w, nil)
+		next.ServeHTTP(d, r)
+		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+	})
+}
+
+// checkLabels inspects the Collector's single Desc and reports whether its
+// variable (non-const, non-curried) labels include "code" and/or "method".
+// It panics for any other variable label name, for zero or multiple Descs,
+// or if the metric cannot be written out for inspection.
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+	// TODO(beorn7): Remove this hacky way to check for instance labels
+	// once Descriptors can have their dimensionality queried.
+	var (
+		desc *prometheus.Desc
+		m    prometheus.Metric
+		pm   dto.Metric
+		lvs  []string
+	)
+
+	// Get the Desc from the Collector.
+	// NOTE(review): descc has capacity 1, so a Collector that describes more
+	// than one Desc would block inside Describe before the "more than one"
+	// panic below can fire — TODO confirm upstream intent.
+	descc := make(chan *prometheus.Desc, 1)
+	c.Describe(descc)
+
+	select {
+	case desc = <-descc:
+	default:
+		panic("no description provided by collector")
+	}
+	select {
+	case <-descc:
+		panic("more than one description provided by collector")
+	default:
+	}
+
+	close(descc)
+
+	// Create a ConstMetric with the Desc. Since we don't know how many
+	// variable labels there are, try for as long as it needs.
+	// (NewConstMetric fails while the number of supplied label values does
+	// not match the Desc; each failed round appends one more magicString.)
+	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+	}
+
+	// Write out the metric into a proto message and look at the labels.
+	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
+	// If the label is curried, it doesn't interest us.
+	// In all other cases, only "code" or "method" is allowed.
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+// isLabelCurried reports whether the given label has already been curried on
+// the Collector. It panics for Collector types other than *CounterVec and
+// ObserverVec.
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		// CurryWith succeeding means the label was still free, i.e. not
+		// curried yet.
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+// labels builds the label map for an observation. status is only consulted
+// when code is true; reqMethod only when method is true.
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+// computeApproximateRequestSize returns a rough size of r in bytes: the URL,
+// method, proto, header names and values, host, and — when known (i.e. not
+// -1) — the ContentLength. The request body itself is never read.
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	// ContentLength is -1 when the length is unknown; don't subtract it.
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+// sanitizeMethod normalizes an HTTP method name to lower case for use as a
+// label value. Common methods are matched explicitly; anything else falls
+// back to strings.ToLower.
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+//
+// Known status codes are returned as constant strings; anything else falls
+// through to strconv.Itoa (the switch is presumably an allocation-free fast
+// path — TODO confirm).
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index fdb7badf..5c5cdfe2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,7 +15,6 @@ package prometheus
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"os"
 	"runtime"
@@ -68,7 +67,8 @@ func NewRegistry() *Registry {
 
 // NewPedanticRegistry returns a registry that checks during collection if each
 // collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
 //
 // Usually, a Registry will be happy as long as the union of all collected
 // Metrics is consistent and valid even if some metrics are not consistent with
@@ -98,6 +98,14 @@ type Registerer interface {
 	// returned error is an instance of AlreadyRegisteredError, which
 	// contains the previously registered Collector.
 	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	//
 	// It is in general not safe to register the same Collector multiple
 	// times concurrently.
 	Register(Collector) error
@@ -108,7 +116,9 @@ type Registerer interface {
 	// Unregister unregisters the Collector that equals the Collector passed
 	// in as an argument.  (Two Collectors are considered equal if their
 	// Describe method yields the same set of descriptors.) The function
-	// returns whether a Collector was unregistered.
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
 	//
 	// Note that even after unregistering, it will not be possible to
 	// register a new Collector that is inconsistent with the unregistered
@@ -243,6 +253,7 @@ type Registry struct {
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
 	descIDs               map[uint64]struct{}
 	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
 	pedanticChecksEnabled bool
 }
 
@@ -300,9 +311,10 @@ func (r *Registry) Register(c Collector) error {
 			}
 		}
 	}
-	// Did anything happen at all?
+	// A Collector yielding no Desc at all is considered unchecked.
 	if len(newDescIDs) == 0 {
-		return errors.New("collector has no descriptors")
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
 		return AlreadyRegisteredError{
@@ -376,19 +388,24 @@ func (r *Registry) MustRegister(cs ...Collector) {
 // Gather implements Gatherer.
 func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	var (
-		metricChan        = make(chan Metric, capMetricChan)
-		metricHashes      = map[uint64]struct{}{}
-		wg                sync.WaitGroup
-		errs              MultiError          // The collected errors to return in the end.
-		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
 	)
 
 	r.mtx.RLock()
-	goroutineBudget := len(r.collectorsByID)
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
 	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
-	collectors := make(chan Collector, len(r.collectorsByID))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
 	for _, collector := range r.collectorsByID {
-		collectors <- collector
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
 	}
 	// In case pedantic checks are enabled, we have to copy the map before
 	// giving up the RLock.
@@ -405,12 +422,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	collectWorker := func() {
 		for {
 			select {
-			case collector := <-collectors:
-				collector.Collect(metricChan)
-				wg.Done()
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
 			default:
 				return
 			}
+			wg.Done()
 		}
 	}
 
@@ -418,51 +437,94 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	go collectWorker()
 	goroutineBudget--
 
-	// Close the metricChan once all collectors are collected.
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
 	go func() {
 		wg.Wait()
-		close(metricChan)
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
 	}()
 
-	// Drain metricChan in case of premature return.
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
 	defer func() {
-		for range metricChan {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
 		}
 	}()
 
-collectLoop:
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
 	for {
 		select {
-		case metric, ok := <-metricChan:
+		case metric, ok := <-cmc:
 			if !ok {
-				// metricChan is closed, we are done.
-				break collectLoop
+				cmc = nil
+				break
 			}
 			errs.Append(processMetric(
 				metric, metricFamiliesByName,
 				metricHashes,
 				registeredDescIDs,
 			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
 		default:
-			if goroutineBudget <= 0 || len(collectors) == 0 {
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
 				// All collectors are already being worked on or
 				// we have already as many goroutines started as
-				// there are collectors. Just process metrics
-				// from now on.
-				for metric := range metricChan {
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
 					errs.Append(processMetric(
 						metric, metricFamiliesByName,
 						metricHashes,
 						registeredDescIDs,
 					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
 				}
-				break collectLoop
+				break
 			}
 			// Start more workers.
 			go collectWorker()
 			goroutineBudget--
 			runtime.Gosched()
 		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the logic above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
 	}
 	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index b065f868..9805432c 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,34 +1,23 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: metrics.proto
-// DO NOT EDIT!
-
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
-	metrics.proto
-
-It has these top-level messages:
-	LabelPair
-	Gauge
-	Counter
-	Quantile
-	Summary
-	Untyped
-	Histogram
-	Bucket
-	Metric
-	MetricFamily
-*/
-package io_prometheus_client
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
 
 import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
 import math "math"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
+var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
 type MetricType int32
 
 const (
@@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 	*x = MetricType(value)
 	return nil
 }
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
 
 type LabelPair struct {
-	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Value            *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte  `json:"-"`
+	Name                 *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value                *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+	return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
 
 func (m *LabelPair) GetName() string {
 	if m != nil && m.Name != nil {
@@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string {
 }
 
 type Gauge struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+	return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+	xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
 
 func (m *Gauge) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 {
 }
 
 type Counter struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (*Counter) ProtoMessage()    {}
+func (*Counter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+	return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
 
 func (m *Counter) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 {
 }
 
 type Quantile struct {
-	Quantile         *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
-	Value            *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Quantile) Reset()         { *m = Quantile{} }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (*Quantile) ProtoMessage()    {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+	return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+	xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
 
 func (m *Quantile) GetQuantile() float64 {
 	if m != nil && m.Quantile != nil {
@@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
 }
 
 type Summary struct {
-	SampleCount      *uint64     `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
-	SampleSum        *float64    `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
-	Quantile         []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
+	SampleCount          *uint64     `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum            *float64    `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Quantile             []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
 func (m *Summary) Reset()         { *m = Summary{} }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (*Summary) ProtoMessage()    {}
+func (*Summary) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+	return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+	xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
 
 func (m *Summary) GetSampleCount() uint64 {
 	if m != nil && m.SampleCount != nil {
@@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile {
 }
 
 type Untyped struct {
-	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Untyped) Reset()         { *m = Untyped{} }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (*Untyped) ProtoMessage()    {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+	return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+	xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
 
 func (m *Untyped) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
 }
 
 type Histogram struct {
-	SampleCount      *uint64   `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
-	SampleSum        *float64  `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
-	Bucket           []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
-	XXX_unrecognized []byte    `json:"-"`
+	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
 func (m *Histogram) Reset()         { *m = Histogram{} }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (*Histogram) ProtoMessage()    {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+	return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
 
 func (m *Histogram) GetSampleCount() uint64 {
 	if m != nil && m.SampleCount != nil {
@@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
 }
 
 type Bucket struct {
-	CumulativeCount  *uint64  `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
-	UpperBound       *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
-	XXX_unrecognized []byte   `json:"-"`
+	CumulativeCount      *uint64  `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+	UpperBound           *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (*Bucket) ProtoMessage()    {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+	return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+	xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
 
 func (m *Bucket) GetCumulativeCount() uint64 {
 	if m != nil && m.CumulativeCount != nil {
@@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 {
 }
 
 type Metric struct {
-	Label            []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
-	Gauge            *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
-	Counter          *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
-	Summary          *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
-	Untyped          *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
-	Histogram        *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
-	TimestampMs      *int64       `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
-	XXX_unrecognized []byte       `json:"-"`
+	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+	Counter              *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+	Summary              *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+	Untyped              *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+	Histogram            *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+	TimestampMs          *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
 }
 
 func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
+func (*Metric) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+	return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
 
 func (m *Metric) GetLabel() []*LabelPair {
 	if m != nil {
@@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 {
 }
 
 type MetricFamily struct {
-	Name             *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Help             *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
-	Type             *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
-	Metric           []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
+	Name                 *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Help                 *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+	Type                 *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+	Metric               []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
 func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (*MetricFamily) ProtoMessage()    {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+	return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
 
 func (m *MetricFamily) GetName() string {
 	if m != nil && m.Name != nil {
@@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
 }
 
 func init() {
+	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+	proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+	proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
 	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
 }
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+	// 591 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+	0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+	0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+	0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+	0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+	0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+	0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+	0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+	0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+	0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+	0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+	0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+	0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+	0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+	0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+	0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+	0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+	0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+	0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+	0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+	0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+	0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+	0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+	0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+	0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+	0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+	0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+	0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+	0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+	0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+	0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+	0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+	0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+	0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+	0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+	0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+	0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index e95ddbc6..7a8a1e09 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -39,8 +39,11 @@ const (
 	statVersion10 = "1.0"
 	statVersion11 = "1.1"
 
-	fieldTransport10Len = 10
-	fieldTransport11Len = 13
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -186,6 +189,8 @@ type NFSOperationStats struct {
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
 // responses.
 type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
 	// The local port used for the NFS mount.
 	Port uint64
 	// Number of times the client has had to establish a connection from scratch
@@ -360,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
 			}
 
-			tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
 			if err != nil {
 				return nil, err
 			}
@@ -522,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 // parseNFSTransportStats parses a NFSTransportStats line using an input set of
 // integer fields matched to a specific stats version.
 func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
 	switch statVersion {
 	case statVersion10:
-		if len(ss) != fieldTransport10Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
 		}
 	case statVersion11:
-		if len(ss) != fieldTransport11Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
 		}
 	default:
@@ -536,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
-	// in a v1.0 response.
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
 	//
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11Len)
+	ns := make([]uint64, fieldTransport11TCPLen)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -551,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		ns[i] = n
 	}
 
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
 	return &NFSTransportStats{
+		Protocol:                 protocol,
 		Port:                     ns[0],
 		Bind:                     ns[1],
 		Connect:                  ns[2],
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index b6563e87..28e50906 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool {
 	return v.needCompaction()
 }
 
+// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted.
+func (db *DB) resumeWrite() bool {
+	v := db.s.version()
+	defer v.release()
+	if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+		return true
+	}
+	return false
+}
+
 func (db *DB) pauseCompaction(ch chan<- struct{}) {
 	select {
 	case ch <- struct{}{}:
@@ -653,6 +663,7 @@ type cCmd interface {
 }
 
 type cAuto struct {
+	// Note for table compaction, an empty ackC represents it's a compaction waiting command.
 	ackC chan<- error
 }
 
@@ -765,8 +776,10 @@ func (db *DB) mCompaction() {
 }
 
 func (db *DB) tCompaction() {
-	var x cCmd
-	var ackQ []cCmd
+	var (
+		x           cCmd
+		ackQ, waitQ []cCmd
+	)
 
 	defer func() {
 		if x := recover(); x != nil {
@@ -778,6 +791,10 @@ func (db *DB) tCompaction() {
 			ackQ[i].ack(ErrClosed)
 			ackQ[i] = nil
 		}
+		for i := range waitQ {
+			waitQ[i].ack(ErrClosed)
+			waitQ[i] = nil
+		}
 		if x != nil {
 			x.ack(ErrClosed)
 		}
@@ -795,12 +812,25 @@ func (db *DB) tCompaction() {
 				return
 			default:
 			}
+			// Resume write operation as soon as possible.
+			if len(waitQ) > 0 && db.resumeWrite() {
+				for i := range waitQ {
+					waitQ[i].ack(nil)
+					waitQ[i] = nil
+				}
+				waitQ = waitQ[:0]
+			}
 		} else {
 			for i := range ackQ {
 				ackQ[i].ack(nil)
 				ackQ[i] = nil
 			}
 			ackQ = ackQ[:0]
+			for i := range waitQ {
+				waitQ[i].ack(nil)
+				waitQ[i] = nil
+			}
+			waitQ = waitQ[:0]
 			select {
 			case x = <-db.tcompCmdC:
 			case ch := <-db.tcompPauseC:
@@ -813,7 +843,11 @@ func (db *DB) tCompaction() {
 		if x != nil {
 			switch cmd := x.(type) {
 			case cAuto:
-				ackQ = append(ackQ, x)
+				if cmd.ackC != nil {
+					waitQ = append(waitQ, x)
+				} else {
+					ackQ = append(ackQ, x)
+				}
 			case cRange:
 				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
 			default:
diff --git a/vendor/github.com/tendermint/go-amino/amino.go b/vendor/github.com/tendermint/go-amino/amino.go
index 93831f07..1f0f2a53 100644
--- a/vendor/github.com/tendermint/go-amino/amino.go
+++ b/vendor/github.com/tendermint/go-amino/amino.go
@@ -11,62 +11,105 @@ import (
 )
 
 //----------------------------------------
-// Typ3 and Typ4
+// Global methods for global sealed codec.
+var gcdc *Codec
+
+func init() {
+	gcdc = NewCodec().Seal()
+}
+
+func MarshalBinary(o interface{}) ([]byte, error) {
+	return gcdc.MarshalBinary(o)
+}
+
+func MarshalBinaryWriter(w io.Writer, o interface{}) (n int64, err error) {
+	return gcdc.MarshalBinaryWriter(w, o)
+}
+
+func MustMarshalBinary(o interface{}) []byte {
+	return gcdc.MustMarshalBinary(o)
+}
+
+func MarshalBinaryBare(o interface{}) ([]byte, error) {
+	return gcdc.MarshalBinaryBare(o)
+}
+
+func MustMarshalBinaryBare(o interface{}) []byte {
+	return gcdc.MustMarshalBinaryBare(o)
+}
+
+func UnmarshalBinary(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalBinary(bz, ptr)
+}
+
+func UnmarshalBinaryReader(r io.Reader, ptr interface{}, maxSize int64) (n int64, err error) {
+	return gcdc.UnmarshalBinaryReader(r, ptr, maxSize)
+}
+
+func MustUnmarshalBinary(bz []byte, ptr interface{}) {
+	gcdc.MustUnmarshalBinary(bz, ptr)
+}
+
+func UnmarshalBinaryBare(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalBinaryBare(bz, ptr)
+}
+
+func MustUnmarshalBinaryBare(bz []byte, ptr interface{}) {
+	gcdc.MustUnmarshalBinaryBare(bz, ptr)
+}
+
+func MarshalJSON(o interface{}) ([]byte, error) {
+	return gcdc.MarshalJSON(o)
+}
+
+func UnmarshalJSON(bz []byte, ptr interface{}) error {
+	return gcdc.UnmarshalJSON(bz, ptr)
+}
+
+func MarshalJSONIndent(o interface{}, prefix, indent string) ([]byte, error) {
+	return gcdc.MarshalJSONIndent(o, prefix, indent)
+}
+
+//----------------------------------------
+// Typ3
 
 type Typ3 uint8
-type Typ4 uint8 // Typ3 | 0x08 (pointer bit)
 
 const (
 	// Typ3 types
 	Typ3_Varint     = Typ3(0)
 	Typ3_8Byte      = Typ3(1)
 	Typ3_ByteLength = Typ3(2)
-	Typ3_Struct     = Typ3(3)
-	Typ3_StructTerm = Typ3(4)
-	Typ3_4Byte      = Typ3(5)
-	Typ3_List       = Typ3(6)
-	Typ3_Interface  = Typ3(7)
-
-	// Typ4 bit
-	Typ4_Pointer = Typ4(0x08)
+	//Typ3_Struct     = Typ3(3)
+	//Typ3_StructTerm = Typ3(4)
+	Typ3_4Byte = Typ3(5)
+	//Typ3_List       = Typ3(6)
+	//Typ3_Interface  = Typ3(7)
 )
 
 func (typ Typ3) String() string {
 	switch typ {
 	case Typ3_Varint:
-		return "Varint"
+		return "(U)Varint"
 	case Typ3_8Byte:
 		return "8Byte"
 	case Typ3_ByteLength:
 		return "ByteLength"
-	case Typ3_Struct:
-		return "Struct"
-	case Typ3_StructTerm:
-		return "StructTerm"
+	//case Typ3_Struct:
+	//	return "Struct"
+	//case Typ3_StructTerm:
+	//	return "StructTerm"
 	case Typ3_4Byte:
 		return "4Byte"
-	case Typ3_List:
-		return "List"
-	case Typ3_Interface:
-		return "Interface"
+	//case Typ3_List:
+	//	return "List"
+	//case Typ3_Interface:
+	//	return "Interface"
 	default:
 		return fmt.Sprintf("<Invalid Typ3 %X>", byte(typ))
 	}
 }
 
-func (typ Typ4) Typ3() Typ3      { return Typ3(typ & 0x07) }
-func (typ Typ4) IsPointer() bool { return (typ & 0x08) > 0 }
-func (typ Typ4) String() string {
-	if typ&0xF0 != 0 {
-		return fmt.Sprintf("<Invalid Typ4 %X>", byte(typ))
-	}
-	if typ&0x08 != 0 {
-		return "*" + Typ3(typ&0x07).String()
-	} else {
-		return Typ3(typ).String()
-	}
-}
-
 //----------------------------------------
 // *Codec methods
 
@@ -146,12 +189,18 @@ func (cdc *Codec) MarshalBinaryBare(o interface{}) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	err = cdc.encodeReflectBinary(buf, info, rv, FieldOptions{})
+	err = cdc.encodeReflectBinary(buf, info, rv, FieldOptions{}, true)
 	if err != nil {
 		return nil, err
 	}
 	bz = buf.Bytes()
 
+	// If registered concrete, prepend prefix bytes.
+	if info.Registered {
+		pb := info.Prefix.Bytes()
+		bz = append(pb, bz...)
+	}
+
 	return bz, nil
 }
 
@@ -256,25 +305,34 @@ func (cdc *Codec) MustUnmarshalBinary(bz []byte, ptr interface{}) {
 
 // UnmarshalBinaryBare will panic if ptr is a nil-pointer.
 func (cdc *Codec) UnmarshalBinaryBare(bz []byte, ptr interface{}) error {
-	if len(bz) == 0 {
-		return errors.New("UnmarshalBinaryBare cannot decode empty bytes")
-	}
 
-	rv, rt := reflect.ValueOf(ptr), reflect.TypeOf(ptr)
+	rv := reflect.ValueOf(ptr)
 	if rv.Kind() != reflect.Ptr {
 		panic("Unmarshal expects a pointer")
 	}
-	rv, rt = rv.Elem(), rt.Elem()
+	rv = rv.Elem()
+	rt := rv.Type()
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return err
 	}
-	n, err := cdc.decodeReflectBinary(bz, info, rv, FieldOptions{})
+	// If registered concrete, consume and verify prefix bytes.
+	if info.Registered {
+		pb := info.Prefix.Bytes()
+		if len(bz) < 4 {
+			return fmt.Errorf("UnmarshalBinaryBare expected to read prefix bytes %X (since it is registered concrete) but got %X", pb, bz)
+		} else if !bytes.Equal(bz[:4], pb) {
+			return fmt.Errorf("UnmarshalBinaryBare expected to read prefix bytes %X (since it is registered concrete) but got %X...", pb, bz[:4])
+		}
+		bz = bz[4:]
+	}
+	// Decode contents into rv.
+	n, err := cdc.decodeReflectBinary(bz, info, rv, FieldOptions{}, true)
 	if err != nil {
-		return err
+		return fmt.Errorf("unmarshal to %v failed after %d bytes (%v): %X", info.Type, n, err, bz)
 	}
 	if n != len(bz) {
-		return fmt.Errorf("Unmarshal didn't read all bytes. Expected to read %v, only read %v", len(bz), n)
+		return fmt.Errorf("unmarshal to %v didn't read all bytes. Expected to read %v, only read %v: %X", info.Type, len(bz), n, bz)
 	}
 	return nil
 }
@@ -293,25 +351,37 @@ func (cdc *Codec) MarshalJSON(o interface{}) ([]byte, error) {
 		return []byte("null"), nil
 	}
 	rt := rv.Type()
-
-	// Note that we can't yet skip directly
-	// to checking if a type implements
-	// json.Marshaler because in some cases
-	// var s GenericInterface = t1(v1)
-	// var t GenericInterface = t2(v1)
-	// but we need to be able to encode
-	// both s and t disambiguated, so:
-	//    {"type":<disfix>, "value":<data>}
-	// for the above case.
-
 	w := new(bytes.Buffer)
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return nil, err
 	}
+
+	// Write the disfix wrapper if it is a registered concrete type.
+	if info.Registered {
+		// Part 1:
+		err = writeStr(w, _fmt(`{"type":"%s","value":`, info.Name))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Write the rest from rv.
 	if err := cdc.encodeReflectJSON(w, info, rv, FieldOptions{}); err != nil {
 		return nil, err
 	}
+
+	// disfix wrapper continued...
+	if info.Registered {
+		// Part 2:
+		if err != nil {
+			return nil, err
+		}
+		err = writeStr(w, `}`)
+		if err != nil {
+			return nil, err
+		}
+	}
 	return w.Bytes(), nil
 }
 
@@ -324,20 +394,25 @@ func (cdc *Codec) UnmarshalJSON(bz []byte, ptr interface{}) error {
 	if rv.Kind() != reflect.Ptr {
 		return errors.New("UnmarshalJSON expects a pointer")
 	}
-
-	// If the type implements json.Unmarshaler, just
-	// automatically respect that and skip to it.
-	// if rv.Type().Implements(jsonUnmarshalerType) {
-	// 	return rv.Interface().(json.Unmarshaler).UnmarshalJSON(bz)
-	// }
-
-	// 1. Dereference until we find the first addressable type.
 	rv = rv.Elem()
 	rt := rv.Type()
 	info, err := cdc.getTypeInfo_wlock(rt)
 	if err != nil {
 		return err
 	}
+	// If registered concrete, consume and verify type wrapper.
+	if info.Registered {
+		// Consume type wrapper info.
+		name, bz_, err := decodeInterfaceJSON(bz)
+		if err != nil {
+			return err
+		}
+		// Check name against info.
+		if name != info.Name {
+			return fmt.Errorf("UnmarshalJSON wants to decode a %v but found a %v", info.Name, name)
+		}
+		bz = bz_
+	}
 	return cdc.decodeReflectJSON(bz, info, rv, FieldOptions{})
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/binary-decode.go b/vendor/github.com/tendermint/go-amino/binary-decode.go
index c1fac922..6e18bc37 100644
--- a/vendor/github.com/tendermint/go-amino/binary-decode.go
+++ b/vendor/github.com/tendermint/go-amino/binary-decode.go
@@ -6,17 +6,18 @@ import (
 	"reflect"
 	"time"
 
+	"encoding/binary"
 	"github.com/davecgh/go-spew/spew"
 )
 
 //----------------------------------------
 // cdc.decodeReflectBinary
 
-// This is the main entrypoint for decoding all types from binary form.  This
+// This is the main entrypoint for decoding all types from binary form. This
 // function calls decodeReflectBinary*, and generally those functions should
 // only call this one, for the prefix bytes are consumed here when present.
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -24,62 +25,12 @@ func (cdc *Codec) decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Valu
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(D) decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(D) decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), fopts: %v)\n",
+			bz, info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(D) -> n: %v, err: %v\n", n, err)
 		}()
 	}
-
-	// TODO Read the disamb bytes here if necessary.
-	// e.g. rv isn't an interface, and
-	// info.ConcreteType.AlwaysDisambiguate.  But we don't support
-	// this yet.
-
-	// Read prefix+typ3 bytes if registered.
-	if info.Registered {
-		if len(bz) < PrefixBytesLen {
-			err = errors.New("EOF skipping prefix bytes.")
-			return
-		}
-		// Check prefix bytes.
-		prefix3 := NewPrefixBytes(bz[:PrefixBytesLen])
-		var prefix, typ = prefix3.SplitTyp3()
-		if info.Prefix != prefix {
-			panic("should not happen")
-		}
-		// Check that typ3 in prefix bytes is correct.
-		err = checkTyp3(info.Type, typ, opts)
-		if err != nil {
-			return
-		}
-		// Consume prefix.  Yum.
-		bz = bz[PrefixBytesLen:]
-		n += PrefixBytesLen
-	}
-
-	_n := 0
-	_n, err = cdc._decodeReflectBinary(bz, info, rv, opts)
-	slide(&bz, &n, _n)
-	return
-}
-
-// CONTRACT: any immediate disamb/prefix bytes have been consumed/stripped.
-// CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
-	if !rv.CanAddr() {
-		panic("rv not addressable")
-	}
-	if info.Type.Kind() == reflect.Interface && rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _decodeReflectBinary(bz: %X, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> n: %v, err: %v\n", n, err)
-		}()
-	}
 	var _n int
 
 	// TODO consider the binary equivalent of json.Unmarshaller.
@@ -102,7 +53,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		if err != nil {
 			return
 		}
-		_n, err = cdc._decodeReflectBinary(bz, rinfo, rrv, opts)
+		_n, err = cdc.decodeReflectBinary(bz, rinfo, rrv, fopts, bare)
 		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
@@ -122,17 +73,17 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 	// Complex
 
 	case reflect.Interface:
-		_n, err = cdc.decodeReflectBinaryInterface(bz, info, rv, opts)
+		_n, err = cdc.decodeReflectBinaryInterface(bz, info, rv, fopts, bare)
 		n += _n
 		return
 
 	case reflect.Array:
 		ert := info.Type.Elem()
 		if ert.Kind() == reflect.Uint8 {
-			_n, err = cdc.decodeReflectBinaryByteArray(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryByteArray(bz, info, rv, fopts)
 			n += _n
 		} else {
-			_n, err = cdc.decodeReflectBinaryArray(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryArray(bz, info, rv, fopts, bare)
 			n += _n
 		}
 		return
@@ -140,16 +91,16 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 	case reflect.Slice:
 		ert := info.Type.Elem()
 		if ert.Kind() == reflect.Uint8 {
-			_n, err = cdc.decodeReflectBinaryByteSlice(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinaryByteSlice(bz, info, rv, fopts)
 			n += _n
 		} else {
-			_n, err = cdc.decodeReflectBinarySlice(bz, info, rv, opts)
+			_n, err = cdc.decodeReflectBinarySlice(bz, info, rv, fopts, bare)
 			n += _n
 		}
 		return
 
 	case reflect.Struct:
-		_n, err = cdc.decodeReflectBinaryStruct(bz, info, rv, opts)
+		_n, err = cdc.decodeReflectBinaryStruct(bz, info, rv, fopts, bare)
 		n += _n
 		return
 
@@ -158,14 +109,14 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Int64:
 		var num int64
-		if opts.BinVarint {
-			num, _n, err = DecodeVarint(bz)
+		if fopts.BinFixed64 {
+			num, _n, err = DecodeInt64(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 			rv.SetInt(num)
 		} else {
-			num, _n, err = DecodeInt64(bz)
+			num, _n, err = DecodeVarint(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
@@ -174,12 +125,21 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		return
 
 	case reflect.Int32:
-		var num int32
-		num, _n, err = DecodeInt32(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		if fopts.BinFixed32 {
+			var num int32
+			num, _n, err = DecodeInt32(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetInt(int64(num))
+		} else {
+			var num int64
+			num, _n, err = DecodeVarint(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetInt(int64(num))
 		}
-		rv.SetInt(int64(num))
 		return
 
 	case reflect.Int16:
@@ -214,14 +174,14 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Uint64:
 		var num uint64
-		if opts.BinVarint {
-			num, _n, err = DecodeUvarint(bz)
+		if fopts.BinFixed64 {
+			num, _n, err = DecodeUint64(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 			rv.SetUint(num)
 		} else {
-			num, _n, err = DecodeUint64(bz)
+			num, _n, err = DecodeUvarint(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
@@ -230,12 +190,21 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 		return
 
 	case reflect.Uint32:
-		var num uint32
-		num, _n, err = DecodeUint32(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		if fopts.BinFixed32 {
+			var num uint32
+			num, _n, err = DecodeUint32(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetUint(uint64(num))
+		} else {
+			var num uint64
+			num, _n, err = DecodeUvarint(bz)
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			rv.SetUint(uint64(num))
 		}
-		rv.SetUint(uint64(num))
 		return
 
 	case reflect.Uint16:
@@ -279,7 +248,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Float64:
 		var f float64
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Float support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -292,7 +261,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 
 	case reflect.Float32:
 		var f float32
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Float support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -319,7 +288,7 @@ func (cdc *Codec) _decodeReflectBinary(bz []byte, info *TypeInfo, rv reflect.Val
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -337,15 +306,21 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Consume disambiguation / prefix+typ3 bytes.
-	disamb, hasDisamb, prefix, typ, hasPrefix, isNil, _n, err := DecodeDisambPrefixBytes(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// Special case for nil.
-	if isNil {
-		rv.Set(iinfo.ZeroValue)
+	// Consume disambiguation / prefix bytes.
+	disamb, hasDisamb, prefix, hasPrefix, _n, err := DecodeDisambPrefixBytes(bz)
+	if slide(&bz, &n, _n) && err != nil {
 		return
 	}
 
@@ -362,23 +337,23 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Check and consume typ3 byte.
-	// It cannot be a typ4 byte because it cannot be nil.
-	err = checkTyp3(cinfo.Type, typ, opts)
-	if err != nil {
-		return
-	}
-
 	// Construct the concrete type.
 	var crv, irvSet = constructConcreteType(cinfo)
 
 	// Decode into the concrete type.
-	_n, err = cdc._decodeReflectBinary(bz, cinfo, crv, opts)
+	_n, err = cdc.decodeReflectBinary(bz, cinfo, crv, fopts, true)
 	if slide(&bz, &n, _n) && err != nil {
 		rv.Set(irvSet) // Helps with debugging
 		return
 	}
 
+	// Earlier, we set bz to the byteslice read from buf.
+	// Ensure that all of bz was consumed.
+	if len(bz) > 0 {
+		err = errors.New("bytes left over after reading interface contents")
+		return
+	}
+
 	// We need to set here, for when !PointerPreferred and the type
 	// is say, an array of bytes (e.g. [32]byte), then we must call
 	// rv.Set() *after* the value was acquired.
@@ -389,7 +364,7 @@ func (cdc *Codec) decodeReflectBinaryInterface(bz []byte, iinfo *TypeInfo, rv re
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -426,7 +401,7 @@ func (cdc *Codec) decodeReflectBinaryByteArray(bz []byte, info *TypeInfo, rv ref
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -441,65 +416,106 @@ func (cdc *Codec) decodeReflectBinaryArray(bz []byte, info *TypeInfo, rv reflect
 		panic("should not happen")
 	}
 	length := info.Type.Len()
-	einfo := (*TypeInfo)(nil)
-	einfo, err = cdc.getTypeInfo_wlock(ert)
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Check and consume typ4 byte.
-	var ptr, _n = false, int(0)
-	ptr, _n, err = decodeTyp4AndCheck(ert, bz, opts)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-
-	// Read number of items.
-	var count = uint64(0)
-	count, _n, err = DecodeUvarint(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-	if int(count) != length {
-		err = fmt.Errorf("Expected num items of %v, decoded %v", length, count)
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// NOTE: Unlike decodeReflectBinarySlice,
-	// there is nothing special to do for
-	// zero-length arrays.  Is that even possible?
-
-	// Read each item.
-	for i := 0; i < length; i++ {
-		var erv, _n = rv.Index(i), int(0)
-		// Maybe read nil.
-		if ptr {
-			numNil := int64(0)
-			numNil, _n, err = decodeNumNilBytes(bz)
+	// If elem is not already a ByteLength type, read in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Read elements in packed form.
+		for i := 0; i < length; i++ {
+			var erv, _n = rv.Index(i), int(0)
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, fopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
+			}
+			// Special case when reading default value, prefer nil.
+			if erv.Kind() == reflect.Ptr {
+				_, isDefault := isDefaultValue(erv)
+				if isDefault {
+					erv.Set(reflect.Zero(erv.Type()))
+					continue
+				}
+			}
+		}
+		// Ensure that we read the whole buffer.
+		if len(bz) > 0 {
+			err = errors.New("bytes left over after reading array contents")
+			return
+		}
+	} else {
+		// Read elements in unpacked form.
+		for i := 0; i < length; i++ {
+			// Read field key (number and type).
+			var fnum, typ, _n = uint32(0), Typ3(0x00), int(0)
+			fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+			// Validate field number and typ3.
+			if fnum != fopts.BinFieldNum {
+				err = errors.New(fmt.Sprintf("expected repeated field number %v, got %v", fopts.BinFieldNum, fnum))
+				return
+			}
+			if typ != Typ3_ByteLength {
+				err = errors.New(fmt.Sprintf("expected repeated field type %v, got %v", Typ3_ByteLength, typ))
+				return
+			}
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
-			if numNil == 0 {
-				// Good, continue decoding item.
-			} else if numNil == 1 {
-				// Set nil/zero.
+			// Decode the next ByteLength bytes into erv.
+			var erv = rv.Index(i)
+			// Special case if next ByteLength bytes are 0x00, set nil.
+			if len(bz) > 0 && bz[0] == 0x00 {
+				slide(&bz, &n, 1)
 				erv.Set(reflect.Zero(erv.Type()))
 				continue
-			} else {
-				panic("should not happen")
+			}
+			// Normal case, read next non-nil element from bz.
+			// In case of any inner lists in unpacked form.
+			efopts := fopts
+			efopts.BinFieldNum = 1
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, efopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
 			}
 		}
-		// Decode non-nil value.
-		_n, err = cdc.decodeReflectBinary(bz, einfo, erv, opts)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+		// Ensure that there are no more elements left,
+		// and no field number regression either.
+		// This is to provide better error messages.
+		if len(bz) > 0 {
+			var fnum = uint32(0)
+			fnum, _, _, err = decodeFieldNumberAndTyp3(bz)
+			if err != nil {
+				return
+			}
+			if fnum <= fopts.BinFieldNum {
+				err = fmt.Errorf("unexpected field number %v after repeated field number %v", fnum, fopts.BinFieldNum)
+				return
+			}
 		}
 	}
 	return
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -531,7 +547,7 @@ func (cdc *Codec) decodeReflectBinaryByteSlice(bz []byte, info *TypeInfo, rv ref
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -545,72 +561,98 @@ func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect
 	if ert.Kind() == reflect.Uint8 {
 		panic("should not happen")
 	}
-	einfo := (*TypeInfo)(nil)
-	einfo, err = cdc.getTypeInfo_wlock(ert)
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Check and consume typ4 byte.
-	var ptr, _n = false, int(0)
-	ptr, _n, err = decodeTyp4AndCheck(ert, bz, opts)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
+	// Construct slice to collect decoded items to.
+	// NOTE: This is due to Proto3.  How to best optimize?
+	esrt := reflect.SliceOf(ert)
+	var srv = reflect.Zero(esrt)
 
-	// Read number of items.
-	var count = uint64(0)
-	count, _n, err = DecodeUvarint(bz)
-	if slide(&bz, &n, _n) && err != nil {
-		return
-	}
-	if int(count) < 0 {
-		err = fmt.Errorf("Impossible number of elements (%v)", count)
-		return
-	}
-	if int(count) > len(bz) { // Currently, each item takes at least 1 byte.
-		err = fmt.Errorf("Impossible number of elements (%v) compared to buffer length (%v)",
-			count, len(bz))
-		return
-	}
-
-	// Special case when length is 0.
-	// NOTE: We prefer nil slices.
-	if count == 0 {
-		rv.Set(info.ZeroValue)
-		return
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
 	}
 
-	// Read each item.
-	// NOTE: Unlike decodeReflectBinaryArray,
-	// we need to construct a new slice before
-	// we populate it. Arrays on the other hand
-	// reserve space in the value itself.
-	var esrt = reflect.SliceOf(ert) // TODO could be optimized.
-	var srv = reflect.MakeSlice(esrt, int(count), int(count))
-	for i := 0; i < int(count); i++ {
-		var erv, _n = srv.Index(i), int(0)
-		// Maybe read nil.
-		if ptr {
-			var numNil = int64(0)
-			numNil, _n, err = decodeNumNilBytes(bz)
+	// If elem is not already a ByteLength type, read in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Read elems in packed form.
+		for {
+			if len(bz) == 0 {
+				break
+			}
+			erv, _n := reflect.New(ert).Elem(), int(0)
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, fopts, false)
 			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
 				return
 			}
-			if numNil == 0 {
-				// Good, continue decoding item.
-			} else if numNil == 1 {
-				// Set nil/zero.
+			// Special case when reading default value, prefer nil.
+			if ert.Kind() == reflect.Ptr {
+				_, isDefault := isDefaultValue(erv)
+				if isDefault {
+					srv = reflect.Append(srv, reflect.Zero(ert))
+					continue
+				}
+			}
+			// Otherwise append to slice.
+			srv = reflect.Append(srv, erv)
+		}
+	} else {
+		// Read elements in unpacked form.
+		for {
+			if len(bz) == 0 {
+				break
+			}
+			// Read field key (number and type).
+			var fnum, typ, _n = uint32(0), Typ3(0x00), int(0)
+			fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+			// Validate field number and typ3.
+			if fnum < fopts.BinFieldNum {
+				err = errors.New(fmt.Sprintf("expected repeated field number %v or greater, got %v", fopts.BinFieldNum, fnum))
+				return
+			}
+			if fnum > fopts.BinFieldNum {
+				break
+			}
+			if typ != Typ3_ByteLength {
+				err = errors.New(fmt.Sprintf("expected repeated field type %v, got %v", Typ3_ByteLength, typ))
+				return
+			}
+			if slide(&bz, &n, _n) && err != nil {
+				return
+			}
+			// Decode the next ByteLength bytes into erv.
+			erv, _n := reflect.New(ert).Elem(), int(0)
+			// Special case if next ByteLength bytes are 0x00, set nil.
+			if len(bz) > 0 && bz[0] == 0x00 {
+				slide(&bz, &n, 1)
 				erv.Set(reflect.Zero(erv.Type()))
+				srv = reflect.Append(srv, erv)
 				continue
-			} else {
-				panic("should not happen")
 			}
-		}
-		// Decode non-nil value.
-		_n, err = cdc.decodeReflectBinary(bz, einfo, erv, opts)
-		if slide(&bz, &n, _n) && err != nil {
-			return
+			// Normal case, read next non-nil element from bz.
+			// In case of any inner lists in unpacked form.
+			efopts := fopts
+			efopts.BinFieldNum = 1
+			_n, err = cdc.decodeReflectBinary(bz, einfo, erv, efopts, false)
+			if slide(&bz, &n, _n) && err != nil {
+				err = fmt.Errorf("error reading array contents: %v", err)
+				return
+			}
+			srv = reflect.Append(srv, erv)
 		}
 	}
 	rv.Set(srv)
@@ -618,7 +660,7 @@ func (cdc *Codec) decodeReflectBinarySlice(bz []byte, info *TypeInfo, rv reflect
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflect.Value, _ FieldOptions) (n int, err error) {
+func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflect.Value, _ FieldOptions, bare bool) (n int, err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -630,9 +672,21 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 	}
 	_n := 0 // nolint: ineffassign
 
-	// The "Struct" typ3 doesn't get read here.
+	// NOTE: The "Struct" typ3 doesn't get read here.
 	// It's already implied, either by struct-key or list-element-type-byte.
 
+	if !bare {
+		// Read byte-length prefixed byteslice.
+		var buf, _n = []byte(nil), int(0)
+		buf, _n, err = DecodeByteSlice(bz)
+		if slide(&bz, nil, _n) && err != nil {
+			return
+		}
+		// This is a trick for debuggability -- we slide on &n more later.
+		n += UvarintSize(uint64(len(buf)))
+		bz = buf
+	}
+
 	switch info.Type {
 
 	case timeType:
@@ -643,9 +697,10 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 			return
 		}
 		rv.Set(reflect.ValueOf(t))
-		return
 
 	default:
+		// Track the last seen field number.
+		var lastFieldNum uint32
 		// Read each field.
 		for _, field := range info.Fields {
 
@@ -657,81 +712,157 @@ func (cdc *Codec) decodeReflectBinaryStruct(bz []byte, info *TypeInfo, rv reflec
 				return
 			}
 
-			// Read field key (number and type).
-			var fieldNum, typ = uint32(0), Typ3(0x00)
-			fieldNum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
-			if field.BinFieldNum < fieldNum {
-				// Set nil field value.
+			// We're done if we've consumed all the bytes.
+			if len(bz) == 0 {
 				frv.Set(reflect.Zero(frv.Type()))
 				continue
-				// Do not slide, we will read it again.
 			}
-			if fieldNum == 0 {
-				// Probably a StructTerm.
-				break
+
+			if field.UnpackedList {
+				// This is a list that was encoded unpacked, e.g.
+				// with repeated field entries for each list item.
+				_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions, true)
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
+			} else {
+				// Read field key (number and type).
+				var fnum, typ = uint32(0), Typ3(0x00)
+				fnum, typ, _n, err = decodeFieldNumberAndTyp3(bz)
+				if field.BinFieldNum < fnum {
+					// Set zero field value.
+					frv.Set(reflect.Zero(frv.Type()))
+					continue
+					// Do not slide, we will read it again.
+				}
+				if fnum <= lastFieldNum {
+				err = fmt.Errorf("encountered fieldNum: %v, but we have already seen fnum: %v\nbytes:%X",
+						fnum, lastFieldNum, bz)
+					return
+				}
+				lastFieldNum = fnum
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
+
+				// Validate fnum and typ.
+				// NOTE: In the future, we'll support upgradeability.
+				// So in the future, this may not match,
+				// so we will need to remove this sanity check.
+				if field.BinFieldNum != fnum {
+					err = errors.New(fmt.Sprintf("expected field # %v of %v, got %v",
+						field.BinFieldNum, info.Type, fnum))
+					return
+				}
+				typWanted := typeToTyp3(finfo.Type, field.FieldOptions)
+				if typ != typWanted {
+					err = errors.New(fmt.Sprintf("expected field type %v for # %v of %v, got %v",
+						typWanted, fnum, info.Type, typ))
+					return
+				}
+
+				// Decode field into frv.
+				_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions, false)
+				if slide(&bz, &n, _n) && err != nil {
+					return
+				}
 			}
+		}
+
+		// Consume any remaining fields.
+		var _n, fnum = 0, uint32(0)
+		var typ3 Typ3
+		for len(bz) > 0 {
+			fnum, typ3, _n, err = decodeFieldNumberAndTyp3(bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
-			// NOTE: In the future, we'll support upgradeability.
-			// So in the future, this may not match,
-			// so we will need to remove this sanity check.
-			if field.BinFieldNum != fieldNum {
-				err = errors.New(fmt.Sprintf("Expected field number %v, got %v", field.BinFieldNum, fieldNum))
-				return
-			}
-			typWanted := typeToTyp4(field.Type, field.FieldOptions).Typ3()
-			if typ != typWanted {
-				err = errors.New(fmt.Sprintf("Expected field type %X, got %X", typWanted, typ))
+			if fnum <= lastFieldNum {
+				err = fmt.Errorf("encountered fieldNum: %v, but we have already seen fnum: %v\nbytes:%X",
+					fnum, lastFieldNum, bz)
 				return
 			}
+			lastFieldNum = fnum
 
-			// Decode field into frv.
-			_n, err = cdc.decodeReflectBinary(bz, finfo, frv, field.FieldOptions)
+			_n, err = consumeAny(typ3, bz)
 			if slide(&bz, &n, _n) && err != nil {
 				return
 			}
 		}
+	}
+	return
+}
+
+//----------------------------------------
+// consume* for skipping struct fields
 
-		// Read "StructTerm".
-		// NOTE: In the future, we'll need to break out of a loop
-		// when encoutering an StructTerm typ3 byte.
-		var typ = Typ3(0x00)
-		typ, _n, err = decodeTyp3(bz)
+// Read everything without doing anything with it. Report errors if they occur.
+func consumeAny(typ3 Typ3, bz []byte) (n int, err error) {
+	var _n int
+	switch typ3 {
+	case Typ3_Varint:
+		_, _n, err = DecodeVarint(bz)
+	case Typ3_8Byte:
+		_, _n, err = DecodeInt64(bz)
+	case Typ3_ByteLength:
+		_, _n, err = DecodeByteSlice(bz)
+	case Typ3_4Byte:
+		_, _n, err = DecodeInt32(bz)
+	default:
+		err = fmt.Errorf("invalid typ3 bytes %v", typ3)
+		return
+	}
+	if err != nil {
+		// do not slide
+		return
+	}
+	slide(&bz, &n, _n)
+	return
+}
+
+func consumeStruct(bz []byte) (n int, err error) {
+	var _n, typ = int(0), Typ3(0x00)
+	for {
+		typ, _n, err = consumeFieldKey(bz)
 		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
-		if typ != Typ3_StructTerm {
-			err = errors.New(fmt.Sprintf("Expected StructTerm typ3 byte, got %X", typ))
+		_n, err = consumeAny(typ, bz)
+		if slide(&bz, &n, _n) && err != nil {
 			return
 		}
+	}
+	return
+}
+
+func consumeFieldKey(bz []byte) (typ Typ3, n int, err error) {
+	var u64 uint64
+	u64, n = binary.Uvarint(bz)
+	if n < 0 {
+		n = 0
+		err = errors.New("error decoding uvarint")
 		return
 	}
+	typ = Typ3(u64 & 0x07)
+	return
 }
 
 //----------------------------------------
 
-func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBytes, typ Typ3, hasPb bool, isNil bool, n int, err error) {
-	// Special case: nil
-	if len(bz) >= 2 && bz[0] == 0x00 && bz[1] == 0x00 {
-		isNil = true
-		n = 2
-		return
-	}
+func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBytes, hasPb bool, n int, err error) {
 	// Validate
 	if len(bz) < 4 {
-		err = errors.New("EOF reading prefix bytes.")
+		err = errors.New("EOF while reading prefix bytes.")
 		return // hasPb = false
 	}
 	if bz[0] == 0x00 { // Disfix
 		// Validate
 		if len(bz) < 8 {
-			err = errors.New("EOF reading disamb bytes.")
+			err = errors.New("EOF while reading disamb bytes.")
 			return // hasPb = false
 		}
 		copy(db[0:3], bz[1:4])
 		copy(pb[0:4], bz[4:8])
-		pb, typ = pb.SplitTyp3()
 		hasDb = true
 		hasPb = true
 		n = 8
@@ -739,7 +870,6 @@ func DecodeDisambPrefixBytes(bz []byte) (db DisambBytes, hasDb bool, pb PrefixBy
 	} else { // Prefix
 		// General case with no disambiguation
 		copy(pb[0:4], bz[0:4])
-		pb, typ = pb.SplitTyp3()
 		hasDb = false
 		hasPb = true
 		n = 4
@@ -764,49 +894,18 @@ func decodeFieldNumberAndTyp3(bz []byte) (num uint32, typ Typ3, n int, err error
 	var num64 uint64
 	num64 = value64 >> 3
 	if num64 > (1<<29 - 1) {
-		err = errors.New(fmt.Sprintf("invalid field num %v", num64))
+		err = fmt.Errorf("invalid field num %v", num64)
 		return
 	}
 	num = uint32(num64)
 	return
 }
 
-// Consume typ4 byte and error if it doesn't match rt.
-func decodeTyp4AndCheck(rt reflect.Type, bz []byte, opts FieldOptions) (ptr bool, n int, err error) {
-	var typ = Typ4(0x00)
-	typ, n, err = decodeTyp4(bz)
-	if err != nil {
-		return
-	}
-	var typWanted = typeToTyp4(rt, opts)
-	if typWanted != typ {
-		err = errors.New(fmt.Sprintf("Typ4 mismatch.  Expected %X, got %X", typWanted, typ))
-		return
-	}
-	ptr = (typ & 0x08) != 0
-	return
-}
-
-// Read Typ4 byte.
-func decodeTyp4(bz []byte) (typ Typ4, n int, err error) {
-	if len(bz) == 0 {
-		err = errors.New(fmt.Sprintf("EOF reading typ4 byte"))
-		return
-	}
-	if bz[0]&0xF0 != 0 {
-		err = errors.New(fmt.Sprintf("Invalid non-zero nibble reading typ4 byte"))
-		return
-	}
-	typ = Typ4(bz[0])
-	n = 1
-	return
-}
-
 // Error if typ doesn't match rt.
-func checkTyp3(rt reflect.Type, typ Typ3, opts FieldOptions) (err error) {
-	typWanted := typeToTyp3(rt, opts)
+func checkTyp3(rt reflect.Type, typ Typ3, fopts FieldOptions) (err error) {
+	typWanted := typeToTyp3(rt, fopts)
 	if typ != typWanted {
-		err = fmt.Errorf("Typ3 mismatch.  Expected %X, got %X", typWanted, typ)
+		err = fmt.Errorf("unexpected Typ3. want %v, got %v", typWanted, typ)
 	}
 	return
 }
@@ -814,11 +913,11 @@ func checkTyp3(rt reflect.Type, typ Typ3, opts FieldOptions) (err error) {
 // Read typ3 byte.
 func decodeTyp3(bz []byte) (typ Typ3, n int, err error) {
 	if len(bz) == 0 {
-		err = fmt.Errorf("EOF reading typ3 byte")
+		err = fmt.Errorf("EOF while reading typ3 byte")
 		return
 	}
 	if bz[0]&0xF8 != 0 {
-		err = fmt.Errorf("Invalid typ3 byte")
+		err = fmt.Errorf("invalid typ3 byte: %v", Typ3(bz[0]).String())
 		return
 	}
 	typ = Typ3(bz[0])
@@ -831,7 +930,7 @@ func decodeTyp3(bz []byte) (typ Typ3, n int, err error) {
 // other values will error.
 func decodeNumNilBytes(bz []byte) (numNil int64, n int, err error) {
 	if len(bz) == 0 {
-		err = errors.New("EOF reading nil byte(s)")
+		err = errors.New("EOF while reading nil byte(s)")
 		return
 	}
 	if bz[0] == 0x00 {
@@ -842,6 +941,6 @@ func decodeNumNilBytes(bz []byte) (numNil int64, n int, err error) {
 		numNil, n = 1, 1
 		return
 	}
-	n, err = 0, fmt.Errorf("Unexpected nil byte %X (sparse lists not supported)", bz[0])
+	n, err = 0, fmt.Errorf("unexpected nil byte, want: either '0x00' or '0x01' got: %X (sparse lists not supported)", bz[0])
 	return
 }
diff --git a/vendor/github.com/tendermint/go-amino/binary-encode.go b/vendor/github.com/tendermint/go-amino/binary-encode.go
index e141d4d2..9f7e5831 100644
--- a/vendor/github.com/tendermint/go-amino/binary-encode.go
+++ b/vendor/github.com/tendermint/go-amino/binary-encode.go
@@ -1,6 +1,7 @@
 package amino
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -21,7 +22,7 @@ import (
 // The following contracts apply to all similar encode methods.
 // CONTRACT: rv is not a pointer
 // CONTRACT: rv is valid.
-func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if rv.Kind() == reflect.Ptr {
 		panic("should not happen")
 	}
@@ -29,42 +30,13 @@ func (cdc *Codec) encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Va
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(E) encodeReflectBinary(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(E) encodeReflectBinary(info: %v, rv: %#v (%v), fopts: %v)\n",
+			info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(E) -> err: %v\n", err)
 		}()
 	}
 
-	// Maybe write prefix+typ3 bytes.
-	if info.Registered {
-		var typ = typeToTyp4(info.Type, opts).Typ3()
-		_, err = w.Write(info.Prefix.WithTyp3(typ).Bytes())
-		if err != nil {
-			return
-		}
-	}
-
-	err = cdc._encodeReflectBinary(w, info, rv, opts)
-	return
-}
-
-// CONTRACT: any disamb/prefix+typ3 bytes have already been written.
-func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if !rv.IsValid() {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _encodeReflectBinary(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Handle override if rv implements json.Marshaler.
 	if info.IsAminoMarshaler {
 		// First, encode rv into repr instance.
@@ -78,7 +50,7 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 			return
 		}
 		// Then, encode the repr instance.
-		err = cdc._encodeReflectBinary(w, rinfo, rrv, opts)
+		err = cdc.encodeReflectBinary(w, rinfo, rrv, fopts, bare)
 		return
 	}
 
@@ -88,37 +60,41 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	// Complex
 
 	case reflect.Interface:
-		err = cdc.encodeReflectBinaryInterface(w, info, rv, opts)
+		err = cdc.encodeReflectBinaryInterface(w, info, rv, fopts, bare)
 
 	case reflect.Array:
 		if info.Type.Elem().Kind() == reflect.Uint8 {
-			err = cdc.encodeReflectBinaryByteArray(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryByteArray(w, info, rv, fopts)
 		} else {
-			err = cdc.encodeReflectBinaryList(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryList(w, info, rv, fopts, bare)
 		}
 
 	case reflect.Slice:
 		if info.Type.Elem().Kind() == reflect.Uint8 {
-			err = cdc.encodeReflectBinaryByteSlice(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryByteSlice(w, info, rv, fopts)
 		} else {
-			err = cdc.encodeReflectBinaryList(w, info, rv, opts)
+			err = cdc.encodeReflectBinaryList(w, info, rv, fopts, bare)
 		}
 
 	case reflect.Struct:
-		err = cdc.encodeReflectBinaryStruct(w, info, rv, opts)
+		err = cdc.encodeReflectBinaryStruct(w, info, rv, fopts, bare)
 
 	//----------------------------------------
 	// Signed
 
 	case reflect.Int64:
-		if opts.BinVarint {
-			err = EncodeVarint(w, rv.Int())
-		} else {
+		if fopts.BinFixed64 {
 			err = EncodeInt64(w, rv.Int())
+		} else {
+			err = EncodeVarint(w, rv.Int())
 		}
 
 	case reflect.Int32:
-		err = EncodeInt32(w, int32(rv.Int()))
+		if fopts.BinFixed32 {
+			err = EncodeInt32(w, int32(rv.Int()))
+		} else {
+			err = EncodeVarint(w, rv.Int())
+		}
 
 	case reflect.Int16:
 		err = EncodeInt16(w, int16(rv.Int()))
@@ -133,14 +109,18 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	// Unsigned
 
 	case reflect.Uint64:
-		if opts.BinVarint {
-			err = EncodeUvarint(w, rv.Uint())
-		} else {
+		if fopts.BinFixed64 {
 			err = EncodeUint64(w, rv.Uint())
+		} else {
+			err = EncodeUvarint(w, rv.Uint())
 		}
 
 	case reflect.Uint32:
-		err = EncodeUint32(w, uint32(rv.Uint()))
+		if fopts.BinFixed32 {
+			err = EncodeUint32(w, uint32(rv.Uint()))
+		} else {
+			err = EncodeUvarint(w, rv.Uint())
+		}
 
 	case reflect.Uint16:
 		err = EncodeUint16(w, uint16(rv.Uint()))
@@ -158,14 +138,14 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 		err = EncodeBool(w, rv.Bool())
 
 	case reflect.Float64:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Amino float* support requires `amino:\"unsafe\"`.")
 			return
 		}
 		err = EncodeFloat64(w, rv.Float())
 
 	case reflect.Float32:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			err = errors.New("Amino float* support requires `amino:\"unsafe\"`.")
 			return
 		}
@@ -184,7 +164,7 @@ func (cdc *Codec) _encodeReflectBinary(w io.Writer, info *TypeInfo, rv reflect.V
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryInterface")
 		defer func() {
@@ -192,9 +172,9 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		}()
 	}
 
-	// Special case when rv is nil, write 0x0000.
+	// Special case when rv is nil, write 0x00 to denote an empty byteslice.
 	if rv.IsNil() {
-		_, err = w.Write([]byte{0x00, 0x00})
+		_, err = w.Write([]byte{0x00})
 		return
 	}
 
@@ -221,6 +201,9 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		return
 	}
 
+	// For Proto3 compatibility, encode interfaces as ByteLength.
+	buf := bytes.NewBuffer(nil)
+
 	// Write disambiguation bytes if needed.
 	var needDisamb bool = false
 	if iinfo.AlwaysDisambiguate {
@@ -229,25 +212,35 @@ func (cdc *Codec) encodeReflectBinaryInterface(w io.Writer, iinfo *TypeInfo, rv
 		needDisamb = true
 	}
 	if needDisamb {
-		_, err = w.Write(append([]byte{0x00}, cinfo.Disamb[:]...))
+		_, err = buf.Write(append([]byte{0x00}, cinfo.Disamb[:]...))
 		if err != nil {
 			return
 		}
 	}
 
-	// Write prefix+typ3 bytes.
-	var typ = typeToTyp3(crt, opts)
-	_, err = w.Write(cinfo.Prefix.WithTyp3(typ).Bytes())
+	// Write prefix bytes.
+	_, err = buf.Write(cinfo.Prefix.Bytes())
 	if err != nil {
 		return
 	}
 
 	// Write actual concrete value.
-	err = cdc._encodeReflectBinary(w, cinfo, crv, opts)
+	err = cdc.encodeReflectBinary(buf, cinfo, crv, fopts, true)
+	if err != nil {
+		return
+	}
+
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	ert := info.Type.Elem()
 	if ert.Kind() != reflect.Uint8 {
 		panic("should not happen")
@@ -268,7 +261,7 @@ func (cdc *Codec) encodeReflectBinaryByteArray(w io.Writer, info *TypeInfo, rv r
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryList")
 		defer func() {
@@ -279,57 +272,71 @@ func (cdc *Codec) encodeReflectBinaryList(w io.Writer, info *TypeInfo, rv reflec
 	if ert.Kind() == reflect.Uint8 {
 		panic("should not happen")
 	}
-
-	// Write element Typ4 byte.
-	var typ = typeToTyp4(ert, opts)
-	err = EncodeByte(w, byte(typ))
-	if err != nil {
-		return
-	}
-
-	// Write length.
-	err = EncodeUvarint(w, uint64(rv.Len()))
+	einfo, err := cdc.getTypeInfo_wlock(ert)
 	if err != nil {
 		return
 	}
 
-	// Write elems.
-	var einfo *TypeInfo
-	einfo, err = cdc.getTypeInfo_wlock(ert)
-	if err != nil {
-		return
-	}
-	for i := 0; i < rv.Len(); i++ {
-		// Get dereferenced element value and info.
-		var erv, void = isVoid(rv.Index(i))
-		if typ.IsPointer() {
-			// We must write a byte to denote whether element is nil.
-			if void {
-				// Value is nil or empty.
-				// e.g. nil pointer, nil/empty slice, pointer to nil/empty slice, pointer
-				// to nil pointer.  Write 0x01 for "is nil".
-				// NOTE: Do not use a pointer to nil/empty slices to denote
-				// existence or not.  We have to make a design choice here, and
-				// here we discourage using pointers to denote existence.
-				_, err = w.Write([]byte{0x01})
-				continue
-			} else {
-				// Value is not nil or empty.  Write 0x00 for "not nil/empty".
-				_, err = w.Write([]byte{0x00})
+	// Proto3 byte-length prefixing incurs alloc cost on the encoder.
+	// Here we incur it for unpacked form for ease of dev.
+	buf := bytes.NewBuffer(nil)
+
+	// If elem is not already a ByteLength type, write in packed form.
+	// This is a Proto wart due to Proto backwards compatibility issues.
+	// Amino2 will probably migrate to use the List typ3.  Please?  :)
+	typ3 := typeToTyp3(einfo.Type, fopts)
+	if typ3 != Typ3_ByteLength {
+		// Write elems in packed form.
+		for i := 0; i < rv.Len(); i++ {
+			// Get dereferenced element value (or zero).
+			var erv, _, _ = derefPointersZero(rv.Index(i))
+			// Write the element value.
+			err = cdc.encodeReflectBinary(buf, einfo, erv, fopts, false)
+			if err != nil {
+				return
 			}
 		}
-		// Write the element value.
-		// It may be a nil interface, but not a nil pointer.
-		err = cdc.encodeReflectBinary(w, einfo, erv, opts)
-		if err != nil {
-			return
+	} else {
+		// Write elems in unpacked form.
+		for i := 0; i < rv.Len(); i++ {
+			// Write elements as repeated fields of the parent struct.
+			err = encodeFieldNumberAndTyp3(buf, fopts.BinFieldNum, Typ3_ByteLength)
+			if err != nil {
+				return
+			}
+			// Get dereferenced element value and info.
+			var erv, isDefault = isDefaultValue(rv.Index(i))
+			if isDefault {
+				// Nothing to encode, so the length is 0.
+				err = EncodeByte(buf, byte(0x00))
+				if err != nil {
+					return
+				}
+			} else {
+				// Write the element value as a ByteLength.
+				// In case of any inner lists in unpacked form.
+				efopts := fopts
+				efopts.BinFieldNum = 1
+				err = cdc.encodeReflectBinary(buf, einfo, erv, efopts, false)
+				if err != nil {
+					return
+				}
+			}
 		}
 	}
+
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
 	return
 }
 
 // CONTRACT: info.Type.Elem().Kind() == reflect.Uint8
-func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryByteSlice")
 		defer func() {
@@ -347,7 +354,7 @@ func (cdc *Codec) encodeReflectBinaryByteSlice(w io.Writer, info *TypeInfo, rv r
 	return
 }
 
-func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions, bare bool) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectBinaryBinaryStruct")
 		defer func() {
@@ -355,51 +362,62 @@ func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv refl
 		}()
 	}
 
-	// The "Struct" Typ3 doesn't get written here.
-	// It's already implied, either by struct-key or list-element-type-byte.
+	// Proto3 incurs a cost in writing non-root structs.
+	// Here we incur it for root structs as well for ease of dev.
+	buf := bytes.NewBuffer(nil)
 
 	switch info.Type {
 
 	case timeType:
 		// Special case: time.Time
-		err = EncodeTime(w, rv.Interface().(time.Time))
-		return
+		err = EncodeTime(buf, rv.Interface().(time.Time))
+		if err != nil {
+			return
+		}
 
 	default:
 		for _, field := range info.Fields {
-			// Get dereferenced field value and info.
-			var frv, void = isVoid(rv.Field(field.Index))
-			if void {
-				// Do not encode nil or empty fields.
-				continue
-			}
+			// Get type info for field.
 			var finfo *TypeInfo
 			finfo, err = cdc.getTypeInfo_wlock(field.Type)
 			if err != nil {
 				return
 			}
-			// TODO Maybe allow omitempty somehow.
-			// Write field key (number and type).
-			err = encodeFieldNumberAndTyp3(w, field.BinFieldNum, field.BinTyp3)
-			if err != nil {
-				return
+			// Get dereferenced field value and info.
+			var frv, isDefault = isDefaultValue(rv.Field(field.Index))
+			if isDefault {
+				// Do not encode default value fields.
+				continue
 			}
-			// Write field from rv.
-			err = cdc.encodeReflectBinary(w, finfo, frv, field.FieldOptions)
-			if err != nil {
-				return
+			if field.UnpackedList {
+				// Write repeated field entries for each list item.
+				err = cdc.encodeReflectBinaryList(buf, finfo, frv, field.FieldOptions, true)
+				if err != nil {
+					return
+				}
+			} else {
+				// Write field key (number and type).
+				err = encodeFieldNumberAndTyp3(buf, field.BinFieldNum, typeToTyp3(finfo.Type, field.FieldOptions))
+				if err != nil {
+					return
+				}
+				// Write field from rv.
+				err = cdc.encodeReflectBinary(buf, finfo, frv, field.FieldOptions, false)
+				if err != nil {
+					return
+				}
 			}
 		}
-
-		// Write "StructTerm".
-		err = EncodeByte(w, byte(Typ3_StructTerm))
-		if err != nil {
-			return
-		}
-		return
-
 	}
 
+	if bare {
+		// Write byteslice without byte-length prefixing.
+		_, err = w.Write(buf.Bytes())
+	} else {
+		// Write byte-length prefixed byteslice.
+		err = EncodeByteSlice(w, buf.Bytes())
+	}
+	return
 }
 
 //----------------------------------------
@@ -408,7 +426,7 @@ func (cdc *Codec) encodeReflectBinaryStruct(w io.Writer, info *TypeInfo, rv refl
 // Write field key.
 func encodeFieldNumberAndTyp3(w io.Writer, num uint32, typ Typ3) (err error) {
 	if (typ & 0xF8) != 0 {
-		panic(fmt.Sprintf("invalid Typ3 byte %X", typ))
+		panic(fmt.Sprintf("invalid Typ3 byte %v", typ))
 	}
 	if num < 0 || num > (1<<29-1) {
 		panic(fmt.Sprintf("invalid field number %v", num))
diff --git a/vendor/github.com/tendermint/go-amino/codec.go b/vendor/github.com/tendermint/go-amino/codec.go
index 30af7e7e..605b9993 100644
--- a/vendor/github.com/tendermint/go-amino/codec.go
+++ b/vendor/github.com/tendermint/go-amino/codec.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/sha256"
 	"fmt"
+	"io"
 	"reflect"
 	"strings"
 	"sync"
@@ -34,14 +35,8 @@ func NewPrefixBytes(prefixBytes []byte) PrefixBytes {
 	return pb
 }
 
-func (pb PrefixBytes) Bytes() []byte                 { return pb[:] }
-func (pb PrefixBytes) EqualBytes(bz []byte) bool     { return bytes.Equal(pb[:], bz) }
-func (pb PrefixBytes) WithTyp3(typ Typ3) PrefixBytes { pb[3] |= byte(typ); return pb }
-func (pb PrefixBytes) SplitTyp3() (PrefixBytes, Typ3) {
-	typ := Typ3(pb[3] & 0x07)
-	pb[3] &= 0xF8
-	return pb, typ
-}
+func (pb PrefixBytes) Bytes() []byte             { return pb[:] }
+func (pb PrefixBytes) EqualBytes(bz []byte) bool { return bytes.Equal(pb[:], bz) }
 func (db DisambBytes) Bytes() []byte             { return db[:] }
 func (db DisambBytes) EqualBytes(bz []byte) bool { return bytes.Equal(db[:], bz) }
 func (df DisfixBytes) Bytes() []byte             { return df[:] }
@@ -79,12 +74,13 @@ type InterfaceOptions struct {
 type ConcreteInfo struct {
 
 	// These fields are only set when registered (as implementing an interface).
-	Registered       bool        // Registered with RegisterConcrete().
-	PointerPreferred bool        // Deserialize to pointer type if possible.
-	Name             string      // Registered name.
-	Disamb           DisambBytes // Disambiguation bytes derived from name.
-	Prefix           PrefixBytes // Prefix bytes derived from name.
-	ConcreteOptions              // Registration options.
+	Registered       bool // Registered with RegisterConcrete().
+	PointerPreferred bool // Deserialize to pointer type if possible.
+	// NilPreferred     bool        // Deserialize to nil for empty structs if PointerPreferred.
+	Name            string      // Registered name.
+	Disamb          DisambBytes // Disambiguation bytes derived from name.
+	Prefix          PrefixBytes // Prefix bytes derived from name.
+	ConcreteOptions             // Registration options.
 
 	// These fields get set for all concrete types,
 	// even those not manually registered (e.g. are never interface values).
@@ -110,14 +106,15 @@ type FieldInfo struct {
 	Type         reflect.Type  // Struct field type
 	Index        int           // Struct field index
 	ZeroValue    reflect.Value // Could be nil pointer unlike TypeInfo.ZeroValue.
+	UnpackedList bool          // True iff this field should be encoded as an unpacked list.
 	FieldOptions               // Encoding options
-	BinTyp3      Typ3          // (Binary) Typ3 byte
 }
 
 type FieldOptions struct {
 	JSONName      string // (JSON) field name
 	JSONOmitEmpty bool   // (JSON) omitempty
-	BinVarint     bool   // (Binary) Use length-prefixed encoding for (u)int64.
+	BinFixed64    bool   // (Binary) Encode as fixed64
+	BinFixed32    bool   // (Binary) Encode as fixed32
 	BinFieldNum   uint32 // (Binary) max 1<<29-1
 	Unsafe        bool   // e.g. if this field is a float.
 }
@@ -127,16 +124,20 @@ type FieldOptions struct {
 
 type Codec struct {
 	mtx              sync.RWMutex
+	sealed           bool
 	typeInfos        map[reflect.Type]*TypeInfo
 	interfaceInfos   []*TypeInfo
 	concreteInfos    []*TypeInfo
 	disfixToTypeInfo map[DisfixBytes]*TypeInfo
+	nameToTypeInfo   map[string]*TypeInfo
 }
 
 func NewCodec() *Codec {
 	cdc := &Codec{
+		sealed:           false,
 		typeInfos:        make(map[reflect.Type]*TypeInfo),
 		disfixToTypeInfo: make(map[DisfixBytes]*TypeInfo),
+		nameToTypeInfo:   make(map[string]*TypeInfo),
 	}
 	return cdc
 }
@@ -145,7 +146,8 @@ func NewCodec() *Codec {
 // encoded/decoded by go-amino.
 // Usage:
 // `amino.RegisterInterface((*MyInterface1)(nil), nil)`
-func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
+func (cdc *Codec) RegisterInterface(ptr interface{}, iopts *InterfaceOptions) {
+	cdc.assertNotSealed()
 
 	// Get reflect.Type from ptr.
 	rt := getTypeFromPointer(ptr)
@@ -154,7 +156,7 @@ func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
 	}
 
 	// Construct InterfaceInfo
-	var info = cdc.newTypeInfoFromInterfaceType(rt, opts)
+	var info = cdc.newTypeInfoFromInterfaceType(rt, iopts)
 
 	// Finally, check conflicts and register.
 	func() {
@@ -199,7 +201,8 @@ func (cdc *Codec) RegisterInterface(ptr interface{}, opts *InterfaceOptions) {
 // interface fields/elements to be encoded/decoded by go-amino.
 // Usage:
 // `amino.RegisterConcrete(MyStruct1{}, "com.tendermint/MyStruct1", nil)`
-func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOptions) {
+func (cdc *Codec) RegisterConcrete(o interface{}, name string, copts *ConcreteOptions) {
+	cdc.assertNotSealed()
 
 	var pointerPreferred bool
 
@@ -222,7 +225,7 @@ func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOpt
 	}
 
 	// Construct ConcreteInfo.
-	var info = cdc.newTypeInfoFromRegisteredConcreteType(rt, pointerPreferred, name, opts)
+	var info = cdc.newTypeInfoFromRegisteredConcreteType(rt, pointerPreferred, name, copts)
 
 	// Finally, check conflicts and register.
 	func() {
@@ -234,8 +237,95 @@ func (cdc *Codec) RegisterConcrete(o interface{}, name string, opts *ConcreteOpt
 	}()
 }
 
+func (cdc *Codec) Seal() *Codec {
+	cdc.mtx.Lock()
+	defer cdc.mtx.Unlock()
+
+	cdc.sealed = true
+	return cdc
+}
+
+// PrintTypes writes all registered types in a markdown-style table.
+// The table's header is:
+//
+// | Type  | Name | Prefix | Notes |
+//
+// Where Type is the golang type name and Name is the name the type was registered with.
+func (cdc Codec) PrintTypes(out io.Writer) error {
+	cdc.mtx.RLock()
+	defer cdc.mtx.RUnlock()
+	// print header
+	if _, err := io.WriteString(out, "| Type | Name | Prefix | Length | Notes |\n"); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(out, "| ---- | ---- | ------ | ----- | ------ |\n"); err != nil {
+		return err
+	}
+	// only print concrete types for now (if we want everything, we can iterate over the typeInfos map instead)
+	for _, i := range cdc.concreteInfos {
+		io.WriteString(out, "| ")
+		// TODO(ismail): optionally create a link to code on github:
+		if _, err := io.WriteString(out, i.Type.Name()); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, i.Name); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, fmt.Sprintf("0x%X", i.Prefix)); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+
+		if _, err := io.WriteString(out, getLengthStr(i)); err != nil {
+			return err
+		}
+
+		if _, err := io.WriteString(out, " | "); err != nil {
+			return err
+		}
+		// empty notes table data by default // TODO(ismail): make this configurable
+
+		io.WriteString(out, " |\n")
+	}
+	// finish table
+	return nil
+}
+
+// A heuristic to guess the size of a registered type and return it as a string.
+// If the size is not fixed it returns "variable".
+func getLengthStr(info *TypeInfo) string {
+	switch info.Type.Kind() {
+	case reflect.Array,
+		reflect.Int8,
+		reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Float32, reflect.Float64,
+		reflect.Complex64, reflect.Complex128:
+		s := info.Type.Size()
+		return fmt.Sprintf("0x%X", s)
+	default:
+		return "variable"
+	}
+}
+
 //----------------------------------------
 
+func (cdc *Codec) assertNotSealed() {
+	cdc.mtx.Lock()
+	defer cdc.mtx.Unlock()
+
+	if cdc.sealed {
+		panic("codec sealed")
+	}
+}
+
 func (cdc *Codec) setTypeInfo_nolock(info *TypeInfo) {
 
 	if info.Type.Kind() == reflect.Ptr {
@@ -254,7 +344,11 @@ func (cdc *Codec) setTypeInfo_nolock(info *TypeInfo) {
 		if existing, ok := cdc.disfixToTypeInfo[disfix]; ok {
 			panic(fmt.Sprintf("disfix <%X> already registered for %v", disfix, existing.Type))
 		}
+		if existing, ok := cdc.nameToTypeInfo[info.Name]; ok {
+			panic(fmt.Sprintf("name <%s> already registered for %v", info.Name, existing.Type))
+		}
 		cdc.disfixToTypeInfo[disfix] = info
+		cdc.nameToTypeInfo[info.Name] = info
 		//cdc.prefixToTypeInfos[prefix] =
 		//	append(cdc.prefixToTypeInfos[prefix], info)
 	}
@@ -294,7 +388,7 @@ func (cdc *Codec) getTypeInfoFromPrefix_rlock(iinfo *TypeInfo, pb PrefixBytes) (
 		return
 	}
 	if len(infos) > 1 {
-		err = fmt.Errorf("Conflicting concrete types registered for %X: e.g. %v and %v.", pb, infos[0].Type, infos[1].Type)
+		err = fmt.Errorf("conflicting concrete types registered for %X: e.g. %v and %v", pb, infos[0].Type, infos[1].Type)
 		return
 	}
 	info = infos[0]
@@ -313,6 +407,18 @@ func (cdc *Codec) getTypeInfoFromDisfix_rlock(df DisfixBytes) (info *TypeInfo, e
 	return
 }
 
+func (cdc *Codec) getTypeInfoFromName_rlock(name string) (info *TypeInfo, err error) {
+	cdc.mtx.RLock()
+	defer cdc.mtx.RUnlock()
+
+	info, ok := cdc.nameToTypeInfo[name]
+	if !ok {
+		err = fmt.Errorf("unrecognized concrete type name %s", name)
+		return
+	}
+	return
+}
+
 func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	if rt.Kind() != reflect.Struct {
 		panic("should not happen")
@@ -322,23 +428,40 @@ func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	for i := 0; i < rt.NumField(); i++ {
 		var field = rt.Field(i)
 		var ftype = field.Type
+		var unpackedList = false
 		if !isExported(field) {
 			continue // field is unexported
 		}
-		skip, opts := cdc.parseFieldOptions(field)
+		skip, fopts := cdc.parseFieldOptions(field)
 		if skip {
 			continue // e.g. json:"-"
 		}
+		if ftype.Kind() == reflect.Array || ftype.Kind() == reflect.Slice {
+			if ftype.Elem().Kind() == reflect.Uint8 {
+				// These get handled by our optimized methods,
+				// encodeReflectBinaryByte[Slice/Array].
+				unpackedList = false
+			} else {
+				etype := ftype.Elem()
+				for etype.Kind() == reflect.Ptr {
+					etype = etype.Elem()
+				}
+				typ3 := typeToTyp3(etype, fopts)
+				if typ3 == Typ3_ByteLength {
+					unpackedList = true
+				}
+			}
+		}
 		// NOTE: This is going to change a bit.
 		// NOTE: BinFieldNum starts with 1.
-		opts.BinFieldNum = uint32(len(infos) + 1)
+		fopts.BinFieldNum = uint32(len(infos) + 1)
 		fieldInfo := FieldInfo{
 			Name:         field.Name, // Mostly for debugging.
 			Index:        i,
 			Type:         ftype,
 			ZeroValue:    reflect.Zero(ftype),
-			FieldOptions: opts,
-			BinTyp3:      typeToTyp4(ftype, opts).Typ3(),
+			UnpackedList: unpackedList,
+			FieldOptions: fopts,
 		}
 		checkUnsafe(fieldInfo)
 		infos = append(infos, fieldInfo)
@@ -347,7 +470,7 @@ func (cdc *Codec) parseStructInfo(rt reflect.Type) (sinfo StructInfo) {
 	return
 }
 
-func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, opts FieldOptions) {
+func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, fopts FieldOptions) {
 	binTag := field.Tag.Get("binary")
 	aminoTag := field.Tag.Get("amino")
 	jsonTag := field.Tag.Get("json")
@@ -362,26 +485,28 @@ func (cdc *Codec) parseFieldOptions(field reflect.StructField) (skip bool, opts
 	// Get JSON field name.
 	jsonTagParts := strings.Split(jsonTag, ",")
 	if jsonTagParts[0] == "" {
-		opts.JSONName = field.Name
+		fopts.JSONName = field.Name
 	} else {
-		opts.JSONName = jsonTagParts[0]
+		fopts.JSONName = jsonTagParts[0]
 	}
 
 	// Get JSON omitempty.
 	if len(jsonTagParts) > 1 {
 		if jsonTagParts[1] == "omitempty" {
-			opts.JSONOmitEmpty = true
+			fopts.JSONOmitEmpty = true
 		}
 	}
 
 	// Parse binary tags.
-	if binTag == "varint" { // TODO: extend
-		opts.BinVarint = true
+	if binTag == "fixed64" { // TODO: extend
+		fopts.BinFixed64 = true
+	} else if binTag == "fixed32" {
+		fopts.BinFixed32 = true
 	}
 
 	// Parse amino tags.
 	if aminoTag == "unsafe" {
-		opts.Unsafe = true
+		fopts.Unsafe = true
 	}
 
 	return
@@ -415,7 +540,7 @@ func (cdc *Codec) newTypeInfoUnregistered(rt reflect.Type) *TypeInfo {
 	return info
 }
 
-func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceOptions) *TypeInfo {
+func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, iopts *InterfaceOptions) *TypeInfo {
 	if rt.Kind() != reflect.Interface {
 		panic(fmt.Sprintf("expected interface type, got %v", rt))
 	}
@@ -426,11 +551,11 @@ func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceO
 	info.ZeroValue = reflect.Zero(rt)
 	info.ZeroProto = reflect.Zero(rt).Interface()
 	info.InterfaceInfo.Implementers = make(map[PrefixBytes][]*TypeInfo)
-	if opts != nil {
-		info.InterfaceInfo.InterfaceOptions = *opts
-		info.InterfaceInfo.Priority = make([]DisfixBytes, len(opts.Priority))
+	if iopts != nil {
+		info.InterfaceInfo.InterfaceOptions = *iopts
+		info.InterfaceInfo.Priority = make([]DisfixBytes, len(iopts.Priority))
 		// Construct Priority []DisfixBytes
-		for i, name := range opts.Priority {
+		for i, name := range iopts.Priority {
 			disamb, prefix := nameToDisfix(name)
 			disfix := toDisfix(disamb, prefix)
 			info.InterfaceInfo.Priority[i] = disfix
@@ -439,7 +564,7 @@ func (cdc *Codec) newTypeInfoFromInterfaceType(rt reflect.Type, opts *InterfaceO
 	return info
 }
 
-func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointerPreferred bool, name string, opts *ConcreteOptions) *TypeInfo {
+func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointerPreferred bool, name string, copts *ConcreteOptions) *TypeInfo {
 	if rt.Kind() == reflect.Interface ||
 		rt.Kind() == reflect.Ptr {
 		panic(fmt.Sprintf("expected non-interface non-pointer concrete type, got %v", rt))
@@ -451,8 +576,8 @@ func (cdc *Codec) newTypeInfoFromRegisteredConcreteType(rt reflect.Type, pointer
 	info.ConcreteInfo.Name = name
 	info.ConcreteInfo.Disamb = nameToDisamb(name)
 	info.ConcreteInfo.Prefix = nameToPrefix(name)
-	if opts != nil {
-		info.ConcreteOptions = *opts
+	if copts != nil {
+		info.ConcreteOptions = *copts
 	}
 	return info
 }
@@ -603,8 +728,6 @@ func nameToDisfix(name string) (db DisambBytes, pb PrefixBytes) {
 		bz = bz[1:]
 	}
 	copy(pb[:], bz[0:4])
-	// Drop the last 3 bits to make room for the Typ3.
-	pb[3] &= 0xF8
 	return
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/decoder.go b/vendor/github.com/tendermint/go-amino/decoder.go
index 36a640e8..9464a0c8 100644
--- a/vendor/github.com/tendermint/go-amino/decoder.go
+++ b/vendor/github.com/tendermint/go-amino/decoder.go
@@ -45,7 +45,7 @@ func DecodeInt32(bz []byte) (i int32, n int, err error) {
 		err = errors.New("EOF decoding int32")
 		return
 	}
-	i = int32(binary.BigEndian.Uint32(bz[:size]))
+	i = int32(binary.LittleEndian.Uint32(bz[:size]))
 	n = size
 	return
 }
@@ -56,15 +56,20 @@ func DecodeInt64(bz []byte) (i int64, n int, err error) {
 		err = errors.New("EOF decoding int64")
 		return
 	}
-	i = int64(binary.BigEndian.Uint64(bz[:size]))
+	i = int64(binary.LittleEndian.Uint64(bz[:size]))
 	n = size
 	return
 }
 
 func DecodeVarint(bz []byte) (i int64, n int, err error) {
 	i, n = binary.Varint(bz)
-	if n < 0 {
-		n = 0
+	if n == 0 {
+		// buf too small
+		err = errors.New("buffer too small")
+	} else if n < 0 {
+		// value larger than 64 bits (overflow)
+		// and -n is the number of bytes read
+		n = -n
 		err = errors.New("EOF decoding varint")
 	}
 	return
@@ -110,7 +115,7 @@ func DecodeUint32(bz []byte) (u uint32, n int, err error) {
 		err = errors.New("EOF decoding uint32")
 		return
 	}
-	u = binary.BigEndian.Uint32(bz[:size])
+	u = binary.LittleEndian.Uint32(bz[:size])
 	n = size
 	return
 }
@@ -121,15 +126,20 @@ func DecodeUint64(bz []byte) (u uint64, n int, err error) {
 		err = errors.New("EOF decoding uint64")
 		return
 	}
-	u = binary.BigEndian.Uint64(bz[:size])
+	u = binary.LittleEndian.Uint64(bz[:size])
 	n = size
 	return
 }
 
 func DecodeUvarint(bz []byte) (u uint64, n int, err error) {
 	u, n = binary.Uvarint(bz)
-	if n <= 0 {
-		n = 0
+	if n == 0 {
+		// buf too small
+		err = errors.New("buffer too small")
+	} else if n < 0 {
+		// value larger than 64 bits (overflow)
+		// and -n is the number of bytes read
+		n = -n
 		err = errors.New("EOF decoding uvarint")
 	}
 	return
@@ -163,7 +173,7 @@ func DecodeFloat32(bz []byte) (f float32, n int, err error) {
 		err = errors.New("EOF decoding float32")
 		return
 	}
-	i := binary.BigEndian.Uint32(bz[:size])
+	i := binary.LittleEndian.Uint32(bz[:size])
 	f = math.Float32frombits(i)
 	n = size
 	return
@@ -176,7 +186,7 @@ func DecodeFloat64(bz []byte) (f float64, n int, err error) {
 		err = errors.New("EOF decoding float64")
 		return
 	}
-	i := binary.BigEndian.Uint64(bz[:size])
+	i := binary.LittleEndian.Uint64(bz[:size])
 	f = math.Float64frombits(i)
 	n = size
 	return
@@ -200,11 +210,11 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 			return
 		}
 		if fieldNum != 1 {
-			err = fmt.Errorf("Expected field number 1, got %v", fieldNum)
+			err = fmt.Errorf("expected field number 1, got %v", fieldNum)
 			return
 		}
 		if typ != Typ3_8Byte {
-			err = fmt.Errorf("Expected Typ3 bytes <8Bytes> for time field #1, got %X", typ)
+			err = fmt.Errorf("expected Typ3 bytes <8Bytes> for time field #1, got %v", typ)
 			return
 		}
 	}
@@ -222,11 +232,11 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 			return
 		}
 		if fieldNum != 2 {
-			err = fmt.Errorf("Expected field number 2, got %v", fieldNum)
+			err = fmt.Errorf("expected field number 2, got %v", fieldNum)
 			return
 		}
 		if typ != Typ3_4Byte {
-			err = fmt.Errorf("Expected Typ3 bytes <4Byte> for time field #2, got %X", typ)
+			err = fmt.Errorf("expected Typ3 bytes <4Byte> for time field #2, got %v", typ)
 			return
 		}
 	}
@@ -238,20 +248,9 @@ func DecodeTime(bz []byte) (t time.Time, n int, err error) {
 	}
 	// Validation check.
 	if nsec < 0 || 999999999 < nsec {
-		err = fmt.Errorf("Invalid time, nanoseconds out of bounds %v", nsec)
+		err = fmt.Errorf("invalid time, nanoseconds out of bounds %v", nsec)
 		return
 	}
-	{ // Expect "StructTerm" Typ3 byte.
-		var typ, _n = Typ3(0x00), int(0)
-		typ, _n, err = decodeTyp3(bz)
-		if slide(&bz, &n, _n) && err != nil {
-			return
-		}
-		if typ != Typ3_StructTerm {
-			err = errors.New(fmt.Sprintf("Expected StructTerm Typ3 byte for time, got %X", typ))
-			return
-		}
-	}
 	// Construct time.
 	t = time.Unix(sec, int64(nsec))
 	// Strip timezone and monotonic for deep equality.
diff --git a/vendor/github.com/tendermint/go-amino/encoder.go b/vendor/github.com/tendermint/go-amino/encoder.go
index 29d63830..05fe73c9 100644
--- a/vendor/github.com/tendermint/go-amino/encoder.go
+++ b/vendor/github.com/tendermint/go-amino/encoder.go
@@ -20,14 +20,14 @@ func EncodeInt16(w io.Writer, i int16) (err error) {
 
 func EncodeInt32(w io.Writer, i int32) (err error) {
 	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], uint32(i))
+	binary.LittleEndian.PutUint32(buf[:], uint32(i))
 	_, err = w.Write(buf[:])
 	return
 }
 
 func EncodeInt64(w io.Writer, i int64) (err error) {
 	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], uint64(i))
+	binary.LittleEndian.PutUint64(buf[:], uint64(i))
 	_, err = w.Write(buf[:])
 	return err
 }
@@ -62,14 +62,14 @@ func EncodeUint16(w io.Writer, u uint16) (err error) {
 
 func EncodeUint32(w io.Writer, u uint32) (err error) {
 	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], u)
+	binary.LittleEndian.PutUint32(buf[:], u)
 	_, err = w.Write(buf[:])
 	return
 }
 
 func EncodeUint64(w io.Writer, u uint64) (err error) {
 	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], u)
+	binary.LittleEndian.PutUint64(buf[:], u)
 	_, err = w.Write(buf[:])
 	return
 }
@@ -138,7 +138,6 @@ func EncodeTime(w io.Writer, t time.Time) (err error) {
 		return
 	}
 
-	err = EncodeByte(w, byte(0x04)) // StructTerm
 	return
 }
 
diff --git a/vendor/github.com/tendermint/go-amino/json-decode.go b/vendor/github.com/tendermint/go-amino/json-decode.go
index 15b70f80..06174bef 100644
--- a/vendor/github.com/tendermint/go-amino/json-decode.go
+++ b/vendor/github.com/tendermint/go-amino/json-decode.go
@@ -2,7 +2,6 @@ package amino
 
 import (
 	"bytes"
-	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -15,7 +14,7 @@ import (
 // cdc.decodeReflectJSON
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -23,55 +22,18 @@ func (cdc *Codec) decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value,
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(D) decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(D) decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), fopts: %v)\n",
+			bz, info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(D) -> err: %v\n", err)
 		}()
 	}
 
-	// Read disfix bytes if registered.
-	if info.Registered {
-		// Strip the disfix bytes after checking it.
-		var disfix DisfixBytes
-		disfix, bz, err = decodeDisfixJSON(bz)
-		if err != nil {
-			return
-		}
-		if !info.GetDisfix().EqualBytes(disfix[:]) {
-			err = fmt.Errorf("Expected disfix bytes %X but got %X", info.GetDisfix(), disfix)
-			return
-		}
-	}
-
-	err = cdc._decodeReflectJSON(bz, info, rv, opts)
-	return
-}
-
-// CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if !rv.CanAddr() {
-		panic("rv not addressable")
-	}
-	if info.Type.Kind() == reflect.Interface && rv.Kind() == reflect.Ptr {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _decodeReflectJSON(bz: %s, info: %v, rv: %#v (%v), opts: %v)\n",
-			bz, info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Special case for null for either interface, pointer, slice
 	// NOTE: This doesn't match the binary implementation completely.
 	if nullBytes(bz) {
-		switch rv.Kind() {
-		case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Array:
-			rv.Set(reflect.Zero(rv.Type()))
-			return
-		}
+		rv.Set(reflect.Zero(rv.Type()))
+		return
 	}
 
 	// Dereference-and-construct pointers all the way.
@@ -84,6 +46,20 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 		rv = rv.Elem()
 	}
 
+	// Special case:
+	if rv.Type() == timeType {
+		// Amino time strips the timezone, so must end with Z.
+		if len(bz) >= 2 && bz[0] == '"' && bz[len(bz)-1] == '"' {
+			if bz[len(bz)-2] != 'Z' {
+				err = fmt.Errorf("Amino:JSON time must be UTC and end with 'Z' but got %s.", bz)
+				return
+			}
+		} else {
+			err = fmt.Errorf("Amino:JSON time must be an RFC3339Nano string, but got %s.", bz)
+			return
+		}
+	}
+
 	// Handle override if a pointer to rv implements json.Unmarshaler.
 	if rv.Addr().Type().Implements(jsonUnmarshalerType) {
 		err = rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz)
@@ -98,7 +74,7 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 		if err != nil {
 			return
 		}
-		err = cdc._decodeReflectJSON(bz, rinfo, rrv, opts)
+		err = cdc.decodeReflectJSON(bz, rinfo, rrv, fopts)
 		if err != nil {
 			return
 		}
@@ -118,37 +94,48 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 	// Complex
 
 	case reflect.Interface:
-		err = cdc.decodeReflectJSONInterface(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONInterface(bz, info, rv, fopts)
 
 	case reflect.Array:
-		err = cdc.decodeReflectJSONArray(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONArray(bz, info, rv, fopts)
 
 	case reflect.Slice:
-		err = cdc.decodeReflectJSONSlice(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONSlice(bz, info, rv, fopts)
 
 	case reflect.Struct:
-		err = cdc.decodeReflectJSONStruct(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONStruct(bz, info, rv, fopts)
 
 	case reflect.Map:
-		err = cdc.decodeReflectJSONMap(bz, info, rv, opts)
+		err = cdc.decodeReflectJSONMap(bz, info, rv, fopts)
 
 	//----------------------------------------
 	// Signed, Unsigned
 
-	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int,
-		reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
-		err = invokeStdlibJSONUnmarshal(bz, rv, opts)
+	case reflect.Int64, reflect.Int:
+		fallthrough
+	case reflect.Uint64, reflect.Uint:
+		if bz[0] != '"' || bz[len(bz)-1] != '"' {
+			err = fmt.Errorf("invalid character -- Amino:JSON int/int64/uint/uint64 expects quoted values for javascript numeric support, got: %v.", string(bz))
+			if err != nil {
+				return
+			}
+		}
+		bz = bz[1 : len(bz)-1]
+		fallthrough
+	case reflect.Int32, reflect.Int16, reflect.Int8,
+		reflect.Uint32, reflect.Uint16, reflect.Uint8:
+		err = invokeStdlibJSONUnmarshal(bz, rv, fopts)
 
 	//----------------------------------------
 	// Misc
 
 	case reflect.Float32, reflect.Float64:
-		if !opts.Unsafe {
-			return errors.New("Amino.JSON float* support requires `amino:\"unsafe\"`.")
+		if !fopts.Unsafe {
+			return errors.New("Amino:JSON float* support requires `amino:\"unsafe\"`.")
 		}
 		fallthrough
 	case reflect.Bool, reflect.String:
-		err = invokeStdlibJSONUnmarshal(bz, rv, opts)
+		err = invokeStdlibJSONUnmarshal(bz, rv, fopts)
 
 	//----------------------------------------
 	// Default
@@ -160,7 +147,7 @@ func (cdc *Codec) _decodeReflectJSON(bz []byte, info *TypeInfo, rv reflect.Value
 	return
 }
 
-func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, opts FieldOptions) error {
+func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, fopts FieldOptions) error {
 	if !rv.CanAddr() && rv.Kind() != reflect.Ptr {
 		panic("rv not addressable nor pointer")
 	}
@@ -178,7 +165,7 @@ func invokeStdlibJSONUnmarshal(bz []byte, rv reflect.Value, opts FieldOptions) e
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -203,22 +190,21 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 		rv.Set(iinfo.ZeroValue)
 	}
 
-	// Consume disambiguation / prefix info.
-	disfix, bz, err := decodeDisfixJSON(bz)
+	// Consume type wrapper info.
+	name, bz, err := decodeInterfaceJSON(bz)
 	if err != nil {
 		return
 	}
-
-	// XXX: Check disfix against interface to make sure that it actually
+	// XXX: Check name against interface to make sure that it actually
 	// matches, and return an error if it doesn't.
 
-	// NOTE: Unlike decodeReflectBinaryInterface, we already dealt with nil in _decodeReflectJSON.
-	// NOTE: We also "consumed" the disfix wrapper by replacing `bz` above.
+	// NOTE: Unlike decodeReflectBinaryInterface, we already dealt with nil in decodeReflectJSON.
+	// NOTE: We also "consumed" the interface wrapper by replacing `bz` above.
 
 	// Get concrete type info.
-	// NOTE: Unlike decodeReflectBinaryInterface, always disfix.
+	// NOTE: Unlike decodeReflectBinaryInterface, uses the full name string.
 	var cinfo *TypeInfo
-	cinfo, err = cdc.getTypeInfoFromDisfix_rlock(disfix)
+	cinfo, err = cdc.getTypeInfoFromName_rlock(name)
 	if err != nil {
 		return
 	}
@@ -227,7 +213,7 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 	var crv, irvSet = constructConcreteType(cinfo)
 
 	// Decode into the concrete type.
-	err = cdc._decodeReflectJSON(bz, cinfo, crv, opts)
+	err = cdc.decodeReflectJSON(bz, cinfo, crv, fopts)
 	if err != nil {
 		rv.Set(irvSet) // Helps with debugging
 		return
@@ -241,7 +227,7 @@ func (cdc *Codec) decodeReflectJSONInterface(bz []byte, iinfo *TypeInfo, rv refl
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -290,7 +276,7 @@ func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.V
 		for i := 0; i < length; i++ {
 			erv := rv.Index(i)
 			ebz := rawSlice[i]
-			err = cdc.decodeReflectJSON(ebz, einfo, erv, opts)
+			err = cdc.decodeReflectJSON(ebz, einfo, erv, fopts)
 			if err != nil {
 				return
 			}
@@ -300,7 +286,7 @@ func (cdc *Codec) decodeReflectJSONArray(bz []byte, info *TypeInfo, rv reflect.V
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -356,7 +342,7 @@ func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.V
 		for i := 0; i < length; i++ {
 			erv := srv.Index(i)
 			ebz := rawSlice[i]
-			err = cdc.decodeReflectJSON(ebz, einfo, erv, opts)
+			err = cdc.decodeReflectJSON(ebz, einfo, erv, fopts)
 			if err != nil {
 				return
 			}
@@ -369,7 +355,7 @@ func (cdc *Codec) decodeReflectJSONSlice(bz []byte, info *TypeInfo, rv reflect.V
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -416,7 +402,7 @@ func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.
 		}
 
 		// Decode into field rv.
-		err = cdc.decodeReflectJSON(valueBytes, finfo, frv, opts)
+		err = cdc.decodeReflectJSON(valueBytes, finfo, frv, fopts)
 		if err != nil {
 			return
 		}
@@ -426,7 +412,7 @@ func (cdc *Codec) decodeReflectJSONStruct(bz []byte, info *TypeInfo, rv reflect.
 }
 
 // CONTRACT: rv.CanAddr() is true.
-func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.CanAddr() {
 		panic("rv not addressable")
 	}
@@ -464,7 +450,7 @@ func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Val
 		vrv := reflect.New(mrv.Type().Elem()).Elem()
 
 		// Decode valueBytes into vrv.
-		err = cdc.decodeReflectJSON(valueBytes, vinfo, vrv, opts)
+		err = cdc.decodeReflectJSON(valueBytes, vinfo, vrv, fopts)
 		if err != nil {
 			return
 		}
@@ -483,45 +469,34 @@ func (cdc *Codec) decodeReflectJSONMap(bz []byte, info *TypeInfo, rv reflect.Val
 // Misc.
 
 type disfixWrapper struct {
-	Disfix string          `json:"type"`
-	Data   json.RawMessage `json:"value"`
+	Name string          `json:"type"`
+	Data json.RawMessage `json:"value"`
 }
 
-// decodeDisfixJSON helps unravel the disfix and
+// decodeInterfaceJSON helps unravel the type name and
 // the stored data, which are expected in the form:
 // {
-//    "type": "XXXXXXXXXXXXXXXXX",
+//    "type": "<canonical concrete type name>",
 //    "value":  {}
 // }
-func decodeDisfixJSON(bz []byte) (df DisfixBytes, data []byte, err error) {
-	if string(bz) == "null" {
-		panic("yay")
-	}
+func decodeInterfaceJSON(bz []byte) (name string, data []byte, err error) {
 	dfw := new(disfixWrapper)
 	err = json.Unmarshal(bz, dfw)
 	if err != nil {
-		err = fmt.Errorf("Cannot parse disfix JSON wrapper: %v", err)
-		return
-	}
-	dfBytes, err := hex.DecodeString(dfw.Disfix)
-	if err != nil {
+		err = fmt.Errorf("cannot parse disfix JSON wrapper: %v", err)
 		return
 	}
 
-	// Get disfix.
-	if g, w := len(dfBytes), DisfixBytesLen; g != w {
-		err = fmt.Errorf("Disfix length got=%d want=%d data=%s", g, w, bz)
-		return
-	}
-	copy(df[:], dfBytes)
-	if (DisfixBytes{}).EqualBytes(df[:]) {
-		err = errors.New("Unexpected zero disfix in JSON")
+	// Get name.
+	if dfw.Name == "" {
+		err = errors.New("JSON encoding of interfaces require non-empty type field.")
 		return
 	}
+	name = dfw.Name
 
 	// Get data.
 	if len(dfw.Data) == 0 {
-		err = errors.New("Disfix JSON wrapper should have non-empty value field")
+		err = errors.New("interface JSON wrapper should have non-empty value field")
 		return
 	}
 	data = dfw.Data
diff --git a/vendor/github.com/tendermint/go-amino/json-encode.go b/vendor/github.com/tendermint/go-amino/json-encode.go
index 4ae11f1c..15b10f21 100644
--- a/vendor/github.com/tendermint/go-amino/json-encode.go
+++ b/vendor/github.com/tendermint/go-amino/json-encode.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"reflect"
+	"time"
 
 	"github.com/davecgh/go-spew/spew"
 )
@@ -18,54 +19,18 @@ import (
 // only call this one, for the disfix wrapper is only written here.
 // NOTE: Unlike encodeReflectBinary, rv may be a pointer.
 // CONTRACT: rv is valid.
-func (cdc *Codec) encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if !rv.IsValid() {
 		panic("should not happen")
 	}
 	if printLog {
-		spew.Printf("(E) encodeReflectJSON(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
+		spew.Printf("(E) encodeReflectJSON(info: %v, rv: %#v (%v), fopts: %v)\n",
+			info, rv.Interface(), rv.Type(), fopts)
 		defer func() {
 			fmt.Printf("(E) -> err: %v\n", err)
 		}()
 	}
 
-	// Write the disfix wrapper if it is a registered concrete type.
-	if info.Registered {
-		// Part 1:
-		disfix := toDisfix(info.Disamb, info.Prefix)
-		err = writeStr(w, _fmt(`{"type":"%X","value":`, disfix))
-		if err != nil {
-			return
-		}
-		// Part 2:
-		defer func() {
-			if err != nil {
-				return
-			}
-			err = writeStr(w, `}`)
-		}()
-	}
-
-	err = cdc._encodeReflectJSON(w, info, rv, opts)
-	return
-}
-
-// NOTE: Unlike _encodeReflectBinary, rv may be a pointer.
-// CONTRACT: rv is valid.
-// CONTRACT: any disfix wrapper has already been written.
-func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
-	if !rv.IsValid() {
-		panic("should not happen")
-	}
-	if printLog {
-		spew.Printf("(_) _encodeReflectJSON(info: %v, rv: %#v (%v), opts: %v)\n",
-			info, rv.Interface(), rv.Type(), opts)
-		defer func() {
-			fmt.Printf("(_) -> err: %v\n", err)
-		}()
-	}
-
 	// Dereference value if pointer.
 	var isNilPtr bool
 	rv, _, isNilPtr = derefPointers(rv)
@@ -76,6 +41,13 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 		return
 	}
 
+	// Special case:
+	if rv.Type() == timeType {
+		// Amino time strips the timezone.
+		// NOTE: This must be done before json.Marshaler override below.
+		ct := rv.Interface().(time.Time).Round(0).UTC()
+		rv = reflect.ValueOf(ct)
+	}
 	// Handle override if rv implements json.Marshaler.
 	if rv.CanAddr() { // Try pointer first.
 		if rv.Addr().Type().Implements(jsonMarshalerType) {
@@ -100,7 +72,7 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 			return
 		}
 		// Then, encode the repr instance.
-		err = cdc._encodeReflectJSON(w, rinfo, rrv, opts)
+		err = cdc.encodeReflectJSON(w, rinfo, rrv, fopts)
 		return
 	}
 
@@ -110,29 +82,37 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 	// Complex
 
 	case reflect.Interface:
-		return cdc.encodeReflectJSONInterface(w, info, rv, opts)
+		return cdc.encodeReflectJSONInterface(w, info, rv, fopts)
 
 	case reflect.Array, reflect.Slice:
-		return cdc.encodeReflectJSONList(w, info, rv, opts)
+		return cdc.encodeReflectJSONList(w, info, rv, fopts)
 
 	case reflect.Struct:
-		return cdc.encodeReflectJSONStruct(w, info, rv, opts)
+		return cdc.encodeReflectJSONStruct(w, info, rv, fopts)
 
 	case reflect.Map:
-		return cdc.encodeReflectJSONMap(w, info, rv, opts)
+		return cdc.encodeReflectJSONMap(w, info, rv, fopts)
 
 	//----------------------------------------
 	// Signed, Unsigned
 
-	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int,
-		reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
+	case reflect.Int64, reflect.Int:
+		_, err = fmt.Fprintf(w, `"%d"`, rv.Int()) // JS can't handle int64
+		return
+
+	case reflect.Uint64, reflect.Uint:
+		_, err = fmt.Fprintf(w, `"%d"`, rv.Uint()) // JS can't handle uint64
+		return
+
+	case reflect.Int32, reflect.Int16, reflect.Int8,
+		reflect.Uint32, reflect.Uint16, reflect.Uint8:
 		return invokeStdlibJSONMarshal(w, rv.Interface())
 
 	//----------------------------------------
 	// Misc
 
 	case reflect.Float64, reflect.Float32:
-		if !opts.Unsafe {
+		if !fopts.Unsafe {
 			return errors.New("Amino.JSON float* support requires `amino:\"unsafe\"`.")
 		}
 		fallthrough
@@ -147,7 +127,7 @@ func (cdc *Codec) _encodeReflectJSON(w io.Writer, info *TypeInfo, rv reflect.Val
 	}
 }
 
-func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONInterface")
 		defer func() {
@@ -184,10 +164,9 @@ func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv re
 		return
 	}
 
-	// Write disfix wrapper.
+	// Write interface wrapper.
 	// Part 1:
-	disfix := toDisfix(cinfo.Disamb, cinfo.Prefix)
-	err = writeStr(w, _fmt(`{"type":"%X","value":`, disfix))
+	err = writeStr(w, _fmt(`{"type":"%s","value":`, cinfo.Name))
 	if err != nil {
 		return
 	}
@@ -204,11 +183,11 @@ func (cdc *Codec) encodeReflectJSONInterface(w io.Writer, iinfo *TypeInfo, rv re
 	// Currently, go-amino JSON *always* writes disfix bytes for
 	// all registered concrete types.
 
-	err = cdc._encodeReflectJSON(w, cinfo, crv, opts)
+	err = cdc.encodeReflectJSON(w, cinfo, crv, fopts)
 	return
 }
 
-func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONList")
 		defer func() {
@@ -266,7 +245,7 @@ func (cdc *Codec) encodeReflectJSONList(w io.Writer, info *TypeInfo, rv reflect.
 			if isNil {
 				err = writeStr(w, `null`)
 			} else {
-				err = cdc.encodeReflectJSON(w, einfo, erv, opts)
+				err = cdc.encodeReflectJSON(w, einfo, erv, fopts)
 			}
 			if err != nil {
 				return
@@ -356,7 +335,7 @@ func (cdc *Codec) encodeReflectJSONStruct(w io.Writer, info *TypeInfo, rv reflec
 }
 
 // TODO: TEST
-func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.Value, opts FieldOptions) (err error) {
+func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.Value, fopts FieldOptions) (err error) {
 	if printLog {
 		fmt.Println("(e) encodeReflectJSONMap")
 		defer func() {
@@ -414,7 +393,7 @@ func (cdc *Codec) encodeReflectJSONMap(w io.Writer, info *TypeInfo, rv reflect.V
 			if err != nil {
 				return
 			}
-			err = cdc.encodeReflectJSON(w, vinfo, vrv, opts) // pass through opts
+			err = cdc.encodeReflectJSON(w, vinfo, vrv, fopts) // pass through fopts
 		}
 		if err != nil {
 			return
diff --git a/vendor/github.com/tendermint/go-amino/reflect.go b/vendor/github.com/tendermint/go-amino/reflect.go
index 98157523..10a2cea9 100644
--- a/vendor/github.com/tendermint/go-amino/reflect.go
+++ b/vendor/github.com/tendermint/go-amino/reflect.go
@@ -11,7 +11,6 @@ import (
 // Constants
 
 const printLog = false
-const RFC3339Millis = "2006-01-02T15:04:05.000Z" // forced microseconds
 
 var (
 	timeType            = reflect.TypeOf(time.Time{})
@@ -52,7 +51,9 @@ func slide(bz *[]byte, n *int, _n int) bool {
 		panic(fmt.Sprintf("impossible slide: len:%v _n:%v", len(*bz), _n))
 	}
 	*bz = (*bz)[_n:]
-	*n += _n
+	if n != nil {
+		*n += _n
+	}
 	return true
 }
 
@@ -73,14 +74,44 @@ func derefPointers(rv reflect.Value) (drv reflect.Value, isPtr bool, isNilPtr bo
 	return
 }
 
-// Returns isVoid=true iff is ultimately nil or empty after (recursive) dereferencing.
-// If isVoid=false, erv is set to the non-nil non-empty valid dereferenced value.
-func isVoid(rv reflect.Value) (erv reflect.Value, isVoid bool) {
+// Dereference pointer recursively or return zero value.
+// drv: the final non-pointer value (which is never invalid).
+// isPtr: whether rv.Kind() == reflect.Ptr.
+// isNilPtr: whether a nil pointer at any level.
+func derefPointersZero(rv reflect.Value) (drv reflect.Value, isPtr bool, isNilPtr bool) {
+	for rv.Kind() == reflect.Ptr {
+		isPtr = true
+		if rv.IsNil() {
+			isNilPtr = true
+			rt := rv.Type().Elem()
+			for rt.Kind() == reflect.Ptr {
+				rt = rt.Elem()
+			}
+			drv = reflect.New(rt).Elem()
+			return
+		}
+		rv = rv.Elem()
+	}
+	drv = rv
+	return
+}
+
+// Returns isDefaultValue=true iff is ultimately nil or empty after (recursive)
+// dereferencing. If isDefaultValue=false, erv is set to the non-nil non-empty
+// non-default dereferenced value.
+// A zero/empty struct is not considered default.
+func isDefaultValue(rv reflect.Value) (erv reflect.Value, isDefaultValue bool) {
 	rv, _, isNilPtr := derefPointers(rv)
 	if isNilPtr {
 		return rv, true
 	} else {
 		switch rv.Kind() {
+		case reflect.Bool:
+			return rv, rv.Bool() == false
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return rv, rv.Int() == 0
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return rv, rv.Uint() == 0
 		case reflect.String:
 			return rv, rv.Len() == 0
 		case reflect.Chan, reflect.Map, reflect.Slice:
@@ -118,55 +149,36 @@ func constructConcreteType(cinfo *TypeInfo) (crv, irvSet reflect.Value) {
 	return
 }
 
-// Like typeToTyp4 but include a pointer bit.
-func typeToTyp4(rt reflect.Type, opts FieldOptions) (typ Typ4) {
-
-	// Dereference pointer type.
-	var pointer = false
-	for rt.Kind() == reflect.Ptr {
-		pointer = true
-		rt = rt.Elem()
-	}
-
-	// Call actual logic.
-	typ = Typ4(typeToTyp3(rt, opts))
-
-	// Set pointer bit to 1 if pointer.
-	if pointer {
-		typ |= Typ4_Pointer
-	}
-	return
-}
-
 // CONTRACT: rt.Kind() != reflect.Ptr
 func typeToTyp3(rt reflect.Type, opts FieldOptions) Typ3 {
 	switch rt.Kind() {
 	case reflect.Interface:
-		return Typ3_Interface
+		return Typ3_ByteLength
 	case reflect.Array, reflect.Slice:
-		ert := rt.Elem()
-		switch ert.Kind() {
-		case reflect.Uint8:
-			return Typ3_ByteLength
-		default:
-			return Typ3_List
-		}
+		return Typ3_ByteLength
 	case reflect.String:
 		return Typ3_ByteLength
 	case reflect.Struct, reflect.Map:
-		return Typ3_Struct
+		return Typ3_ByteLength
 	case reflect.Int64, reflect.Uint64:
-		if opts.BinVarint {
+		if opts.BinFixed64 {
+			return Typ3_8Byte
+		} else {
+			return Typ3_Varint
+		}
+	case reflect.Int32, reflect.Uint32:
+		if opts.BinFixed32 {
+			return Typ3_4Byte
+		} else {
 			return Typ3_Varint
 		}
-		return Typ3_8Byte
-	case reflect.Float64:
-		return Typ3_8Byte
-	case reflect.Int32, reflect.Uint32, reflect.Float32:
-		return Typ3_4Byte
 	case reflect.Int16, reflect.Int8, reflect.Int,
 		reflect.Uint16, reflect.Uint8, reflect.Uint, reflect.Bool:
 		return Typ3_Varint
+	case reflect.Float64:
+		return Typ3_8Byte
+	case reflect.Float32:
+		return Typ3_4Byte
 	default:
 		panic(fmt.Sprintf("unsupported field type %v", rt))
 	}
diff --git a/vendor/github.com/tendermint/go-amino/version.go b/vendor/github.com/tendermint/go-amino/version.go
index 7f84d3a4..ed348aaf 100644
--- a/vendor/github.com/tendermint/go-amino/version.go
+++ b/vendor/github.com/tendermint/go-amino/version.go
@@ -1,4 +1,4 @@
 package amino
 
 // Version
-const Version = "0.9.9"
\ No newline at end of file
+const Version = "0.10.1"
diff --git a/vendor/github.com/tendermint/go-crypto/doc.go b/vendor/github.com/tendermint/go-crypto/doc.go
deleted file mode 100644
index c6701bc5..00000000
--- a/vendor/github.com/tendermint/go-crypto/doc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-go-crypto is a customized/convenience cryptography package
-for supporting Tendermint.
-
-It wraps select functionality of equivalent functions in the
-Go standard library, for easy usage with our libraries.
-
-Keys:
-
-All key generation functions return an instance of the PrivKey interface
-which implements methods
-
-    AssertIsPrivKeyInner()
-    Bytes() []byte
-    Sign(msg []byte) Signature
-    PubKey() PubKey
-    Equals(PrivKey) bool
-    Wrap() PrivKey
-
-From the above method we can:
-a) Retrieve the public key if needed
-
-    pubKey := key.PubKey()
-
-For example:
-    privKey, err := crypto.GenPrivKeyEd25519()
-    if err != nil {
-	...
-    }
-    pubKey := privKey.PubKey()
-    ...
-    // And then you can use the private and public key
-    doSomething(privKey, pubKey)
-
-
-We also provide hashing wrappers around algorithms:
-
-Sha256
-    sum := crypto.Sha256([]byte("This is Tendermint"))
-    fmt.Printf("%x\n", sum)
-
-Ripemd160
-    sum := crypto.Ripemd160([]byte("This is consensus"))
-    fmt.Printf("%x\n", sum)
-*/
-package crypto
-
-// TODO: Add more docs in here
diff --git a/vendor/github.com/tendermint/go-crypto/version.go b/vendor/github.com/tendermint/go-crypto/version.go
deleted file mode 100644
index aac87c4f..00000000
--- a/vendor/github.com/tendermint/go-crypto/version.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package crypto
-
-const Version = "0.6.2"
diff --git a/vendor/github.com/tendermint/iavl/amino.go b/vendor/github.com/tendermint/iavl/amino.go
deleted file mode 100644
index 0e80ee6f..00000000
--- a/vendor/github.com/tendermint/iavl/amino.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package iavl
-
-import "github.com/tendermint/go-amino"
-
-var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/iavl/chunk.go b/vendor/github.com/tendermint/iavl/chunk.go
deleted file mode 100644
index b1cbdd22..00000000
--- a/vendor/github.com/tendermint/iavl/chunk.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package iavl
-
-import (
-	"sort"
-
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// Chunk is a list of ordered nodes.
-// It can be sorted, merged, exported from a tree and
-// used to generate a new tree.
-type Chunk []OrderedNodeData
-
-// OrderedNodeData is the data to recreate a leaf node,
-// along with a SortOrder to define a BFS insertion order.
-type OrderedNodeData struct {
-	SortOrder uint64
-	NodeData
-}
-
-// NewOrderedNode creates the data from a leaf node.
-func NewOrderedNode(leaf *Node, prefix uint64) OrderedNodeData {
-	return OrderedNodeData{
-		SortOrder: prefix,
-		NodeData: NodeData{
-			Key:   leaf.key,
-			Value: leaf.value,
-		},
-	}
-}
-
-// getChunkHashes returns all the "checksum" hashes for
-// the chunks that will be sent.
-func getChunkHashes(tree *Tree, depth uint) ([][]byte, [][]byte, uint, error) {
-	maxDepth := uint(tree.root.height / 2)
-	if depth > maxDepth {
-		return nil, nil, 0, errors.New("depth exceeds maximum allowed")
-	}
-
-	nodes := getNodes(tree, depth)
-	hashes := make([][]byte, len(nodes))
-	keys := make([][]byte, len(nodes))
-	for i, n := range nodes {
-		hashes[i] = n.hash
-		keys[i] = n.key
-	}
-	return hashes, keys, depth, nil
-}
-
-// GetChunkHashesWithProofs takes a tree and returns the list of chunks with
-// proofs that can be used to synchronize a tree across the network.
-func GetChunkHashesWithProofs(tree *Tree) ([][]byte, []*InnerKeyProof, uint) {
-	hashes, keys, depth, err := getChunkHashes(tree, uint(tree.root.height/2))
-	if err != nil {
-		cmn.PanicSanity(cmn.Fmt("GetChunkHashes: %s", err))
-	}
-	proofs := make([]*InnerKeyProof, len(keys))
-
-	for i, k := range keys {
-		proof, err := tree.getInnerWithProof(k)
-		if err != nil {
-			cmn.PanicSanity(cmn.Fmt("Error getting inner key proof: %s", err))
-		}
-		proofs[i] = proof
-	}
-	return hashes, proofs, depth
-}
-
-// getNodes returns an array of nodes at the given depth.
-func getNodes(tree *Tree, depth uint) []*Node {
-	nodes := make([]*Node, 0, 1<<depth)
-	tree.root.traverseDepth(tree, depth, func(node *Node) {
-		nodes = append(nodes, node)
-	})
-	return nodes
-}
-
-// call cb for every node exactly depth levels below it
-// depth first search to return in tree ordering.
-func (node *Node) traverseDepth(t *Tree, depth uint, cb func(*Node)) {
-	// base case
-	if depth == 0 {
-		cb(node)
-		return
-	}
-	if node.isLeaf() {
-		return
-	}
-
-	// otherwise, descend one more level
-	node.getLeftNode(t).traverseDepth(t, depth-1, cb)
-	node.getRightNode(t).traverseDepth(t, depth-1, cb)
-}
-
-// position to key can calculate the appropriate sort order
-// for the count-th node at a given depth, assuming a full
-// tree above this height.
-func positionToKey(depth, count uint) (key uint64) {
-	for d := depth; d > 0; d-- {
-		// lowest digit of count * 2^(d-1)
-		key += uint64((count & 1) << (d - 1))
-		count = count >> 1
-	}
-	return
-}
-
-// GetChunk finds the count-th subtree at depth and
-// generates a Chunk for that data.
-func GetChunk(tree *Tree, depth, count uint) Chunk {
-	node := getNodes(tree, depth)[count]
-	prefix := positionToKey(depth, count)
-	return getChunk(tree, node, prefix, depth)
-}
-
-// getChunk takes a node and serializes all nodes below it
-//
-// As it is part of a larger tree, prefix defines the path
-// up to this point, and depth the current depth
-// (which defines where we add to the prefix)
-//
-// TODO: make this more efficient, *Chunk as arg???
-func getChunk(t *Tree, node *Node, prefix uint64, depth uint) Chunk {
-	if node.isLeaf() {
-		return Chunk{NewOrderedNode(node, prefix)}
-	}
-	res := make(Chunk, 0, node.size)
-	if node.leftNode != nil {
-		left := getChunk(t, node.getLeftNode(t), prefix, depth+1)
-		res = append(res, left...)
-	}
-	if node.rightNode != nil {
-		offset := prefix + 1<<depth
-		right := getChunk(t, node.getRightNode(t), offset, depth+1)
-		res = append(res, right...)
-	}
-	return res
-}
-
-// Sort does an inline quicksort.
-func (c Chunk) Sort() {
-	sort.Slice(c, func(i, j int) bool {
-		return c[i].SortOrder < c[j].SortOrder
-	})
-}
-
-// MergeChunks does a merge sort of the two Chunks,
-// assuming they were already in sorted order.
-func MergeChunks(left, right Chunk) Chunk {
-	size, i, j := len(left)+len(right), 0, 0
-	slice := make([]OrderedNodeData, size)
-
-	for k := 0; k < size; k++ {
-		if i > len(left)-1 && j <= len(right)-1 {
-			slice[k] = right[j]
-			j++
-		} else if j > len(right)-1 && i <= len(left)-1 {
-			slice[k] = left[i]
-			i++
-		} else if left[i].SortOrder < right[j].SortOrder {
-			slice[k] = left[i]
-			i++
-		} else {
-			slice[k] = right[j]
-			j++
-		}
-	}
-	return Chunk(slice)
-}
-
-// CalculateRoot creates a temporary in-memory
-// iavl tree to calculate the root hash of inserting
-// all the nodes.
-func (c Chunk) CalculateRoot() []byte {
-	test := NewTree(nil, 2*len(c))
-	c.PopulateTree(test)
-	return test.Hash()
-}
-
-// PopulateTree adds all the chunks in order to the given tree.
-func (c Chunk) PopulateTree(empty *Tree) {
-	for _, data := range c {
-		empty.Set(data.Key, data.Value)
-	}
-}
diff --git a/vendor/github.com/tendermint/iavl/doc.go b/vendor/github.com/tendermint/iavl/doc.go
index d6c38c6c..7e4891bc 100644
--- a/vendor/github.com/tendermint/iavl/doc.go
+++ b/vendor/github.com/tendermint/iavl/doc.go
@@ -1,7 +1,11 @@
+// Package iavl implements a versioned, snapshottable (immutable) AVL+ tree
+// for persisting key-value pairs.
+//
+//
 // Basic usage of VersionedTree.
 //
 //  import "github.com/tendermint/iavl"
-//  import "github.com/tendermint/tmlibs/db"
+//  import "github.com/tendermint/tendermint/libs/db"
 //  ...
 //
 //  tree := iavl.NewVersionedTree(db.NewMemDB(), 128)
@@ -23,12 +27,12 @@
 // Proof of existence:
 //
 //  root := tree.Hash()
-//  val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", KeyProof, nil
+//  val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", RangeProof, nil
 //  proof.Verify([]byte("bob"), val, root) // nil
 //
 // Proof of absence:
 //
-//  _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, KeyProof, nil
+//  _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, RangeProof, nil
 //  proof.Verify([]byte("tom"), nil, root) // nil
 //
 // Now we delete an old version:
diff --git a/vendor/github.com/tendermint/iavl/node.go b/vendor/github.com/tendermint/iavl/node.go
index 99558336..307412c3 100644
--- a/vendor/github.com/tendermint/iavl/node.go
+++ b/vendor/github.com/tendermint/iavl/node.go
@@ -9,7 +9,8 @@ import (
 	"io"
 
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/iavl/sha256truncated"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Node represents a node in a Tree.
@@ -40,56 +41,60 @@ func NewNode(key []byte, value []byte, version int64) *Node {
 
 // MakeNode constructs an *Node from an encoded byte slice.
 //
-// The new node doesn't have its hash saved or set.  The caller must set it
+// The new node doesn't have its hash saved or set. The caller must set it
 // afterwards.
-func MakeNode(buf []byte) (node *Node, err error) {
-	node = &Node{}
+func MakeNode(buf []byte) (*Node, cmn.Error) {
 
-	// Keeps track of bytes read.
-	n := 0
-
-	// Read node header.
-	node.height, n, err = amino.DecodeInt8(buf)
-	if err != nil {
-		return nil, err
+	// Read node header (height, size, version, key).
+	height, n, cause := amino.DecodeInt8(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.height")
 	}
 	buf = buf[n:]
 
-	node.size, n, err = amino.DecodeInt64(buf)
-	if err != nil {
-		return nil, err
+	size, n, cause := amino.DecodeVarint(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.size")
 	}
 	buf = buf[n:]
 
-	node.version, n, err = amino.DecodeInt64(buf)
-	if err != nil {
-		return nil, err
+	ver, n, cause := amino.DecodeVarint(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.version")
 	}
 	buf = buf[n:]
 
-	node.key, n, err = amino.DecodeByteSlice(buf)
-	if err != nil {
-		return nil, err
+	key, n, cause := amino.DecodeByteSlice(buf)
+	if cause != nil {
+		return nil, cmn.ErrorWrap(cause, "decoding node.key")
 	}
 	buf = buf[n:]
 
+	node := &Node{
+		height:  height,
+		size:    size,
+		version: ver,
+		key:     key,
+	}
+
 	// Read node body.
 
 	if node.isLeaf() {
-		node.value, _, err = amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		val, _, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "decoding node.value")
 		}
+		node.value = val
 	} else { // Read children.
-		leftHash, n, err := amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		leftHash, n, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "decoding node.leftHash")
 		}
 		buf = buf[n:]
 
-		rightHash, _, err := amino.DecodeByteSlice(buf)
-		if err != nil {
-			return nil, err
+		rightHash, _, cause := amino.DecodeByteSlice(buf)
+		if cause != nil {
+			return nil, cmn.ErrorWrap(cause, "decoding node.rightHash")
 		}
 		node.leftHash = leftHash
 		node.rightHash = rightHash
@@ -99,11 +104,16 @@ func MakeNode(buf []byte) (node *Node, err error) {
 
 // String returns a string representation of the node.
 func (node *Node) String() string {
-	if len(node.hash) == 0 {
-		return "<no hash>"
-	} else {
-		return fmt.Sprintf("%x", node.hash)
-	}
+	hashstr := "<no hash>"
+	if len(node.hash) > 0 {
+		hashstr = fmt.Sprintf("%X", node.hash)
+	}
+	return fmt.Sprintf("Node{%s:%s@%d %X;%X}#%s",
+		cmn.ColoredBytes(node.key, cmn.Green, cmn.Blue),
+		cmn.ColoredBytes(node.value, cmn.Cyan, cmn.Blue),
+		node.version,
+		node.leftHash, node.rightHash,
+		hashstr)
 }
 
 // clone creates a shallow copy of a node with its hash set to nil.
@@ -139,9 +149,8 @@ func (node *Node) has(t *Tree, key []byte) (has bool) {
 	}
 	if bytes.Compare(key, node.key) < 0 {
 		return node.getLeftNode(t).has(t, key)
-	} else {
-		return node.getRightNode(t).has(t, key)
 	}
+	return node.getRightNode(t).has(t, key)
 }
 
 // Get a key under the node.
@@ -159,31 +168,28 @@ func (node *Node) get(t *Tree, key []byte) (index int64, value []byte) {
 
 	if bytes.Compare(key, node.key) < 0 {
 		return node.getLeftNode(t).get(t, key)
-	} else {
-		rightNode := node.getRightNode(t)
-		index, value = rightNode.get(t, key)
-		index += node.size - rightNode.size
-		return index, value
 	}
+	rightNode := node.getRightNode(t)
+	index, value = rightNode.get(t, key)
+	index += node.size - rightNode.size
+	return index, value
 }
 
 func (node *Node) getByIndex(t *Tree, index int64) (key []byte, value []byte) {
 	if node.isLeaf() {
 		if index == 0 {
 			return node.key, node.value
-		} else {
-			return nil, nil
-		}
-	} else {
-		// TODO: could improve this by storing the
-		// sizes as well as left/right hash.
-		leftNode := node.getLeftNode(t)
-		if index < leftNode.size {
-			return leftNode.getByIndex(t, index)
-		} else {
-			return node.getRightNode(t).getByIndex(t, index-leftNode.size)
 		}
+		return nil, nil
+	}
+	// TODO: could improve this by storing the
+	// sizes as well as left/right hash.
+	leftNode := node.getLeftNode(t)
+
+	if index < leftNode.size {
+		return leftNode.getByIndex(t, index)
 	}
+	return node.getRightNode(t).getByIndex(t, index-leftNode.size)
 }
 
 // Computes the hash of the node without computing its descendants. Must be
@@ -193,7 +199,7 @@ func (node *Node) _hash() []byte {
 		return node.hash
 	}
 
-	h := sha256truncated.New()
+	h := tmhash.New()
 	buf := new(bytes.Buffer)
 	if err := node.writeHashBytes(buf); err != nil {
 		panic(err)
@@ -211,7 +217,7 @@ func (node *Node) hashWithCount() ([]byte, int64) {
 		return node.hash, 0
 	}
 
-	h := sha256truncated.New()
+	h := tmhash.New()
 	buf := new(bytes.Buffer)
 	hashCount, err := node.writeHashBytesRecursively(buf)
 	if err != nil {
@@ -225,41 +231,54 @@ func (node *Node) hashWithCount() ([]byte, int64) {
 
 // Writes the node's hash to the given io.Writer. This function expects
 // child hashes to be already set.
-func (node *Node) writeHashBytes(w io.Writer) (err error) {
-	err = amino.EncodeInt8(w, node.height)
-	if err == nil {
-		err = amino.EncodeInt64(w, node.size)
+func (node *Node) writeHashBytes(w io.Writer) cmn.Error {
+	err := amino.EncodeInt8(w, node.height)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing height")
+	}
+	err = amino.EncodeVarint(w, node.size)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing size")
 	}
-	if err == nil {
-		err = amino.EncodeInt64(w, node.version)
+	err = amino.EncodeVarint(w, node.version)
+	if err != nil {
+		return cmn.ErrorWrap(err, "writing version")
 	}
 
 	// Key is not written for inner nodes, unlike writeBytes.
 
 	if node.isLeaf() {
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.key)
+		err = amino.EncodeByteSlice(w, node.key)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing key")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.value)
+		// Indirection needed to provide proofs without values.
+		// (e.g. proofLeafNode.ValueHash)
+		valueHash := tmhash.Sum(node.value)
+		err = amino.EncodeByteSlice(w, valueHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing value")
 		}
 	} else {
 		if node.leftHash == nil || node.rightHash == nil {
 			panic("Found an empty child hash")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.leftHash)
+		err = amino.EncodeByteSlice(w, node.leftHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing left hash")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.rightHash)
+		err = amino.EncodeByteSlice(w, node.rightHash)
+		if err != nil {
+			return cmn.ErrorWrap(err, "writing right hash")
 		}
 	}
-	return
+
+	return nil
 }
 
 // Writes the node's hash to the given io.Writer.
 // This function has the side-effect of calling hashWithCount.
-func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err error) {
+func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err cmn.Error) {
 	if node.leftNode != nil {
 		leftHash, leftCount := node.leftNode.hashWithCount()
 		node.leftHash = leftHash
@@ -276,40 +295,50 @@ func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err e
 }
 
 // Writes the node as a serialized byte slice to the supplied io.Writer.
-func (node *Node) writeBytes(w io.Writer) (err error) {
-	err = amino.EncodeInt8(w, node.height)
-	if err == nil {
-		err = amino.EncodeInt64(w, node.size)
+func (node *Node) writeBytes(w io.Writer) cmn.Error {
+	var cause error
+	cause = amino.EncodeInt8(w, node.height)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing height")
 	}
-	if err == nil {
-		err = amino.EncodeInt64(w, node.version)
+	cause = amino.EncodeVarint(w, node.size)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing size")
+	}
+	cause = amino.EncodeVarint(w, node.version)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing version")
 	}
 
 	// Unlike writeHashBytes, key is written for inner nodes.
-	if err == nil {
-		err = amino.EncodeByteSlice(w, node.key)
+	cause = amino.EncodeByteSlice(w, node.key)
+	if cause != nil {
+		return cmn.ErrorWrap(cause, "writing key")
 	}
 
 	if node.isLeaf() {
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.value)
+		cause = amino.EncodeByteSlice(w, node.value)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing value")
 		}
 	} else {
 		if node.leftHash == nil {
 			panic("node.leftHash was nil in writeBytes")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.leftHash)
+		cause = amino.EncodeByteSlice(w, node.leftHash)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing left hash")
 		}
 
 		if node.rightHash == nil {
 			panic("node.rightHash was nil in writeBytes")
 		}
-		if err == nil {
-			err = amino.EncodeByteSlice(w, node.rightHash)
+		cause = amino.EncodeByteSlice(w, node.rightHash)
+		if cause != nil {
+			return cmn.ErrorWrap(cause, "writing right hash")
 		}
 	}
-	return
+	return nil
 }
 
 func (node *Node) set(t *Tree, key []byte, value []byte) (
@@ -358,35 +387,33 @@ func (node *Node) set(t *Tree, key []byte, value []byte) (
 
 		if updated {
 			return node, updated, orphaned
-		} else {
-			node.calcHeightAndSize(t)
-			newNode, balanceOrphaned := node.balance(t)
-			return newNode, updated, append(orphaned, balanceOrphaned...)
 		}
+		node.calcHeightAndSize(t)
+		newNode, balanceOrphaned := node.balance(t)
+		return newNode, updated, append(orphaned, balanceOrphaned...)
 	}
 }
 
-// newHash/newNode: The new hash or node to replace node after remove.
-// newKey: new leftmost leaf key for tree after successfully removing 'key' if changed.
-// value: removed value.
-func (node *Node) remove(t *Tree, key []byte) (
-	newHash []byte, newNode *Node, newKey []byte, value []byte, orphaned []*Node,
-) {
+// removes the node corresponding to the passed key and balances the tree.
+// It returns:
+// - the hash of the new node (or nil if the node is the one removed)
+// - the node that replaces the orig. node after remove
+// - new leftmost leaf key for tree after successfully removing 'key' if changed.
+// - the removed value
+// - the orphaned nodes.
+func (node *Node) remove(t *Tree, key []byte) ([]byte, *Node, []byte, []byte, []*Node) {
 	version := t.version + 1
 
 	if node.isLeaf() {
 		if bytes.Equal(key, node.key) {
 			return nil, nil, nil, node.value, []*Node{node}
 		}
-		return node.hash, node, nil, nil, orphaned
+		return node.hash, node, nil, nil, nil
 	}
 
+	// key < node.key; we go to the left to find the key:
 	if bytes.Compare(key, node.key) < 0 {
-		var newLeftHash []byte
-		var newLeftNode *Node
-
-		newLeftHash, newLeftNode, newKey, value, orphaned =
-			node.getLeftNode(t).remove(t, key)
+		newLeftHash, newLeftNode, newKey, value, orphaned := node.getLeftNode(t).remove(t, key)
 
 		if len(orphaned) == 0 {
 			return node.hash, node, nil, value, orphaned
@@ -401,30 +428,26 @@ func (node *Node) remove(t *Tree, key []byte) (
 		newNode, balanceOrphaned := newNode.balance(t)
 
 		return newNode.hash, newNode, newKey, value, append(orphaned, balanceOrphaned...)
-	} else {
-		var newRightHash []byte
-		var newRightNode *Node
-
-		newRightHash, newRightNode, newKey, value, orphaned =
-			node.getRightNode(t).remove(t, key)
-
-		if len(orphaned) == 0 {
-			return node.hash, node, nil, value, orphaned
-		} else if newRightHash == nil && newRightNode == nil { // right node held value, was removed
-			return node.leftHash, node.leftNode, nil, value, orphaned
-		}
-		orphaned = append(orphaned, node)
+	}
+	// key >= node.key; either found or look to the right:
+	newRightHash, newRightNode, newKey, value, orphaned := node.getRightNode(t).remove(t, key)
 
-		newNode := node.clone(version)
-		newNode.rightHash, newNode.rightNode = newRightHash, newRightNode
-		if newKey != nil {
-			newNode.key = newKey
-		}
-		newNode.calcHeightAndSize(t)
-		newNode, balanceOrphaned := newNode.balance(t)
+	if len(orphaned) == 0 {
+		return node.hash, node, nil, value, orphaned
+	} else if newRightHash == nil && newRightNode == nil { // right node held value, was removed
+		return node.leftHash, node.leftNode, nil, value, orphaned
+	}
+	orphaned = append(orphaned, node)
 
-		return newNode.hash, newNode, nil, value, append(orphaned, balanceOrphaned...)
+	newNode := node.clone(version)
+	newNode.rightHash, newNode.rightNode = newRightHash, newRightNode
+	if newKey != nil {
+		newNode.key = newKey
 	}
+	newNode.calcHeightAndSize(t)
+	newNode, balanceOrphaned := newNode.balance(t)
+
+	return newNode.hash, newNode, nil, value, append(orphaned, balanceOrphaned...)
 }
 
 func (node *Node) getLeftNode(t *Tree) *Node {
@@ -502,34 +525,32 @@ func (node *Node) balance(t *Tree) (newSelf *Node, orphaned []*Node) {
 			// Left Left Case
 			newNode, orphaned := node.rotateRight(t)
 			return newNode, []*Node{orphaned}
-		} else {
-			// Left Right Case
-			var leftOrphaned *Node
+		}
+		// Left Right Case
+		var leftOrphaned *Node
 
-			left := node.getLeftNode(t)
-			node.leftHash = nil
-			node.leftNode, leftOrphaned = left.rotateLeft(t)
-			newNode, rightOrphaned := node.rotateRight(t)
+		left := node.getLeftNode(t)
+		node.leftHash = nil
+		node.leftNode, leftOrphaned = left.rotateLeft(t)
+		newNode, rightOrphaned := node.rotateRight(t)
 
-			return newNode, []*Node{left, leftOrphaned, rightOrphaned}
-		}
+		return newNode, []*Node{left, leftOrphaned, rightOrphaned}
 	}
 	if balance < -1 {
 		if node.getRightNode(t).calcBalance(t) <= 0 {
 			// Right Right Case
 			newNode, orphaned := node.rotateLeft(t)
 			return newNode, []*Node{orphaned}
-		} else {
-			// Right Left Case
-			var rightOrphaned *Node
+		}
+		// Right Left Case
+		var rightOrphaned *Node
 
-			right := node.getRightNode(t)
-			node.rightHash = nil
-			node.rightNode, rightOrphaned = right.rotateRight(t)
-			newNode, leftOrphaned := node.rotateLeft(t)
+		right := node.getRightNode(t)
+		node.rightHash = nil
+		node.rightNode, rightOrphaned = right.rotateRight(t)
+		newNode, leftOrphaned := node.rotateLeft(t)
 
-			return newNode, []*Node{right, leftOrphaned, rightOrphaned}
-		}
+		return newNode, []*Node{right, leftOrphaned, rightOrphaned}
 	}
 	// Nothing changed
 	return node, []*Node{}
@@ -547,19 +568,20 @@ func (node *Node) traverseWithDepth(t *Tree, ascending bool, cb func(*Node, uint
 }
 
 func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, inclusive bool, depth uint8, cb func(*Node, uint8) bool) bool {
-	afterStart := start == nil || bytes.Compare(start, node.key) <= 0
+	afterStart := start == nil || bytes.Compare(start, node.key) < 0
+	startOrAfter := start == nil || bytes.Compare(start, node.key) <= 0
 	beforeEnd := end == nil || bytes.Compare(node.key, end) < 0
 	if inclusive {
 		beforeEnd = end == nil || bytes.Compare(node.key, end) <= 0
 	}
 
+	// Run callback per inner/leaf node.
 	stop := false
-	if afterStart && beforeEnd {
-		// IterateRange ignores this if not leaf
+	if !node.isLeaf() || (startOrAfter && beforeEnd) {
 		stop = cb(node, depth)
-	}
-	if stop {
-		return stop
+		if stop {
+			return stop
+		}
 	}
 	if node.isLeaf() {
 		return stop
diff --git a/vendor/github.com/tendermint/iavl/nodedb.go b/vendor/github.com/tendermint/iavl/nodedb.go
index 2f546cb7..d0d3df19 100644
--- a/vendor/github.com/tendermint/iavl/nodedb.go
+++ b/vendor/github.com/tendermint/iavl/nodedb.go
@@ -7,14 +7,14 @@ import (
 	"sort"
 	"sync"
 
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 var (
 	// All node keys are prefixed with this. This ensures no collision is
 	// possible with the other keys, and makes them easier to traverse.
 	nodePrefix = "n/"
-	nodeKeyFmt = "n/%x"
+	nodeKeyFmt = "n/%X"
 
 	// Orphans are keyed in the database by their expected lifetime.
 	// The first number represents the *last* version at which the orphan needs
@@ -22,12 +22,12 @@ var (
 	// which it is expected to exist - which starts out by being the version
 	// of the node being orphaned.
 	orphanPrefix    = "o/"
-	orphanPrefixFmt = "o/%d/"      // o/<last-version>/
-	orphanKeyFmt    = "o/%d/%d/%x" // o/<last-version>/<first-version>/<hash>
+	orphanPrefixFmt = "o/%010d/"         // o/<last-version>/
+	orphanKeyFmt    = "o/%010d/%010d/%X" // o/<last-version>/<first-version>/<hash>
 
 	// r/<version>
 	rootPrefix    = "r/"
-	rootPrefixFmt = "r/%d"
+	rootPrefixFmt = "r/%010d"
 )
 
 type nodeDB struct {
@@ -35,9 +35,7 @@ type nodeDB struct {
 	db    dbm.DB     // Persistent node storage.
 	batch dbm.Batch  // Batched writing buffer.
 
-	versionCache  map[int64][]byte // Cache of tree (root) versions.
-	latestVersion int64            // Latest root version.
-
+	latestVersion  int64
 	nodeCache      map[string]*list.Element // Node cache.
 	nodeCacheSize  int                      // Node cache size limit in elements.
 	nodeCacheQueue *list.List               // LRU queue of cache elements. Used for deletion.
@@ -47,7 +45,6 @@ func newNodeDB(db dbm.DB, cacheSize int) *nodeDB {
 	ndb := &nodeDB{
 		db:             db,
 		batch:          db.NewBatch(),
-		versionCache:   map[int64][]byte{},
 		latestVersion:  0, // initially invalid
 		nodeCache:      make(map[string]*list.Element),
 		nodeCacheSize:  cacheSize,
@@ -171,7 +168,6 @@ func (ndb *nodeDB) SaveOrphans(version int64, orphans map[string]int64) {
 	defer ndb.mtx.Unlock()
 
 	toVersion := ndb.getPreviousVersion(version)
-
 	for hash, fromVersion := range orphans {
 		debug("SAVEORPHAN %v-%v %X\n", fromVersion, toVersion, hash)
 		ndb.saveOrphan([]byte(hash), fromVersion, toVersion)
@@ -194,6 +190,7 @@ func (ndb *nodeDB) deleteOrphans(version int64) {
 	predecessor := ndb.getPreviousVersion(version)
 
 	// Traverse orphans with a lifetime ending at the version specified.
+	// TODO optimize.
 	ndb.traverseOrphansVersion(version, func(key, hash []byte) {
 		var fromVersion, toVersion int64
 
@@ -234,38 +231,36 @@ func (ndb *nodeDB) rootKey(version int64) []byte {
 
 func (ndb *nodeDB) getLatestVersion() int64 {
 	if ndb.latestVersion == 0 {
-		ndb.getVersions()
+		ndb.latestVersion = ndb.getPreviousVersion(1<<63 - 1)
 	}
 	return ndb.latestVersion
 }
 
-func (ndb *nodeDB) getVersions() map[int64][]byte {
-	if len(ndb.versionCache) == 0 {
-		ndb.traversePrefix([]byte(rootPrefix), func(k, hash []byte) {
-			var version int64
-			fmt.Sscanf(string(k), rootPrefixFmt, &version)
-			ndb.cacheVersion(version, hash)
-		})
-	}
-	return ndb.versionCache
-}
-
-func (ndb *nodeDB) cacheVersion(version int64, hash []byte) {
-	ndb.versionCache[version] = hash
-
-	if version > ndb.getLatestVersion() {
+func (ndb *nodeDB) updateLatestVersion(version int64) {
+	if ndb.latestVersion < version {
 		ndb.latestVersion = version
 	}
 }
 
 func (ndb *nodeDB) getPreviousVersion(version int64) int64 {
-	var result int64
-	for v := range ndb.getVersions() {
-		if v < version && v > result {
-			result = v
+	itr := ndb.db.ReverseIterator(
+		[]byte(fmt.Sprintf(rootPrefixFmt, version-1)),
+		[]byte(fmt.Sprintf(rootPrefixFmt, 0)),
+	)
+	defer itr.Close()
+
+	pversion := int64(-1)
+	for ; itr.Valid(); itr.Next() {
+		k := itr.Key()
+		_, err := fmt.Sscanf(string(k), rootPrefixFmt, &pversion)
+		if err != nil {
+			panic(err)
+		} else {
+			return pversion
 		}
 	}
-	return result
+
+	return 0
 }
 
 // deleteRoot deletes the root entry from disk, but not the node it points to.
@@ -276,7 +271,6 @@ func (ndb *nodeDB) deleteRoot(version int64) {
 
 	key := ndb.rootKey(version)
 	ndb.batch.Delete(key)
-	delete(ndb.versionCache, version)
 }
 
 func (ndb *nodeDB) traverseOrphans(fn func(k, v []byte)) {
@@ -373,7 +367,7 @@ func (ndb *nodeDB) saveRoot(hash []byte, version int64) error {
 
 	key := ndb.rootKey(version)
 	ndb.batch.Set(key, hash)
-	ndb.cacheVersion(version, hash)
+	ndb.updateLatestVersion(version)
 
 	return nil
 }
diff --git a/vendor/github.com/tendermint/iavl/orphaning_tree.go b/vendor/github.com/tendermint/iavl/orphaning_tree.go
index 893afeaf..fb7493f2 100644
--- a/vendor/github.com/tendermint/iavl/orphaning_tree.go
+++ b/vendor/github.com/tendermint/iavl/orphaning_tree.go
@@ -45,6 +45,7 @@ func (tree *orphaningTree) SaveAs(version int64) {
 	if tree.root == nil {
 		// There can still be orphans, for example if the root is the node being
 		// removed.
+		debug("SAVE EMPTY TREE %v\n", version)
 		tree.ndb.SaveOrphans(version, tree.orphans)
 		tree.ndb.SaveEmptyRoot(version)
 	} else {
diff --git a/vendor/github.com/tendermint/iavl/path.go b/vendor/github.com/tendermint/iavl/path.go
deleted file mode 100644
index 7e3bb01d..00000000
--- a/vendor/github.com/tendermint/iavl/path.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package iavl
-
-import (
-	"bytes"
-
-	"github.com/pkg/errors"
-)
-
-// PathToKey represents an inner path to a leaf node.
-// Note that the nodes are ordered such that the last one is closest
-// to the root of the tree.
-type PathToKey struct {
-	InnerNodes []proofInnerNode `json:"inner_nodes"`
-}
-
-func (p *PathToKey) String() string {
-	str := ""
-	for i := len(p.InnerNodes) - 1; i >= 0; i-- {
-		str += p.InnerNodes[i].String() + "\n"
-	}
-	return str
-}
-
-// verify check that the leafNode's hash matches the path's LeafHash and that
-// the root is the merkle hash of all the inner nodes.
-func (p *PathToKey) verify(leafHash []byte, root []byte) error {
-	hash := leafHash
-	for _, branch := range p.InnerNodes {
-		hash = branch.Hash(hash)
-	}
-	if !bytes.Equal(root, hash) {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	return nil
-}
-
-func (p *PathToKey) isLeftmost() bool {
-	for _, node := range p.InnerNodes {
-		if len(node.Left) > 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *PathToKey) isRightmost() bool {
-	for _, node := range p.InnerNodes {
-		if len(node.Right) > 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *PathToKey) isEmpty() bool {
-	return p == nil || len(p.InnerNodes) == 0
-}
-
-func (p *PathToKey) dropRoot() *PathToKey {
-	if p.isEmpty() {
-		return p
-	}
-	return &PathToKey{
-		InnerNodes: p.InnerNodes[:len(p.InnerNodes)-1],
-	}
-}
-
-func (p *PathToKey) hasCommonRoot(p2 *PathToKey) bool {
-	if p.isEmpty() || p2.isEmpty() {
-		return false
-	}
-	leftEnd := p.InnerNodes[len(p.InnerNodes)-1]
-	rightEnd := p2.InnerNodes[len(p2.InnerNodes)-1]
-
-	return bytes.Equal(leftEnd.Left, rightEnd.Left) &&
-		bytes.Equal(leftEnd.Right, rightEnd.Right)
-}
-
-func (p *PathToKey) isLeftAdjacentTo(p2 *PathToKey) bool {
-	for p.hasCommonRoot(p2) {
-		p, p2 = p.dropRoot(), p2.dropRoot()
-	}
-	p, p2 = p.dropRoot(), p2.dropRoot()
-
-	return p.isRightmost() && p2.isLeftmost()
-}
-
-// PathWithNode is a path to a key which includes the leaf node at that key.
-type pathWithNode struct {
-	Path *PathToKey    `json:"path"`
-	Node proofLeafNode `json:"node"`
-}
-
-func (p *pathWithNode) verify(root []byte) error {
-	return p.Path.verify(p.Node.Hash(), root)
-}
-
-// verifyPaths verifies the left and right paths individually, and makes sure
-// the ordering is such that left < startKey <= endKey < right.
-func verifyPaths(left, right *pathWithNode, startKey, endKey, root []byte) error {
-	if bytes.Compare(startKey, endKey) == 1 {
-		return ErrInvalidInputs
-	}
-	if left != nil {
-		if err := left.verify(root); err != nil {
-			return err
-		}
-		if !left.Node.isLesserThan(startKey) {
-			return errors.WithStack(ErrInvalidProof)
-		}
-	}
-	if right != nil {
-		if err := right.verify(root); err != nil {
-			return err
-		}
-		if !right.Node.isGreaterThan(endKey) {
-			return errors.WithStack(ErrInvalidProof)
-		}
-	}
-	return nil
-}
-
-// Checks that all paths are adjacent to one another, ie. that there are no
-// keys missing.
-func verifyNoMissingKeys(paths []*PathToKey) error {
-	ps := make([]*PathToKey, 0, len(paths))
-	for _, p := range paths {
-		if p != nil {
-			ps = append(ps, p)
-		}
-	}
-	for i := 0; i < len(ps)-1; i++ {
-		// Always check from left to right, since paths are always in ascending order.
-		if !ps[i].isLeftAdjacentTo(ps[i+1]) {
-			return errors.Errorf("paths #%d and #%d are not adjacent", i, i+1)
-		}
-	}
-	return nil
-}
-
-// Checks that with the given left and right paths, no keys can exist in between.
-// Supports nil paths to signify out-of-range.
-func verifyKeyAbsence(left, right *pathWithNode) error {
-	if left != nil && left.Path.isRightmost() {
-		// Range starts outside of the right boundary.
-		return nil
-	} else if right != nil && right.Path.isLeftmost() {
-		// Range ends outside of the left boundary.
-		return nil
-	} else if left != nil && right != nil &&
-		left.Path.isLeftAdjacentTo(right.Path) {
-		// Range is between two existing keys.
-		return nil
-	}
-	return errors.WithStack(ErrInvalidProof)
-}
diff --git a/vendor/github.com/tendermint/iavl/proof.go b/vendor/github.com/tendermint/iavl/proof.go
index c770ea96..a8787704 100644
--- a/vendor/github.com/tendermint/iavl/proof.go
+++ b/vendor/github.com/tendermint/iavl/proof.go
@@ -4,11 +4,9 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/pkg/errors"
-
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/iavl/sha256truncated"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
@@ -25,40 +23,58 @@ var (
 	ErrNilRoot = fmt.Errorf("tree root is nil")
 )
 
+//----------------------------------------
+
 type proofInnerNode struct {
-	Height  int8
-	Size    int64
-	Version int64
-	Left    []byte
-	Right   []byte
+	Height  int8   `json:"height"`
+	Size    int64  `json:"size"`
+	Version int64  `json:"version"`
+	Left    []byte `json:"left"`
+	Right   []byte `json:"right"`
+}
+
+func (pin proofInnerNode) String() string {
+	return pin.stringIndented("")
 }
 
-func (n *proofInnerNode) String() string {
-	return fmt.Sprintf("proofInnerNode[height=%d, ver=%d %x / %x]", n.Height, n.Version, n.Left, n.Right)
+func (pin proofInnerNode) stringIndented(indent string) string {
+	return fmt.Sprintf(`proofInnerNode{
+%s  Height:  %v
+%s  Size:    %v
+%s  Version: %v
+%s  Left:    %X
+%s  Right:   %X
+%s}`,
+		indent, pin.Height,
+		indent, pin.Size,
+		indent, pin.Version,
+		indent, pin.Left,
+		indent, pin.Right,
+		indent)
 }
 
-func (branch proofInnerNode) Hash(childHash []byte) []byte {
-	hasher := sha256truncated.New()
+func (pin proofInnerNode) Hash(childHash []byte) []byte {
+	hasher := tmhash.New()
 	buf := new(bytes.Buffer)
 
-	err := amino.EncodeInt8(buf, branch.Height)
+	err := amino.EncodeInt8(buf, pin.Height)
 	if err == nil {
-		err = amino.EncodeInt64(buf, branch.Size)
+		err = amino.EncodeVarint(buf, pin.Size)
 	}
 	if err == nil {
-		err = amino.EncodeInt64(buf, branch.Version)
+		err = amino.EncodeVarint(buf, pin.Version)
 	}
 
-	if len(branch.Left) == 0 {
+	if len(pin.Left) == 0 {
 		if err == nil {
 			err = amino.EncodeByteSlice(buf, childHash)
 		}
 		if err == nil {
-			err = amino.EncodeByteSlice(buf, branch.Right)
+			err = amino.EncodeByteSlice(buf, pin.Right)
 		}
 	} else {
 		if err == nil {
-			err = amino.EncodeByteSlice(buf, branch.Left)
+			err = amino.EncodeByteSlice(buf, pin.Left)
 		}
 		if err == nil {
 			err = amino.EncodeByteSlice(buf, childHash)
@@ -67,33 +83,51 @@ func (branch proofInnerNode) Hash(childHash []byte) []byte {
 	if err != nil {
 		panic(fmt.Sprintf("Failed to hash proofInnerNode: %v", err))
 	}
-	hasher.Write(buf.Bytes())
 
+	hasher.Write(buf.Bytes())
 	return hasher.Sum(nil)
 }
 
+//----------------------------------------
+
 type proofLeafNode struct {
-	KeyBytes   cmn.HexBytes `json:"key"`
-	ValueBytes cmn.HexBytes `json:"value"`
-	Version    int64        `json:"version"`
+	Key       cmn.HexBytes `json:"key"`
+	ValueHash cmn.HexBytes `json:"value"`
+	Version   int64        `json:"version"`
 }
 
-func (leaf proofLeafNode) Hash() []byte {
-	hasher := sha256truncated.New()
+func (pln proofLeafNode) String() string {
+	return pln.stringIndented("")
+}
+
+func (pln proofLeafNode) stringIndented(indent string) string {
+	return fmt.Sprintf(`proofLeafNode{
+%s  Key:       %v
+%s  ValueHash: %X
+%s  Version:   %v
+%s}`,
+		indent, pln.Key,
+		indent, pln.ValueHash,
+		indent, pln.Version,
+		indent)
+}
+
+func (pln proofLeafNode) Hash() []byte {
+	hasher := tmhash.New()
 	buf := new(bytes.Buffer)
 
 	err := amino.EncodeInt8(buf, 0)
 	if err == nil {
-		err = amino.EncodeInt64(buf, 1)
+		err = amino.EncodeVarint(buf, 1)
 	}
 	if err == nil {
-		err = amino.EncodeInt64(buf, leaf.Version)
+		err = amino.EncodeVarint(buf, pln.Version)
 	}
 	if err == nil {
-		err = amino.EncodeByteSlice(buf, leaf.KeyBytes)
+		err = amino.EncodeByteSlice(buf, pln.Key)
 	}
 	if err == nil {
-		err = amino.EncodeByteSlice(buf, leaf.ValueBytes)
+		err = amino.EncodeByteSlice(buf, pln.ValueHash)
 	}
 	if err != nil {
 		panic(fmt.Sprintf("Failed to hash proofLeafNode: %v", err))
@@ -103,157 +137,50 @@ func (leaf proofLeafNode) Hash() []byte {
 	return hasher.Sum(nil)
 }
 
-func (leaf proofLeafNode) isLesserThan(key []byte) bool {
-	return bytes.Compare(leaf.KeyBytes, key) == -1
-}
-
-func (leaf proofLeafNode) isGreaterThan(key []byte) bool {
-	return bytes.Compare(leaf.KeyBytes, key) == 1
-}
+//----------------------------------------
 
-func (node *Node) pathToInnerKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
-	path := &PathToKey{}
-	val, err := node._pathToKey(t, key, false, path)
-	return path, val, err
+// If the key does not exist, returns the path to the next leaf left of key (w/
+// path), except when key is less than the least item, in which case it returns
+// a path to the least item.
+func (node *Node) PathToLeaf(t *Tree, key []byte) (PathToLeaf, *Node, error) {
+	path := new(PathToLeaf)
+	val, err := node.pathToLeaf(t, key, path)
+	return *path, val, err
 }
 
-func (node *Node) pathToKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
-	path := &PathToKey{}
-	val, err := node._pathToKey(t, key, true, path)
-	return path, val, err
-}
-func (node *Node) _pathToKey(t *Tree, key []byte, skipInner bool, path *PathToKey) (*Node, error) {
+// pathToLeaf is a helper which recursively constructs the PathToLeaf.
+// As an optimization the already constructed path is passed in as an argument
+// and is shared among recursive calls.
+func (node *Node) pathToLeaf(t *Tree, key []byte, path *PathToLeaf) (*Node, error) {
 	if node.height == 0 {
 		if bytes.Equal(node.key, key) {
 			return node, nil
 		}
-		return nil, errors.New("key does not exist")
-	} else if !skipInner && bytes.Equal(node.key, key) {
-		return node, nil
+		return node, cmn.NewError("key does not exist")
 	}
 
 	if bytes.Compare(key, node.key) < 0 {
-		if n, err := node.getLeftNode(t)._pathToKey(t, key, skipInner, path); err != nil {
-			return nil, err
-		} else {
-			branch := proofInnerNode{
-				Height:  node.height,
-				Size:    node.size,
-				Version: node.version,
-				Left:    nil,
-				Right:   node.getRightNode(t).hash,
-			}
-			path.InnerNodes = append(path.InnerNodes, branch)
-			return n, nil
-		}
-	}
-
-	if n, err := node.getRightNode(t)._pathToKey(t, key, skipInner, path); err != nil {
-		return nil, err
-	} else {
-		branch := proofInnerNode{
+		// left side
+		pin := proofInnerNode{
 			Height:  node.height,
 			Size:    node.size,
 			Version: node.version,
-			Left:    node.getLeftNode(t).hash,
-			Right:   nil,
-		}
-		path.InnerNodes = append(path.InnerNodes, branch)
-		return n, nil
-	}
-}
-
-func (t *Tree) constructKeyAbsentProof(key []byte, proof *KeyAbsentProof) error {
-	// Get the index of the first key greater than the requested key, if the key doesn't exist.
-	idx, val := t.Get64(key)
-	if val != nil {
-		return errors.Errorf("couldn't construct non-existence proof: key 0x%x exists", key)
-	}
-
-	var (
-		lkey, lval []byte
-		rkey, rval []byte
-	)
-	if idx > 0 {
-		lkey, lval = t.GetByIndex64(idx - 1)
-	}
-	if idx <= t.Size64()-1 {
-		rkey, rval = t.GetByIndex64(idx)
-	}
-
-	if lkey == nil && rkey == nil {
-		return errors.New("couldn't get keys required for non-existence proof")
-	}
-
-	if lkey != nil {
-		path, node, _ := t.root.pathToKey(t, lkey)
-		proof.Left = &pathWithNode{
-			Path: path,
-			Node: proofLeafNode{lkey, lval, node.version},
-		}
-	}
-	if rkey != nil {
-		path, node, _ := t.root.pathToKey(t, rkey)
-		proof.Right = &pathWithNode{
-			Path: path,
-			Node: proofLeafNode{rkey, rval, node.version},
+			Left:    nil,
+			Right:   node.getRightNode(t).hash,
 		}
-	}
-
-	return nil
-}
-
-func (t *Tree) getWithProof(key []byte) (value []byte, proof *KeyExistsProof, err error) {
-	if t.root == nil {
-		return nil, nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	path, node, err := t.root.pathToKey(t, key)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "could not construct path to key")
-	}
-
-	proof = &KeyExistsProof{
-		RootHash:  t.root.hash,
-		PathToKey: path,
-		Version:   node.version,
-	}
-	return node.value, proof, nil
-}
-
-func (t *Tree) getInnerWithProof(key []byte) (proof *InnerKeyProof, err error) {
-	if t.root == nil {
-		return nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	path, node, err := t.root.pathToInnerKey(t, key)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not construct path to key")
-	}
-
-	proof = &InnerKeyProof{
-		&KeyExistsProof{
-			RootHash:  t.root.hash,
-			PathToKey: path,
-			Version:   node.version,
-		},
-	}
-	return proof, nil
-}
-
-func (t *Tree) keyAbsentProof(key []byte) (*KeyAbsentProof, error) {
-	if t.root == nil {
-		return nil, errors.WithStack(ErrNilRoot)
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-
-	proof := &KeyAbsentProof{
-		RootHash: t.root.hash,
-	}
-	if err := t.constructKeyAbsentProof(key, proof); err != nil {
-		return nil, errors.Wrap(err, "could not construct proof of non-existence")
-	}
-	return proof, nil
+		*path = append(*path, pin)
+		n, err := node.getLeftNode(t).pathToLeaf(t, key, path)
+		return n, err
+	}
+	// right side
+	pin := proofInnerNode{
+		Height:  node.height,
+		Size:    node.size,
+		Version: node.version,
+		Left:    node.getLeftNode(t).hash,
+		Right:   nil,
+	}
+	*path = append(*path, pin)
+	n, err := node.getRightNode(t).pathToLeaf(t, key, path)
+	return n, err
 }
diff --git a/vendor/github.com/tendermint/iavl/proof_key.go b/vendor/github.com/tendermint/iavl/proof_key.go
deleted file mode 100644
index e9c90088..00000000
--- a/vendor/github.com/tendermint/iavl/proof_key.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package iavl
-
-import (
-	"bytes"
-	"fmt"
-
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// KeyProof represents a proof of existence or absence of a single key.
-type KeyProof interface {
-	// Verify verfies the proof is valid. To verify absence,
-	// the value should be nil.
-	Verify(key, value, root []byte) error
-
-	// Root returns the root hash of the proof.
-	Root() []byte
-
-	// Serialize itself
-	Bytes() []byte
-}
-
-const (
-	// Used for serialization of proofs.
-	keyExistsMagicNumber = 0x50
-	keyAbsentMagicNumber = 0x51
-)
-
-// KeyExistsProof represents a proof of existence of a single key.
-type KeyExistsProof struct {
-	RootHash cmn.HexBytes `json:"root_hash"`
-	Version  int64        `json:"version"`
-
-	*PathToKey `json:"path"`
-}
-
-func (proof *KeyExistsProof) Root() []byte {
-	return proof.RootHash
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *KeyExistsProof) Verify(key []byte, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if key == nil || value == nil {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	return proof.PathToKey.verify(proofLeafNode{key, value, proof.Version}.Hash(), root)
-}
-
-// Bytes returns a go-amino binary serialization
-func (proof *KeyExistsProof) Bytes() []byte {
-	bz, err := cdc.MarshalBinary(proof)
-	if err != nil {
-		panic(fmt.Sprintf("error marshaling proof (%v): %v", proof, err))
-	}
-	return append([]byte{keyExistsMagicNumber}, bz...)
-}
-
-// readKeyExistsProof will deserialize a KeyExistsProof from bytes.
-func readKeyExistsProof(data []byte) (*KeyExistsProof, error) {
-	proof := new(KeyExistsProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyAbsentProof represents a proof of the absence of a single key.
-type KeyAbsentProof struct {
-	RootHash cmn.HexBytes `json:"root_hash"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
-}
-
-func (proof *KeyAbsentProof) Root() []byte {
-	return proof.RootHash
-}
-
-func (p *KeyAbsentProof) String() string {
-	return fmt.Sprintf("KeyAbsentProof\nroot=%s\nleft=%s%#v\nright=%s%#v\n", p.RootHash, p.Left.Path, p.Left.Node, p.Right.Path, p.Right.Node)
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *KeyAbsentProof) Verify(key, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if key == nil || value != nil {
-		return ErrInvalidInputs
-	}
-
-	if proof.Left == nil && proof.Right == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, key, key, root); err != nil {
-		return err
-	}
-
-	return verifyKeyAbsence(proof.Left, proof.Right)
-}
-
-// Bytes returns a go-wire binary serialization
-func (proof *KeyAbsentProof) Bytes() []byte {
-	bz, err := cdc.MarshalBinary(proof)
-	if err != nil {
-		panic(fmt.Sprintf("error marshaling proof (%v): %v", proof, err))
-	}
-	return append([]byte{keyAbsentMagicNumber}, bz...)
-}
-
-// readKeyAbsentProof will deserialize a KeyAbsentProof from bytes.
-func readKeyAbsentProof(data []byte) (*KeyAbsentProof, error) {
-	proof := new(KeyAbsentProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
-
-// ReadKeyProof reads a KeyProof from a byte-slice.
-func ReadKeyProof(data []byte) (KeyProof, error) {
-	if len(data) == 0 {
-		return nil, errors.New("proof bytes are empty")
-	}
-	b, val := data[0], data[1:]
-
-	switch b {
-	case keyExistsMagicNumber:
-		return readKeyExistsProof(val)
-	case keyAbsentMagicNumber:
-		return readKeyAbsentProof(val)
-	}
-	return nil, errors.New("unrecognized proof")
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// InnerKeyProof represents a proof of existence of an inner node key.
-type InnerKeyProof struct {
-	*KeyExistsProof
-}
-
-// Verify verifies the proof is valid and returns an error if it isn't.
-func (proof *InnerKeyProof) Verify(hash []byte, value []byte, root []byte) error {
-	if !bytes.Equal(proof.RootHash, root) {
-		return errors.WithStack(ErrInvalidRoot)
-	}
-	if hash == nil || value != nil {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	return proof.PathToKey.verify(hash, root)
-}
-
-// ReadKeyInnerProof will deserialize a InnerKeyProof from bytes.
-func ReadInnerKeyProof(data []byte) (*InnerKeyProof, error) {
-	proof := new(InnerKeyProof)
-	err := cdc.UnmarshalBinary(data, proof)
-	return proof, err
-}
diff --git a/vendor/github.com/tendermint/iavl/proof_path.go b/vendor/github.com/tendermint/iavl/proof_path.go
new file mode 100644
index 00000000..de366f33
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/proof_path.go
@@ -0,0 +1,167 @@
+package iavl
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+// pathWithLeaf is a path to a leaf node and the leaf node itself.
+type pathWithLeaf struct {
+	Path PathToLeaf    `json:"path"`
+	Leaf proofLeafNode `json:"leaf"`
+}
+
+func (pwl pathWithLeaf) String() string {
+	return pwl.StringIndented("")
+}
+
+func (pwl pathWithLeaf) StringIndented(indent string) string {
+	return fmt.Sprintf(`pathWithLeaf{
+%s  Path: %v
+%s  Leaf: %v
+%s}`,
+		indent, pwl.Path.stringIndented(indent+"  "),
+		indent, pwl.Leaf.stringIndented(indent+"  "),
+		indent)
+}
+
+// `verify` checks that the leaf node's hash + the inner nodes merkle-izes to
+// the given root. If it returns an error, it means the leafHash or the
+// PathToLeaf is incorrect.
+func (pwl pathWithLeaf) verify(root []byte) cmn.Error {
+	leafHash := pwl.Leaf.Hash()
+	return pwl.Path.verify(leafHash, root)
+}
+
+// `computeRootHash` computes the root hash with leaf node.
+// Does not verify the root hash.
+func (pwl pathWithLeaf) computeRootHash() []byte {
+	leafHash := pwl.Leaf.Hash()
+	return pwl.Path.computeRootHash(leafHash)
+}
+
+//----------------------------------------
+
+// PathToLeaf represents an inner path to a leaf node.
+// Note that the nodes are ordered such that the last one is closest
+// to the root of the tree.
+type PathToLeaf []proofInnerNode
+
+func (pl PathToLeaf) String() string {
+	return pl.stringIndented("")
+}
+
+func (pl PathToLeaf) stringIndented(indent string) string {
+	if len(pl) == 0 {
+		return "empty-PathToLeaf"
+	}
+	strs := make([]string, len(pl))
+	for i, pin := range pl {
+		if i == 20 {
+			strs[i] = fmt.Sprintf("... (%v total)", len(pl))
+			break
+		}
+		strs[i] = fmt.Sprintf("%v:%v", i, pin.stringIndented(indent+"  "))
+	}
+	return fmt.Sprintf(`PathToLeaf{
+%s  %v
+%s}`,
+		indent, strings.Join(strs, "\n"+indent+"  "),
+		indent)
+}
+
+// `verify` checks that the leaf node's hash + the inner nodes merkle-izes to
+// the given root. If it returns an error, it means the leafHash or the
+// PathToLeaf is incorrect.
+func (pl PathToLeaf) verify(leafHash []byte, root []byte) cmn.Error {
+	hash := leafHash
+	for i := len(pl) - 1; i >= 0; i-- {
+		pin := pl[i]
+		hash = pin.Hash(hash)
+	}
+	if !bytes.Equal(root, hash) {
+		return cmn.ErrorWrap(ErrInvalidProof, "")
+	}
+	return nil
+}
+
+// `computeRootHash` computes the root hash assuming some leaf hash.
+// Does not verify the root hash.
+func (pl PathToLeaf) computeRootHash(leafHash []byte) []byte {
+	hash := leafHash
+	for i := len(pl) - 1; i >= 0; i-- {
+		pin := pl[i]
+		hash = pin.Hash(hash)
+	}
+	return hash
+}
+
+func (pl PathToLeaf) isLeftmost() bool {
+	for _, node := range pl {
+		if len(node.Left) > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (pl PathToLeaf) isRightmost() bool {
+	for _, node := range pl {
+		if len(node.Right) > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (pl PathToLeaf) isEmpty() bool {
+	return pl == nil || len(pl) == 0
+}
+
+func (pl PathToLeaf) dropRoot() PathToLeaf {
+	if pl.isEmpty() {
+		return pl
+	}
+	return PathToLeaf(pl[:len(pl)-1])
+}
+
+func (pl PathToLeaf) hasCommonRoot(pl2 PathToLeaf) bool {
+	if pl.isEmpty() || pl2.isEmpty() {
+		return false
+	}
+	leftEnd := pl[len(pl)-1]
+	rightEnd := pl2[len(pl2)-1]
+
+	return bytes.Equal(leftEnd.Left, rightEnd.Left) &&
+		bytes.Equal(leftEnd.Right, rightEnd.Right)
+}
+
+func (pl PathToLeaf) isLeftAdjacentTo(pl2 PathToLeaf) bool {
+	for pl.hasCommonRoot(pl2) {
+		pl, pl2 = pl.dropRoot(), pl2.dropRoot()
+	}
+	pl, pl2 = pl.dropRoot(), pl2.dropRoot()
+
+	return pl.isRightmost() && pl2.isLeftmost()
+}
+
+// returns -1 if invalid.
+func (pl PathToLeaf) Index() (idx int64) {
+	for i, node := range pl {
+		if node.Left == nil {
+			continue
+		} else if node.Right == nil {
+			if i < len(pl)-1 {
+				idx += node.Size - pl[i+1].Size
+			} else {
+				idx += node.Size - 1
+			}
+		} else {
+			return -1
+		}
+	}
+	return idx
+}
diff --git a/vendor/github.com/tendermint/iavl/proof_range.go b/vendor/github.com/tendermint/iavl/proof_range.go
index 40374055..cc12618f 100644
--- a/vendor/github.com/tendermint/iavl/proof_range.go
+++ b/vendor/github.com/tendermint/iavl/proof_range.go
@@ -3,445 +3,490 @@ package iavl
 import (
 	"bytes"
 	"fmt"
+	"sort"
+	"strings"
 
-	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-// KeyInRangeProof is an interface which covers both first-in-range and last-in-range proofs.
-type KeyInRangeProof interface {
-	Verify(startKey, endKey, key, value, root []byte) error
-}
-
-// KeyFirstInRangeProof is a proof that a given key is the first in a given range.
-type KeyFirstInRangeProof struct {
-	KeyExistsProof `json:"key_proof"`
+type RangeProof struct {
+	// You don't need the right path because
+	// it can be derived from what we have.
+	LeftPath   PathToLeaf      `json:"left_path"`
+	InnerNodes []PathToLeaf    `json:"inner_nodes"`
+	Leaves     []proofLeafNode `json:"leaves"`
 
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
-}
+	// memoize
+	rootVerified bool
+	rootHash     []byte // valid iff rootVerified is true
+	treeEnd      bool   // valid iff rootVerified is true
 
-// String returns a string representation of the proof.
-func (proof *KeyFirstInRangeProof) String() string {
-	return fmt.Sprintf("%#v", proof)
 }
 
-// Verify that the first in range proof is valid.
-func (proof *KeyFirstInRangeProof) Verify(startKey, endKey, key, value []byte, root []byte) error {
-	if key != nil {
-		inputsOutOfRange := bytes.Compare(key, startKey) == -1 || bytes.Compare(key, endKey) == 1
-		if inputsOutOfRange {
-			return ErrInvalidInputs
-		}
-	}
-	if proof.Left == nil && proof.Right == nil && proof.PathToKey == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
-	}
-	if proof.PathToKey == nil {
-		// If we don't have an existing key, we effectively have a proof of absence.
-		return verifyKeyAbsence(proof.Left, proof.Right)
-	}
-
-	if err := proof.KeyExistsProof.Verify(key, value, root); err != nil {
-		return errors.Wrap(err, "failed to verify key exists proof")
-	}
-	// If the key returned is equal to our start key, and we've verified
-	// that it exists, there's nothing else to check.
-	if bytes.Equal(key, startKey) {
+// Keys returns all the keys in the RangeProof.  NOTE: The keys here may
+// include more keys than provided by tree.GetRangeWithProof or
+// VersionedTree.GetVersionedRangeWithProof.  The keys returned there are only
+// in the provided [startKey,endKey){limit} range.  The keys returned here may
+// include extra keys, such as:
+// - the key before startKey if startKey is provided and doesn't exist;
+// - the key after a queried key with tree.GetWithProof, when the key is absent.
+func (proof *RangeProof) Keys() (keys [][]byte) {
+	if proof == nil {
 		return nil
 	}
-	// If the key returned is the smallest in the tree, then it must be
-	// the smallest in the given range too.
-	if proof.PathToKey.isLeftmost() {
-		return nil
-	}
-	// The start key is in between the left path and the key returned,
-	// and the paths are adjacent. Therefore there is nothing between
-	// the key returned and the start key.
-	if proof.Left != nil && proof.Left.Path.isLeftAdjacentTo(proof.PathToKey) {
-		return nil
+	for _, leaf := range proof.Leaves {
+		keys = append(keys, leaf.Key)
 	}
-	return errors.WithStack(ErrInvalidProof)
+	return keys
 }
 
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyLastInRangeProof is a proof that a given key is the last in a given range.
-type KeyLastInRangeProof struct {
-	KeyExistsProof `json:"key_proof"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
+// String returns a string representation of the proof.
+func (proof *RangeProof) String() string {
+	if proof == nil {
+		return "<nil-RangeProof>"
+	}
+	return proof.StringIndented("")
 }
 
-// String returns a string representation of the proof.
-func (proof *KeyLastInRangeProof) String() string {
-	// TODO(cloudhead): Needs work.
-	return fmt.Sprintf("%#v", proof)
+func (proof *RangeProof) StringIndented(indent string) string {
+	istrs := make([]string, 0, len(proof.InnerNodes))
+	for _, ptl := range proof.InnerNodes {
+		istrs = append(istrs, ptl.stringIndented(indent+"    "))
+	}
+	lstrs := make([]string, 0, len(proof.Leaves))
+	for _, leaf := range proof.Leaves {
+		lstrs = append(lstrs, leaf.stringIndented(indent+"    "))
+	}
+	return fmt.Sprintf(`RangeProof{
+%s  LeftPath: %v
+%s  InnerNodes:
+%s    %v
+%s  Leaves:
+%s    %v
+%s  (rootVerified): %v
+%s  (rootHash): %X
+%s  (treeEnd): %v
+%s}`,
+		indent, proof.LeftPath.stringIndented(indent+"  "),
+		indent,
+		indent, strings.Join(istrs, "\n"+indent+"    "),
+		indent,
+		indent, strings.Join(lstrs, "\n"+indent+"    "),
+		indent, proof.rootVerified,
+		indent, proof.rootHash,
+		indent, proof.treeEnd,
+		indent)
 }
 
-// Verify that the last in range proof is valid.
-func (proof *KeyLastInRangeProof) Verify(startKey, endKey, key, value []byte, root []byte) error {
-	if key != nil && (bytes.Compare(key, startKey) == -1 || bytes.Compare(key, endKey) == 1) {
-		return ErrInvalidInputs
-	}
-	if proof.Left == nil && proof.Right == nil && proof.PathToKey == nil {
-		return errors.WithStack(ErrInvalidProof)
-	}
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
-	}
-	if proof.PathToKey == nil {
-		// If we don't have an existing key, we effectively have a proof of absence.
-		return verifyKeyAbsence(proof.Left, proof.Right)
+// The index of the first leaf (of the whole tree).
+// Returns -1 if the proof is nil.
+func (proof *RangeProof) LeftIndex() int64 {
+	if proof == nil {
+		return -1
 	}
+	return proof.LeftPath.Index()
+}
 
-	if err := proof.KeyExistsProof.Verify(key, value, root); err != nil {
-		return err
+// Also see LeftIndex().
+// Verify that a key has some value.
+// Does not assume that the proof itself is valid, call Verify() first.
+func (proof *RangeProof) VerifyItem(key, value []byte) error {
+	leaves := proof.Leaves
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
 	}
-	if bytes.Equal(key, endKey) {
-		return nil
+	if !proof.rootVerified {
+		return cmn.NewError("must call Verify(root) first.")
 	}
-	if proof.PathToKey.isRightmost() {
-		return nil
+	i := sort.Search(len(leaves), func(i int) bool {
+		return bytes.Compare(key, leaves[i].Key) <= 0
+	})
+	if i >= len(leaves) || !bytes.Equal(leaves[i].Key, key) {
+		return cmn.ErrorWrap(ErrInvalidProof, "leaf key not found in proof")
 	}
-	if proof.Right != nil &&
-		proof.PathToKey.isLeftAdjacentTo(proof.Right.Path) {
-		return nil
+	valueHash := tmhash.Sum(value)
+	if !bytes.Equal(leaves[i].ValueHash, valueHash) {
+		return cmn.ErrorWrap(ErrInvalidProof, "leaf value hash not same")
 	}
-
-	return errors.WithStack(ErrInvalidProof)
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// KeyRangeProof is proof that a range of keys does or does not exist.
-type KeyRangeProof struct {
-	RootHash   cmn.HexBytes `json:"root_hash"`
-	Versions   []int64      `json:"versions"`
-	PathToKeys []*PathToKey `json:"paths"`
-
-	Left  *pathWithNode `json:"left"`
-	Right *pathWithNode `json:"right"`
+	return nil
 }
 
-// Verify that a range proof is valid.
-//
-// This method expects the same parameters passed to query the range.
-func (proof *KeyRangeProof) Verify(
-	startKey, endKey []byte, limit int, keys, values [][]byte, root []byte,
-) error {
-	if len(proof.PathToKeys) != len(keys) || len(values) != len(keys) || len(proof.Versions) != len(keys) {
-		return errors.WithStack(ErrInvalidInputs)
-	}
-	if limit > 0 && len(keys) > limit {
-		return errors.WithStack(ErrInvalidInputs)
+// Verify that proof is valid absence proof for key.
+// Does not assume that the proof itself is valid.
+// For that, use Verify(root).
+func (proof *RangeProof) VerifyAbsence(key []byte) error {
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
 	}
-
-	// If startKey > endKey, reverse the keys and values, since our proofs are
-	// always in ascending order.
-	ascending := bytes.Compare(startKey, endKey) == -1
-	if !ascending {
-		startKey, endKey, keys, values = reverseKeys(startKey, endKey, keys, values)
+	if !proof.rootVerified {
+		return cmn.NewError("must call Verify(root) first.")
 	}
-
-	// If the range is empty, we just have to check the left and right paths.
-	if len(keys) == 0 {
-		if err := verifyKeyAbsence(proof.Left, proof.Right); err != nil {
-			return err
+	cmp := bytes.Compare(key, proof.Leaves[0].Key)
+	if cmp < 0 {
+		if proof.LeftPath.isLeftmost() {
+			return nil
 		}
-		return verifyPaths(proof.Left, proof.Right, startKey, endKey, root)
+		return cmn.NewError("absence not proved by left path")
+	} else if cmp == 0 {
+		return cmn.NewError("absence disproved via first item #0")
 	}
-
-	// If we hit the limit, one of the two ends doesn't have to match the
-	// limits of the query, so we adjust the range to match the limit we found.
-	if limit > 0 && len(keys) == limit {
-		if ascending {
-			endKey = keys[len(keys)-1]
-		} else {
-			startKey = keys[0]
-		}
+	if len(proof.LeftPath) == 0 {
+		return nil // proof ok
 	}
-	// Now we know Left < startKey <= endKey < Right.
-	if err := verifyPaths(proof.Left, proof.Right, startKey, endKey, root); err != nil {
-		return err
+	if proof.LeftPath.isRightmost() {
+		return nil
 	}
 
-	if err := verifyNoMissingKeys(proof.paths()); err != nil {
-		return errors.WithStack(err)
-	}
+	// See if any of the leaves are greater than key.
+	for i := 1; i < len(proof.Leaves); i++ {
+		leaf := proof.Leaves[i]
+		cmp := bytes.Compare(key, leaf.Key)
+		if cmp < 0 {
+			return nil // proof ok
+		} else if cmp == 0 {
+			return cmn.NewError("absence disproved via item #%v", i)
+		} else {
+			if i == len(proof.Leaves)-1 {
+				// If last item, check whether
+				// it's the last item in the tree.
 
-	// If we've reached this point, it means our range isn't empty, and we have
-	// a list of keys.
-	for i, path := range proof.PathToKeys {
-		leafNode := proofLeafNode{
-			KeyBytes:   keys[i],
-			ValueBytes: values[i],
-			Version:    proof.Versions[i],
-		}
-		if err := path.verify(leafNode.Hash(), root); err != nil {
-			return errors.WithStack(err)
+			}
+			continue
 		}
 	}
 
-	// In the case of a descending range, if the left proof is nil and the
-	// limit wasn't reached, we have to verify that we're not missing any
-	// keys. Basically, if a key to the left is missing because we've
-	// reached the limit, then it's fine. But if the key count is smaller
-	// than the limit, we need a left proof to make sure no keys are
-	// missing.
-	if proof.Left == nil &&
-		!bytes.Equal(startKey, keys[0]) &&
-		!proof.PathToKeys[0].isLeftmost() {
-		return errors.WithStack(ErrInvalidProof)
+	// It's still a valid proof if our last leaf is the rightmost child.
+	if proof.treeEnd {
+		return nil // OK!
 	}
 
-	if proof.Right == nil &&
-		!bytes.Equal(endKey, keys[len(keys)-1]) &&
-		!proof.PathToKeys[len(proof.PathToKeys)-1].isRightmost() {
-		return errors.WithStack(ErrInvalidProof)
+	// It's not a valid absence proof.
+	if len(proof.Leaves) < 2 {
+		return cmn.NewError("absence not proved by right leaf (need another leaf?)")
 	}
-	return nil
+	return cmn.NewError("absence not proved by right leaf")
 }
 
-func (proof *KeyRangeProof) String() string {
-	// TODO(cloudhead): Needs work.
-	return fmt.Sprintf("%#v", proof)
+// Verify that proof is valid.
+func (proof *RangeProof) Verify(root []byte) error {
+	if proof == nil {
+		return cmn.ErrorWrap(ErrInvalidProof, "proof is nil")
+	}
+	err := proof.verify(root)
+	return err
 }
 
-// Returns a list of all paths, in order, with the proof's Left and Right
-// paths preprended and appended respectively, if they exist.
-func (proof *KeyRangeProof) paths() []*PathToKey {
-	paths := proof.PathToKeys[:]
-	if proof.Left != nil {
-		paths = append([]*PathToKey{proof.Left.Path}, paths...)
+func (proof *RangeProof) verify(root []byte) error {
+	rootHash := proof.rootHash
+	if rootHash == nil {
+		derivedHash, err := proof.computeRootHash()
+		if err != nil {
+			return err
+		}
+		rootHash = derivedHash
 	}
-	if proof.Right != nil {
-		paths = append(paths, proof.Right.Path)
+	if !bytes.Equal(rootHash, root) {
+		return cmn.ErrorWrap(ErrInvalidRoot, "root hash doesn't match")
 	}
-	return paths
+	proof.rootVerified = true
+	return nil
 }
 
-///////////////////////////////////////////////////////////////////////////////
-
-func (t *Tree) getRangeWithProof(keyStart, keyEnd []byte, limit int) (
-	keys, values [][]byte, rangeProof *KeyRangeProof, err error,
-) {
-	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
+// ComputeRootHash computes the root hash with leaves.
+// Returns nil if error or proof is nil.
+// Does not verify the root hash.
+func (proof *RangeProof) ComputeRootHash() []byte {
+	if proof == nil {
+		return nil
 	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
+	rootHash, _ := proof.computeRootHash()
+	return rootHash
+}
 
-	rangeProof = &KeyRangeProof{RootHash: t.root.hash}
-	rangeStart, rangeEnd := keyStart, keyEnd
-	ascending := bytes.Compare(keyStart, keyEnd) == -1
-	if !ascending {
-		rangeStart, rangeEnd = rangeEnd, rangeStart
+func (proof *RangeProof) computeRootHash() (rootHash []byte, err error) {
+	rootHash, treeEnd, err := proof._computeRootHash()
+	if err == nil {
+		proof.rootHash = rootHash // memoize
+		proof.treeEnd = treeEnd   // memoize
 	}
+	return rootHash, err
+}
 
-	versions := []int64{}
-	limited := t.IterateRangeInclusive(rangeStart, rangeEnd, ascending, func(k, v []byte, version int64) bool {
-		keys = append(keys, k)
-		values = append(values, v)
-		versions = append(versions, version)
-		return len(keys) == limit
-	})
-
-	// Construct the paths such that they are always in ascending order.
-	rangeProof.PathToKeys = make([]*PathToKey, len(keys))
-	rangeProof.Versions = make([]int64, len(keys))
-	for i, k := range keys {
-		path, _, _ := t.root.pathToKey(t, k)
-		if ascending {
-			rangeProof.PathToKeys[i] = path
-			rangeProof.Versions[i] = versions[i]
-		} else {
-			rangeProof.PathToKeys[len(keys)-i-1] = path
-			rangeProof.Versions[len(keys)-i-1] = versions[i]
-		}
+func (proof *RangeProof) _computeRootHash() (rootHash []byte, treeEnd bool, err error) {
+	if len(proof.Leaves) == 0 {
+		return nil, false, cmn.ErrorWrap(ErrInvalidProof, "no leaves")
+	}
+	if len(proof.InnerNodes)+1 != len(proof.Leaves) {
+		return nil, false, cmn.ErrorWrap(ErrInvalidProof, "InnerNodes vs Leaves length mismatch, leaves should be 1 more.")
 	}
 
-	//
-	// Figure out which of the left or right paths we need.
-	//
-	var needsLeft, needsRight bool
+	// Start from the left path and prove each leaf.
 
-	if len(keys) == 0 {
-		needsLeft, needsRight = true, true
-	} else {
-		first, last := 0, len(keys)-1
-		if !ascending {
-			first, last = last, first
-		}
+	// shared across recursive calls
+	var leaves = proof.Leaves
+	var innersq = proof.InnerNodes
+	var COMPUTEHASH func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error)
 
-		needsLeft = !bytes.Equal(keys[first], rangeStart)
-		needsRight = !bytes.Equal(keys[last], rangeEnd)
+	// rightmost: is the root a rightmost child of the tree?
+	// treeEnd: true iff the last leaf is the last item of the tree.
+	// Returns the (possibly intermediate, possibly root) hash.
+	COMPUTEHASH = func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error) {
 
-		// When limited, we can relax the right or left side, depending on
-		// the direction of the range.
-		if limited {
-			if ascending {
-				needsRight = false
-			} else {
-				needsLeft = false
-			}
-		}
-	}
+		// Pop next leaf.
+		nleaf, rleaves := leaves[0], leaves[1:]
+		leaves = rleaves
 
-	// So far, we've created proofs of the keys which are within the provided range.
-	// Next, we need to create a proof that we haven't omitted any keys to the left
-	// or right of that range. This is relevant in two scenarios:
-	//
-	// 1. There are no keys in the range. In this case, include a proof of the key
-	//    to the left and right of that empty range.
-	// 2. The start or end key do not match the start and end of the keys returned.
-	//    In this case, include proofs of the keys immediately outside of those returned.
-	//
-	if needsLeft {
-		// Find index of first key to the left, and include proof if it isn't the
-		// leftmost key.
-		if idx, _ := t.Get64(rangeStart); idx > 0 {
-			lkey, lval := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, lkey)
-			rangeProof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{lkey, lval, node.version},
-			}
-		}
-	}
+		// Compute hash.
+		hash = (pathWithLeaf{
+			Path: path,
+			Leaf: nleaf,
+		}).computeRootHash()
 
-	// Proof that the last key is the last value before keyEnd, or that we're limited.
-	// If len(keys) == limit, it doesn't matter that a key exists to the right of the
-	// last key, since we aren't interested in it.
-	if needsRight {
-		// Find index of first key to the right, and include proof if it isn't the
-		// rightmost key.
-		if idx, _ := t.Get64(rangeEnd); idx <= t.Size64()-1 {
-			rkey, rval := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, rkey)
-			rangeProof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{rkey, rval, node.version},
-			}
+		// If we don't have any leaves left, we're done.
+		if len(leaves) == 0 {
+			rightmost = rightmost && path.isRightmost()
+			return hash, rightmost, true, nil
 		}
-	}
-
-	return keys, values, rangeProof, nil
-}
 
-func (t *Tree) getFirstInRangeWithProof(keyStart, keyEnd []byte) (
-	key, value []byte, proof *KeyFirstInRangeProof, err error,
-) {
-	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
-	}
-	t.root.hashWithCount() // Ensure that all hashes are calculated.
-	proof = &KeyFirstInRangeProof{}
-	proof.RootHash = t.root.hash
-	proof.Version = t.root.version
-
-	// Get the first value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, true, func(k, v []byte, _ int64) bool {
-		key, value = k, v
-		return true
-	})
+		// Prove along path (until we run out of leaves).
+		for len(path) > 0 {
+
+			// Drop the leaf-most (last-most) inner nodes from path
+			// until we encounter one with a left hash.
+			// We assume that the left side is already verified.
+			// rpath: rest of path
+			// lpath: last path item
+			rpath, lpath := path[:len(path)-1], path[len(path)-1]
+			path = rpath
+			if len(lpath.Right) == 0 {
+				continue
+			}
 
-	if len(key) > 0 {
-		proof.PathToKey, _, _ = t.root.pathToKey(t, key)
-	}
+			// Pop next inners, a PathToLeaf (e.g. []proofInnerNode).
+			inners, rinnersq := innersq[0], innersq[1:]
+			innersq = rinnersq
 
-	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
+			// Recursively verify inners against remaining leaves.
+			derivedRoot, treeEnd, done, err := COMPUTEHASH(inners, rightmost && rpath.isRightmost())
+			if err != nil {
+				return nil, treeEnd, false, cmn.ErrorWrap(err, "recursive COMPUTEHASH call")
+			}
+			if !bytes.Equal(derivedRoot, lpath.Right) {
+				return nil, treeEnd, false, cmn.ErrorWrap(ErrInvalidRoot, "intermediate root hash %X doesn't match, got %X", lpath.Right, derivedRoot)
+			}
+			if done {
+				return hash, treeEnd, true, nil
 			}
 		}
+
+		// We're not done yet (leaves left over). No error, not done either.
+		// Technically if rightmost, we know there's an error "left over leaves
+		// -- malformed proof", but we return that at the top level, below.
+		return hash, false, false, nil
 	}
 
-	if !bytes.Equal(key, keyEnd) {
-		if idx, val := t.Get64(keyEnd); idx <= t.Size64()-1 && val == nil {
-			k, v := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
-			}
-		}
+	// Verify!
+	path := proof.LeftPath
+	rootHash, treeEnd, done, err := COMPUTEHASH(path, true)
+	if err != nil {
+		return nil, treeEnd, cmn.ErrorWrap(err, "root COMPUTEHASH call")
+	} else if !done {
+		return nil, treeEnd, cmn.ErrorWrap(ErrInvalidProof, "left over leaves -- malformed proof")
 	}
 
-	return key, value, proof, nil
+	// Ok!
+	return rootHash, treeEnd, nil
 }
 
-func (t *Tree) getLastInRangeWithProof(keyStart, keyEnd []byte) (
-	key, value []byte, proof *KeyLastInRangeProof, err error,
-) {
+///////////////////////////////////////////////////////////////////////////////
+
+// keyStart is inclusive and keyEnd is exclusive.
+// Returns the range-proof and the included keys and values.
+// If keyStart or keyEnd don't exist, the leaf before keyStart
+// or after keyEnd will also be included, but not be included in values.
+// If keyEnd-1 exists, no later leaves will be included.
+// If keyStart >= keyEnd and both not nil, panics.
+// Limit is never exceeded.
+func (t *Tree) getRangeProof(keyStart, keyEnd []byte, limit int) (*RangeProof, [][]byte, [][]byte, error) {
+	if keyStart != nil && keyEnd != nil && bytes.Compare(keyStart, keyEnd) >= 0 {
+		panic("if keyStart and keyEnd are present, need keyStart < keyEnd.")
+	}
+	if limit < 0 {
+		panic("limit must be greater or equal to 0 -- 0 means no limit")
+	}
 	if t.root == nil {
-		return nil, nil, nil, ErrNilRoot
+		return nil, nil, nil, cmn.ErrorWrap(ErrNilRoot, "")
 	}
 	t.root.hashWithCount() // Ensure that all hashes are calculated.
 
-	proof = &KeyLastInRangeProof{}
-	proof.RootHash = t.root.hash
-	proof.Version = t.root.version
+	// Get the first key/value pair proof, which provides us with the left key.
+	path, left, err := t.root.PathToLeaf(t, keyStart)
+	if err != nil {
+		// Key doesn't exist, but instead we got the prev leaf (or the
+		// first or last leaf), which provides proof of absence).
+		// err = nil isn't necessary as we do not use it in the returns below
+	}
+	startOK := keyStart == nil || bytes.Compare(keyStart, left.key) <= 0
+	endOK := keyEnd == nil || bytes.Compare(left.key, keyEnd) < 0
+	// If left.key is in range, add it to key/values.
+	var keys, values [][]byte
+	if startOK && endOK {
+		keys = append(keys, left.key) // == keyStart
+		values = append(values, left.value)
+	}
+	// Either way, add to proof leaves.
+	var leaves = []proofLeafNode{{
+		Key:       left.key,
+		ValueHash: tmhash.Sum(left.value),
+		Version:   left.version,
+	}}
+
+	// 1: Special case if limit is 1.
+	// 2: Special case if keyEnd is left.key+1.
+	_stop := false
+	if limit == 1 {
+		_stop = true // case 1
+	} else if keyEnd != nil && bytes.Compare(cpIncr(left.key), keyEnd) >= 0 {
+		_stop = true // case 2
+	}
+	if _stop {
+		return &RangeProof{
+			LeftPath: path,
+			Leaves:   leaves,
+		}, keys, values, nil
+	}
+
+	// Get the key after left.key to iterate from.
+	afterLeft := cpIncr(left.key)
+
+	// Traverse starting from afterLeft, until keyEnd or the next leaf
+	// after keyEnd.
+	// nolint
+	var innersq = []PathToLeaf(nil)
+	var inners = PathToLeaf(nil)
+	var lastDepth uint8 = 0
+	var leafCount = 1 // from left above.
+	var pathCount = 0
+	// var keys, values [][]byte defined as function outs.
+
+	t.root.traverseInRange(t, afterLeft, nil, true, false, 0,
+		func(node *Node, depth uint8) (stop bool) {
+
+			// Track when we diverge from path, or when we've exhausted path,
+			// since the first innersq shouldn't include it.
+			if pathCount != -1 {
+				if len(path) <= pathCount {
+					// We're done with path counting.
+					pathCount = -1
+				} else {
+					pn := path[pathCount]
+					if pn.Height != node.height ||
+						pn.Left != nil && !bytes.Equal(pn.Left, node.leftHash) ||
+						pn.Right != nil && !bytes.Equal(pn.Right, node.rightHash) {
+
+						// We've diverged, so start appending to inners.
+						pathCount--
+					} else {
+						pathCount++
+					}
+				}
+			}
 
-	// Get the last value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, false, func(k, v []byte, _ int64) bool {
-		key, value = k, v
-		return true
-	})
+			if node.height == 0 {
+				// Leaf node.
+				// Append inners to innersq.
+				innersq = append(innersq, inners)
+				inners = PathToLeaf(nil)
+				// Append leaf to leaves.
+				leaves = append(leaves, proofLeafNode{
+					Key:       node.key,
+					ValueHash: tmhash.Sum(node.value),
+					Version:   node.version,
+				})
+				leafCount++
+				// Maybe terminate because we found enough leaves.
+				if limit > 0 && limit <= leafCount {
+					return true
+				}
+				// Terminate if we've found keyEnd or after.
+				if keyEnd != nil && bytes.Compare(node.key, keyEnd) >= 0 {
+					return true
+				}
+				// Value is in range, append to keys and values.
+				keys = append(keys, node.key)
+				values = append(values, node.value)
+				// Terminate if we've found keyEnd-1 or after.
+				// We don't want to fetch any leaves for it.
+				if keyEnd != nil && bytes.Compare(cpIncr(node.key), keyEnd) >= 0 {
+					return true
+				}
+			} else {
+				// Inner node.
+				if pathCount >= 0 {
+					// Skip redundant path items.
+				} else {
+					inners = append(inners, proofInnerNode{
+						Height:  node.height,
+						Size:    node.size,
+						Version: node.version,
+						Left:    nil, // left is nil for range proof inners
+						Right:   node.rightHash,
+					})
+				}
+			}
+			lastDepth = depth
+			return false
+		},
+	)
+
+	return &RangeProof{
+		LeftPath:   path,
+		InnerNodes: innersq,
+		Leaves:     leaves,
+	}, keys, values, nil
+}
 
-	if len(key) > 0 {
-		proof.PathToKey, _, _ = t.root.pathToKey(t, key)
-	}
+//----------------------------------------
 
-	if !bytes.Equal(key, keyEnd) {
-		if idx, _ := t.Get64(keyEnd); idx <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Right = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
+// GetWithProof gets the value under the key if it exists, or returns nil.
+// A proof of existence or absence is returned alongside the value.
+func (t *Tree) GetWithProof(key []byte) (value []byte, proof *RangeProof, err error) {
+	proof, _, values, err := t.getRangeProof(key, cpIncr(key), 2)
+	if err == nil {
+		if len(values) > 0 {
+			if !bytes.Equal(proof.Leaves[0].Key, key) {
+				return nil, proof, nil
 			}
+			return values[0], proof, nil
 		}
+		return nil, proof, nil
 	}
+	return nil, nil, cmn.ErrorWrap(err, "could not construct any proof")
+}
 
-	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
-			k, v := t.GetByIndex64(idx - 1)
-			path, node, _ := t.root.pathToKey(t, k)
-			proof.Left = &pathWithNode{
-				Path: path,
-				Node: proofLeafNode{k, v, node.version},
-			}
-		}
-	}
+// GetRangeWithProof gets key/value pairs within the specified range and limit.
+func (t *Tree) GetRangeWithProof(startKey []byte, endKey []byte, limit int) (keys, values [][]byte, proof *RangeProof, err error) {
+	proof, keys, values, err = t.getRangeProof(startKey, endKey, limit)
+	return
+}
 
-	return key, value, proof, nil
+// GetVersionedWithProof gets the value under the key at the specified version
+// if it exists, or returns nil.
+func (tree *VersionedTree) GetVersionedWithProof(key []byte, version int64) ([]byte, *RangeProof, error) {
+	if t, ok := tree.versions[version]; ok {
+		return t.GetWithProof(key)
+	}
+	return nil, nil, cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 }
 
-///////////////////////////////////////////////////////////////////////////////
+// GetVersionedRangeWithProof gets key/value pairs within the specified range
+// and limit.
+func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) (
+	keys, values [][]byte, proof *RangeProof, err error) {
 
-// reverseKeys reverses the keys and values and swaps start and end key
-// if startKey > endKey.
-func reverseKeys(startKey, endKey []byte, keys, values [][]byte) (
-	[]byte, []byte, [][]byte, [][]byte,
-) {
-	if bytes.Compare(startKey, endKey) == 1 {
-		startKey, endKey = endKey, startKey
-
-		ks := make([][]byte, len(keys))
-		vs := make([][]byte, len(keys))
-		for i, _ := range keys {
-			ks[len(ks)-1-i] = keys[i]
-			vs[len(vs)-1-i] = values[i]
-		}
-		keys, values = ks, vs
+	if t, ok := tree.versions[version]; ok {
+		return t.GetRangeWithProof(startKey, endKey, limit)
 	}
-	return startKey, endKey, keys, values
+	return nil, nil, nil, cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 }
diff --git a/vendor/github.com/tendermint/iavl/serialize.go b/vendor/github.com/tendermint/iavl/serialize.go
deleted file mode 100644
index 3b856478..00000000
--- a/vendor/github.com/tendermint/iavl/serialize.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package iavl
-
-// NodeData groups together a key, value and depth.
-type NodeData struct {
-	Key   []byte
-	Value []byte
-	Depth uint8
-}
-
-// SerializeFunc is any implementation that can serialize
-// an iavl Node and its descendants.
-type SerializeFunc func(*Tree, *Node) []NodeData
-
-// RestoreFunc is an implementation that can restore an iavl tree from
-// NodeData.
-type RestoreFunc func(*Tree, []NodeData)
-
-// Restore will take an (empty) tree restore it
-// from the keys returned from a SerializeFunc.
-func Restore(empty *Tree, kvs []NodeData) {
-	for _, kv := range kvs {
-		empty.Set(kv.Key, kv.Value)
-	}
-	empty.Hash()
-}
-
-func RestoreUsingDepth(empty *Tree, kvs []NodeData) {
-	// Create an array of arrays of nodes. We're going to store each depth in
-	// here, forming a kind of pyramid.
-	depths := [][]*Node{}
-
-	// Go through all the leaf nodes, grouping them in pairs and creating their
-	// parents recursively.
-	for _, kv := range kvs {
-		var (
-			// Left and right nodes.
-			l     *Node = nil
-			r     *Node = NewNode(kv.Key, kv.Value, 1)
-			depth uint8 = kv.Depth
-		)
-		// Create depths as needed.
-		for len(depths) < int(depth)+1 {
-			depths = append(depths, []*Node{})
-		}
-		depths[depth] = append(depths[depth], r) // Add the leaf node to this depth.
-
-		// If the nodes at this level are uneven after adding a node to it, it
-		// means we have to wait for another node to be appended before we have
-		// a pair. If we do have a pair, go up the tree until we don't.
-		for d := depth; len(depths[d])%2 == 0; d-- {
-			nodes := depths[d] // List of nodes at this depth.
-
-			l = nodes[len(nodes)-1-1]
-			r = nodes[len(nodes)-1]
-
-			depths[d-1] = append(depths[d-1], &Node{
-				key:       leftmost(r).Key,
-				height:    maxInt8(l.height, r.height) + 1,
-				size:      l.size + r.size,
-				leftNode:  l,
-				rightNode: r,
-				version:   1,
-			})
-		}
-	}
-	empty.root = depths[0][0]
-	empty.Hash()
-}
-
-// InOrderSerialize returns all key-values in the
-// key order (as stored). May be nice to read, but
-// when recovering, it will create a different.
-func InOrderSerialize(t *Tree, root *Node) []NodeData {
-	res := make([]NodeData, 0, root.size)
-	root.traverseWithDepth(t, true, func(node *Node, depth uint8) bool {
-		if node.height == 0 {
-			kv := NodeData{Key: node.key, Value: node.value, Depth: depth}
-			res = append(res, kv)
-		}
-		return false
-	})
-	return res
-}
-
-// StableSerializeBFS serializes the tree in a breadth-first manner.
-func StableSerializeBFS(t *Tree, root *Node) []NodeData {
-	if root == nil {
-		return nil
-	}
-
-	size := root.size
-	visited := map[string][]byte{}
-	keys := make([][]byte, 0, size)
-	numKeys := -1
-
-	// Breadth-first search. At every depth, add keys in search order. Keep
-	// going as long as we find keys at that depth. When we reach a leaf, set
-	// its value in the visited map.
-	// Since we have an AVL+ tree, the inner nodes contain only keys and not
-	// values, while the leaves contain both. Note also that there are N-1 inner
-	// nodes for N keys, so one of the leaf keys is only set once we reach the leaves
-	// of the tree.
-	for depth := uint(0); len(keys) > numKeys; depth++ {
-		numKeys = len(keys)
-		root.traverseDepth(t, depth, func(node *Node) {
-			if _, ok := visited[string(node.key)]; !ok {
-				keys = append(keys, node.key)
-				visited[string(node.key)] = nil
-			}
-			if node.isLeaf() {
-				visited[string(node.key)] = node.value
-			}
-		})
-	}
-
-	nds := make([]NodeData, size)
-	for i, k := range keys {
-		nds[i] = NodeData{k, visited[string(k)], 0}
-	}
-	return nds
-}
-
-// StableSerializeFrey exports the key value pairs of the tree
-// in an order, such that when Restored from those keys, the
-// new tree would have the same structure (and thus same
-// shape) as the original tree.
-//
-// the algorithm is basically this: take the leftmost node
-// of the left half and the leftmost node of the righthalf.
-// Then go down a level...
-// each time adding leftmost node of the right side.
-// (bredth first search)
-//
-// Imagine 8 nodes in a balanced tree, split in half each time
-// 1
-// 1, 5
-// 1, 5, 3, 7
-// 1, 5, 3, 7, 2, 4, 6, 8
-func StableSerializeFrey(t *Tree, top *Node) []NodeData {
-	if top == nil {
-		return nil
-	}
-	size := top.size
-
-	// store all pending nodes for depth-first search
-	queue := make([]*Node, 0, size)
-	queue = append(queue, top)
-
-	// to store all results - started with
-	res := make([]NodeData, 0, size)
-	left := leftmost(top)
-	if left != nil {
-		res = append(res, *left)
-	}
-
-	var n *Node
-	for len(queue) > 0 {
-		// pop
-		n, queue = queue[0], queue[1:]
-
-		// l := n.getLeftNode(tree)
-		l := n.leftNode
-		if isInner(l) {
-			queue = append(queue, l)
-		}
-
-		// r := n.getRightNode(tree)
-		r := n.rightNode
-		if isInner(r) {
-			queue = append(queue, r)
-			left = leftmost(r)
-			if left != nil {
-				res = append(res, *left)
-			}
-		} else if isLeaf(r) {
-			kv := NodeData{Key: r.key, Value: r.value}
-			res = append(res, kv)
-		}
-	}
-
-	return res
-}
-
-func isInner(n *Node) bool {
-	return n != nil && !n.isLeaf()
-}
-
-func isLeaf(n *Node) bool {
-	return n != nil && n.isLeaf()
-}
-
-func leftmost(node *Node) *NodeData {
-	for isInner(node) {
-		node = node.leftNode
-	}
-	if node == nil {
-		return nil
-	}
-	return &NodeData{Key: node.key, Value: node.value}
-}
diff --git a/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go b/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go
deleted file mode 100644
index f62ff313..00000000
--- a/vendor/github.com/tendermint/iavl/sha256truncated/sha256truncated.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Package sha256truncated provides a sha256 hash.Hash whose output is truncated to 20 bytes (160 bits).
-//
-// This is the default hashing algorithm used by IAVL+ trees.
-//
-//   s256 := sha256.New() // crypto/sha256
-//   s256Truncated := New() // this package
-//
-//   // Use like any other hash.Hash ...
-//   // Contract:
-//   s256Trunc.Sum(nil) == s256.Sum(nil)[:20]
-package sha256truncated
-
-import (
-	"crypto/sha256"
-	"hash"
-)
-
-const Size = 20
-
-// New returns a new hash.Hash computing the truncated to the first 20 bytes SHA256 checksum.
-func New() hash.Hash {
-	return &digest{sha256.New()}
-}
-
-func (d *digest) Sum(in []byte) []byte {
-	return d.Hash.Sum(in)[:Size]
-}
-
-func (d *digest) Reset() {
-	d.Hash.Reset()
-}
-
-func (d *digest) Size() int {
-	return Size
-}
-
-func (d *digest) BlockSize() int {
-	return d.Hash.BlockSize()
-}
-
-// digest is just a wrapper around sha256
-type digest struct {
-	hash.Hash
-}
diff --git a/vendor/github.com/tendermint/iavl/tree.go b/vendor/github.com/tendermint/iavl/tree.go
index 85cf6e21..c9290153 100644
--- a/vendor/github.com/tendermint/iavl/tree.go
+++ b/vendor/github.com/tendermint/iavl/tree.go
@@ -4,9 +4,7 @@ import (
 	"fmt"
 	"strings"
 
-	dbm "github.com/tendermint/tmlibs/db"
-
-	"github.com/pkg/errors"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 // Tree is a container for an immutable AVL+ Tree. Changes are performed by
@@ -143,39 +141,6 @@ func (t *Tree) GetByIndex64(index int64) (key []byte, value []byte) {
 	return t.root.getByIndex(t, index)
 }
 
-// GetWithProof gets the value under the key if it exists, or returns nil.
-// A proof of existence or absence is returned alongside the value.
-func (t *Tree) GetWithProof(key []byte) ([]byte, KeyProof, error) {
-	value, eproof, err := t.getWithProof(key)
-	if err == nil {
-		return value, eproof, nil
-	}
-
-	aproof, err := t.keyAbsentProof(key)
-	if err == nil {
-		return nil, aproof, nil
-	}
-	return nil, nil, errors.Wrap(err, "could not construct any proof")
-}
-
-// GetRangeWithProof gets key/value pairs within the specified range and limit. To specify a descending
-// range, swap the start and end keys.
-//
-// Returns a list of keys, a list of values and a proof.
-func (t *Tree) GetRangeWithProof(startKey []byte, endKey []byte, limit int) ([][]byte, [][]byte, *KeyRangeProof, error) {
-	return t.getRangeWithProof(startKey, endKey, limit)
-}
-
-// GetFirstInRangeWithProof gets the first key/value pair in the specified range, with a proof.
-func (t *Tree) GetFirstInRangeWithProof(startKey, endKey []byte) ([]byte, []byte, *KeyFirstInRangeProof, error) {
-	return t.getFirstInRangeWithProof(startKey, endKey)
-}
-
-// GetLastInRangeWithProof gets the last key/value pair in the specified range, with a proof.
-func (t *Tree) GetLastInRangeWithProof(startKey, endKey []byte) ([]byte, []byte, *KeyLastInRangeProof, error) {
-	return t.getLastInRangeWithProof(startKey, endKey)
-}
-
 // Remove tries to remove a key from the tree and if removed, returns its
 // value, and 'true'.
 func (t *Tree) Remove(key []byte) ([]byte, bool) {
@@ -210,9 +175,8 @@ func (t *Tree) Iterate(fn func(key []byte, value []byte) bool) (stopped bool) {
 	return t.root.traverse(t, true, func(node *Node) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
@@ -225,9 +189,8 @@ func (t *Tree) IterateRange(start, end []byte, ascending bool, fn func(key []byt
 	return t.root.traverseInRange(t, start, end, ascending, false, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
@@ -240,19 +203,18 @@ func (t *Tree) IterateRangeInclusive(start, end []byte, ascending bool, fn func(
 	return t.root.traverseInRange(t, start, end, ascending, true, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value, node.version)
-		} else {
-			return false
 		}
+		return false
 	})
 }
 
 // Clone creates a clone of the tree.
 // Used internally by VersionedTree.
-func (tree *Tree) clone() *Tree {
+func (t *Tree) clone() *Tree {
 	return &Tree{
-		root:    tree.root,
-		ndb:     tree.ndb,
-		version: tree.version,
+		root:    t.root,
+		ndb:     t.ndb,
+		version: t.version,
 	}
 }
 
diff --git a/vendor/github.com/tendermint/iavl/tree_dotgraph.go b/vendor/github.com/tendermint/iavl/tree_dotgraph.go
index 57fc2167..14294851 100644
--- a/vendor/github.com/tendermint/iavl/tree_dotgraph.go
+++ b/vendor/github.com/tendermint/iavl/tree_dotgraph.go
@@ -41,7 +41,7 @@ var defaultGraphNodeAttrs = map[string]string{
 	"shape": "circle",
 }
 
-func WriteDOTGraph(w io.Writer, tree *Tree, paths []*PathToKey) {
+func WriteDOTGraph(w io.Writer, tree *Tree, paths []PathToLeaf) {
 	ctx := &graphContext{}
 
 	tree.root.hashWithCount()
@@ -69,7 +69,7 @@ func WriteDOTGraph(w io.Writer, tree *Tree, paths []*PathToKey) {
 		}
 
 		for _, path := range paths {
-			for _, n := range path.InnerNodes {
+			for _, n := range path {
 				if bytes.Equal(n.Left, node.hash) || bytes.Equal(n.Right, node.hash) {
 					graphNode.Attrs["peripheries"] = "2"
 					graphNode.Attrs["style"] = "filled"
diff --git a/vendor/github.com/tendermint/iavl/util.go b/vendor/github.com/tendermint/iavl/util.go
index b28b4877..96f75418 100644
--- a/vendor/github.com/tendermint/iavl/util.go
+++ b/vendor/github.com/tendermint/iavl/util.go
@@ -1,27 +1,45 @@
 package iavl
 
 import (
+	"bytes"
 	"fmt"
+	"sort"
 )
 
-func printNode(node *Node, indent int) {
+// PrintTree prints the whole tree in an indented form.
+func PrintTree(tree *Tree) {
+	ndb, root := tree.ndb, tree.root
+	printNode(ndb, root, 0)
+}
+
+func printNode(ndb *nodeDB, node *Node, indent int) {
 	indentPrefix := ""
 	for i := 0; i < indent; i++ {
 		indentPrefix += "    "
 	}
 
+	if node == nil {
+		fmt.Printf("%s<nil>\n", indentPrefix)
+		return
+	}
 	if node.rightNode != nil {
-		printNode(node.rightNode, indent+1)
+		printNode(ndb, node.rightNode, indent+1)
 	} else if node.rightHash != nil {
-		fmt.Printf("%s    %X\n", indentPrefix, node.rightHash)
+		rightNode := ndb.GetNode(node.rightHash)
+		printNode(ndb, rightNode, indent+1)
 	}
 
-	fmt.Printf("%s%v:%v\n", indentPrefix, node.key, node.height)
+	hash := node._hash()
+	fmt.Printf("%sh:%X\n", indentPrefix, hash)
+	if node.isLeaf() {
+		fmt.Printf("%s%X:%X (%v)\n", indentPrefix, node.key, node.value, node.height)
+	}
 
 	if node.leftNode != nil {
-		printNode(node.leftNode, indent+1)
+		printNode(ndb, node.leftNode, indent+1)
 	} else if node.leftHash != nil {
-		fmt.Printf("%s    %X\n", indentPrefix, node.leftHash)
+		leftNode := ndb.GetNode(node.leftHash)
+		printNode(ndb, leftNode, indent+1)
 	}
 
 }
@@ -32,3 +50,57 @@ func maxInt8(a, b int8) int8 {
 	}
 	return b
 }
+
+func cp(bz []byte) (ret []byte) {
+	ret = make([]byte, len(bz))
+	copy(ret, bz)
+	return ret
+}
+
+// Returns a slice of the same length (big endian)
+// except incremented by one.
+// Appends 0x00 if bz is all 0xFF.
+// CONTRACT: len(bz) > 0
+func cpIncr(bz []byte) (ret []byte) {
+	ret = cp(bz)
+	for i := len(bz) - 1; i >= 0; i-- {
+		if ret[i] < byte(0xFF) {
+			ret[i]++
+			return
+		}
+		ret[i] = byte(0x00)
+		if i == 0 {
+			return append(ret, 0x00)
+			// Overflow
+			return nil
+		}
+	}
+	return []byte{0x00}
+}
+
+type byteslices [][]byte
+
+func (bz byteslices) Len() int {
+	return len(bz)
+}
+
+func (bz byteslices) Less(i, j int) bool {
+	switch bytes.Compare(bz[i], bz[j]) {
+	case -1:
+		return true
+	case 0, 1:
+		return false
+	default:
+		panic("should not happen")
+	}
+}
+
+func (bz byteslices) Swap(i, j int) {
+	bz[j], bz[i] = bz[i], bz[j]
+}
+
+func sortByteSlices(src [][]byte) [][]byte {
+	bzz := byteslices(src)
+	sort.Sort(bzz)
+	return bzz
+}
diff --git a/vendor/github.com/tendermint/iavl/version.go b/vendor/github.com/tendermint/iavl/version.go
index 679f0cc3..9efd1114 100644
--- a/vendor/github.com/tendermint/iavl/version.go
+++ b/vendor/github.com/tendermint/iavl/version.go
@@ -1,3 +1,4 @@
 package iavl
 
-const Version = "0.8.0-dev"
+// Version of iavl.
+const Version = "0.9.2"
diff --git a/vendor/github.com/tendermint/iavl/versioned_tree.go b/vendor/github.com/tendermint/iavl/versioned_tree.go
index 9fef5a15..7d8108ea 100644
--- a/vendor/github.com/tendermint/iavl/versioned_tree.go
+++ b/vendor/github.com/tendermint/iavl/versioned_tree.go
@@ -4,10 +4,11 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/pkg/errors"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
+// ErrVersionDoesNotExist is returned if a requested version does not exist.
 var ErrVersionDoesNotExist = fmt.Errorf("version does not exist")
 
 // VersionedTree is a persistent tree which keeps track of versions.
@@ -160,7 +161,7 @@ func (tree *VersionedTree) SaveVersion() ([]byte, int64, error) {
 			tree.orphaningTree = newOrphaningTree(tree.versions[version].clone())
 			return existingHash, version, nil
 		}
-		return nil, version, errors.Errorf("version %d was already saved to different hash %X (existing hash %X)",
+		return nil, version, fmt.Errorf("version %d was already saved to different hash %X (existing hash %X)",
 			version, newHash, existingHash)
 	}
 
@@ -178,13 +179,13 @@ func (tree *VersionedTree) SaveVersion() ([]byte, int64, error) {
 // longer be accessed.
 func (tree *VersionedTree) DeleteVersion(version int64) error {
 	if version == 0 {
-		return errors.New("version must be greater than 0")
+		return cmn.NewError("version must be greater than 0")
 	}
 	if version == tree.version {
-		return errors.Errorf("cannot delete latest saved version (%d)", version)
+		return cmn.NewError("cannot delete latest saved version (%d)", version)
 	}
 	if _, ok := tree.versions[version]; !ok {
-		return errors.WithStack(ErrVersionDoesNotExist)
+		return cmn.ErrorWrap(ErrVersionDoesNotExist, "")
 	}
 
 	tree.ndb.DeleteVersion(version)
@@ -194,42 +195,3 @@ func (tree *VersionedTree) DeleteVersion(version int64) error {
 
 	return nil
 }
-
-// GetVersionedWithProof gets the value under the key at the specified version
-// if it exists, or returns nil.  A proof of existence or absence is returned
-// alongside the value.
-func (tree *VersionedTree) GetVersionedWithProof(key []byte, version int64) ([]byte, KeyProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetWithProof(key)
-	}
-	return nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedRangeWithProof gets key/value pairs within the specified range
-// and limit. To specify a descending range, swap the start and end keys.
-//
-// Returns a list of keys, a list of values and a proof.
-func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) ([][]byte, [][]byte, *KeyRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetRangeWithProof(startKey, endKey, limit)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedFirstInRangeWithProof gets the first key/value pair in the
-// specified range, with a proof.
-func (tree *VersionedTree) GetVersionedFirstInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyFirstInRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetFirstInRangeWithProof(startKey, endKey)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
-
-// GetVersionedLastInRangeWithProof gets the last key/value pair in the
-// specified range, with a proof.
-func (tree *VersionedTree) GetVersionedLastInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyLastInRangeProof, error) {
-	if t, ok := tree.versions[version]; ok {
-		return t.GetLastInRangeWithProof(startKey, endKey)
-	}
-	return nil, nil, nil, errors.WithStack(ErrVersionDoesNotExist)
-}
diff --git a/vendor/github.com/tendermint/iavl/wire.go b/vendor/github.com/tendermint/iavl/wire.go
new file mode 100644
index 00000000..8549ae4a
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/wire.go
@@ -0,0 +1,17 @@
+package iavl
+
+import (
+	"github.com/tendermint/go-amino"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	// NOTE: It's important that there be no conflicts here,
+	// as that would change the canonical representations.
+	RegisterWire(cdc)
+}
+
+func RegisterWire(cdc *amino.Codec) {
+	// TODO
+}
diff --git a/vendor/github.com/tendermint/abci/client/client.go b/vendor/github.com/tendermint/tendermint/abci/client/client.go
similarity index 97%
rename from vendor/github.com/tendermint/abci/client/client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/client.go
index ad0e5a7a..55858810 100644
--- a/vendor/github.com/tendermint/abci/client/client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/client.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/abci/client/grpc_client.go b/vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
similarity index 98%
rename from vendor/github.com/tendermint/abci/client/grpc_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
index 0f405a9c..502ee0fc 100644
--- a/vendor/github.com/tendermint/abci/client/grpc_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/grpc_client.go
@@ -9,8 +9,8 @@ import (
 	context "golang.org/x/net/context"
 	grpc "google.golang.org/grpc"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var _ Client = (*grpcClient)(nil)
diff --git a/vendor/github.com/tendermint/abci/client/local_client.go b/vendor/github.com/tendermint/tendermint/abci/client/local_client.go
similarity index 98%
rename from vendor/github.com/tendermint/abci/client/local_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/local_client.go
index 64bf5fe0..3d1f8d8e 100644
--- a/vendor/github.com/tendermint/abci/client/local_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/local_client.go
@@ -3,8 +3,8 @@ package abcicli
 import (
 	"sync"
 
-	types "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	types "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var _ Client = (*localClient)(nil)
diff --git a/vendor/github.com/tendermint/abci/client/socket_client.go b/vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
similarity index 96%
rename from vendor/github.com/tendermint/abci/client/socket_client.go
rename to vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
index 5c010168..affea1a9 100644
--- a/vendor/github.com/tendermint/abci/client/socket_client.go
+++ b/vendor/github.com/tendermint/tendermint/abci/client/socket_client.go
@@ -10,8 +10,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const reqQueueSize = 256 // TODO make configurable
@@ -357,6 +357,13 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
 }
 
 func (cli *socketClient) flushQueue() {
+	// mark all in-flight messages as resolved (they will get cli.Error())
+	for req := cli.reqSent.Front(); req != nil; req = req.Next() {
+		reqres := req.Value.(*ReqRes)
+		reqres.Done()
+	}
+
+	// mark all queued messages as resolved
 LOOP:
 	for {
 		select {
diff --git a/vendor/github.com/tendermint/abci/example/code/code.go b/vendor/github.com/tendermint/tendermint/abci/example/code/code.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/example/code/code.go
rename to vendor/github.com/tendermint/tendermint/abci/example/code/code.go
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/README.md b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/README.md
similarity index 100%
rename from vendor/github.com/tendermint/abci/example/kvstore/README.md
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/README.md
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/helpers.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
similarity index 90%
rename from vendor/github.com/tendermint/abci/example/kvstore/helpers.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
index 63bc31a6..0e69fab9 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/helpers.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/helpers.go
@@ -1,8 +1,8 @@
 package kvstore
 
 import (
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // RandVal creates one random validator, with a key derived
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
similarity index 93%
rename from vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
index 4ccbc56b..0f72b44e 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/kvstore.go
@@ -6,10 +6,10 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/tendermint/abci/example/code"
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
similarity index 95%
rename from vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
rename to vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
index 02f7ce74..12ccbab7 100644
--- a/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
+++ b/vendor/github.com/tendermint/tendermint/abci/example/kvstore/persistent_kvstore.go
@@ -7,11 +7,11 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/tendermint/abci/example/code"
-	"github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/abci/example/code"
+	"github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/abci/types/application.go b/vendor/github.com/tendermint/tendermint/abci/types/application.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/application.go
rename to vendor/github.com/tendermint/tendermint/abci/types/application.go
diff --git a/vendor/github.com/tendermint/abci/types/messages.go b/vendor/github.com/tendermint/tendermint/abci/types/messages.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/messages.go
rename to vendor/github.com/tendermint/tendermint/abci/types/messages.go
diff --git a/vendor/github.com/tendermint/abci/types/pubkey.go b/vendor/github.com/tendermint/tendermint/abci/types/pubkey.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/pubkey.go
rename to vendor/github.com/tendermint/tendermint/abci/types/pubkey.go
diff --git a/vendor/github.com/tendermint/abci/types/result.go b/vendor/github.com/tendermint/tendermint/abci/types/result.go
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/result.go
rename to vendor/github.com/tendermint/tendermint/abci/types/result.go
diff --git a/vendor/github.com/tendermint/abci/types/types.pb.go b/vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
similarity index 99%
rename from vendor/github.com/tendermint/abci/types/types.pb.go
rename to vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
index a6b806fe..8135db50 100644
--- a/vendor/github.com/tendermint/abci/types/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/abci/types/types.pb.go
@@ -50,7 +50,7 @@ import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
-import common "github.com/tendermint/tmlibs/common"
+import common "github.com/tendermint/tendermint/libs/common"
 
 import context "golang.org/x/net/context"
 import grpc "google.golang.org/grpc"
diff --git a/vendor/github.com/tendermint/abci/types/types.proto b/vendor/github.com/tendermint/tendermint/abci/types/types.proto
similarity index 100%
rename from vendor/github.com/tendermint/abci/types/types.proto
rename to vendor/github.com/tendermint/tendermint/abci/types/types.proto
diff --git a/vendor/github.com/tendermint/abci/types/util.go b/vendor/github.com/tendermint/tendermint/abci/types/util.go
similarity index 96%
rename from vendor/github.com/tendermint/abci/types/util.go
rename to vendor/github.com/tendermint/tendermint/abci/types/util.go
index 0924ab5f..458024c5 100644
--- a/vendor/github.com/tendermint/abci/types/util.go
+++ b/vendor/github.com/tendermint/tendermint/abci/types/util.go
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"sort"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //------------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/pool.go b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
index 8b964e81..e379d846 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/pool.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
@@ -8,9 +8,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	flow "github.com/tendermint/tmlibs/flowrate"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
index 33dfdd28..449a42ff 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
@@ -5,12 +5,13 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/tendermint/go-amino"
+	amino "github.com/tendermint/go-amino"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const (
@@ -174,7 +175,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
 
 // Receive implements Reactor by handling 4 types of messages (look below).
 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		bcR.Switch.StopPeerForError(src, err)
@@ -342,17 +343,11 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
 }
 
-// DecodeMessage decodes BlockchainMessage.
-// TODO: ensure that bz is completely read.
-func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
-	if err != nil {
-		err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
-	}
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/store.go b/vendor/github.com/tendermint/tendermint/blockchain/store.go
index e7608b2c..f02d4fac 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/store.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/store.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/wire.go b/vendor/github.com/tendermint/tendermint/blockchain/wire.go
index 55b4e60a..70b50565 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/wire.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/wire.go
@@ -2,7 +2,7 @@ package blockchain
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/config/config.go b/vendor/github.com/tendermint/tendermint/config/config.go
index 5ba568f2..2df8eb8e 100644
--- a/vendor/github.com/tendermint/tendermint/config/config.go
+++ b/vendor/github.com/tendermint/tendermint/config/config.go
@@ -45,34 +45,37 @@ type Config struct {
 	BaseConfig `mapstructure:",squash"`
 
 	// Options for services
-	RPC       *RPCConfig       `mapstructure:"rpc"`
-	P2P       *P2PConfig       `mapstructure:"p2p"`
-	Mempool   *MempoolConfig   `mapstructure:"mempool"`
-	Consensus *ConsensusConfig `mapstructure:"consensus"`
-	TxIndex   *TxIndexConfig   `mapstructure:"tx_index"`
+	RPC             *RPCConfig             `mapstructure:"rpc"`
+	P2P             *P2PConfig             `mapstructure:"p2p"`
+	Mempool         *MempoolConfig         `mapstructure:"mempool"`
+	Consensus       *ConsensusConfig       `mapstructure:"consensus"`
+	TxIndex         *TxIndexConfig         `mapstructure:"tx_index"`
+	Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
 }
 
 // DefaultConfig returns a default configuration for a Tendermint node
 func DefaultConfig() *Config {
 	return &Config{
-		BaseConfig: DefaultBaseConfig(),
-		RPC:        DefaultRPCConfig(),
-		P2P:        DefaultP2PConfig(),
-		Mempool:    DefaultMempoolConfig(),
-		Consensus:  DefaultConsensusConfig(),
-		TxIndex:    DefaultTxIndexConfig(),
+		BaseConfig:      DefaultBaseConfig(),
+		RPC:             DefaultRPCConfig(),
+		P2P:             DefaultP2PConfig(),
+		Mempool:         DefaultMempoolConfig(),
+		Consensus:       DefaultConsensusConfig(),
+		TxIndex:         DefaultTxIndexConfig(),
+		Instrumentation: DefaultInstrumentationConfig(),
 	}
 }
 
 // TestConfig returns a configuration that can be used for testing
 func TestConfig() *Config {
 	return &Config{
-		BaseConfig: TestBaseConfig(),
-		RPC:        TestRPCConfig(),
-		P2P:        TestP2PConfig(),
-		Mempool:    TestMempoolConfig(),
-		Consensus:  TestConsensusConfig(),
-		TxIndex:    TestTxIndexConfig(),
+		BaseConfig:      TestBaseConfig(),
+		RPC:             TestRPCConfig(),
+		P2P:             TestP2PConfig(),
+		Mempool:         TestMempoolConfig(),
+		Consensus:       TestConsensusConfig(),
+		TxIndex:         TestTxIndexConfig(),
+		Instrumentation: TestInstrumentationConfig(),
 	}
 }
 
@@ -221,16 +224,36 @@ type RPCConfig struct {
 	// NOTE: This server only supports /broadcast_tx_commit
 	GRPCListenAddress string `mapstructure:"grpc_laddr"`
 
+	// Maximum number of simultaneous connections.
+	// Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`
+
 	// Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
 	Unsafe bool `mapstructure:"unsafe"`
+
+	// Maximum number of simultaneous connections (including WebSocket).
+	// Does not include gRPC connections. See grpc_max_open_connections
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
 }
 
 // DefaultRPCConfig returns a default configuration for the RPC server
 func DefaultRPCConfig() *RPCConfig {
 	return &RPCConfig{
-		ListenAddress:     "tcp://0.0.0.0:26657",
-		GRPCListenAddress: "",
-		Unsafe:            false,
+		ListenAddress: "tcp://0.0.0.0:26657",
+
+		GRPCListenAddress:      "",
+		GRPCMaxOpenConnections: 900, // no ipv4
+
+		Unsafe: false,
+		// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
+		// 1024 - 50 - 50 = 924 = ~900
+		MaxOpenConnections: 900,
 	}
 }
 
@@ -253,6 +276,9 @@ type P2PConfig struct {
 	// Address to listen for incoming connections
 	ListenAddress string `mapstructure:"laddr"`
 
+	// Address to advertise to peers for them to dial
+	ExternalAddress string `mapstructure:"external_address"`
+
 	// Comma separated list of seed nodes to connect to
 	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`
@@ -261,8 +287,8 @@ type P2PConfig struct {
 	// Do not add private peers to this list if you don't want them advertised
 	PersistentPeers string `mapstructure:"persistent_peers"`
 
-	// Skip UPNP port forwarding
-	SkipUPNP bool `mapstructure:"skip_upnp"`
+	// UPNP port forwarding
+	UPNP bool `mapstructure:"upnp"`
 
 	// Path to address book
 	AddrBook string `mapstructure:"addr_book_file"`
@@ -317,6 +343,8 @@ type P2PConfig struct {
 func DefaultP2PConfig() *P2PConfig {
 	return &P2PConfig{
 		ListenAddress:           "tcp://0.0.0.0:26656",
+		ExternalAddress:         "",
+		UPNP:                    false,
 		AddrBook:                defaultAddrBookPath,
 		AddrBookStrict:          true,
 		MaxNumPeers:             50,
@@ -339,7 +367,6 @@ func DefaultP2PConfig() *P2PConfig {
 func TestP2PConfig() *P2PConfig {
 	cfg := DefaultP2PConfig()
 	cfg.ListenAddress = "tcp://0.0.0.0:36656"
-	cfg.SkipUPNP = true
 	cfg.FlushThrottleTimeout = 10
 	cfg.AllowDuplicateIP = true
 	return cfg
@@ -411,7 +438,7 @@ func (cfg *MempoolConfig) WalDir() string {
 //-----------------------------------------------------------------------------
 // ConsensusConfig
 
-// ConsensusConfig defines the confuguration for the Tendermint consensus service,
+// ConsensusConfig defines the configuration for the Tendermint consensus service,
 // including timeouts and details about the WAL and the block structure.
 type ConsensusConfig struct {
 	RootDir string `mapstructure:"home"`
@@ -430,10 +457,6 @@ type ConsensusConfig struct {
 	// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 	SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
 
-	// BlockSize
-	MaxBlockSizeTxs   int `mapstructure:"max_block_size_txs"`
-	MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
-
 	// EmptyBlocks mode and possible interval between empty blocks in seconds
 	CreateEmptyBlocks         bool `mapstructure:"create_empty_blocks"`
 	CreateEmptyBlocksInterval int  `mapstructure:"create_empty_blocks_interval"`
@@ -455,8 +478,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
 		TimeoutPrecommitDelta:       500,
 		TimeoutCommit:               1000,
 		SkipTimeoutCommit:           false,
-		MaxBlockSizeTxs:             10000,
-		MaxBlockSizeBytes:           1, // TODO
 		CreateEmptyBlocks:           true,
 		CreateEmptyBlocksInterval:   0,
 		PeerGossipSleepDuration:     100,
@@ -536,14 +557,14 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
 //-----------------------------------------------------------------------------
 // TxIndexConfig
 
-// TxIndexConfig defines the confuguration for the transaction
+// TxIndexConfig defines the configuration for the transaction
 // indexer, including tags to index.
 type TxIndexConfig struct {
 	// What indexer to use for transactions
 	//
 	// Options:
-	//   1) "null" (default)
-	//   2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+	//   1) "null"
+	//   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
 	Indexer string `mapstructure:"indexer"`
 
 	// Comma-separated list of tags to index (by default the only tag is tx hash)
@@ -573,6 +594,42 @@ func TestTxIndexConfig() *TxIndexConfig {
 	return DefaultTxIndexConfig()
 }
 
+//-----------------------------------------------------------------------------
+// InstrumentationConfig
+
+// InstrumentationConfig defines the configuration for metrics reporting.
+type InstrumentationConfig struct {
+	// When true, Prometheus metrics are served under /metrics on
+	// PrometheusListenAddr.
+	// Check out the documentation for the list of available metrics.
+	Prometheus bool `mapstructure:"prometheus"`
+
+	// Address to listen for Prometheus collector(s) connections.
+	PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
+
+	// Maximum number of simultaneous connections.
+	// If you want to accept more significant number than the default, make sure
+	// you increase your OS limits.
+	// 0 - unlimited.
+	MaxOpenConnections int `mapstructure:"max_open_connections"`
+}
+
+// DefaultInstrumentationConfig returns a default configuration for metrics
+// reporting.
+func DefaultInstrumentationConfig() *InstrumentationConfig {
+	return &InstrumentationConfig{
+		Prometheus:           false,
+		PrometheusListenAddr: ":26660",
+		MaxOpenConnections:   3,
+	}
+}
+
+// TestInstrumentationConfig returns a default configuration for metrics
+// reporting.
+func TestInstrumentationConfig() *InstrumentationConfig {
+	return DefaultInstrumentationConfig()
+}
+
 //-----------------------------------------------------------------------------
 // Utils
 
diff --git a/vendor/github.com/tendermint/tendermint/config/toml.go b/vendor/github.com/tendermint/tendermint/config/toml.go
index 7ed3e971..858d9b31 100644
--- a/vendor/github.com/tendermint/tendermint/config/toml.go
+++ b/vendor/github.com/tendermint/tendermint/config/toml.go
@@ -6,7 +6,7 @@ import (
 	"path/filepath"
 	"text/template"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var configTemplate *template.Template
@@ -119,15 +119,35 @@ laddr = "{{ .RPC.ListenAddress }}"
 # NOTE: This server only supports /broadcast_tx_commit
 grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
 
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
+
 # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
 unsafe = {{ .RPC.Unsafe }}
 
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .RPC.MaxOpenConnections }}
+
 ##### peer to peer configuration options #####
 [p2p]
 
 # Address to listen for incoming connections
 laddr = "{{ .P2P.ListenAddress }}"
 
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address.
+external_address = "{{ .P2P.ExternalAddress }}"
+
 # Comma separated list of seed nodes to connect to
 seeds = "{{ .P2P.Seeds }}"
 
@@ -135,6 +155,9 @@ seeds = "{{ .P2P.Seeds }}"
 # Do not add private peers to this list if you don't want them advertised
 persistent_peers = "{{ .P2P.PersistentPeers }}"
 
+# UPNP port forwarding
+upnp = {{ .P2P.UPNP }}
+
 # Path to address book
 addr_book_file = "{{ js .P2P.AddrBook }}"
 
@@ -199,10 +222,6 @@ timeout_commit = {{ .Consensus.TimeoutCommit }}
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
 
-# BlockSize
-max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
-max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
-
 # EmptyBlocks mode and possible interval between empty blocks in seconds
 create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
 create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
@@ -232,6 +251,23 @@ index_tags = "{{ .TxIndex.IndexTags }}"
 # desirable (see the comment above). IndexTags has a precedence over
 # IndexAllTags (i.e. when given both, IndexTags will be indexed).
 index_all_tags = {{ .TxIndex.IndexAllTags }}
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = {{ .Instrumentation.Prometheus }}
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
+
+# Maximum number of simultaneous connections.
+# If you want to accept more significant number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
 `
 
 /****** these are for test settings ***********/
@@ -287,10 +323,10 @@ var testGenesis = `{
   "validators": [
     {
       "pub_key": {
-        "type": "AC26791624DE60",
+        "type": "tendermint/PubKeyEd25519",
         "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
       },
-      "power": 10,
+      "power": "10",
       "name": ""
     }
   ],
@@ -298,16 +334,16 @@ var testGenesis = `{
 }`
 
 var testPrivValidator = `{
-  "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
+  "address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
   "pub_key": {
-    "type": "AC26791624DE60",
+    "type": "tendermint/PubKeyEd25519",
     "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
   },
   "priv_key": {
-    "type": "954568A3288910",
+    "type": "tendermint/PrivKeyEd25519",
     "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
   },
-  "last_height": 0,
-  "last_round": 0,
+  "last_height": "0",
+  "last_round": "0",
   "last_step": 0
 }`
diff --git a/vendor/github.com/tendermint/tendermint/consensus/README.md b/vendor/github.com/tendermint/tendermint/consensus/README.md
new file mode 100644
index 00000000..1111317d
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/consensus/README.md
@@ -0,0 +1 @@
+See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.
diff --git a/vendor/github.com/tendermint/tendermint/consensus/metrics.go b/vendor/github.com/tendermint/tendermint/consensus/metrics.go
new file mode 100644
index 00000000..253880e8
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/consensus/metrics.go
@@ -0,0 +1,133 @@
+package consensus
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Height of the chain.
+	Height metrics.Gauge
+
+	// Number of rounds.
+	Rounds metrics.Gauge
+
+	// Number of validators.
+	Validators metrics.Gauge
+	// Total power of all validators.
+	ValidatorsPower metrics.Gauge
+	// Number of validators who did not sign.
+	MissingValidators metrics.Gauge
+	// Total power of the missing validators.
+	MissingValidatorsPower metrics.Gauge
+	// Number of validators who tried to double sign.
+	ByzantineValidators metrics.Gauge
+	// Total power of the byzantine validators.
+	ByzantineValidatorsPower metrics.Gauge
+
+	// Time between this and the last block.
+	BlockIntervalSeconds metrics.Histogram
+
+	// Number of transactions.
+	NumTxs metrics.Gauge
+	// Size of the block.
+	BlockSizeBytes metrics.Gauge
+	// Total number of transactions.
+	TotalTxs metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics build using Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "height",
+			Help:      "Height of the chain.",
+		}, []string{}),
+		Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "rounds",
+			Help:      "Number of rounds.",
+		}, []string{}),
+
+		Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "validators",
+			Help:      "Number of validators.",
+		}, []string{}),
+		ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "validators_power",
+			Help:      "Total power of all validators.",
+		}, []string{}),
+		MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "missing_validators",
+			Help:      "Number of validators who did not sign.",
+		}, []string{}),
+		MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "missing_validators_power",
+			Help:      "Total power of the missing validators.",
+		}, []string{}),
+		ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "byzantine_validators",
+			Help:      "Number of validators who tried to double sign.",
+		}, []string{}),
+		ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "byzantine_validators_power",
+			Help:      "Total power of the byzantine validators.",
+		}, []string{}),
+
+		BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+			Subsystem: "consensus",
+			Name:      "block_interval_seconds",
+			Help:      "Time between this and the last block.",
+			Buckets:   []float64{1, 2.5, 5, 10, 60},
+		}, []string{}),
+
+		NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "num_txs",
+			Help:      "Number of transactions.",
+		}, []string{}),
+		BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "block_size_bytes",
+			Help:      "Size of the block.",
+		}, []string{}),
+		TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "consensus",
+			Name:      "total_txs",
+			Help:      "Total number of transactions.",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Height: discard.NewGauge(),
+
+		Rounds: discard.NewGauge(),
+
+		Validators:               discard.NewGauge(),
+		ValidatorsPower:          discard.NewGauge(),
+		MissingValidators:        discard.NewGauge(),
+		MissingValidatorsPower:   discard.NewGauge(),
+		ByzantineValidators:      discard.NewGauge(),
+		ByzantineValidatorsPower: discard.NewGauge(),
+
+		BlockIntervalSeconds: discard.NewHistogram(),
+
+		NumTxs:         discard.NewGauge(),
+		BlockSizeBytes: discard.NewGauge(),
+		TotalTxs:       discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/consensus/reactor.go b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
index 2034ad34..3eb1d73a 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
@@ -9,11 +9,11 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 
 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	tmevents "github.com/tendermint/tendermint/libs/events"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -80,6 +80,9 @@ func (conR *ConsensusReactor) OnStop() {
 	conR.BaseReactor.OnStop()
 	conR.unsubscribeFromBroadcastEvents()
 	conR.conS.Stop()
+	if !conR.FastSync() {
+		conR.conS.Wait()
+	}
 }
 
 // SwitchToConsensus switches from fast_sync mode to consensus mode.
@@ -183,7 +186,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		return
 	}
 
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		conR.Switch.StopPeerForError(src, err)
@@ -1306,11 +1309,9 @@ func RegisterConsensusMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
 }
 
-// DecodeMessage decodes the given bytes into a ConsensusMessage.
-func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
+func decodeMsg(bz []byte) (msg ConsensusMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay.go b/vendor/github.com/tendermint/tendermint/consensus/replay.go
index 13ec9e40..dd940998 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay.go
@@ -10,11 +10,11 @@ import (
 	//"strings"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	//auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	//auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
@@ -273,7 +273,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 			ChainId:         h.genDoc.ChainID,
 			ConsensusParams: csParams,
 			Validators:      validators,
-			AppStateBytes:   h.genDoc.AppStateJSON,
+			AppStateBytes:   h.genDoc.AppState,
 		}
 		res, err := proxyApp.Consensus().InitChainSync(req)
 		if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
index 57204b01..0c0b0dcb 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
@@ -16,9 +16,9 @@ import (
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/state.go b/vendor/github.com/tendermint/tendermint/consensus/state.go
index 3834b151..e4b360e0 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/state.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	fail "github.com/ebuchman/fail-test"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
@@ -115,10 +115,24 @@ type ConsensusState struct {
 	// synchronous pubsub between consensus state and reactor.
 	// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
 	evsw tmevents.EventSwitch
+
+	// for reporting metrics
+	metrics *Metrics
 }
 
+// CSOption sets an optional parameter on the ConsensusState.
+type CSOption func(*ConsensusState)
+
 // NewConsensusState returns a new ConsensusState.
-func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool) *ConsensusState {
+func NewConsensusState(
+	config *cfg.ConsensusConfig,
+	state sm.State,
+	blockExec *sm.BlockExecutor,
+	blockStore sm.BlockStore,
+	mempool sm.Mempool,
+	evpool sm.EvidencePool,
+	options ...CSOption,
+) *ConsensusState {
 	cs := &ConsensusState{
 		config:           config,
 		blockExec:        blockExec,
@@ -132,6 +146,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
 		wal:              nilWAL{},
 		evpool:           evpool,
 		evsw:             tmevents.NewEventSwitch(),
+		metrics:          NopMetrics(),
 	}
 	// set function defaults (may be overwritten before calling Start)
 	cs.decideProposal = cs.defaultDecideProposal
@@ -143,6 +158,9 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
 	// We do that upon Start().
 	cs.reconstructLastCommit(state)
 	cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs)
+	for _, option := range options {
+		option(cs)
+	}
 	return cs
 }
 
@@ -161,6 +179,11 @@ func (cs *ConsensusState) SetEventBus(b *types.EventBus) {
 	cs.blockExec.SetEventBus(b)
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) CSOption {
+	return func(cs *ConsensusState) { cs.metrics = metrics }
+}
+
 // String returns a string.
 func (cs *ConsensusState) String() string {
 	// better not to access shared variables
@@ -291,16 +314,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
 
 // OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
 func (cs *ConsensusState) OnStop() {
-	cs.BaseService.OnStop()
-
 	cs.evsw.Stop()
-
 	cs.timeoutTicker.Stop()
-
-	// Make BaseService.Wait() wait until cs.wal.Wait()
-	if cs.IsRunning() {
-		cs.wal.Wait()
-	}
 }
 
 // Wait waits for the the main routine to return.
@@ -387,6 +402,7 @@ func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *t
 // internal functions for managing the state
 
 func (cs *ConsensusState) updateHeight(height int64) {
+	cs.metrics.Height.Set(float64(height))
 	cs.Height = height
 }
 
@@ -579,6 +595,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
 
 			// close wal now that we're done writing to it
 			cs.wal.Stop()
+			cs.wal.Wait()
 
 			close(cs.done)
 			return
@@ -600,7 +617,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		err = cs.setProposal(msg.Proposal)
 	case *BlockPartMessage:
 		// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
-		_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
+		_, err = cs.addProposalBlockPart(msg, peerID)
 		if err != nil && msg.Round != cs.Round {
 			cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
 			err = nil
@@ -722,6 +739,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
 	cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping
 
 	cs.eventBus.PublishEventNewRound(cs.RoundStateEvent())
+	cs.metrics.Rounds.Set(float64(round))
 
 	// Wait for txs to be available in the mempool
 	// before we enterPropose in round 0. If the last block changed the app hash,
@@ -907,7 +925,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
 	}
 
 	// Mempool validated transactions
-	txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
+	txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs)
 	block, parts := cs.state.MakeBlock(cs.Height, txs, commit)
 	evidence := cs.evpool.PendingEvidence()
 	block.AddEvidence(evidence)
@@ -1280,6 +1298,9 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
 
 	fail.Fail() // XXX
 
+	// must be called before we update state
+	cs.recordMetrics(height, block)
+
 	// NewHeightStep!
 	cs.updateToState(stateCopy)
 
@@ -1295,6 +1316,44 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
 	// * cs.StartTime is set to when we will start round0.
 }
 
+func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
+	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
+	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
+	missingValidators := 0
+	missingValidatorsPower := int64(0)
+	for i, val := range cs.Validators.Validators {
+		var vote *types.Vote
+		if i < len(block.LastCommit.Precommits) {
+			vote = block.LastCommit.Precommits[i]
+		}
+		if vote == nil {
+			missingValidators++
+			missingValidatorsPower += val.VotingPower
+		}
+	}
+	cs.metrics.MissingValidators.Set(float64(missingValidators))
+	cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower))
+	cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence)))
+	byzantineValidatorsPower := int64(0)
+	for _, ev := range block.Evidence.Evidence {
+		if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil {
+			byzantineValidatorsPower += val.VotingPower
+		}
+	}
+	cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower))
+
+	if height > 1 {
+		lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
+		cs.metrics.BlockIntervalSeconds.Observe(
+			block.Time.Sub(lastBlockMeta.Header.Time).Seconds(),
+		)
+	}
+
+	cs.metrics.NumTxs.Set(float64(block.NumTxs))
+	cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
+	cs.metrics.TotalTxs.Set(float64(block.TotalTxs))
+}
+
 //-----------------------------------------------------------------------------
 
 func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
@@ -1333,17 +1392,22 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
 
 // NOTE: block is not necessarily valid.
 // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
-func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
+func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
+	height, round, part := msg.Height, msg.Round, msg.Part
+
 	// Blocks might be reused, so round mismatch is OK
 	if cs.Height != height {
-		cs.Logger.Debug("Received block part from wrong height", "height", height)
+		cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round)
 		return false, nil
 	}
 
 	// We're not expecting a block part.
 	if cs.ProposalBlockParts == nil {
-		cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
-		return false, nil // TODO: bad peer? Return error?
+		// NOTE: this can happen when we've gone to a higher round and
+		// then receive parts from the previous round - not necessarily a bad peer.
+		cs.Logger.Info("Received a block part when we're not expecting any",
+			"height", height, "round", round, "index", part.Index, "peer", peerID)
+		return false, nil
 	}
 
 	added, err = cs.ProposalBlockParts.AddPart(part)
@@ -1377,7 +1441,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (
 			// procedure at this point.
 		}
 
-		if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
+		if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
 			// Move onto the next step
 			cs.enterPrevote(height, cs.Round)
 		} else if cs.Step == cstypes.RoundStepCommit {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/ticker.go b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
index b37b7c49..a1e2174c 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/ticker.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
@@ -3,8 +3,8 @@ package consensus
 import (
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
index 3c986794..70a38668 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type RoundVoteSet struct {
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go b/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
index dcb6c8e0..7a5d69b8 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/peer_round_state.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go b/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
index 14da1f14..cca560cc 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/round_state.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/wire.go b/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
index bd5c4497..6342d7eb 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/wire.go
@@ -2,7 +2,7 @@ package types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/consensus/version.go b/vendor/github.com/tendermint/tendermint/consensus/version.go
index 2c137bf7..5c74a16d 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/version.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/version.go
@@ -1,7 +1,7 @@
 package consensus
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // kind of arbitrary
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal.go b/vendor/github.com/tendermint/tendermint/consensus/wal.go
index 3d9bf8af..8c4c10bc 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal.go
@@ -12,8 +12,8 @@ import (
 
 	amino "github.com/tendermint/go-amino"
 	"github.com/tendermint/tendermint/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
index f61af15f..f3a36580 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
@@ -10,24 +10,24 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	"github.com/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 // WALWithNBlocks generates a consensus WAL. It does this by spining up a
 // stripped down version of node (proxy app, event bus, consensus state) with a
 // persistent kvstore application and special consensus wal instance
 // (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL
-// content.
+// content. If the node fails to produce given numBlocks, it returns an error.
 func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	config := getConfig()
 
@@ -89,15 +89,15 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	if err := consensusState.Start(); err != nil {
 		return nil, errors.Wrap(err, "failed to start consensus state")
 	}
-	defer consensusState.Stop()
 
 	select {
 	case <-numBlocksWritten:
+		consensusState.Stop()
 		wr.Flush()
 		return b.Bytes(), nil
 	case <-time.After(1 * time.Minute):
-		wr.Flush()
-		return b.Bytes(), fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
+		consensusState.Stop()
+		return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
 	}
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wire.go b/vendor/github.com/tendermint/tendermint/consensus/wire.go
index 81223c68..5f231c0c 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wire.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wire.go
@@ -2,7 +2,7 @@ package consensus
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md b/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md
new file mode 100644
index 00000000..dd7c1039
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/CHANGELOG.md
@@ -0,0 +1,154 @@
+# Changelog
+
+## 0.9.0
+
+BREAKING CHANGES
+
+- `priv.PubKey()` no longer returns an error. Any applicable errors (such as when fetching the public key from a hardware wallet) should be checked and returned when constructing the private key.
+
+## 0.8.0
+
+**TBD**
+
+## 0.7.0
+
+**May 30th, 2018**
+
+BREAKING CHANGES
+
+No breaking changes compared to 0.6.2, but making up for the version bump that
+should have happened in 0.6.1.
+
+We also bring in the `tmlibs/merkle` package with breaking changes:
+
+- change the hash function from RIPEMD160 to tmhash (first 20-bytes of SHA256)
+- remove unused funcs and unexport SimpleMap
+
+FEATURES
+
+- [xchacha20poly1305] New authenticated encryption module
+- [merkle] Moved in from tmlibs
+- [merkle/tmhash] New hash function: the first 20-bytes of SHA256
+
+IMPROVEMENTS
+
+- Remove some dead code
+- Use constant-time compare for signatures
+
+BUG FIXES
+
+- Fix MixEntropy weakness
+- Fix PrivKeyEd25519.Generate()
+
+## 0.6.2 (April 9, 2018)
+
+IMPROVEMENTS
+
+- Update for latest go-amino
+
+## 0.6.1 (March 26, 2018)
+
+BREAKING CHANGES
+
+- Encoding uses MarshalBinaryBare rather than MarshalBinary (which auto-length-prefixes) for pub/priv/sig.
+
+## 0.6.0 (March 2, 2018)
+
+BREAKING CHANGES
+
+- Update Amino names from "com.tendermint/..." to "tendermint/"
+
+## 0.5.0 (March 2, 2018)
+
+BREAKING CHANGES
+
+- nano: moved to `_nano` now while we're having build issues
+- bcrypt: moved to `keys/bcrypt`
+- hd: moved to `keys/hd`; `BTC` added to some function names; other function cleanup
+- keys/cryptostore: moved to `keys`, renamed to `keybase`, and completely refactored
+- keys: moved BIP39 related code to `keys/words`
+
+FEATURE
+
+- `Address` is a type alias for `cmn.HexBytes`
+
+BUG FIX
+
+- PrivKey comparisons done in constant time
+
+## 0.4.1 (October 27, 2017)
+
+This release removes support for bcrypt as it was merged too soon without an upgrade plan
+for existing keys.
+
+REVERTS THE FOLLOWING COMMITS:
+
+- Parameterize and lower bcrypt cost - dfc4cdd2d71513e4a9922d679c74f36357c4c862
+- Upgrade keys to use bcrypt with salts (#38)  - 8e7f0e7701f92206679ad093d013b9b162427631
+
+## 0.4.0 (October 27, 2017)
+
+BREAKING CHANGES:
+
+- `keys`: use bcrypt plus salt
+
+FEATURES:
+
+- add support for signing via Ledger Nano
+
+IMPROVEMENTS:
+
+- linting and comments
+
+## 0.3.0 (September 22, 2017)
+
+BREAKING CHANGES:
+
+- Remove `cmd` and `keys/tx` packages altogether: move it to the cosmos-sdk
+- `cryptostore.Generator` takes a secret 
+- Remove `String()` from `Signature` interface
+
+FEATURES:
+
+- `keys`: add CRC16 error correcting code
+
+IMPROVEMENTS:
+
+- Allow no passwords on keys for development convenience
+
+
+## 0.2.1 (June 21, 2017)
+
+- Improve keys command
+  - No password prompts in non-interactive mode (echo 'foobar' | keys new foo)
+  - Added support for seed phrases
+    - Seed phrase now returned on `keys new`
+    - Add `keys restore` to restore private key from key phrase
+    - Checksum to verify typos in the seed phrase (rather than just a useless key)
+  - Add `keys delete` to remove a key if needed
+
+## 0.2.0 (May 18, 2017)
+
+BREAKING CHANGES:
+
+- [hd] The following functions no longer take a `coin string` as argument: `ComputeAddress`, `AddrFromPubKeyBytes`, `ComputeAddressForPrivKey`, `ComputeWIF`, `WIFFromPrivKeyBytes`
+- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below):
+  - interfaces are renamed `XxxInner`, and are not for use outside the package, though they must be exposed for sake of serialization.
+  - `Xxx` is now a struct that wraps the corresponding `XxxInner` interface
+
+FEATURES:
+
+- `github.com/tendermint/go-keys -> github.com/tendermint/go-crypto/keys` - command and lib for generating and managing encrypted keys
+- [hd] New function `WIFFromPrivKeyBytes(privKeyBytes []byte, compress bool) string`
+- Changes to `PrivKey`, `PubKey`, and `Signature` (denoted `Xxx` below):
+  - Expose a new method `Unwrap() XxxInner` on the `Xxx` struct which returns the corresponding `XxxInner` interface
+  - Expose a new method `Wrap() Xxx` on the `XxxInner` interface which returns the corresponding `Xxx` struct
+
+IMPROVEMENTS:
+
+- Update to use new `tmlibs` repository
+
+## 0.1.0 (April 14, 2017)
+
+Initial release
+
diff --git a/vendor/github.com/tendermint/tendermint/crypto/README.md b/vendor/github.com/tendermint/tendermint/crypto/README.md
new file mode 100644
index 00000000..32afde69
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/README.md
@@ -0,0 +1,25 @@
+# crypto
+
+crypto is the cryptographic package adapted for Tendermint's uses
+
+## Importing it
+`import "github.com/tendermint/tendermint/crypto"`
+
+## Binary encoding
+
+For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md).
+
+## JSON Encoding
+
+crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
+
+```go
+Example Amino:JSON encodings:
+
+crypto.PrivKeyEd25519     - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
+crypto.SignatureEd25519   - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="}
+crypto.PubKeyEd25519      - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
+crypto.PrivKeySecp256k1   - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="}
+crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="}
+crypto.PubKeySecp256k1    - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"}
+```
diff --git a/vendor/github.com/tendermint/go-crypto/amino.go b/vendor/github.com/tendermint/tendermint/crypto/amino.go
similarity index 85%
rename from vendor/github.com/tendermint/go-crypto/amino.go
rename to vendor/github.com/tendermint/tendermint/crypto/amino.go
index 89636895..6a8703fc 100644
--- a/vendor/github.com/tendermint/go-crypto/amino.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/amino.go
@@ -15,6 +15,7 @@ func init() {
 	RegisterAmino(cdc)
 }
 
+// RegisterAmino registers all crypto related types in the given (amino) codec.
 func RegisterAmino(cdc *amino.Codec) {
 	cdc.RegisterInterface((*PubKey)(nil), nil)
 	cdc.RegisterConcrete(PubKeyEd25519{},
@@ -30,7 +31,7 @@ func RegisterAmino(cdc *amino.Codec) {
 
 	cdc.RegisterInterface((*Signature)(nil), nil)
 	cdc.RegisterConcrete(SignatureEd25519{},
-		"tendermint/SignatureKeyEd25519", nil)
+		"tendermint/SignatureEd25519", nil)
 	cdc.RegisterConcrete(SignatureSecp256k1{},
-		"tendermint/SignatureKeySecp256k1", nil)
+		"tendermint/SignatureSecp256k1", nil)
 }
diff --git a/vendor/github.com/tendermint/go-crypto/armor.go b/vendor/github.com/tendermint/tendermint/crypto/armor.go
similarity index 76%
rename from vendor/github.com/tendermint/go-crypto/armor.go
rename to vendor/github.com/tendermint/tendermint/crypto/armor.go
index 5f199df4..4146048a 100644
--- a/vendor/github.com/tendermint/go-crypto/armor.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/armor.go
@@ -2,9 +2,9 @@ package crypto
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 
-	. "github.com/tendermint/tmlibs/common"
 	"golang.org/x/crypto/openpgp/armor"
 )
 
@@ -12,15 +12,15 @@ func EncodeArmor(blockType string, headers map[string]string, data []byte) strin
 	buf := new(bytes.Buffer)
 	w, err := armor.Encode(buf, blockType, headers)
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	_, err = w.Write(data)
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	err = w.Close()
 	if err != nil {
-		PanicSanity("Error encoding ascii armor: " + err.Error())
+		panic(fmt.Errorf("could not encode ascii armor: %s", err))
 	}
 	return buf.String()
 }
diff --git a/vendor/github.com/tendermint/tendermint/crypto/doc.go b/vendor/github.com/tendermint/tendermint/crypto/doc.go
new file mode 100644
index 00000000..544e0df3
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/doc.go
@@ -0,0 +1,45 @@
+// crypto is a customized/convenience cryptography package for supporting
+// Tendermint.
+
+// It wraps select functionality of equivalent functions in the
+// Go standard library, for easy usage with our libraries.
+
+// Keys:
+
+// All key generation functions return an instance of the PrivKey interface
+// which implements methods
+
+//     AssertIsPrivKeyInner()
+//     Bytes() []byte
+//     Sign(msg []byte) Signature
+//     PubKey() PubKey
+//     Equals(PrivKey) bool
+//     Wrap() PrivKey
+
+// From the above method we can:
+// a) Retrieve the public key if needed
+
+//     pubKey := key.PubKey()
+
+// For example:
+//     privKey, err := crypto.GenPrivKeyEd25519()
+//     if err != nil {
+// 	...
+//     }
+//     pubKey := privKey.PubKey()
+//     ...
+//     // And then you can use the private and public key
+//     doSomething(privKey, pubKey)
+
+// We also provide hashing wrappers around algorithms:
+
+// Sha256
+//     sum := crypto.Sha256([]byte("This is Tendermint"))
+//     fmt.Printf("%x\n", sum)
+
+// Ripemd160
+//     sum := crypto.Ripemd160([]byte("This is consensus"))
+//     fmt.Printf("%x\n", sum)
+package crypto
+
+// TODO: Add more docs in here
diff --git a/vendor/github.com/tendermint/go-crypto/hash.go b/vendor/github.com/tendermint/tendermint/crypto/hash.go
similarity index 100%
rename from vendor/github.com/tendermint/go-crypto/hash.go
rename to vendor/github.com/tendermint/tendermint/crypto/hash.go
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/README.md b/vendor/github.com/tendermint/tendermint/crypto/merkle/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/merkle/README.md
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/README.md
diff --git a/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go
new file mode 100644
index 00000000..865c3021
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/doc.go
@@ -0,0 +1,31 @@
+/*
+Package merkle computes a deterministic minimal height Merkle tree hash.
+If the number of items is not a power of two, some leaves
+will be at different levels. Tries to keep both sides of
+the tree the same size, but the left may be one greater.
+
+Use this for short deterministic trees, such as the validator list.
+For larger datasets, use IAVLTree.
+
+Be aware that the current implementation by itself does not prevent
+second pre-image attacks. Hence, use this library with caution.
+Otherwise you might run into similar issues as, e.g., in early Bitcoin:
+https://bitcointalk.org/?topic=102395
+
+                        *
+                       / \
+                     /     \
+                   /         \
+                 /             \
+                *               *
+               / \             / \
+              /   \           /   \
+             /     \         /     \
+            *       *       *       h6
+           / \     / \     / \
+          h0  h1  h2  h3  h4  h5
+
+TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.
+
+*/
+package merkle
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
similarity index 53%
rename from vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
index cd38de76..ba4b9309 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_map.go
@@ -1,47 +1,48 @@
 package merkle
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
-	"golang.org/x/crypto/ripemd160"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-type SimpleMap struct {
+// Merkle tree from a map.
+// Leaves are `hash(key) | hash(value)`.
+// Leaves are sorted before Merkle hashing.
+type simpleMap struct {
 	kvs    cmn.KVPairs
 	sorted bool
 }
 
-func NewSimpleMap() *SimpleMap {
-	return &SimpleMap{
+func newSimpleMap() *simpleMap {
+	return &simpleMap{
 		kvs:    nil,
 		sorted: false,
 	}
 }
 
-func (sm *SimpleMap) Set(key string, value Hasher) {
+// Set hashes the key and value and appends it to the kv pairs.
+func (sm *simpleMap) Set(key string, value Hasher) {
 	sm.sorted = false
 
-	// Hash the key to blind it... why not?
-	khash := SimpleHashFromBytes([]byte(key))
-
-	// And the value is hashed too, so you can
+	// The value is hashed, so you can
 	// check for equality with a cached value (say)
 	// and make a determination to fetch or not.
 	vhash := value.Hash()
 
 	sm.kvs = append(sm.kvs, cmn.KVPair{
-		Key:   khash,
+		Key:   []byte(key),
 		Value: vhash,
 	})
 }
 
-// Merkle root hash of items sorted by key
+// Hash Merkle root hash of items sorted by key
 // (UNSTABLE: and by value too if duplicate key).
-func (sm *SimpleMap) Hash() []byte {
+func (sm *simpleMap) Hash() []byte {
 	sm.Sort()
 	return hashKVPairs(sm.kvs)
 }
 
-func (sm *SimpleMap) Sort() {
+func (sm *simpleMap) Sort() {
 	if sm.sorted {
 		return
 	}
@@ -50,7 +51,8 @@ func (sm *SimpleMap) Sort() {
 }
 
 // Returns a copy of sorted KVPairs.
-func (sm *SimpleMap) KVPairs() cmn.KVPairs {
+// NOTE these contain the hashed key and value.
+func (sm *simpleMap) KVPairs() cmn.KVPairs {
 	sm.Sort()
 	kvs := make(cmn.KVPairs, len(sm.kvs))
 	copy(kvs, sm.kvs)
@@ -60,10 +62,12 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs {
 //----------------------------------------
 
 // A local extension to KVPair that can be hashed.
+// Key and value are length prefixed and concatenated,
+// then hashed.
 type KVPair cmn.KVPair
 
 func (kv KVPair) Hash() []byte {
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	err := encodeByteSlice(hasher, kv.Key)
 	if err != nil {
 		panic(err)
@@ -76,9 +80,9 @@ func (kv KVPair) Hash() []byte {
 }
 
 func hashKVPairs(kvs cmn.KVPairs) []byte {
-	kvsH := make([]Hasher, 0, len(kvs))
-	for _, kvp := range kvs {
-		kvsH = append(kvsH, KVPair(kvp))
+	kvsH := make([]Hasher, len(kvs))
+	for i, kvp := range kvs {
+		kvsH[i] = KVPair(kvp)
 	}
 	return SimpleHashFromHashers(kvsH)
 }
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
similarity index 79%
rename from vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
index ca6ccf37..2541b6d3 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_proof.go
@@ -5,10 +5,12 @@ import (
 	"fmt"
 )
 
+// SimpleProof represents a simple merkle proof.
 type SimpleProof struct {
 	Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
 }
 
+// SimpleProofsFromHashers computes inclusion proof for given items.
 // proofs[0] is the proof for items[0].
 func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) {
 	trails, rootSPN := trailsFromHashers(items)
@@ -22,8 +24,11 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP
 	return
 }
 
-func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*SimpleProof) {
-	sm := NewSimpleMap()
+// SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values
+// in the underlying key-value pairs.
+// The keys are sorted before the proofs are computed.
+func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) {
+	sm := newSimpleMap()
 	for k, v := range m {
 		sm.Set(k, v)
 	}
@@ -33,7 +38,15 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*Simple
 	for _, kvp := range kvs {
 		kvsH = append(kvsH, KVPair(kvp))
 	}
-	return SimpleProofsFromHashers(kvsH)
+
+	rootHash, proofList := SimpleProofsFromHashers(kvsH)
+	proofs = make(map[string]*SimpleProof)
+	keys = make([]string, len(proofList))
+	for i, kvp := range kvs {
+		proofs[string(kvp.Key)] = proofList[i]
+		keys[i] = string(kvp.Key)
+	}
+	return
 }
 
 // Verify that leafHash is a leaf hash of the simple-merkle-tree
@@ -43,10 +56,13 @@ func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []
 	return computedHash != nil && bytes.Equal(computedHash, rootHash)
 }
 
+// String implements the stringer interface for SimpleProof.
+// It is a wrapper around StringIndented.
 func (sp *SimpleProof) String() string {
 	return sp.StringIndented("")
 }
 
+// StringIndented generates a canonical string representation of a SimpleProof.
 func (sp *SimpleProof) StringIndented(indent string) string {
 	return fmt.Sprintf(`SimpleProof{
 %s  Aunts: %X
@@ -90,7 +106,7 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][
 	}
 }
 
-// Helper structure to construct merkle proof.
+// SimpleProofNode is a helper structure to construct merkle proof.
 // The node and the tree is thrown away afterwards.
 // Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
 // node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
@@ -102,8 +118,8 @@ type SimpleProofNode struct {
 	Right  *SimpleProofNode // Right sibling (only one of Left,Right is set)
 }
 
-// Starting from a leaf SimpleProofNode, FlattenAunts() will return
-// the inner hashes for the item corresponding to the leaf.
+// FlattenAunts will return the inner hashes for the item corresponding to the leaf,
+// starting from a leaf SimpleProofNode.
 func (spn *SimpleProofNode) FlattenAunts() [][]byte {
 	// Nonrecursive impl.
 	innerHashes := [][]byte{}
diff --git a/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go
new file mode 100644
index 00000000..46a07590
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/simple_tree.go
@@ -0,0 +1,58 @@
+package merkle
+
+import (
+	"github.com/tendermint/tendermint/crypto/tmhash"
+)
+
+// SimpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right).
+func SimpleHashFromTwoHashes(left, right []byte) []byte {
+	var hasher = tmhash.New()
+	err := encodeByteSlice(hasher, left)
+	if err != nil {
+		panic(err)
+	}
+	err = encodeByteSlice(hasher, right)
+	if err != nil {
+		panic(err)
+	}
+	return hasher.Sum(nil)
+}
+
+// SimpleHashFromHashers computes a Merkle tree from items that can be hashed.
+func SimpleHashFromHashers(items []Hasher) []byte {
+	hashes := make([][]byte, len(items))
+	for i, item := range items {
+		hash := item.Hash()
+		hashes[i] = hash
+	}
+	return simpleHashFromHashes(hashes)
+}
+
+// SimpleHashFromMap computes a Merkle tree from sorted map.
+// Like calling SimpleHashFromHashers with
+// `item = []byte(Hash(key) | Hash(value))`,
+// sorted by `item`.
+func SimpleHashFromMap(m map[string]Hasher) []byte {
+	sm := newSimpleMap()
+	for k, v := range m {
+		sm.Set(k, v)
+	}
+	return sm.Hash()
+}
+
+//----------------------------------------------------------------
+
+// Expects hashes!
+func simpleHashFromHashes(hashes [][]byte) []byte {
+	// Recursive impl.
+	switch len(hashes) {
+	case 0:
+		return nil
+	case 1:
+		return hashes[0]
+	default:
+		left := simpleHashFromHashes(hashes[:(len(hashes)+1)/2])
+		right := simpleHashFromHashes(hashes[(len(hashes)+1)/2:])
+		return SimpleHashFromTwoHashes(left, right)
+	}
+}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/types.go b/vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
similarity index 72%
rename from vendor/github.com/tendermint/tmlibs/merkle/types.go
rename to vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
index a0c491a7..2fcb3f39 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/types.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/merkle/types.go
@@ -1,10 +1,12 @@
 package merkle
 
 import (
-	"encoding/binary"
 	"io"
+
+	amino "github.com/tendermint/go-amino"
 )
 
+// Tree is a Merkle tree interface.
 type Tree interface {
 	Size() (size int)
 	Height() (height int8)
@@ -23,25 +25,14 @@ type Tree interface {
 	IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool)
 }
 
+// Hasher represents a hashable piece of data which can be hashed in the Tree.
 type Hasher interface {
 	Hash() []byte
 }
 
 //-----------------------------------------------------------------------
-// NOTE: these are duplicated from go-amino so we dont need go-amino as a dep
 
+// Uvarint length prefixed byteslice
 func encodeByteSlice(w io.Writer, bz []byte) (err error) {
-	err = encodeUvarint(w, uint64(len(bz)))
-	if err != nil {
-		return
-	}
-	_, err = w.Write(bz)
-	return
-}
-
-func encodeUvarint(w io.Writer, i uint64) (err error) {
-	var buf [10]byte
-	n := binary.PutUvarint(buf[:], i)
-	_, err = w.Write(buf[0:n])
-	return
+	return amino.EncodeByteSlice(w, bz)
 }
diff --git a/vendor/github.com/tendermint/go-crypto/priv_key.go b/vendor/github.com/tendermint/tendermint/crypto/priv_key.go
similarity index 84%
rename from vendor/github.com/tendermint/go-crypto/priv_key.go
rename to vendor/github.com/tendermint/tendermint/crypto/priv_key.go
index 61d373f6..dbfe64c3 100644
--- a/vendor/github.com/tendermint/go-crypto/priv_key.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/priv_key.go
@@ -6,7 +6,6 @@ import (
 	secp256k1 "github.com/btcsuite/btcd/btcec"
 	"github.com/tendermint/ed25519"
 	"github.com/tendermint/ed25519/extra25519"
-	. "github.com/tendermint/tmlibs/common"
 )
 
 func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
@@ -18,7 +17,7 @@ func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
 
 type PrivKey interface {
 	Bytes() []byte
-	Sign(msg []byte) Signature
+	Sign(msg []byte) (Signature, error)
 	PubKey() PubKey
 	Equals(PrivKey) bool
 }
@@ -31,17 +30,13 @@ var _ PrivKey = PrivKeyEd25519{}
 type PrivKeyEd25519 [64]byte
 
 func (privKey PrivKeyEd25519) Bytes() []byte {
-	bz, err := cdc.MarshalBinaryBare(privKey)
-	if err != nil {
-		panic(err)
-	}
-	return bz
+	return cdc.MustMarshalBinaryBare(privKey)
 }
 
-func (privKey PrivKeyEd25519) Sign(msg []byte) Signature {
+func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) {
 	privKeyBytes := [64]byte(privKey)
 	signatureBytes := ed25519.Sign(&privKeyBytes, msg)
-	return SignatureEd25519(*signatureBytes)
+	return SignatureEd25519(*signatureBytes), nil
 }
 
 func (privKey PrivKeyEd25519) PubKey() PubKey {
@@ -67,12 +62,6 @@ func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte {
 	return keyCurve25519
 }
 
-/*
-func (privKey PrivKeyEd25519) String() string {
-	return Fmt("PrivKeyEd25519{*****}")
-}
-*/
-
 // Deterministically generates new priv-key bytes from key.
 func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
 	bz, err := cdc.MarshalBinaryBare(struct {
@@ -83,9 +72,10 @@ func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
 		panic(err)
 	}
 	newBytes := Sha256(bz)
-	var newKey [64]byte
-	copy(newKey[:], newBytes)
-	return PrivKeyEd25519(newKey)
+	newKey := new([64]byte)
+	copy(newKey[:32], newBytes)
+	ed25519.MakePublicKey(newKey)
+	return PrivKeyEd25519(*newKey)
 }
 
 func GenPrivKeyEd25519() PrivKeyEd25519 {
@@ -113,20 +103,16 @@ var _ PrivKey = PrivKeySecp256k1{}
 type PrivKeySecp256k1 [32]byte
 
 func (privKey PrivKeySecp256k1) Bytes() []byte {
-	bz, err := cdc.MarshalBinaryBare(privKey)
-	if err != nil {
-		panic(err)
-	}
-	return bz
+	return cdc.MustMarshalBinaryBare(privKey)
 }
 
-func (privKey PrivKeySecp256k1) Sign(msg []byte) Signature {
+func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) {
 	priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
 	sig__, err := priv__.Sign(Sha256(msg))
 	if err != nil {
-		PanicSanity(err)
+		return nil, err
 	}
-	return SignatureSecp256k1(sig__.Serialize())
+	return SignatureSecp256k1(sig__.Serialize()), nil
 }
 
 func (privKey PrivKeySecp256k1) PubKey() PubKey {
@@ -146,12 +132,6 @@ func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool {
 	}
 }
 
-/*
-func (privKey PrivKeySecp256k1) String() string {
-	return Fmt("PrivKeySecp256k1{*****}")
-}
-*/
-
 /*
 // Deterministically generates new priv-key bytes from key.
 func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 {
diff --git a/vendor/github.com/tendermint/go-crypto/pub_key.go b/vendor/github.com/tendermint/tendermint/crypto/pub_key.go
similarity index 83%
rename from vendor/github.com/tendermint/go-crypto/pub_key.go
rename to vendor/github.com/tendermint/tendermint/crypto/pub_key.go
index 9be64acd..588c5411 100644
--- a/vendor/github.com/tendermint/go-crypto/pub_key.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/pub_key.go
@@ -5,11 +5,16 @@ import (
 	"crypto/sha256"
 	"fmt"
 
+	"golang.org/x/crypto/ripemd160"
+
 	secp256k1 "github.com/btcsuite/btcd/btcec"
+
 	"github.com/tendermint/ed25519"
 	"github.com/tendermint/ed25519/extra25519"
-	cmn "github.com/tendermint/tmlibs/common"
-	"golang.org/x/crypto/ripemd160"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )
 
 // An address is a []byte, but hex-encoded even in JSON.
@@ -35,14 +40,14 @@ type PubKey interface {
 
 var _ PubKey = PubKeyEd25519{}
 
+const PubKeyEd25519Size = 32
+
 // Implements PubKeyInner
-type PubKeyEd25519 [32]byte
+type PubKeyEd25519 [PubKeyEd25519Size]byte
 
+// Address is the SHA256-20 of the raw pubkey bytes.
 func (pubKey PubKeyEd25519) Address() Address {
-	// append type byte
-	hasher := ripemd160.New()
-	hasher.Write(pubKey.Bytes()) // does not error
-	return Address(hasher.Sum(nil))
+	return Address(tmhash.Sum(pubKey[:]))
 }
 
 func (pubKey PubKeyEd25519) Bytes() []byte {
@@ -59,15 +64,15 @@ func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {
 	if !ok {
 		return false
 	}
-	pubKeyBytes := [32]byte(pubKey)
-	sigBytes := [64]byte(sig)
+	pubKeyBytes := [PubKeyEd25519Size]byte(pubKey)
+	sigBytes := [SignatureEd25519Size]byte(sig)
 	return ed25519.Verify(&pubKeyBytes, msg, &sigBytes)
 }
 
 // For use with golang/crypto/nacl/box
 // If error, returns nil.
-func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {
-	keyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)
+func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
+	keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey)
 	ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)
 	if !ok {
 		return nil
@@ -91,10 +96,12 @@ func (pubKey PubKeyEd25519) Equals(other PubKey) bool {
 
 var _ PubKey = PubKeySecp256k1{}
 
+const PubKeySecp256k1Size = 33
+
 // Implements PubKey.
 // Compressed pubkey (just the x-cord),
 // prefixed with 0x02 or 0x03, depending on the y-cord.
-type PubKeySecp256k1 [33]byte
+type PubKeySecp256k1 [PubKeySecp256k1Size]byte
 
 // Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey))
 func (pubKey PubKeySecp256k1) Address() Address {
diff --git a/vendor/github.com/tendermint/go-crypto/random.go b/vendor/github.com/tendermint/tendermint/crypto/random.go
similarity index 89%
rename from vendor/github.com/tendermint/go-crypto/random.go
rename to vendor/github.com/tendermint/tendermint/crypto/random.go
index 46754219..5c5057d3 100644
--- a/vendor/github.com/tendermint/go-crypto/random.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/random.go
@@ -4,11 +4,12 @@ import (
 	"crypto/aes"
 	"crypto/cipher"
 	crand "crypto/rand"
+	"crypto/sha256"
 	"encoding/hex"
 	"io"
 	"sync"
 
-	. "github.com/tendermint/tmlibs/common"
+	. "github.com/tendermint/tendermint/libs/common"
 )
 
 var gRandInfo *randInfo
@@ -72,8 +73,12 @@ type randInfo struct {
 func (ri *randInfo) MixEntropy(seedBytes []byte) {
 	ri.mtx.Lock()
 	defer ri.mtx.Unlock()
-	// Make new ri.seedBytes
-	hashBytes := Sha256(seedBytes)
+	// Make new ri.seedBytes using passed seedBytes and current ri.seedBytes:
+	// ri.seedBytes = sha256( seedBytes || ri.seedBytes )
+	h := sha256.New()
+	h.Write(seedBytes)
+	h.Write(ri.seedBytes[:])
+	hashBytes := h.Sum(nil)
 	hashBytes32 := [32]byte{}
 	copy(hashBytes32[:], hashBytes)
 	ri.seedBytes = xorBytes32(ri.seedBytes, hashBytes32)
diff --git a/vendor/github.com/tendermint/go-crypto/signature.go b/vendor/github.com/tendermint/tendermint/crypto/signature.go
similarity index 79%
rename from vendor/github.com/tendermint/go-crypto/signature.go
rename to vendor/github.com/tendermint/tendermint/crypto/signature.go
index cfe92713..ae447da6 100644
--- a/vendor/github.com/tendermint/go-crypto/signature.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/signature.go
@@ -1,10 +1,11 @@
 package crypto
 
 import (
-	"bytes"
 	"fmt"
 
-	. "github.com/tendermint/tmlibs/common"
+	"crypto/subtle"
+
+	. "github.com/tendermint/tendermint/libs/common"
 )
 
 func SignatureFromBytes(pubKeyBytes []byte) (pubKey Signature, err error) {
@@ -24,8 +25,10 @@ type Signature interface {
 
 var _ Signature = SignatureEd25519{}
 
+const SignatureEd25519Size = 64
+
 // Implements Signature
-type SignatureEd25519 [64]byte
+type SignatureEd25519 [SignatureEd25519Size]byte
 
 func (sig SignatureEd25519) Bytes() []byte {
 	bz, err := cdc.MarshalBinaryBare(sig)
@@ -41,7 +44,7 @@ func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", Fing
 
 func (sig SignatureEd25519) Equals(other Signature) bool {
 	if otherEd, ok := other.(SignatureEd25519); ok {
-		return bytes.Equal(sig[:], otherEd[:])
+		return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1
 	} else {
 		return false
 	}
@@ -74,8 +77,14 @@ func (sig SignatureSecp256k1) String() string { return fmt.Sprintf("/%X.../", Fi
 
 func (sig SignatureSecp256k1) Equals(other Signature) bool {
 	if otherSecp, ok := other.(SignatureSecp256k1); ok {
-		return bytes.Equal(sig[:], otherSecp[:])
+		return subtle.ConstantTimeCompare(sig[:], otherSecp[:]) == 1
 	} else {
 		return false
 	}
 }
+
+func SignatureSecp256k1FromBytes(data []byte) Signature {
+	sig := make(SignatureSecp256k1, len(data))
+	copy(sig[:], data)
+	return sig
+}
diff --git a/vendor/github.com/tendermint/go-crypto/symmetric.go b/vendor/github.com/tendermint/tendermint/crypto/symmetric.go
similarity index 97%
rename from vendor/github.com/tendermint/go-crypto/symmetric.go
rename to vendor/github.com/tendermint/tendermint/crypto/symmetric.go
index d4ac9b55..62379c15 100644
--- a/vendor/github.com/tendermint/go-crypto/symmetric.go
+++ b/vendor/github.com/tendermint/tendermint/crypto/symmetric.go
@@ -3,7 +3,7 @@ package crypto
 import (
 	"errors"
 
-	. "github.com/tendermint/tmlibs/common"
+	. "github.com/tendermint/tendermint/libs/common"
 	"golang.org/x/crypto/nacl/secretbox"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go b/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go
new file mode 100644
index 00000000..1b29d868
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/tmhash/hash.go
@@ -0,0 +1,48 @@
+package tmhash
+
+import (
+	"crypto/sha256"
+	"hash"
+)
+
+const (
+	Size      = 20
+	BlockSize = sha256.BlockSize
+)
+
+type sha256trunc struct {
+	sha256 hash.Hash
+}
+
+func (h sha256trunc) Write(p []byte) (n int, err error) {
+	return h.sha256.Write(p)
+}
+func (h sha256trunc) Sum(b []byte) []byte {
+	shasum := h.sha256.Sum(b)
+	return shasum[:Size]
+}
+
+func (h sha256trunc) Reset() {
+	h.sha256.Reset()
+}
+
+func (h sha256trunc) Size() int {
+	return Size
+}
+
+func (h sha256trunc) BlockSize() int {
+	return h.sha256.BlockSize()
+}
+
+// New returns a new hash.Hash.
+func New() hash.Hash {
+	return sha256trunc{
+		sha256: sha256.New(),
+	}
+}
+
+// Sum returns the first 20 bytes of SHA256 of the bz.
+func Sum(bz []byte) []byte {
+	hash := sha256.Sum256(bz)
+	return hash[:Size]
+}
diff --git a/vendor/github.com/tendermint/tendermint/crypto/version.go b/vendor/github.com/tendermint/tendermint/crypto/version.go
new file mode 100644
index 00000000..77c0bed8
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/crypto/version.go
@@ -0,0 +1,3 @@
+package crypto
+
+const Version = "0.9.0-dev"
diff --git a/vendor/github.com/tendermint/tendermint/evidence/pool.go b/vendor/github.com/tendermint/tendermint/evidence/pool.go
index 4bad355f..247629b6 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/pool.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/pool.go
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"sync"
 
-	clist "github.com/tendermint/tmlibs/clist"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	clist "github.com/tendermint/tendermint/libs/clist"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/evidence/reactor.go b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
index 5159572e..bf11ac10 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
@@ -5,10 +5,10 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/tendermint/go-amino"
-	clist "github.com/tendermint/tmlibs/clist"
-	"github.com/tendermint/tmlibs/log"
+	amino "github.com/tendermint/go-amino"
 
+	clist "github.com/tendermint/tendermint/libs/clist"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 )
@@ -73,7 +73,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 // Receive implements Reactor.
 // It adds any received evidence to the evpool.
 func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		evR.Switch.StopPeerForError(src, err)
@@ -204,11 +204,9 @@ func RegisterEvidenceMessages(cdc *amino.Codec) {
 		"tendermint/evidence/EvidenceListMessage", nil)
 }
 
-// DecodeMessage decodes a byte-array into a EvidenceMessage.
-func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
+func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/evidence/store.go b/vendor/github.com/tendermint/tendermint/evidence/store.go
index 6af5d75d..20b37bdb 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/store.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/store.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 /*
diff --git a/vendor/github.com/tendermint/tendermint/evidence/wire.go b/vendor/github.com/tendermint/tendermint/evidence/wire.go
index 842e0707..fb3a177c 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/wire.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/wire.go
@@ -2,7 +2,7 @@ package evidence
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/README.md b/vendor/github.com/tendermint/tendermint/libs/autofile/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/autofile/README.md
rename to vendor/github.com/tendermint/tendermint/libs/autofile/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go b/vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
similarity index 97%
rename from vendor/github.com/tendermint/tmlibs/autofile/autofile.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
index 790be522..313da678 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go
+++ b/vendor/github.com/tendermint/tendermint/libs/autofile/autofile.go
@@ -5,7 +5,7 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 /* AutoFile usage
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/group.go b/vendor/github.com/tendermint/tendermint/libs/autofile/group.go
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/autofile/group.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/group.go
index 1ae54503..b4368ed9 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/group.go
+++ b/vendor/github.com/tendermint/tendermint/libs/autofile/group.go
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/sighup_watcher.go b/vendor/github.com/tendermint/tendermint/libs/autofile/sighup_watcher.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/autofile/sighup_watcher.go
rename to vendor/github.com/tendermint/tendermint/libs/autofile/sighup_watcher.go
diff --git a/vendor/github.com/tendermint/tmlibs/clist/clist.go b/vendor/github.com/tendermint/tendermint/libs/clist/clist.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/clist/clist.go
rename to vendor/github.com/tendermint/tendermint/libs/clist/clist.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/LICENSE b/vendor/github.com/tendermint/tendermint/libs/common/LICENSE
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/LICENSE
rename to vendor/github.com/tendermint/tendermint/libs/common/LICENSE
diff --git a/vendor/github.com/tendermint/tmlibs/common/async.go b/vendor/github.com/tendermint/tendermint/libs/common/async.go
similarity index 95%
rename from vendor/github.com/tendermint/tmlibs/common/async.go
rename to vendor/github.com/tendermint/tendermint/libs/common/async.go
index 7be09a3c..e3293ab4 100644
--- a/vendor/github.com/tendermint/tmlibs/common/async.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/async.go
@@ -76,17 +76,15 @@ func (trs *TaskResultSet) Reap() *TaskResultSet {
 func (trs *TaskResultSet) Wait() *TaskResultSet {
 	for i := 0; i < len(trs.results); i++ {
 		var trch = trs.chz[i]
-		select {
-		case result, ok := <-trch:
-			if ok {
-				// Write result.
-				trs.results[i] = taskResultOK{
-					TaskResult: result,
-					OK:         true,
-				}
-			} else {
-				// We already wrote it.
+		result, ok := <-trch
+		if ok {
+			// Write result.
+			trs.results[i] = taskResultOK{
+				TaskResult: result,
+				OK:         true,
 			}
+		} else {
+			// We already wrote it.
 		}
 	}
 	return trs
diff --git a/vendor/github.com/tendermint/tmlibs/common/bit_array.go b/vendor/github.com/tendermint/tendermint/libs/common/bit_array.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/bit_array.go
rename to vendor/github.com/tendermint/tendermint/libs/common/bit_array.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/bytes.go b/vendor/github.com/tendermint/tendermint/libs/common/bytes.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/bytes.go
rename to vendor/github.com/tendermint/tendermint/libs/common/bytes.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/byteslice.go b/vendor/github.com/tendermint/tendermint/libs/common/byteslice.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/byteslice.go
rename to vendor/github.com/tendermint/tendermint/libs/common/byteslice.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/cmap.go b/vendor/github.com/tendermint/tendermint/libs/common/cmap.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/cmap.go
rename to vendor/github.com/tendermint/tendermint/libs/common/cmap.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/colors.go b/vendor/github.com/tendermint/tendermint/libs/common/colors.go
similarity index 87%
rename from vendor/github.com/tendermint/tmlibs/common/colors.go
rename to vendor/github.com/tendermint/tendermint/libs/common/colors.go
index 85e59224..049ce7a5 100644
--- a/vendor/github.com/tendermint/tmlibs/common/colors.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/colors.go
@@ -81,3 +81,15 @@ func Cyan(args ...interface{}) string {
 func White(args ...interface{}) string {
 	return treatAll(ANSIFgWhite, args...)
 }
+
+func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string {
+	s := ""
+	for _, b := range data {
+		if 0x21 <= b && b < 0x7F {
+			s += textColor(string(b))
+		} else {
+			s += bytesColor(Fmt("%02X", b))
+		}
+	}
+	return s
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/date.go b/vendor/github.com/tendermint/tendermint/libs/common/date.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/date.go
rename to vendor/github.com/tendermint/tendermint/libs/common/date.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/errors.go b/vendor/github.com/tendermint/tendermint/libs/common/errors.go
similarity index 53%
rename from vendor/github.com/tendermint/tmlibs/common/errors.go
rename to vendor/github.com/tendermint/tendermint/libs/common/errors.go
index 5992b234..5c31b896 100644
--- a/vendor/github.com/tendermint/tmlibs/common/errors.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/errors.go
@@ -6,106 +6,81 @@ import (
 )
 
 //----------------------------------------
-// Convenience methods
+// Convenience method.
 
-// ErrorWrap will just call .TraceFrom(), or create a new *cmnError.
 func ErrorWrap(cause interface{}, format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
 	if causeCmnError, ok := cause.(*cmnError); ok {
-		return causeCmnError.TraceFrom(1, msg)
+		msg := Fmt(format, args...)
+		return causeCmnError.Stacktrace().Trace(1, msg)
+	} else if cause == nil {
+		return newCmnError(FmtError{format, args}).Stacktrace()
+	} else {
+		// NOTE: causeCmnError is a typed nil here.
+		msg := Fmt(format, args...)
+		return newCmnError(cause).Stacktrace().Trace(1, msg)
 	}
-	// NOTE: cause may be nil.
-	// NOTE: do not use causeCmnError here, not the same as nil.
-	return newError(msg, cause, cause).Stacktrace()
 }
 
 //----------------------------------------
 // Error & cmnError
 
 /*
-Usage:
+
+Usage with arbitrary error data:
 
 ```go
 	// Error construction
-	var someT = errors.New("Some err type")
-	var err1 error = NewErrorWithT(someT, "my message")
+	type MyError struct{}
+	var err1 error = NewErrorWithData(MyError{}, "my message")
 	...
 	// Wrapping
 	var err2 error  = ErrorWrap(err1, "another message")
 	if (err1 != err2) { panic("should be the same")
 	...
 	// Error handling
-	switch err2.T() {
-		case someT: ...
+	switch err2.Data().(type){
+		case MyError: ...
 	    default: ...
 	}
 ```
-
 */
 type Error interface {
 	Error() string
-	Message() string
 	Stacktrace() Error
-	Trace(format string, args ...interface{}) Error
-	TraceFrom(offset int, format string, args ...interface{}) Error
-	Cause() interface{}
-	WithT(t interface{}) Error
-	T() interface{}
-	Format(s fmt.State, verb rune)
+	Trace(offset int, format string, args ...interface{}) Error
+	Data() interface{}
 }
 
-// New Error with no cause where the type is the format string of the message..
+// New Error with formatted message.
+// The Error's Data will be a FmtError type.
 func NewError(format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return newError(msg, nil, format)
-
+	err := FmtError{format, args}
+	return newCmnError(err)
 }
 
-// New Error with specified type and message.
-func NewErrorWithT(t interface{}, format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return newError(msg, nil, t)
-}
-
-// NOTE: The name of a function "NewErrorWithCause()" implies that you are
-// creating a new Error, yet, if the cause is an Error, creating a new Error to
-// hold a ref to the old Error is probably *not* what you want to do.
-// So, use ErrorWrap(cause, format, a...) instead, which returns the same error
-// if cause is an Error.
-// IF you must set an Error as the cause of an Error,
-// then you can use the WithCauser interface to do so manually.
-// e.g. (error).(tmlibs.WithCauser).WithCause(causeError)
-
-type WithCauser interface {
-	WithCause(cause interface{}) Error
+// New Error with specified data.
+func NewErrorWithData(data interface{}) Error {
+	return newCmnError(data)
 }
 
 type cmnError struct {
-	msg        string         // first msg which also appears in msg
-	cause      interface{}    // underlying cause (or panic object)
-	t          interface{}    // for switching on error
+	data       interface{}    // associated data
 	msgtraces  []msgtraceItem // all messages traced
 	stacktrace []uintptr      // first stack trace
 }
 
-var _ WithCauser = &cmnError{}
 var _ Error = &cmnError{}
 
 // NOTE: do not expose.
-func newError(msg string, cause interface{}, t interface{}) *cmnError {
+func newCmnError(data interface{}) *cmnError {
 	return &cmnError{
-		msg:        msg,
-		cause:      cause,
-		t:          t,
+		data:       data,
 		msgtraces:  nil,
 		stacktrace: nil,
 	}
 }
 
-func (err *cmnError) Message() string {
-	return err.msg
-}
-
+// Implements error.
 func (err *cmnError) Error() string {
 	return fmt.Sprintf("%v", err)
 }
@@ -121,42 +96,17 @@ func (err *cmnError) Stacktrace() Error {
 }
 
 // Add tracing information with msg.
-func (err *cmnError) Trace(format string, args ...interface{}) Error {
-	msg := Fmt(format, args...)
-	return err.doTrace(msg, 0)
-}
-
-// Same as Trace, but traces the line `offset` calls out.
-// If n == 0, the behavior is identical to Trace().
-func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error {
+// Set n=0 unless wrapped with some function, then n > 0.
+func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error {
 	msg := Fmt(format, args...)
 	return err.doTrace(msg, offset)
 }
 
-// Return last known cause.
-// NOTE: The meaning of "cause" is left for the caller to define.
-// There exists no "canonical" definition of "cause".
-// Instead of blaming, try to handle it, or organize it.
-func (err *cmnError) Cause() interface{} {
-	return err.cause
-}
-
-// Overwrites the Error's cause.
-func (err *cmnError) WithCause(cause interface{}) Error {
-	err.cause = cause
-	return err
-}
-
-// Overwrites the Error's type.
-func (err *cmnError) WithT(t interface{}) Error {
-	err.t = t
-	return err
-}
-
-// Return the "type" of this message, primarily for switching
-// to handle this Error.
-func (err *cmnError) T() interface{} {
-	return err.t
+// Return the "data" of this error.
+// Data could be used for error handling/switching,
+// or for holding general error/debug information.
+func (err *cmnError) Data() interface{} {
+	return err.data
 }
 
 func (err *cmnError) doTrace(msg string, n int) Error {
@@ -177,12 +127,8 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
 	default:
 		if s.Flag('#') {
 			s.Write([]byte("--= Error =--\n"))
-			// Write msg.
-			s.Write([]byte(fmt.Sprintf("Message: %s\n", err.msg)))
-			// Write cause.
-			s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause)))
-			// Write type.
-			s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t)))
+			// Write data.
+			s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data)))
 			// Write msg trace items.
 			s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
 			for i, msgtrace := range err.msgtraces {
@@ -200,11 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
 			s.Write([]byte("--= /Error =--\n"))
 		} else {
 			// Write msg.
-			if err.cause != nil {
-				s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc?
-			} else {
-				s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc?
-			}
+			s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
 		}
 	}
 }
@@ -232,6 +174,45 @@ func (mti msgtraceItem) String() string {
 	)
 }
 
+//----------------------------------------
+// fmt error
+
+/*
+
+FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError))
+Theoretically it could be used to switch on the format string.
+
+```go
+	// Error construction
+	var err1 error = NewError("invalid username %v", "BOB")
+	var err2 error = NewError("another kind of error")
+	...
+	// Error handling
+	switch err1.Data().(cmn.FmtError).Format() {
+		case "invalid username %v": ...
+		case "another kind of error": ...
+	    default: ...
+	}
+```
+*/
+type FmtError struct {
+	format string
+	args   []interface{}
+}
+
+func (fe FmtError) Error() string {
+	return fmt.Sprintf(fe.format, fe.args...)
+}
+
+func (fe FmtError) String() string {
+	return fmt.Sprintf("FmtError{format:%v,args:%v}",
+		fe.format, fe.args)
+}
+
+func (fe FmtError) Format() string {
+	return fe.format
+}
+
 //----------------------------------------
 // Panic wrappers
 // XXX DEPRECATED
diff --git a/vendor/github.com/tendermint/tmlibs/common/heap.go b/vendor/github.com/tendermint/tendermint/libs/common/heap.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/heap.go
rename to vendor/github.com/tendermint/tendermint/libs/common/heap.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/int.go b/vendor/github.com/tendermint/tendermint/libs/common/int.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/int.go
rename to vendor/github.com/tendermint/tendermint/libs/common/int.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/io.go b/vendor/github.com/tendermint/tendermint/libs/common/io.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/io.go
rename to vendor/github.com/tendermint/tendermint/libs/common/io.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/kvpair.go b/vendor/github.com/tendermint/tendermint/libs/common/kvpair.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/kvpair.go
rename to vendor/github.com/tendermint/tendermint/libs/common/kvpair.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/math.go b/vendor/github.com/tendermint/tendermint/libs/common/math.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/math.go
rename to vendor/github.com/tendermint/tendermint/libs/common/math.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/net.go b/vendor/github.com/tendermint/tendermint/libs/common/net.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/net.go
rename to vendor/github.com/tendermint/tendermint/libs/common/net.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/nil.go b/vendor/github.com/tendermint/tendermint/libs/common/nil.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/nil.go
rename to vendor/github.com/tendermint/tendermint/libs/common/nil.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/os.go b/vendor/github.com/tendermint/tendermint/libs/common/os.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/os.go
rename to vendor/github.com/tendermint/tendermint/libs/common/os.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/random.go b/vendor/github.com/tendermint/tendermint/libs/common/random.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/random.go
rename to vendor/github.com/tendermint/tendermint/libs/common/random.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go b/vendor/github.com/tendermint/tendermint/libs/common/repeat_timer.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/repeat_timer.go
rename to vendor/github.com/tendermint/tendermint/libs/common/repeat_timer.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/service.go b/vendor/github.com/tendermint/tendermint/libs/common/service.go
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/common/service.go
rename to vendor/github.com/tendermint/tendermint/libs/common/service.go
index 2f90fa4f..b6f166e7 100644
--- a/vendor/github.com/tendermint/tmlibs/common/service.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/service.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"sync/atomic"
 
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tmlibs/common/string.go b/vendor/github.com/tendermint/tendermint/libs/common/string.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/string.go
rename to vendor/github.com/tendermint/tendermint/libs/common/string.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/throttle_timer.go b/vendor/github.com/tendermint/tendermint/libs/common/throttle_timer.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/throttle_timer.go
rename to vendor/github.com/tendermint/tendermint/libs/common/throttle_timer.go
diff --git a/vendor/github.com/tendermint/tmlibs/common/types.pb.go b/vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
similarity index 68%
rename from vendor/github.com/tendermint/tmlibs/common/types.pb.go
rename to vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
index 047b7aee..f6645602 100644
--- a/vendor/github.com/tendermint/tmlibs/common/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/libs/common/types.pb.go
@@ -1,4 +1,4 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: common/types.proto
 
 /*
@@ -14,10 +14,9 @@ It has these top-level messages:
 //nolint: gas
 package common
 
-import proto "github.com/gogo/protobuf/proto"
+import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -28,7 +27,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 // Define these here for compatibility but use tmlibs/common.KVPair.
 type KVPair struct {
@@ -39,7 +38,7 @@ type KVPair struct {
 func (m *KVPair) Reset()                    { *m = KVPair{} }
 func (m *KVPair) String() string            { return proto.CompactTextString(m) }
 func (*KVPair) ProtoMessage()               {}
-func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
 
 func (m *KVPair) GetKey() []byte {
 	if m != nil {
@@ -58,13 +57,13 @@ func (m *KVPair) GetValue() []byte {
 // Define these here for compatibility but use tmlibs/common.KI64Pair.
 type KI64Pair struct {
 	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Value int64  `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+	Value int64  `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
 }
 
 func (m *KI64Pair) Reset()                    { *m = KI64Pair{} }
 func (m *KI64Pair) String() string            { return proto.CompactTextString(m) }
 func (*KI64Pair) ProtoMessage()               {}
-func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
 
 func (m *KI64Pair) GetKey() []byte {
 	if m != nil {
@@ -85,17 +84,15 @@ func init() {
 	proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair")
 }
 
-func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) }
+func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) }
 
-var fileDescriptorTypes = []byte{
-	// 137 bytes of a gzipped FileDescriptorProto
+var fileDescriptor0 = []byte{
+	// 107 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd,
 	0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
-	0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7,
-	0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68,
-	0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e,
-	0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12,
-	0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99,
-	0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
-	0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00,
+	0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31,
+	0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac,
+	0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb,
+	0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00,
+	0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/tendermint/tmlibs/common/types.proto b/vendor/github.com/tendermint/tendermint/libs/common/types.proto
similarity index 56%
rename from vendor/github.com/tendermint/tmlibs/common/types.proto
rename to vendor/github.com/tendermint/tendermint/libs/common/types.proto
index 94abcccc..8406fcfd 100644
--- a/vendor/github.com/tendermint/tmlibs/common/types.proto
+++ b/vendor/github.com/tendermint/tendermint/libs/common/types.proto
@@ -1,13 +1,6 @@
 syntax = "proto3";
 package common;
 
-// For more information on gogo.proto, see:
-// https://github.com/gogo/protobuf/blob/master/extensions.md
-// NOTE: Try really hard not to use custom types,
-// it's often complicated, broken, nor not worth it.
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-
 //----------------------------------------
 // Abstract types
 
diff --git a/vendor/github.com/tendermint/tmlibs/common/word.go b/vendor/github.com/tendermint/tendermint/libs/common/word.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/common/word.go
rename to vendor/github.com/tendermint/tendermint/libs/common/word.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/LICENSE.md b/vendor/github.com/tendermint/tendermint/libs/db/LICENSE.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/LICENSE.md
rename to vendor/github.com/tendermint/tendermint/libs/db/LICENSE.md
diff --git a/vendor/github.com/tendermint/tmlibs/db/README.md b/vendor/github.com/tendermint/tendermint/libs/db/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/README.md
rename to vendor/github.com/tendermint/tendermint/libs/db/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go b/vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
similarity index 88%
rename from vendor/github.com/tendermint/tmlibs/db/c_level_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
index e3e6c1d5..30746126 100644
--- a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/c_level_db.go
@@ -190,7 +190,8 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator {
 }
 
 func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	itr := db.db.NewIterator(db.ro)
+	return newCLevelDBIterator(itr, start, end, true)
 }
 
 var _ Iterator = (*cLevelDBIterator)(nil)
@@ -204,12 +205,25 @@ type cLevelDBIterator struct {
 
 func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
 	if isReverse {
-		panic("not implemented yet") // XXX
-	}
-	if start != nil {
-		source.Seek(start)
+		if start == nil {
+			source.SeekToLast()
+		} else {
+			source.Seek(start)
+			if source.Valid() {
+				soakey := source.Key() // start or after key
+				if bytes.Compare(start, soakey) < 0 {
+					source.Prev()
+				}
+			} else {
+				source.SeekToLast()
+			}
+		}
 	} else {
-		source.SeekToFirst()
+		if start == nil {
+			source.SeekToFirst()
+		} else {
+			source.Seek(start)
+		}
 	}
 	return &cLevelDBIterator{
 		source:    source,
@@ -243,9 +257,16 @@ func (itr cLevelDBIterator) Valid() bool {
 	// If key is end or past it, invalid.
 	var end = itr.end
 	var key = itr.source.Key()
-	if end != nil && bytes.Compare(end, key) <= 0 {
-		itr.isInvalid = true
-		return false
+	if itr.isReverse {
+		if end != nil && bytes.Compare(key, end) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if end != nil && bytes.Compare(end, key) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
 	}
 
 	// It's valid.
@@ -267,7 +288,11 @@ func (itr cLevelDBIterator) Value() []byte {
 func (itr cLevelDBIterator) Next() {
 	itr.assertNoError()
 	itr.assertIsValid()
-	itr.source.Next()
+	if itr.isReverse {
+		itr.source.Prev()
+	} else {
+		itr.source.Next()
+	}
 }
 
 func (itr cLevelDBIterator) Close() {
diff --git a/vendor/github.com/tendermint/tmlibs/db/db.go b/vendor/github.com/tendermint/tendermint/libs/db/db.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/db.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/debug_db.go b/vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
similarity index 68%
rename from vendor/github.com/tendermint/tmlibs/db/debug_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
index 7666ed9f..bb361a26 100644
--- a/vendor/github.com/tendermint/tmlibs/db/debug_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/debug_db.go
@@ -4,13 +4,9 @@ import (
 	"fmt"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-func _fmt(f string, az ...interface{}) string {
-	return fmt.Sprintf(f, az...)
-}
-
 //----------------------------------------
 // debugDB
 
@@ -33,7 +29,9 @@ func (ddb debugDB) Mutex() *sync.Mutex { return nil }
 // Implements DB.
 func (ddb debugDB) Get(key []byte) (value []byte) {
 	defer func() {
-		fmt.Printf("%v.Get(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Blue(_fmt("%X", value)))
+		fmt.Printf("%v.Get(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue),
+			cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	}()
 	value = ddb.db.Get(key)
 	return
@@ -42,68 +40,85 @@ func (ddb debugDB) Get(key []byte) (value []byte) {
 // Implements DB.
 func (ddb debugDB) Has(key []byte) (has bool) {
 	defer func() {
-		fmt.Printf("%v.Has(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), has)
+		fmt.Printf("%v.Has(%v) %v\n", ddb.label,
+			cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has)
 	}()
 	return ddb.db.Has(key)
 }
 
 // Implements DB.
 func (ddb debugDB) Set(key []byte, value []byte) {
-	fmt.Printf("%v.Set(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.Set(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.Set(key, value)
 }
 
 // Implements DB.
 func (ddb debugDB) SetSync(key []byte, value []byte) {
-	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.SetSync(key, value)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) SetNoLock(key []byte, value []byte) {
-	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.(atomicSetDeleter).SetNoLock(key, value)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
-	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	ddb.db.(atomicSetDeleter).SetNoLockSync(key, value)
 }
 
 // Implements DB.
 func (ddb debugDB) Delete(key []byte) {
-	fmt.Printf("%v.Delete(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.Delete(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.Delete(key)
 }
 
 // Implements DB.
 func (ddb debugDB) DeleteSync(key []byte) {
-	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.DeleteSync(key)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) DeleteNoLock(key []byte) {
-	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.(atomicSetDeleter).DeleteNoLock(key)
 }
 
 // Implements atomicSetDeleter.
 func (ddb debugDB) DeleteNoLockSync(key []byte) {
-	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	ddb.db.(atomicSetDeleter).DeleteNoLockSync(key)
 }
 
 // Implements DB.
 func (ddb debugDB) Iterator(start, end []byte) Iterator {
-	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
+	fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
 	return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
 }
 
 // Implements DB.
 func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
-	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
+	fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label,
+		cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
+		cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
 	return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
 }
 
@@ -173,15 +188,17 @@ func (ditr debugIterator) Next() {
 
 // Implements Iterator.
 func (ditr debugIterator) Key() (key []byte) {
-	fmt.Printf("%v.itr.Key() %v\n", ditr.label, cmn.Cyan(_fmt("%X", key)))
 	key = ditr.itr.Key()
+	fmt.Printf("%v.itr.Key() %v\n", ditr.label,
+		cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue))
 	return
 }
 
 // Implements Iterator.
 func (ditr debugIterator) Value() (value []byte) {
-	fmt.Printf("%v.itr.Value() %v\n", ditr.label, cmn.Blue(_fmt("%X", value)))
 	value = ditr.itr.Value()
+	fmt.Printf("%v.itr.Value() %v\n", ditr.label,
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	return
 }
 
@@ -209,13 +226,16 @@ func NewDebugBatch(label string, bch Batch) debugBatch {
 
 // Implements Batch.
 func (dbch debugBatch) Set(key, value []byte) {
-	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
+	fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
+		cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
 	dbch.bch.Set(key, value)
 }
 
 // Implements Batch.
 func (dbch debugBatch) Delete(key []byte) {
-	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, cmn.Red(_fmt("%X", key)))
+	fmt.Printf("%v.batch.Delete(%v)\n", dbch.label,
+		cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
 	dbch.bch.Delete(key)
 }
 
diff --git a/vendor/github.com/tendermint/tmlibs/db/fsdb.go b/vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
similarity index 90%
rename from vendor/github.com/tendermint/tmlibs/db/fsdb.go
rename to vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
index 578c1785..fc861dec 100644
--- a/vendor/github.com/tendermint/tmlibs/db/fsdb.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/fsdb.go
@@ -10,7 +10,7 @@ import (
 	"sync"
 
 	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
@@ -151,21 +151,29 @@ func (db *FSDB) Mutex() *sync.Mutex {
 }
 
 func (db *FSDB) Iterator(start, end []byte) Iterator {
+	return db.MakeIterator(start, end, false)
+}
+
+func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
 
 	// We need a copy of all of the keys.
 	// Not the best, but probably not a bottleneck depending.
-	keys, err := list(db.dir, start, end)
+	keys, err := list(db.dir, start, end, isReversed)
 	if err != nil {
 		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
 	}
-	sort.Strings(keys)
+	if isReversed {
+		sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+	} else {
+		sort.Strings(keys)
+	}
 	return newMemDBIterator(db, keys, start, end)
 }
 
 func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	return db.MakeIterator(start, end, true)
 }
 
 func (db *FSDB) nameToPath(name []byte) string {
@@ -213,7 +221,7 @@ func remove(path string) error {
 
 // List keys in a directory, stripping of escape sequences and dir portions.
 // CONTRACT: returns os errors directly without wrapping.
-func list(dirPath string, start, end []byte) ([]string, error) {
+func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) {
 	dir, err := os.Open(dirPath)
 	if err != nil {
 		return nil, err
@@ -231,7 +239,7 @@ func list(dirPath string, start, end []byte) ([]string, error) {
 			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
 		}
 		key := unescapeKey([]byte(n))
-		if IsKeyInDomain(key, start, end, false) {
+		if IsKeyInDomain(key, start, end, isReversed) {
 			keys = append(keys, string(key))
 		}
 	}
diff --git a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go b/vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
similarity index 88%
rename from vendor/github.com/tendermint/tmlibs/db/go_level_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
index 9ff162e3..349e447b 100644
--- a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/go_level_db.go
@@ -10,7 +10,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func init() {
@@ -193,7 +193,8 @@ func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
 
 // Implements DB.
 func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
-	panic("not implemented yet") // XXX
+	itr := db.db.NewIterator(nil, nil)
+	return newGoLevelDBIterator(itr, start, end, true)
 }
 
 type goLevelDBIterator struct {
@@ -208,9 +209,26 @@ var _ Iterator = (*goLevelDBIterator)(nil)
 
 func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
 	if isReverse {
-		panic("not implemented yet") // XXX
+		if start == nil {
+			source.Last()
+		} else {
+			valid := source.Seek(start)
+			if valid {
+				soakey := source.Key() // start or after key
+				if bytes.Compare(start, soakey) < 0 {
+					source.Prev()
+				}
+			} else {
+				source.Last()
+			}
+		}
+	} else {
+		if start == nil {
+			source.First()
+		} else {
+			source.Seek(start)
+		}
 	}
-	source.Seek(start)
 	return &goLevelDBIterator{
 		source:    source,
 		start:     start,
@@ -245,9 +263,17 @@ func (itr *goLevelDBIterator) Valid() bool {
 	// If key is end or past it, invalid.
 	var end = itr.end
 	var key = itr.source.Key()
-	if end != nil && bytes.Compare(end, key) <= 0 {
-		itr.isInvalid = true
-		return false
+
+	if itr.isReverse {
+		if end != nil && bytes.Compare(key, end) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if end != nil && bytes.Compare(end, key) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
 	}
 
 	// Valid
@@ -276,7 +302,11 @@ func (itr *goLevelDBIterator) Value() []byte {
 func (itr *goLevelDBIterator) Next() {
 	itr.assertNoError()
 	itr.assertIsValid()
-	itr.source.Next()
+	if itr.isReverse {
+		itr.source.Prev()
+	} else {
+		itr.source.Next()
+	}
 }
 
 // Implements Iterator.
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_batch.go b/vendor/github.com/tendermint/tendermint/libs/db/mem_batch.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/mem_batch.go
rename to vendor/github.com/tendermint/tendermint/libs/db/mem_batch.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_db.go b/vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
similarity index 98%
rename from vendor/github.com/tendermint/tmlibs/db/mem_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
index 1521f87a..58012301 100644
--- a/vendor/github.com/tendermint/tmlibs/db/mem_db.go
+++ b/vendor/github.com/tendermint/tendermint/libs/db/mem_db.go
@@ -114,7 +114,7 @@ func (db *MemDB) Close() {
 	// database, we don't have a destination
 	// to flush contents to nor do we want
 	// any data loss on invoking Close()
-	// See the discussion in https://github.com/tendermint/tmlibs/pull/56
+	// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
 }
 
 // Implements DB.
diff --git a/vendor/github.com/tendermint/tmlibs/db/prefix_db.go b/vendor/github.com/tendermint/tendermint/libs/db/prefix_db.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/prefix_db.go
rename to vendor/github.com/tendermint/tendermint/libs/db/prefix_db.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/types.go b/vendor/github.com/tendermint/tendermint/libs/db/types.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/types.go
rename to vendor/github.com/tendermint/tendermint/libs/db/types.go
diff --git a/vendor/github.com/tendermint/tmlibs/db/util.go b/vendor/github.com/tendermint/tendermint/libs/db/util.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/db/util.go
rename to vendor/github.com/tendermint/tendermint/libs/db/util.go
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/Makefile b/vendor/github.com/tendermint/tendermint/libs/events/Makefile
new file mode 100644
index 00000000..696aafff
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/events/Makefile
@@ -0,0 +1,9 @@
+.PHONY: docs
+REPO:=github.com/tendermint/tendermint/libs/events
+
+docs:
+	@go get github.com/davecheney/godoc2md
+	godoc2md $(REPO) > README.md
+
+test:
+	go test -v ./...
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/README.md b/vendor/github.com/tendermint/tendermint/libs/events/README.md
new file mode 100644
index 00000000..14aa498f
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/events/README.md
@@ -0,0 +1,175 @@
+
+
+# events
+`import "github.com/tendermint/tendermint/libs/events"`
+
+* [Overview](#pkg-overview)
+* [Index](#pkg-index)
+
+## <a name="pkg-overview">Overview</a>
+Pub-Sub in go with event caching
+
+
+
+
+## <a name="pkg-index">Index</a>
+* [type EventCache](#EventCache)
+  * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache)
+  * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent)
+  * [func (evc *EventCache) Flush()](#EventCache.Flush)
+* [type EventCallback](#EventCallback)
+* [type EventData](#EventData)
+* [type EventSwitch](#EventSwitch)
+  * [func NewEventSwitch() EventSwitch](#NewEventSwitch)
+* [type Eventable](#Eventable)
+* [type Fireable](#Fireable)
+
+
+#### <a name="pkg-files">Package files</a>
+[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) 
+
+
+
+
+
+
+## <a name="EventCache">type</a> [EventCache](/src/target/event_cache.go?s=116:179#L5)
+``` go
+type EventCache struct {
+    // contains filtered or unexported fields
+}
+```
+An EventCache buffers events for a Fireable
+All events are cached. Filtering happens on Flush
+
+
+
+
+
+
+
+### <a name="NewEventCache">func</a> [NewEventCache](/src/target/event_cache.go?s=239:284#L11)
+``` go
+func NewEventCache(evsw Fireable) *EventCache
+```
+Create a new EventCache with an EventSwitch as backend
+
+
+
+
+
+### <a name="EventCache.FireEvent">func</a> (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24)
+``` go
+func (evc *EventCache) FireEvent(event string, data EventData)
+```
+Cache an event to be fired upon finality.
+
+
+
+
+### <a name="EventCache.Flush">func</a> (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31)
+``` go
+func (evc *EventCache) Flush()
+```
+Fire events by running evsw.FireEvent on all cached events. Blocks.
+Clears cached events
+
+
+
+
+## <a name="EventCallback">type</a> [EventCallback](/src/target/events.go?s=4201:4240#L185)
+``` go
+type EventCallback func(data EventData)
+```
+
+
+
+
+
+
+
+
+
+## <a name="EventData">type</a> [EventData](/src/target/events.go?s=243:294#L14)
+``` go
+type EventData interface {
+}
+```
+Generic event data can be typed and registered with tendermint/go-amino
+via concrete implementation of this interface
+
+
+
+
+
+
+
+
+
+
+## <a name="EventSwitch">type</a> [EventSwitch](/src/target/events.go?s=560:771#L29)
+``` go
+type EventSwitch interface {
+    cmn.Service
+    Fireable
+
+    AddListenerForEvent(listenerID, event string, cb EventCallback)
+    RemoveListenerForEvent(event string, listenerID string)
+    RemoveListener(listenerID string)
+}
+```
+
+
+
+
+
+
+### <a name="NewEventSwitch">func</a> [NewEventSwitch](/src/target/events.go?s=917:950#L46)
+``` go
+func NewEventSwitch() EventSwitch
+```
+
+
+
+
+## <a name="Eventable">type</a> [Eventable](/src/target/events.go?s=378:440#L20)
+``` go
+type Eventable interface {
+    SetEventSwitch(evsw EventSwitch)
+}
+```
+reactors and other modules should export
+this interface to become eventable
+
+
+
+
+
+
+
+
+
+
+## <a name="Fireable">type</a> [Fireable](/src/target/events.go?s=490:558#L25)
+``` go
+type Fireable interface {
+    FireEvent(event string, data EventData)
+}
+```
+an event switch or cache implements fireable
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/tendermint/tendermint/libs/events/events.go b/vendor/github.com/tendermint/tendermint/libs/events/events.go
index 075f9b42..9c7f0fd0 100644
--- a/vendor/github.com/tendermint/tendermint/libs/events/events.go
+++ b/vendor/github.com/tendermint/tendermint/libs/events/events.go
@@ -6,7 +6,7 @@ package events
 import (
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Generic event data can be typed and registered with tendermint/go-amino
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/README.md b/vendor/github.com/tendermint/tendermint/libs/flowrate/README.md
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/README.md
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/README.md
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/flowrate.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/flowrate.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/flowrate.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/flowrate.go
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/io.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/io.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/io.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/io.go
diff --git a/vendor/github.com/tendermint/tmlibs/flowrate/util.go b/vendor/github.com/tendermint/tendermint/libs/flowrate/util.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/flowrate/util.go
rename to vendor/github.com/tendermint/tendermint/libs/flowrate/util.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/filter.go b/vendor/github.com/tendermint/tendermint/libs/log/filter.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/filter.go
rename to vendor/github.com/tendermint/tendermint/libs/log/filter.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/logger.go b/vendor/github.com/tendermint/tendermint/libs/log/logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/nop_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/nop_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/nop_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/nop_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/testing_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/testing_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/testing_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/testing_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tm_json_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tm_json_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tm_json_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tm_json_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tm_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tm_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tm_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tm_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tmfmt_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tmfmt_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tmfmt_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tmfmt_logger.go
diff --git a/vendor/github.com/tendermint/tmlibs/log/tracing_logger.go b/vendor/github.com/tendermint/tendermint/libs/log/tracing_logger.go
similarity index 100%
rename from vendor/github.com/tendermint/tmlibs/log/tracing_logger.go
rename to vendor/github.com/tendermint/tendermint/libs/log/tracing_logger.go
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go b/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
index 776e0653..4c0d97e2 100644
--- a/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/pubsub.go
@@ -16,7 +16,7 @@ import (
 	"errors"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type operation int
@@ -163,6 +163,8 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -190,6 +192,8 @@ func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query)
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -211,6 +215,8 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
@@ -229,6 +235,8 @@ func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagM
 		return nil
 	case <-ctx.Done():
 		return ctx.Err()
+	case <-s.Quit():
+		return nil
 	}
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile
new file mode 100644
index 00000000..aef42b2d
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/Makefile
@@ -0,0 +1,11 @@
+gen_query_parser:
+	go get -u -v github.com/pointlander/peg
+	peg -inline -switch query.peg
+
+fuzzy_test:
+	go get -u -v github.com/dvyukov/go-fuzz/go-fuzz
+	go get -u -v github.com/dvyukov/go-fuzz/go-fuzz-build
+	go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test
+	go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output
+
+.PHONY: gen_query_parser fuzzy_test
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg
new file mode 100644
index 00000000..739892e4
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg
@@ -0,0 +1,33 @@
+package query
+
+type QueryParser Peg {
+}
+
+e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !.
+
+condition <- tag ' '* (le ' '* (number / time / date)
+                      / ge ' '* (number / time / date)
+                      / l ' '* (number / time / date)
+                      / g ' '* (number / time / date)
+                      / equal ' '* (number / time / date / value)
+                      / contains ' '* value
+                      )
+
+tag <- < (![ \t\n\r\\()"'=><] .)+ >
+value <- < '\'' (!["'] .)* '\''>
+number <- < ('0'
+           / [1-9] digit* ('.' digit*)?) >
+digit <- [0-9]
+time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') >
+date <- "DATE " < year '-' month '-' day >
+year <- ('1' / '2') digit digit digit
+month <- ('0' / '1') digit
+day <- ('0' / '1' / '2' / '3') digit
+and <- "AND"
+
+equal <- "="
+contains <- "CONTAINS"
+le <- "<="
+ge <- ">="
+l <- "<"
+g <- ">"
diff --git a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
index c86e4a47..c1cc60aa 100644
--- a/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
+++ b/vendor/github.com/tendermint/tendermint/libs/pubsub/query/query.peg.go
@@ -1,6 +1,8 @@
 // nolint
 package query
 
+//go:generate peg -inline -switch query.peg
+
 import (
 	"fmt"
 	"math"
diff --git a/vendor/github.com/tendermint/tendermint/mempool/mempool.go b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
index 935dfaac..06852c9a 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
@@ -10,11 +10,11 @@ import (
 
 	"github.com/pkg/errors"
 
-	abci "github.com/tendermint/abci/types"
-	auto "github.com/tendermint/tmlibs/autofile"
-	"github.com/tendermint/tmlibs/clist"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	"github.com/tendermint/tendermint/libs/clist"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/proxy"
@@ -57,6 +57,11 @@ var (
 	ErrMempoolIsFull = errors.New("Mempool is full")
 )
 
+// TxID is the hex encoded hash of the bytes as a types.Tx.
+func TxID(tx []byte) string {
+	return fmt.Sprintf("%X", types.Tx(tx).Hash())
+}
+
 // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
 // round. Transaction validity is checked using the CheckTx abci message before the transaction is
 // added to the pool. The Mempool uses a concurrent list structure for storing transactions that
@@ -83,10 +88,20 @@ type Mempool struct {
 	wal *auto.AutoFile
 
 	logger log.Logger
+
+	metrics *Metrics
 }
 
+// MempoolOption sets an optional parameter on the Mempool.
+type MempoolOption func(*Mempool)
+
 // NewMempool returns a new Mempool with the given configuration and connection to an application.
-func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64) *Mempool {
+func NewMempool(
+	config *cfg.MempoolConfig,
+	proxyAppConn proxy.AppConnMempool,
+	height int64,
+	options ...MempoolOption,
+) *Mempool {
 	mempool := &Mempool{
 		config:        config,
 		proxyAppConn:  proxyAppConn,
@@ -97,6 +112,7 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
 		recheckCursor: nil,
 		recheckEnd:    nil,
 		logger:        log.NewNopLogger(),
+		metrics:       NopMetrics(),
 	}
 	if config.CacheSize > 0 {
 		mempool.cache = newMapTxCache(config.CacheSize)
@@ -104,6 +120,9 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
 		mempool.cache = nopTxCache{}
 	}
 	proxyAppConn.SetResponseCallback(mempool.resCb)
+	for _, option := range options {
+		option(mempool)
+	}
 	return mempool
 }
 
@@ -119,6 +138,11 @@ func (mem *Mempool) SetLogger(l log.Logger) {
 	mem.logger = l
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) MempoolOption {
+	return func(mem *Mempool) { mem.metrics = metrics }
+}
+
 // CloseWAL closes and discards the underlying WAL file.
 // Any further writes will not be relayed to disk.
 func (mem *Mempool) CloseWAL() bool {
@@ -254,6 +278,7 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
 	} else {
 		mem.resCbRecheck(req, res)
 	}
+	mem.metrics.Size.Set(float64(mem.Size()))
 }
 
 func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
@@ -268,11 +293,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
 				tx:      tx,
 			}
 			mem.txs.PushBack(memTx)
-			mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
+			mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size())
 			mem.notifyTxsAvailable()
 		} else {
 			// ignore bad transaction
-			mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r)
+			mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r)
 
 			// remove from cache (it might be good later)
 			mem.cache.Remove(tx)
@@ -397,6 +422,7 @@ func (mem *Mempool) Update(height int64, txs types.Txs) error {
 		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
 		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 	}
+	mem.metrics.Size.Set(float64(mem.Size()))
 	return nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/mempool/metrics.go b/vendor/github.com/tendermint/tendermint/mempool/metrics.go
new file mode 100644
index 00000000..f381678c
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/mempool/metrics.go
@@ -0,0 +1,34 @@
+package mempool
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+// see MetricsProvider for descriptions.
+type Metrics struct {
+	// Size of the mempool.
+	Size metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics build using Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "mempool",
+			Name:      "size",
+			Help:      "Size of the mempool (number of uncommitted transactions).",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Size: discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/mempool/reactor.go b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
index 5d1f4e79..96988be7 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
@@ -5,10 +5,10 @@ import (
 	"reflect"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
 	amino "github.com/tendermint/go-amino"
-	"github.com/tendermint/tmlibs/clist"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/clist"
+	"github.com/tendermint/tendermint/libs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
@@ -78,7 +78,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 // Receive implements Reactor.
 // It adds any received transactions to the mempool.
 func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		memR.Switch.StopPeerForError(src, err)
@@ -90,7 +90,7 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	case *TxMessage:
 		err := memR.Mempool.CheckTx(msg.Tx, nil)
 		if err != nil {
-			memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err)
+			memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err)
 		}
 		// broadcasting happens from go routines per peer
 	default:
@@ -174,11 +174,9 @@ func RegisterMempoolMessages(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
 }
 
-// DecodeMessage decodes a byte-array into a MempoolMessage.
-func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
+func decodeMsg(bz []byte) (msg MempoolMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
diff --git a/vendor/github.com/tendermint/tendermint/node/id.go b/vendor/github.com/tendermint/tendermint/node/id.go
index fa391f94..5100597c 100644
--- a/vendor/github.com/tendermint/tendermint/node/id.go
+++ b/vendor/github.com/tendermint/tendermint/node/id.go
@@ -3,7 +3,7 @@ package node
 import (
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 type NodeID struct {
diff --git a/vendor/github.com/tendermint/tendermint/node/node.go b/vendor/github.com/tendermint/tendermint/node/node.go
index efeb17ee..faf33d88 100644
--- a/vendor/github.com/tendermint/tendermint/node/node.go
+++ b/vendor/github.com/tendermint/tendermint/node/node.go
@@ -2,21 +2,25 @@ package node
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"net"
 	"net/http"
 
-	abci "github.com/tendermint/abci/types"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
 	amino "github.com/tendermint/go-amino"
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	cs "github.com/tendermint/tendermint/consensus"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/evidence"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
@@ -81,10 +85,25 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
 		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
 		DefaultGenesisDocProviderFunc(config),
 		DefaultDBProvider,
+		DefaultMetricsProvider,
 		logger,
 	)
 }
 
+// MetricsProvider returns a consensus, p2p and mempool Metrics.
+type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics)
+
+// DefaultMetricsProvider returns consensus, p2p and mempool Metrics build
+// using Prometheus client library.
+func DefaultMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
+	return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics()
+}
+
+// NopMetricsProvider returns consensus, p2p and mempool Metrics as no-op.
+func NopMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
+	return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics()
+}
+
 //------------------------------------------------------------------------------
 
 // Node is the highest level interface to a full Tendermint node.
@@ -114,6 +133,7 @@ type Node struct {
 	rpcListeners     []net.Listener         // rpc servers
 	txIndexer        txindex.TxIndexer
 	indexerService   *txindex.IndexerService
+	prometheusSrv    *http.Server
 }
 
 // NewNode returns a new, ready to go, Tendermint Node.
@@ -122,6 +142,7 @@ func NewNode(config *cfg.Config,
 	clientCreator proxy.ClientCreator,
 	genesisDocProvider GenesisDocProvider,
 	dbProvider DBProvider,
+	metricsProvider MetricsProvider,
 	logger log.Logger) (*Node, error) {
 
 	// Get BlockStore
@@ -208,11 +229,28 @@ func NewNode(config *cfg.Config,
 		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
 	}
 
+	// metrics
+	var (
+		csMetrics    *cs.Metrics
+		p2pMetrics   *p2p.Metrics
+		memplMetrics *mempl.Metrics
+	)
+	if config.Instrumentation.Prometheus {
+		csMetrics, p2pMetrics, memplMetrics = metricsProvider()
+	} else {
+		csMetrics, p2pMetrics, memplMetrics = NopMetricsProvider()
+	}
+
 	// Make MempoolReactor
 	mempoolLogger := logger.With("module", "mempool")
-	mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight)
-	mempool.InitWAL() // no need to have the mempool wal during tests
+	mempool := mempl.NewMempool(
+		config.Mempool,
+		proxyApp.Mempool(),
+		state.LastBlockHeight,
+		mempl.WithMetrics(memplMetrics),
+	)
 	mempool.SetLogger(mempoolLogger)
+	mempool.InitWAL() // no need to have the mempool wal during tests
 	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
 	mempoolReactor.SetLogger(mempoolLogger)
 
@@ -241,8 +279,15 @@ func NewNode(config *cfg.Config,
 	bcReactor.SetLogger(logger.With("module", "blockchain"))
 
 	// Make ConsensusReactor
-	consensusState := cs.NewConsensusState(config.Consensus, state.Copy(),
-		blockExec, blockStore, mempool, evidencePool)
+	consensusState := cs.NewConsensusState(
+		config.Consensus,
+		state.Copy(),
+		blockExec,
+		blockStore,
+		mempool,
+		evidencePool,
+		cs.WithMetrics(csMetrics),
+	)
 	consensusState.SetLogger(consensusLogger)
 	if privValidator != nil {
 		consensusState.SetPrivValidator(privValidator)
@@ -252,7 +297,7 @@ func NewNode(config *cfg.Config,
 
 	p2pLogger := logger.With("module", "p2p")
 
-	sw := p2p.NewSwitch(config.P2P)
+	sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics))
 	sw.SetLogger(p2pLogger)
 	sw.AddReactor("MEMPOOL", mempoolReactor)
 	sw.AddReactor("BLOCKCHAIN", bcReactor)
@@ -382,8 +427,11 @@ func (n *Node) OnStart() error {
 	}
 
 	// Create & add listener
-	protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress)
-	l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
+	l := p2p.NewDefaultListener(
+		n.config.P2P.ListenAddress,
+		n.config.P2P.ExternalAddress,
+		n.config.P2P.UPNP,
+		n.Logger.With("module", "p2p"))
 	n.sw.AddListener(l)
 
 	// Generate node PrivKey
@@ -411,6 +459,10 @@ func (n *Node) OnStart() error {
 		n.rpcListeners = listeners
 	}
 
+	if n.config.Instrumentation.Prometheus {
+		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
+	}
+
 	// Start the switch (the P2P server).
 	err = n.sw.Start()
 	if err != nil {
@@ -434,9 +486,16 @@ func (n *Node) OnStop() {
 	n.BaseService.OnStop()
 
 	n.Logger.Info("Stopping Node")
+
+	// first stop the non-reactor services
+	n.eventBus.Stop()
+	n.indexerService.Stop()
+
+	// now stop the reactors
 	// TODO: gracefully disconnect from peers.
 	n.sw.Stop()
 
+	// finally stop the listeners / external services
 	for _, l := range n.rpcListeners {
 		n.Logger.Info("Closing rpc listener", "listener", l)
 		if err := l.Close(); err != nil {
@@ -444,14 +503,18 @@ func (n *Node) OnStop() {
 		}
 	}
 
-	n.eventBus.Stop()
-	n.indexerService.Stop()
-
 	if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
 		if err := pvsc.Stop(); err != nil {
 			n.Logger.Error("Error stopping priv validator socket client", "err", err)
 		}
 	}
+
+	if n.prometheusSrv != nil {
+		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
+			// Error from closing listeners, or context timeout:
+			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
+		}
+	}
 }
 
 // RunForever waits for an interrupt signal and stops the node.
@@ -507,7 +570,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
 		mux.HandleFunc("/websocket", wm.WebsocketHandler)
 		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
-		listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
+		listener, err := rpcserver.StartHTTPServer(
+			listenAddr,
+			mux,
+			rpcLogger,
+			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
+		)
 		if err != nil {
 			return nil, err
 		}
@@ -517,7 +585,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 	// we expose a simplified api over grpc for convenience to app devs
 	grpcListenAddr := n.config.RPC.GRPCListenAddress
 	if grpcListenAddr != "" {
-		listener, err := grpccore.StartGRPCServer(grpcListenAddr)
+		listener, err := grpccore.StartGRPCServer(
+			grpcListenAddr,
+			grpccore.Config{
+				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
+			},
+		)
 		if err != nil {
 			return nil, err
 		}
@@ -527,6 +600,27 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 	return listeners, nil
 }
 
+// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
+// collectors on addr.
+func (n *Node) startPrometheusServer(addr string) *http.Server {
+	srv := &http.Server{
+		Addr: addr,
+		Handler: promhttp.InstrumentMetricHandler(
+			prometheus.DefaultRegisterer, promhttp.HandlerFor(
+				prometheus.DefaultGatherer,
+				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
+			),
+		),
+	}
+	go func() {
+		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
+			// Error starting or closing listener:
+			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
+		}
+	}()
+	return srv
+}
+
 // Switch returns the Node's Switch.
 func (n *Node) Switch() *p2p.Switch {
 	return n.sw
@@ -615,7 +709,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
 	}
 
 	p2pListener := n.sw.Listeners()[0]
-	p2pHost := p2pListener.ExternalAddress().IP.String()
+	p2pHost := p2pListener.ExternalAddressHost()
 	p2pPort := p2pListener.ExternalAddress().Port
 	nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
 
diff --git a/vendor/github.com/tendermint/tendermint/node/wire.go b/vendor/github.com/tendermint/tendermint/node/wire.go
index a0d7677d..8b3ae895 100644
--- a/vendor/github.com/tendermint/tendermint/node/wire.go
+++ b/vendor/github.com/tendermint/tendermint/node/wire.go
@@ -2,7 +2,7 @@ package node
 
 import (
 	amino "github.com/tendermint/go-amino"
-	crypto "github.com/tendermint/go-crypto"
+	crypto "github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/README.md b/vendor/github.com/tendermint/tendermint/p2p/README.md
new file mode 100644
index 00000000..819a5056
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/README.md
@@ -0,0 +1,11 @@
+# p2p
+
+The p2p package provides an abstraction around peer-to-peer communication.
+
+Docs:
+
+- [Connection](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/connection.md) for details on how connections and multiplexing work
+- [Peer](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange
+- [Node](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/node.md) for details about different types of nodes and how they should work
+- [Pex](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md) for details on peer discovery and exchange
+- [Config](https://github.com/tendermint/tendermint/blob/master/docs/spec/p2p/config.md) for details on some config options
diff --git a/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
index 83c8efa4..da1296da 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"github.com/tendermint/tendermint/p2p/conn"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type Reactor interface {
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
index 5c7f19cf..9672e011 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
@@ -12,14 +12,13 @@ import (
 	"time"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-	flow "github.com/tendermint/tmlibs/flowrate"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
-	maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below.
-	maxPacketMsgOverheadSize       = 14   // NOTE: See connection_test for derivation.
+	defaultMaxPacketMsgPayloadSize = 1024
 
 	numBatchPacketMsgs = 10
 	minReadBufferSize  = 1024
@@ -96,6 +95,8 @@ type MConnection struct {
 	chStatsTimer *cmn.RepeatTimer // update channel stats periodically
 
 	created time.Time // time of creation
+
+	_maxPacketMsgSize int
 }
 
 // MConnConfig is a MConnection configuration.
@@ -116,16 +117,12 @@ type MConnConfig struct {
 	PongTimeout time.Duration `mapstructure:"pong_timeout"`
 }
 
-func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
-	return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
-}
-
 // DefaultMConnConfig returns the default config.
 func DefaultMConnConfig() MConnConfig {
 	return MConnConfig{
 		SendRate:                defaultSendRate,
 		RecvRate:                defaultRecvRate,
-		MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
+		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
 		FlushThrottle:           defaultFlushThrottle,
 		PingInterval:            defaultPingInterval,
 		PongTimeout:             defaultPongTimeout,
@@ -175,6 +172,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
 
 	mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
 
+	// maxPacketMsgSize() is a bit heavy, so call just once
+	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
+
 	return mconn
 }
 
@@ -399,7 +399,7 @@ func (c *MConnection) sendSomePacketMsgs() bool {
 	// Block until .sendMonitor says we can write.
 	// Once we're ready we send more than we asked for,
 	// but amortized it should even out.
-	c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
+	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
 
 	// Now send some PacketMsgs.
 	for i := 0; i < numBatchPacketMsgs; i++ {
@@ -457,7 +457,7 @@ func (c *MConnection) recvRoutine() {
 FOR_LOOP:
 	for {
 		// Block until .recvMonitor says we can read.
-		c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
+		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
 
 		// Peek into bufConnReader for debugging
 		/*
@@ -477,7 +477,7 @@ FOR_LOOP:
 		var packet Packet
 		var _n int64
 		var err error
-		_n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize()))
+		_n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
 		c.recvMonitor.Update(int(_n))
 		if err != nil {
 			if c.IsRunning() {
@@ -550,6 +550,16 @@ func (c *MConnection) stopPongTimer() {
 	}
 }
 
+// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead
+// of amino encoding.
+func (c *MConnection) maxPacketMsgSize() int {
+	return len(cdc.MustMarshalBinary(PacketMsg{
+		ChannelID: 0x01,
+		EOF:       1,
+		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
+	})) + 10 // leave room for changes in amino
+}
+
 type ConnectionStatus struct {
 	Duration    time.Duration
 	SendMonitor flow.Status
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
index 2a507f88..a2cbe008 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
@@ -20,8 +20,8 @@ import (
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/ripemd160"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // 4 + 1024 == 1028 total frame size
@@ -267,7 +267,11 @@ func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
 }
 
 func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
-	signature = locPrivKey.Sign(challenge[:])
+	signature, err := locPrivKey.Sign(challenge[:])
+	// TODO(ismail): let signChallenge return an error instead
+	if err != nil {
+		panic(err)
+	}
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go b/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
index 02d67f6f..3182fde3 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/wire.go
@@ -2,7 +2,7 @@ package conn
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc *amino.Codec = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/fuzz.go b/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
index 8d00ba40..80e4fed6 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/fuzz.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/config"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // FuzzedConnection wraps any net.Conn and depending on the mode either delays
diff --git a/vendor/github.com/tendermint/tendermint/p2p/key.go b/vendor/github.com/tendermint/tendermint/p2p/key.go
index 73103ebd..9548d34f 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/key.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/key.go
@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"io/ioutil"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // ID is a hex-encoded crypto.Address
diff --git a/vendor/github.com/tendermint/tendermint/p2p/listener.go b/vendor/github.com/tendermint/tendermint/p2p/listener.go
index e698765c..3509ec69 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/listener.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/listener.go
@@ -4,22 +4,30 @@ import (
 	"fmt"
 	"net"
 	"strconv"
+	"strings"
 	"time"
 
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p/upnp"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
+// Listener is a network listener for stream-oriented protocols, providing
+// convenient methods to get listener's internal and external addresses.
+// Clients are supposed to read incoming connections from a channel, returned
+// by the Connections() method.
 type Listener interface {
 	Connections() <-chan net.Conn
 	InternalAddress() *NetAddress
 	ExternalAddress() *NetAddress
+	ExternalAddressHost() string
 	String() string
 	Stop() error
 }
 
-// Implements Listener
+// DefaultListener is a cmn.Service, running net.Listener underneath.
+// Optionally, UPnP is used upon calling NewDefaultListener to resolve external
+// address.
 type DefaultListener struct {
 	cmn.BaseService
 
@@ -29,6 +37,8 @@ type DefaultListener struct {
 	connections chan net.Conn
 }
 
+var _ Listener = (*DefaultListener)(nil)
+
 const (
 	numBufferedConnections = 10
 	defaultExternalPort    = 8770
@@ -47,9 +57,16 @@ func splitHostPort(addr string) (host string, port int) {
 	return host, port
 }
 
-// skipUPNP: If true, does not try getUPNPExternalAddress()
-func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener {
-	// Local listen IP & port
+// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying
+// to determine external address using UPnP.
+func NewDefaultListener(
+	fullListenAddrString string,
+	externalAddrString string,
+	useUPnP bool,
+	logger log.Logger) Listener {
+
+	// Split protocol, address, and port.
+	protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString)
 	lAddrIP, lAddrPort := splitHostPort(lAddr)
 
 	// Create listener
@@ -77,17 +94,28 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 		panic(err)
 	}
 
-	// Determine external address...
+	inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0"
+
+	// Determine external address.
 	var extAddr *NetAddress
-	if !skipUPNP {
-		// If the lAddrIP is INADDR_ANY, try UPnP
-		if lAddrIP == "" || lAddrIP == "0.0.0.0" {
-			extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger)
+
+	if externalAddrString != "" {
+		var err error
+		extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString)
+		if err != nil {
+			panic(fmt.Sprintf("Error in ExternalAddress: %v", err))
 		}
 	}
-	// Otherwise just use the local address...
+
+	// If the lAddrIP is INADDR_ANY, try UPnP.
+	if extAddr == nil && useUPnP && inAddrAny {
+		extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger)
+	}
+
+	// Otherwise just use the local address.
 	if extAddr == nil {
-		extAddr = getNaiveExternalAddress(listenerPort, false, logger)
+		defaultToIPv4 := inAddrAny
+		extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger)
 	}
 	if extAddr == nil {
 		panic("Could not determine external address!")
@@ -107,6 +135,8 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 	return dl
 }
 
+// OnStart implements cmn.Service by spinning a goroutine, listening for new
+// connections.
 func (l *DefaultListener) OnStart() error {
 	if err := l.BaseService.OnStart(); err != nil {
 		return err
@@ -115,6 +145,7 @@ func (l *DefaultListener) OnStart() error {
 	return nil
 }
 
+// OnStop implements cmn.Service by closing the listener.
 func (l *DefaultListener) OnStop() {
 	l.BaseService.OnStop()
 	l.listener.Close() // nolint: errcheck
@@ -145,24 +176,33 @@ func (l *DefaultListener) listenRoutine() {
 	}
 }
 
-// A channel of inbound connections.
+// Connections returns a channel of inbound connections.
 // It gets closed when the listener closes.
 func (l *DefaultListener) Connections() <-chan net.Conn {
 	return l.connections
 }
 
+// InternalAddress returns the internal NetAddress (address used for
+// listening).
 func (l *DefaultListener) InternalAddress() *NetAddress {
 	return l.intAddr
 }
 
+// ExternalAddress returns the external NetAddress (publicly available,
+// determined using either UPnP or local resolver).
 func (l *DefaultListener) ExternalAddress() *NetAddress {
 	return l.extAddr
 }
 
-// NOTE: The returned listener is already Accept()'ing.
-// So it's not suitable to pass into http.Serve().
-func (l *DefaultListener) NetListener() net.Listener {
-	return l.listener
+// ExternalAddressHost returns the external NetAddress IP string. If an IP is
+// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]").
+func (l *DefaultListener) ExternalAddressHost() string {
+	ip := l.ExternalAddress().IP
+	if isIpv6(ip) {
+		// Means it's ipv6, so format it with brackets
+		return "[" + ip.String() + "]"
+	}
+	return ip.String()
 }
 
 func (l *DefaultListener) String() string {
@@ -201,8 +241,20 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
 	return NewNetAddressIPPort(ext, uint16(externalPort))
 }
 
+func isIpv6(ip net.IP) bool {
+	v4 := ip.To4()
+	if v4 != nil {
+		return false
+	}
+
+	ipString := ip.String()
+
+	// Extra check just to be sure it's IPv6
+	return (strings.Contains(ipString, ":") && !strings.Contains(ipString, "."))
+}
+
 // TODO: use syscalls: see issue #712
-func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress {
+func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress {
 	addrs, err := net.InterfaceAddrs()
 	if err != nil {
 		panic(cmn.Fmt("Could not fetch interface addresses: %v", err))
@@ -213,14 +265,20 @@ func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *
 		if !ok {
 			continue
 		}
-		v4 := ipnet.IP.To4()
-		if v4 == nil || (!settleForLocal && v4[0] == 127) {
+		if defaultToIPv4 || !isIpv6(ipnet.IP) {
+			v4 := ipnet.IP.To4()
+			if v4 == nil || (!settleForLocal && v4[0] == 127) {
+				// loopback
+				continue
+			}
+		} else if !settleForLocal && ipnet.IP.IsLoopback() {
+			// IPv6, check for loopback
 			continue
-		} // loopback
+		}
 		return NewNetAddressIPPort(ipnet.IP, uint16(port))
 	}
 
 	// try again, but settle for local
 	logger.Info("Node may not be connected to internet. Settling for local address")
-	return getNaiveExternalAddress(port, true, logger)
+	return getNaiveExternalAddress(defaultToIPv4, port, true, logger)
 }
diff --git a/vendor/github.com/tendermint/tendermint/p2p/metrics.go b/vendor/github.com/tendermint/tendermint/p2p/metrics.go
new file mode 100644
index 00000000..ab876ee7
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/metrics.go
@@ -0,0 +1,33 @@
+package p2p
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Number of peers.
+	Peers metrics.Gauge
+}
+
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+func PrometheusMetrics() *Metrics {
+	return &Metrics{
+		Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Subsystem: "p2p",
+			Name:      "peers",
+			Help:      "Number of peers.",
+		}, []string{}),
+	}
+}
+
+// NopMetrics returns no-op Metrics.
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Peers: discard.NewGauge(),
+	}
+}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
index 3e0d99d6..ebac8cc8 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // NetAddress defines information about a peer on the network
diff --git a/vendor/github.com/tendermint/tendermint/p2p/node_info.go b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
index 60383bc5..5e8160a3 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/node_info.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"fmt"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"strings"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer.go b/vendor/github.com/tendermint/tendermint/p2p/peer.go
index da69fe74..5c615275 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer.go
@@ -6,9 +6,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/config"
 	tmconn "github.com/tendermint/tendermint/p2p/conn"
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
index e048cf4e..25785615 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
@@ -55,8 +55,8 @@ func (ps *PeerSet) Add(peer Peer) error {
 	return nil
 }
 
-// Has returns true iff the PeerSet contains
-// the peer referred to by this peerKey.
+// Has returns true if the set contains the peer referred to by this
+// peerKey, otherwise false.
 func (ps *PeerSet) Has(peerKey ID) bool {
 	ps.mtx.Lock()
 	_, ok := ps.lookup[peerKey]
@@ -64,8 +64,8 @@ func (ps *PeerSet) Has(peerKey ID) bool {
 	return ok
 }
 
-// HasIP returns true if the PeerSet contains the peer referred to by this IP
-// address.
+// HasIP returns true if the set contains the peer referred to by this IP
+// address, otherwise false.
 func (ps *PeerSet) HasIP(peerIP net.IP) bool {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
@@ -85,7 +85,8 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool {
 	return false
 }
 
-// Get looks up a peer by the provided peerKey.
+// Get looks up a peer by the provided peerKey. Returns nil if peer is not
+// found.
 func (ps *PeerSet) Get(peerKey ID) Peer {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
index dc51761f..421aa135 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
@@ -12,9 +12,9 @@ import (
 	"sync"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
+	crypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/file.go b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
index 38142dd9..3237e125 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"os"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 /* Loading & Saving */
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
index 27ed422c..e90665a3 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
@@ -8,7 +8,7 @@ import (
 	"time"
 
 	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/p2p/conn"
@@ -77,10 +77,10 @@ type PEXReactor struct {
 	attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
 }
 
-func (pexR *PEXReactor) minReceiveRequestInterval() time.Duration {
+func (r *PEXReactor) minReceiveRequestInterval() time.Duration {
 	// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
 	// peers too quickly from others and they'll think we're bad!
-	return pexR.ensurePeersPeriod / 3
+	return r.ensurePeersPeriod / 3
 }
 
 // PEXReactorConfig holds reactor specific configuration data.
@@ -206,7 +206,7 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
 
 // Receive implements Reactor by handling incoming PEX messages.
 func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
+	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
 		r.Switch.StopPeerForError(src, err)
@@ -287,7 +287,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
 		return
 	}
 	r.requestsSent.Set(id, struct{}{})
-	p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{}))
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{}))
 }
 
 // ReceiveAddrs adds the given addrs to the addrbook if theres an open
@@ -324,7 +324,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
 
 // SendAddrs sends addrs to the peer.
 func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
-	p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs}))
+	p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs}))
 }
 
 // SetEnsurePeersPeriod sets period to ensure peers connected.
@@ -628,7 +628,9 @@ func (r *PEXReactor) crawlPeers() {
 		}
 		// Ask for more addresses
 		peer := r.Switch.Peers().Get(pi.Addr.ID)
-		r.RequestAddrs(peer)
+		if peer != nil {
+			r.RequestAddrs(peer)
+		}
 	}
 }
 
@@ -668,13 +670,11 @@ func RegisterPexMessage(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil)
 }
 
-// DecodeMessage implements interface registered above.
-func DecodeMessage(bz []byte) (msg PexMessage, err error) {
+func decodeMsg(bz []byte) (msg PexMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
+		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
-	err = cdc.UnmarshalBinary(bz, &msg)
+	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/switch.go b/vendor/github.com/tendermint/tendermint/p2p/switch.go
index f1ceee5c..d1e2ef23 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/switch.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/switch.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
@@ -73,10 +73,15 @@ type Switch struct {
 	mConfig conn.MConnConfig
 
 	rng *cmn.Rand // seed for randomizing dial times and orders
+
+	metrics *Metrics
 }
 
+// SwitchOption sets an optional parameter on the Switch.
+type SwitchOption func(*Switch)
+
 // NewSwitch creates a new Switch with the given config.
-func NewSwitch(cfg *config.P2PConfig) *Switch {
+func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch {
 	sw := &Switch{
 		config:       cfg,
 		reactors:     make(map[string]Reactor),
@@ -85,6 +90,7 @@ func NewSwitch(cfg *config.P2PConfig) *Switch {
 		peers:        NewPeerSet(),
 		dialing:      cmn.NewCMap(),
 		reconnecting: cmn.NewCMap(),
+		metrics:      NopMetrics(),
 	}
 
 	// Ensure we have a completely undeterministic PRNG.
@@ -99,9 +105,19 @@ func NewSwitch(cfg *config.P2PConfig) *Switch {
 	sw.mConfig = mConfig
 
 	sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
+
+	for _, option := range options {
+		option(sw)
+	}
+
 	return sw
 }
 
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) SwitchOption {
+	return func(sw *Switch) { sw.metrics = metrics }
+}
+
 //---------------------------------------------------------------------
 // Switch setup
 
@@ -279,6 +295,7 @@ func (sw *Switch) StopPeerGracefully(peer Peer) {
 
 func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
 	sw.peers.Remove(peer)
+	sw.metrics.Peers.Add(float64(-1))
 	peer.Stop()
 	for _, reactor := range sw.reactors {
 		reactor.RemovePeer(peer, reason)
@@ -623,6 +640,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	if err := sw.peers.Add(peer); err != nil {
 		return err
 	}
+	sw.metrics.Peers.Add(float64(1))
 
 	sw.Logger.Info("Added peer", "peer", peer)
 	return nil
diff --git a/vendor/github.com/tendermint/tendermint/p2p/test_util.go b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
index 0d2ba6c5..467532f0 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/test_util.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"net"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
diff --git a/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go b/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
index 55479415..2de5e790 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/upnp/probe.go
@@ -5,8 +5,8 @@ import (
 	"net"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 type UPNPCapabilities struct {
diff --git a/vendor/github.com/tendermint/tendermint/p2p/wire.go b/vendor/github.com/tendermint/tendermint/p2p/wire.go
index a90ac851..b7ae4125 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/wire.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/wire.go
@@ -2,7 +2,7 @@ package p2p
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/privval/priv_validator.go b/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
index 2bb5ef32..1e85bf7b 100644
--- a/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
+++ b/vendor/github.com/tendermint/tendermint/privval/priv_validator.go
@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // TODO: type ?
@@ -91,6 +91,10 @@ func LoadFilePV(filePath string) *FilePV {
 		cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err))
 	}
 
+	// overwrite pubkey and address for convenience
+	pv.PubKey = pv.PrivKey.PubKey()
+	pv.Address = pv.PubKey.Address()
+
 	pv.filePath = filePath
 	return pv
 }
@@ -222,7 +226,10 @@ func (pv *FilePV) signVote(chainID string, vote *types.Vote) error {
 	}
 
 	// It passed the checks. Sign the vote
-	sig := pv.PrivKey.Sign(signBytes)
+	sig, err := pv.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	pv.saveSigned(height, round, step, signBytes, sig)
 	vote.Signature = sig
 	return nil
@@ -258,7 +265,10 @@ func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error {
 	}
 
 	// It passed the checks. Sign the proposal
-	sig := pv.PrivKey.Sign(signBytes)
+	sig, err := pv.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	pv.saveSigned(height, round, step, signBytes, sig)
 	proposal.Signature = sig
 	return nil
@@ -281,7 +291,11 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8,
 func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
 	pv.mtx.Lock()
 	defer pv.mtx.Unlock()
-	heartbeat.Signature = pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+	sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+	if err != nil {
+		return err
+	}
+	heartbeat.Signature = sig
 	return nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/privval/socket.go b/vendor/github.com/tendermint/tendermint/privval/socket.go
index 9f59a815..1e8a3807 100644
--- a/vendor/github.com/tendermint/tendermint/privval/socket.go
+++ b/vendor/github.com/tendermint/tendermint/privval/socket.go
@@ -8,9 +8,9 @@ import (
 	"time"
 
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 
 	p2pconn "github.com/tendermint/tendermint/p2p/conn"
 	"github.com/tendermint/tendermint/types"
diff --git a/vendor/github.com/tendermint/tendermint/privval/wire.go b/vendor/github.com/tendermint/tendermint/privval/wire.go
index 68891083..c42ba40d 100644
--- a/vendor/github.com/tendermint/tendermint/privval/wire.go
+++ b/vendor/github.com/tendermint/tendermint/privval/wire.go
@@ -2,7 +2,7 @@ package privval
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
index 2319fed8..2f792671 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
@@ -1,8 +1,8 @@
 package proxy
 
 import (
-	abcicli "github.com/tendermint/abci/client"
-	"github.com/tendermint/abci/types"
+	abcicli "github.com/tendermint/tendermint/abci/client"
+	"github.com/tendermint/tendermint/abci/types"
 )
 
 //----------------------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/proxy/client.go b/vendor/github.com/tendermint/tendermint/proxy/client.go
index 6c987368..87f4e716 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/client.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/client.go
@@ -5,9 +5,9 @@ import (
 
 	"github.com/pkg/errors"
 
-	abcicli "github.com/tendermint/abci/client"
-	"github.com/tendermint/abci/example/kvstore"
-	"github.com/tendermint/abci/types"
+	abcicli "github.com/tendermint/tendermint/abci/client"
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/abci/types"
 )
 
 // NewABCIClient returns newly connected client
diff --git a/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
index 5d89ef19..279fa42e 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/multi_app_conn.go
@@ -3,7 +3,7 @@ package proxy
 import (
 	"github.com/pkg/errors"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/README.md b/vendor/github.com/tendermint/tendermint/rpc/core/README.md
new file mode 100644
index 00000000..9547079b
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/README.md
@@ -0,0 +1,20 @@
+# Tendermint RPC
+
+## Generate markdown for [Slate](https://github.com/tendermint/slate)
+
+We are using [Slate](https://github.com/tendermint/slate) to power our RPC
+documentation. For generating markdown use:
+
+```shell
+go get github.com/davecheney/godoc2md
+
+godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$'
+```
+
+For more information see the [CI script for building the Slate docs](/scripts/slate.sh)
+
+## Pagination
+
+Requests that return multiple items will be paginated to 30 items by default.
+You can specify further pages with the ?page parameter. You can also set a
+custom page size up to 100 with the ?per_page parameter.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
index 067108c4..a5eede3f 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
@@ -1,10 +1,10 @@
 package core
 
 import (
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/version"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Query the application for some information.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go b/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
index a5ad5b4c..0e887315 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/blocks.go
@@ -6,7 +6,7 @@ import (
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Get block headers for minHeight <= height <= maxHeight.
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
index dad64b6b..c026cd91 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
@@ -87,7 +87,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //           {
 //             "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //             "pub_key": {
-//               "type": "AC26791624DE60",
+//               "type": "tendermint/PubKeyEd25519",
 //               "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //             },
 //             "voting_power": 10,
@@ -97,7 +97,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //         "proposer": {
 //           "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //           "pub_key": {
-//             "type": "AC26791624DE60",
+//             "type": "tendermint/PubKeyEd25519",
 //             "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //           },
 //           "voting_power": 10,
@@ -133,7 +133,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //           {
 //             "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //             "pub_key": {
-//               "type": "AC26791624DE60",
+//               "type": "tendermint/PubKeyEd25519",
 //               "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //             },
 //             "voting_power": 10,
@@ -143,7 +143,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 //         "proposer": {
 //           "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244",
 //           "pub_key": {
-//             "type": "AC26791624DE60",
+//             "type": "tendermint/PubKeyEd25519",
 //             "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg="
 //           },
 //           "voting_power": 10,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt b/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt
new file mode 100644
index 00000000..896d0c27
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/doc_template.txt
@@ -0,0 +1,8 @@
+{{with .PDoc}}
+{{comment_md .Doc}}
+{{example_html $ ""}}
+
+{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}})
+{{comment_md .Doc}}{{end}}
+{{end}}
+---
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
index 515ada87..ecc41ce1 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
@@ -7,10 +7,10 @@ import (
 
 	"github.com/pkg/errors"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
index 7a042362..128b3e9a 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
@@ -3,15 +3,15 @@ package core
 import (
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/consensus"
+	crypto "github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/txindex"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
 const (
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/status.go b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
index 044c1289..63e62b2c 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/status.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
@@ -7,7 +7,7 @@ import (
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Get Tendermint status including node info, pubkey, latest block
@@ -50,12 +50,12 @@ import (
 //      "latest_app_hash": "0000000000000000",
 //      "latest_block_height": 231,
 //      "latest_block_time": "2018-04-27T23:18:08.459766485-04:00",
-//      "syncing": false
+//      "catching_up": false
 //    },
 //    "validator_info": {
 //      "address": "5875562FF0FFDECC895C20E32FC14988952E99E7",
 //      "pub_key": {
-//        "type": "AC26791624DE60",
+//        "type": "tendermint/PubKeyEd25519",
 //        "value": "PpDJRUrLG2RgFqYYjawfn/AcAgacSXpLFrmfYYQnuzE="
 //      },
 //      "voting_power": 10
@@ -92,7 +92,7 @@ func Status() (*ctypes.ResultStatus, error) {
 			LatestAppHash:     latestAppHash,
 			LatestBlockHeight: latestHeight,
 			LatestBlockTime:   latestBlockTime,
-			Syncing:           consensusReactor.FastSync(),
+			CatchingUp:        consensusReactor.FastSync(),
 		},
 		ValidatorInfo: ctypes.ValidatorInfo{
 			Address:     pubKey.Address(),
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
index 2fa7825f..f53d82f1 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
@@ -3,7 +3,7 @@ package core
 import (
 	"fmt"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
index 5b001d7d..4fec416e 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
@@ -5,9 +5,9 @@ import (
 	"strings"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/state"
@@ -65,7 +65,7 @@ type SyncInfo struct {
 	LatestAppHash     cmn.HexBytes `json:"latest_app_hash"`
 	LatestBlockHeight int64        `json:"latest_block_height"`
 	LatestBlockTime   time.Time    `json:"latest_block_time"`
-	Syncing           bool         `json:"syncing"`
+	CatchingUp        bool         `json:"catching_up"`
 }
 
 // Info about the node's validator
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go b/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
index 6648364b..d3a31dc3 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/types/wire.go
@@ -2,7 +2,7 @@ package core_types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
index c0a92004..0b840e3e 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
@@ -3,7 +3,7 @@ package core_grpc
 import (
 	"context"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	core "github.com/tendermint/tendermint/rpc/core"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
index 80d736f5..c8898968 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/client_server.go
@@ -6,13 +6,21 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/net/netutil"
 	"google.golang.org/grpc"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-// Start the grpcServer in a go routine
-func StartGRPCServer(protoAddr string) (net.Listener, error) {
+// Config is a gRPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+// StartGRPCServer starts a new gRPC BroadcastAPIServer, listening on
+// protoAddr, in a goroutine. Returns a listener and an error, if it fails to
+// parse an address.
+func StartGRPCServer(protoAddr string, config Config) (net.Listener, error) {
 	parts := strings.SplitN(protoAddr, "://", 2)
 	if len(parts) != 2 {
 		return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr)
@@ -22,6 +30,9 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
 	if err != nil {
 		return nil, err
 	}
+	if config.MaxOpenConnections > 0 {
+		ln = netutil.LimitListener(ln, config.MaxOpenConnections)
+	}
 
 	grpcServer := grpc.NewServer()
 	RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
@@ -30,7 +41,8 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
 	return ln, nil
 }
 
-// Start the client by dialing the server
+// StartGRPCClient dials the gRPC server using protoAddr and returns a new
+// BroadcastAPIClient.
 func StartGRPCClient(protoAddr string) BroadcastAPIClient {
 	conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
 	if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh b/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh
new file mode 100644
index 00000000..2c4629c8
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/compile.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
+
+protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
index cf7a5ec7..be16b711 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.pb.go
@@ -19,7 +19,7 @@ package core_grpc
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import types "github.com/tendermint/abci/types"
+import types "github.com/tendermint/tendermint/abci/types"
 
 import (
 	"context"
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto
new file mode 100644
index 00000000..d7980d5e
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/types.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+package core_grpc;
+
+import "github.com/tendermint/tendermint/abci/types/types.proto";
+
+//----------------------------------------
+// Message types
+
+//----------------------------------------
+// Request types
+
+message RequestPing {
+}
+
+message RequestBroadcastTx {
+  bytes tx = 1;
+}
+
+//----------------------------------------
+// Response types
+
+message ResponsePing{
+}
+
+message ResponseBroadcastTx{
+  types.ResponseCheckTx check_tx = 1;
+  types.ResponseDeliverTx deliver_tx = 2;
+}
+
+//----------------------------------------
+// Service Definition
+
+service BroadcastAPI {
+  rpc Ping(RequestPing) returns (ResponsePing) ;
+  rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ;
+}
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go b/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
index 2bc43859..b96b9123 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/doc.go
@@ -98,6 +98,6 @@ Each route is available as a GET request, as a JSONRPCv2 POST request, and via J
 # Examples
 
 * [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go)
-* [tm-monitor](https://github.com/tendermint/tools/blob/master/tm-monitor/rpc.go)
+* [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go)
 */
 package rpc
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
index 6cc03012..3ec5f81e 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
@@ -18,9 +18,9 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	types "github.com/tendermint/tendermint/rpc/lib/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 // RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions.
@@ -294,7 +294,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 			continue
 		}
 
-		v, err, ok := nonJSONToArg(cdc, argType, arg)
+		v, err, ok := nonJSONStringToArg(cdc, argType, arg)
 		if err != nil {
 			return nil, err
 		}
@@ -303,7 +303,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 			continue
 		}
 
-		values[i], err = _jsonStringToArg(cdc, argType, arg)
+		values[i], err = jsonStringToArg(cdc, argType, arg)
 		if err != nil {
 			return nil, err
 		}
@@ -312,26 +312,64 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re
 	return values, nil
 }
 
-func _jsonStringToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error) {
-	v := reflect.New(ty)
-	err := cdc.UnmarshalJSON([]byte(arg), v.Interface())
+func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error) {
+	rv := reflect.New(rt)
+	err := cdc.UnmarshalJSON([]byte(arg), rv.Interface())
 	if err != nil {
-		return v, err
+		return rv, err
 	}
-	v = v.Elem()
-	return v, nil
+	rv = rv.Elem()
+	return rv, nil
 }
 
-func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error, bool) {
+func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) {
+	if rt.Kind() == reflect.Ptr {
+		rv_, err, ok := nonJSONStringToArg(cdc, rt.Elem(), arg)
+		if err != nil {
+			return reflect.Value{}, err, false
+		} else if ok {
+			rv := reflect.New(rt.Elem())
+			rv.Elem().Set(rv_)
+			return rv, nil, true
+		} else {
+			return reflect.Value{}, nil, false
+		}
+	} else {
+		return _nonJSONStringToArg(cdc, rt, arg)
+	}
+}
+
+// NOTE: rt.Kind() isn't a pointer.
+func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) {
+	isIntString := RE_INT.Match([]byte(arg))
 	isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`)
 	isHexString := strings.HasPrefix(strings.ToLower(arg), "0x")
-	expectingString := ty.Kind() == reflect.String
-	expectingByteSlice := ty.Kind() == reflect.Slice && ty.Elem().Kind() == reflect.Uint8
+
+	var expectingString, expectingByteSlice, expectingInt bool
+	switch rt.Kind() {
+	case reflect.Int, reflect.Uint, reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
+		expectingInt = true
+	case reflect.String:
+		expectingString = true
+	case reflect.Slice:
+		expectingByteSlice = rt.Elem().Kind() == reflect.Uint8
+	}
+
+	if isIntString && expectingInt {
+		qarg := `"` + arg + `"`
+		// jsonStringToArg
+		rv, err := jsonStringToArg(cdc, rt, qarg)
+		if err != nil {
+			return rv, err, false
+		} else {
+			return rv, nil, true
+		}
+	}
 
 	if isHexString {
 		if !expectingString && !expectingByteSlice {
 			err := errors.Errorf("Got a hex string arg, but expected '%s'",
-				ty.Kind().String())
+				rt.Kind().String())
 			return reflect.ValueOf(nil), err, false
 		}
 
@@ -340,7 +378,7 @@ func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value,
 		if err != nil {
 			return reflect.ValueOf(nil), err, false
 		}
-		if ty.Kind() == reflect.String {
+		if rt.Kind() == reflect.String {
 			return reflect.ValueOf(string(value)), nil, true
 		}
 		return reflect.ValueOf([]byte(value)), nil, true
@@ -406,7 +444,13 @@ type wsConnection struct {
 // description of how to configure ping period and pong wait time. NOTE: if the
 // write buffer is full, pongs may be dropped, which may cause clients to
 // disconnect. see https://github.com/gorilla/websocket/issues/97
-func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection {
+func NewWSConnection(
+	baseConn *websocket.Conn,
+	funcMap map[string]*RPCFunc,
+	cdc *amino.Codec,
+	options ...func(*wsConnection),
+) *wsConnection {
+	baseConn.SetReadLimit(maxBodyBytes)
 	wsc := &wsConnection{
 		remoteAddr:        baseConn.RemoteAddr().String(),
 		baseConn:          baseConn,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
index 56506067..3c948c0b 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_params.go
@@ -15,6 +15,7 @@ var (
 	dotAtom = atom + `(?:\.` + atom + `)*`
 	domain  = `[A-Z0-9.-]+\.[A-Z]{2,4}`
 
+	RE_INT     = regexp.MustCompile(`^-?[0-9]+$`)
 	RE_HEX     = regexp.MustCompile(`^(?i)[a-f0-9]+$`)
 	RE_EMAIL   = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`)
 	RE_ADDRESS = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`)
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
index 3f54c61e..5d816ef2 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
@@ -12,16 +12,38 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"golang.org/x/net/netutil"
 
 	types "github.com/tendermint/tendermint/rpc/lib/types"
-	"github.com/tendermint/tmlibs/log"
+	"github.com/tendermint/tendermint/libs/log"
 )
 
-func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) {
+// Config is an RPC server configuration.
+type Config struct {
+	MaxOpenConnections int
+}
+
+const (
+	// maxBodyBytes controls the maximum number of bytes the
+	// server will read parsing the request body.
+	maxBodyBytes = int64(1000000) // 1MB
+)
+
+// StartHTTPServer starts an HTTP server on listenAddr with the given handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPServer(
+	listenAddr string,
+	handler http.Handler,
+	logger log.Logger,
+	config Config,
+) (listener net.Listener, err error) {
 	var proto, addr string
 	parts := strings.SplitN(listenAddr, "://", 2)
 	if len(parts) != 2 {
-		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
+		return nil, errors.Errorf(
+			"Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
+			listenAddr,
+		)
 	}
 	proto, addr = parts[0], parts[1]
 
@@ -30,35 +52,60 @@ func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger)
 	if err != nil {
 		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
 	}
+	if config.MaxOpenConnections > 0 {
+		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
+	}
 
 	go func() {
 		err := http.Serve(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 		)
 		logger.Error("RPC HTTP server stopped", "err", err)
 	}()
 	return listener, nil
 }
 
-func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) {
+// StartHTTPAndTLSServer starts an HTTPS server on listenAddr with the given
+// handler.
+// It wraps handler with RecoverAndLogHandler.
+func StartHTTPAndTLSServer(
+	listenAddr string,
+	handler http.Handler,
+	certFile, keyFile string,
+	logger log.Logger,
+	config Config,
+) (listener net.Listener, err error) {
 	var proto, addr string
 	parts := strings.SplitN(listenAddr, "://", 2)
 	if len(parts) != 2 {
-		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
+		return nil, errors.Errorf(
+			"Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
+			listenAddr,
+		)
 	}
 	proto, addr = parts[0], parts[1]
 
-	logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile))
+	logger.Info(
+		fmt.Sprintf(
+			"Starting RPC HTTPS server on %s (cert: %q, key: %q)",
+			listenAddr,
+			certFile,
+			keyFile,
+		),
+	)
 	listener, err = net.Listen(proto, addr)
 	if err != nil {
 		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
 	}
+	if config.MaxOpenConnections > 0 {
+		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
+	}
 
 	go func() {
 		err := http.ServeTLS(
 			listener,
-			RecoverAndLogHandler(handler, logger),
+			RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger),
 			certFile,
 			keyFile,
 		)
@@ -67,7 +114,11 @@ func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, ke
 	return listener, nil
 }
 
-func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RPCResponse) {
+func WriteRPCResponseHTTPError(
+	w http.ResponseWriter,
+	httpCode int,
+	res types.RPCResponse,
+) {
 	jsonBytes, err := json.MarshalIndent(res, "", "  ")
 	if err != nil {
 		panic(err)
@@ -117,7 +168,10 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
 					WriteRPCResponseHTTP(rww, res)
 				} else {
 					// For the rest,
-					logger.Error("Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()))
+					logger.Error(
+						"Panic in RPC HTTP handler", "err", e, "stack",
+						string(debug.Stack()),
+					)
 					rww.WriteHeader(http.StatusInternalServerError)
 					WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error)))
 				}
@@ -154,3 +208,13 @@ func (w *ResponseWriterWrapper) WriteHeader(status int) {
 func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return w.ResponseWriter.(http.Hijacker).Hijack()
 }
+
+type maxBytesHandler struct {
+	h http.Handler
+	n int64
+}
+
+func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	r.Body = http.MaxBytesReader(w, r.Body, h.n)
+	h.h.ServeHTTP(w, r)
+}
diff --git a/vendor/github.com/tendermint/tendermint/state/errors.go b/vendor/github.com/tendermint/tendermint/state/errors.go
index afb5737d..d40c7e14 100644
--- a/vendor/github.com/tendermint/tendermint/state/errors.go
+++ b/vendor/github.com/tendermint/tendermint/state/errors.go
@@ -1,7 +1,7 @@
 package state
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 type (
diff --git a/vendor/github.com/tendermint/tendermint/state/execution.go b/vendor/github.com/tendermint/tendermint/state/execution.go
index e6b94429..601abec9 100644
--- a/vendor/github.com/tendermint/tendermint/state/execution.go
+++ b/vendor/github.com/tendermint/tendermint/state/execution.go
@@ -4,11 +4,11 @@ import (
 	"fmt"
 
 	fail "github.com/ebuchman/fail-test"
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
 )
 
 //-----------------------------------------------------------------------------
@@ -278,20 +278,24 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat
 
 	// these are tendermint types now
 	for _, valUpdate := range updates {
+		if valUpdate.VotingPower < 0 {
+			return fmt.Errorf("Voting power can't be negative %v", valUpdate)
+		}
+
 		address := valUpdate.Address
 		_, val := currentSet.GetByAddress(address)
-		if val == nil {
-			// add val
-			added := currentSet.Add(valUpdate)
-			if !added {
-				return fmt.Errorf("Failed to add new validator %v", valUpdate)
-			}
-		} else if valUpdate.VotingPower == 0 {
+		if valUpdate.VotingPower == 0 {
 			// remove val
 			_, removed := currentSet.Remove(address)
 			if !removed {
 				return fmt.Errorf("Failed to remove validator %X", address)
 			}
+		} else if val == nil {
+			// add val
+			added := currentSet.Add(valUpdate)
+			if !added {
+				return fmt.Errorf("Failed to add new validator %v", valUpdate)
+			}
 		} else {
 			// update val
 			updated := currentSet.Update(valUpdate)
diff --git a/vendor/github.com/tendermint/tendermint/state/services.go b/vendor/github.com/tendermint/tendermint/state/services.go
index bef286b2..bf0b1a6f 100644
--- a/vendor/github.com/tendermint/tendermint/state/services.go
+++ b/vendor/github.com/tendermint/tendermint/state/services.go
@@ -1,7 +1,7 @@
 package state
 
 import (
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/types"
 )
 
diff --git a/vendor/github.com/tendermint/tendermint/state/store.go b/vendor/github.com/tendermint/tendermint/state/store.go
index 2164d699..9e94e36f 100644
--- a/vendor/github.com/tendermint/tendermint/state/store.go
+++ b/vendor/github.com/tendermint/tendermint/state/store.go
@@ -3,10 +3,10 @@ package state
 import (
 	"fmt"
 
-	abci "github.com/tendermint/abci/types"
+	abci "github.com/tendermint/tendermint/abci/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
 )
 
 //------------------------------------------------------------------------
@@ -175,8 +175,13 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) {
 	if valInfo.ValidatorSet == nil {
 		valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged)
 		if valInfo2 == nil {
-			cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as
-                        last changed from height %d`, valInfo.LastHeightChanged, height))
+			panic(
+				fmt.Sprintf(
+					"Couldn't find validators at height %d as last changed from height %d",
+					valInfo.LastHeightChanged,
+					height,
+				),
+			)
 		}
 		valInfo = valInfo2
 	}
@@ -239,11 +244,17 @@ func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error)
 	}
 
 	if paramsInfo.ConsensusParams == empty {
-		paramsInfo = loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged)
-		if paramsInfo == nil {
-			cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as
-                        last changed from height %d`, paramsInfo.LastHeightChanged, height))
+		paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged)
+		if paramsInfo2 == nil {
+			panic(
+				fmt.Sprintf(
+					"Couldn't find consensus params at height %d as last changed from height %d",
+					paramsInfo.LastHeightChanged,
+					height,
+				),
+			)
 		}
+		paramsInfo = paramsInfo2
 	}
 
 	return paramsInfo.ConsensusParams, nil
diff --git a/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go b/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
index 264be1fd..088252f5 100644
--- a/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
+++ b/vendor/github.com/tendermint/tendermint/state/txindex/indexer_service.go
@@ -3,7 +3,7 @@ package txindex
 import (
 	"context"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
index 718a55d1..70732592 100644
--- a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
+++ b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
 
 	"github.com/tendermint/tendermint/libs/pubsub/query"
 	"github.com/tendermint/tendermint/state/txindex"
diff --git a/vendor/github.com/tendermint/tendermint/state/validation.go b/vendor/github.com/tendermint/tendermint/state/validation.go
index 84a4cc82..c3633920 100644
--- a/vendor/github.com/tendermint/tendermint/state/validation.go
+++ b/vendor/github.com/tendermint/tendermint/state/validation.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tendermint/libs/db"
 )
 
 //-----------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/state/wire.go b/vendor/github.com/tendermint/tendermint/state/wire.go
index 3e8b544d..af743c7b 100644
--- a/vendor/github.com/tendermint/tendermint/state/wire.go
+++ b/vendor/github.com/tendermint/tendermint/state/wire.go
@@ -2,7 +2,7 @@ package state
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/abci/LICENSE b/vendor/github.com/tendermint/tendermint/tools/build/LICENSE
similarity index 93%
rename from vendor/github.com/tendermint/abci/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/build/LICENSE
index 57951bb8..bb66bb35 100644
--- a/vendor/github.com/tendermint/abci/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/build/LICENSE
@@ -1,11 +1,9 @@
-Tendermint ABCI
-Copyright (C) 2015 Tendermint
-
-
+Tendermint Core
+License: Apache2.0
 
                                  Apache License
                            Version 2.0, January 2004
-                        https://www.apache.org/licenses/
+                        http://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -180,11 +178,24 @@ Copyright (C) 2015 Tendermint
 
    END OF TERMS AND CONDITIONS
 
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 All in Bits, Inc
+
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       https://www.apache.org/licenses/LICENSE-2.0
+       http://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright
new file mode 100644
index 00000000..fe449650
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/basecoind/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: basecoind
+Source: https://github.com/cosmos/cosmos-sdk
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright
new file mode 100644
index 00000000..6d1bab01
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/ethermint/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: ethermint
+Source: https://github.com/tendermint/ethermint
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright
new file mode 100644
index 00000000..ffc23013
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/gaia/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: gaia
+Source: https://github.com/cosmos/gaia
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright b/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright
new file mode 100644
index 00000000..15ee960d
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/build/tendermint/DEBIAN/copyright
@@ -0,0 +1,21 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: tendermint
+Source: https://github.com/tendermint/tendermint
+
+Files: *
+Copyright: 2017 All In Bits, Inc.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License 2.0 can be found
+ in the file `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/github.com/tendermint/tmlibs/LICENSE b/vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
similarity index 99%
rename from vendor/github.com/tendermint/tmlibs/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
index 06bc5e1c..64a33ddf 100644
--- a/vendor/github.com/tendermint/tmlibs/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/mintnet-kubernetes/LICENSE
@@ -1,4 +1,3 @@
-Tendermint Libraries
 Copyright (C) 2017 Tendermint
 
 
diff --git a/vendor/github.com/tendermint/go-crypto/LICENSE b/vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
similarity index 93%
rename from vendor/github.com/tendermint/go-crypto/LICENSE
rename to vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
index 3beb77b1..f4891396 100644
--- a/vendor/github.com/tendermint/go-crypto/LICENSE
+++ b/vendor/github.com/tendermint/tendermint/tools/tm-bench/LICENSE
@@ -1,11 +1,9 @@
-Tendermint Go-Crypto
-Copyright (C) 2015 Tendermint
-
-
+Tendermint Bench
+Copyright 2017 Tendermint
 
                                  Apache License
                            Version 2.0, January 2004
-                        https://www.apache.org/licenses/
+                        http://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -180,11 +178,24 @@ Copyright (C) 2015 Tendermint
 
    END OF TERMS AND CONDITIONS
 
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       https://www.apache.org/licenses/LICENSE-2.0
+       http://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE b/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE
new file mode 100644
index 00000000..20728d31
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/tools/tm-monitor/LICENSE
@@ -0,0 +1,204 @@
+Tendermint Monitor
+Copyright 2017 Tendermint
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/tendermint/tendermint/types/block.go b/vendor/github.com/tendermint/tendermint/types/block.go
index 3004672c..e23fd71d 100644
--- a/vendor/github.com/tendermint/tendermint/types/block.go
+++ b/vendor/github.com/tendermint/tendermint/types/block.go
@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
-	"golang.org/x/crypto/ripemd160"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Block defines the atomic unit of a Tendermint blockchain.
@@ -107,6 +107,7 @@ func (b *Block) Hash() cmn.HexBytes {
 
 // MakePartSet returns a PartSet containing parts of a serialized block.
 // This is the form in which the block is gossipped to peers.
+// CONTRACT: partSize is greater than zero.
 func (b *Block) MakePartSet(partSize int) *PartSet {
 	if b == nil {
 		return nil
@@ -135,6 +136,15 @@ func (b *Block) HashesTo(hash []byte) bool {
 	return bytes.Equal(b.Hash(), hash)
 }
 
+// Size returns size of the block in bytes.
+func (b *Block) Size() int {
+	bz, err := cdc.MarshalBinaryBare(b)
+	if err != nil {
+		return 0
+	}
+	return len(bz)
+}
+
 // String returns a string representation of the block
 func (b *Block) String() string {
 	return b.StringIndented("")
@@ -199,7 +209,7 @@ type Header struct {
 // Hash returns the hash of the header.
 // Returns nil if ValidatorHash is missing,
 // since a Header is not valid unless there is
-// a ValidaotrsHash (corresponding to the validator set).
+// a ValidatorsHash (corresponding to the validator set).
 func (h *Header) Hash() cmn.HexBytes {
 	if h == nil || len(h.ValidatorsHash) == 0 {
 		return nil
@@ -383,6 +393,9 @@ func (commit *Commit) ValidateBasic() error {
 
 // Hash returns the hash of the commit
 func (commit *Commit) Hash() cmn.HexBytes {
+	if commit == nil {
+		return nil
+	}
 	if commit.hash == nil {
 		bs := make([]merkle.Hasher, len(commit.Precommits))
 		for i, precommit := range commit.Precommits {
@@ -455,7 +468,7 @@ func (data *Data) StringIndented(indent string) string {
 			txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs))
 			break
 		}
-		txStrings[i] = fmt.Sprintf("Tx:%v", tx)
+		txStrings[i] = fmt.Sprintf("%X (%d bytes)", tx.Hash(), len(tx))
 	}
 	return fmt.Sprintf(`Data{
 %s  %v
@@ -495,7 +508,7 @@ func (data *EvidenceData) StringIndented(indent string) string {
 		}
 		evStrings[i] = fmt.Sprintf("Evidence:%v", ev)
 	}
-	return fmt.Sprintf(`Data{
+	return fmt.Sprintf(`EvidenceData{
 %s  %v
 %s}#%v`,
 		indent, strings.Join(evStrings, "\n"+indent+"  "),
@@ -543,7 +556,7 @@ type hasher struct {
 }
 
 func (h hasher) Hash() []byte {
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) {
 		bz, err := cdc.MarshalBinaryBare(h.item)
 		if err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/types/canonical_json.go b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
index 95ade9c6..189a8a7a 100644
--- a/vendor/github.com/tendermint/tendermint/types/canonical_json.go
+++ b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
@@ -3,14 +3,13 @@ package types
 import (
 	"time"
 
-	"github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Canonical json is amino's json for structs with fields in alphabetical order
 
 // TimeFormat is used for generating the sigs
-const TimeFormat = amino.RFC3339Millis
+const TimeFormat = "2006-01-02T15:04:05.000Z"
 
 type CanonicalJSONBlockID struct {
 	Hash        cmn.HexBytes               `json:"hash,omitempty"`
diff --git a/vendor/github.com/tendermint/tendermint/types/event_buffer.go b/vendor/github.com/tendermint/tendermint/types/event_buffer.go
deleted file mode 100644
index 18b41014..00000000
--- a/vendor/github.com/tendermint/tendermint/types/event_buffer.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package types
-
-// Interface assertions
-var _ TxEventPublisher = (*TxEventBuffer)(nil)
-
-// TxEventBuffer is a buffer of events, which uses a slice to temporarily store
-// events.
-type TxEventBuffer struct {
-	next     TxEventPublisher
-	capacity int
-	events   []EventDataTx
-}
-
-// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given
-// capacity.
-func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer {
-	return &TxEventBuffer{
-		next:     next,
-		capacity: capacity,
-		events:   make([]EventDataTx, 0, capacity),
-	}
-}
-
-// Len returns the number of events cached.
-func (b TxEventBuffer) Len() int {
-	return len(b.events)
-}
-
-// PublishEventTx buffers an event to be fired upon finality.
-func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error {
-	b.events = append(b.events, e)
-	return nil
-}
-
-// Flush publishes events by running next.PublishWithTags on all cached events.
-// Blocks. Clears cached events.
-func (b *TxEventBuffer) Flush() error {
-	for _, e := range b.events {
-		err := b.next.PublishEventTx(e)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Clear out the elements and set the length to 0
-	// but maintain the underlying slice's capacity.
-	// See Issue https://github.com/tendermint/tendermint/issues/1189
-	b.events = b.events[:0]
-	return nil
-}
diff --git a/vendor/github.com/tendermint/tendermint/types/event_bus.go b/vendor/github.com/tendermint/tendermint/types/event_bus.go
index cb4b17d5..b4965fee 100644
--- a/vendor/github.com/tendermint/tendermint/types/event_bus.go
+++ b/vendor/github.com/tendermint/tendermint/types/event_bus.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const defaultCapacity = 0
@@ -49,7 +49,7 @@ func (b *EventBus) OnStart() error {
 }
 
 func (b *EventBus) OnStop() {
-	b.pubsub.OnStop()
+	b.pubsub.Stop()
 }
 
 func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
diff --git a/vendor/github.com/tendermint/tendermint/types/events.go b/vendor/github.com/tendermint/tendermint/types/events.go
index 2b87297c..891c6a90 100644
--- a/vendor/github.com/tendermint/tendermint/types/events.go
+++ b/vendor/github.com/tendermint/tendermint/types/events.go
@@ -10,22 +10,17 @@ import (
 
 // Reserved event types
 const (
-	EventBond              = "Bond"
 	EventCompleteProposal  = "CompleteProposal"
-	EventDupeout           = "Dupeout"
-	EventFork              = "Fork"
 	EventLock              = "Lock"
 	EventNewBlock          = "NewBlock"
 	EventNewBlockHeader    = "NewBlockHeader"
 	EventNewRound          = "NewRound"
 	EventNewRoundStep      = "NewRoundStep"
 	EventPolka             = "Polka"
-	EventRebond            = "Rebond"
 	EventRelock            = "Relock"
 	EventTimeoutPropose    = "TimeoutPropose"
 	EventTimeoutWait       = "TimeoutWait"
 	EventTx                = "Tx"
-	EventUnbond            = "Unbond"
 	EventUnlock            = "Unlock"
 	EventVote              = "Vote"
 	EventProposalHeartbeat = "ProposalHeartbeat"
@@ -113,11 +108,6 @@ const (
 )
 
 var (
-	EventQueryBond              = QueryForEvent(EventBond)
-	EventQueryUnbond            = QueryForEvent(EventUnbond)
-	EventQueryRebond            = QueryForEvent(EventRebond)
-	EventQueryDupeout           = QueryForEvent(EventDupeout)
-	EventQueryFork              = QueryForEvent(EventFork)
 	EventQueryNewBlock          = QueryForEvent(EventNewBlock)
 	EventQueryNewBlockHeader    = QueryForEvent(EventNewBlockHeader)
 	EventQueryNewRound          = QueryForEvent(EventNewRound)
diff --git a/vendor/github.com/tendermint/tendermint/types/evidence.go b/vendor/github.com/tendermint/tendermint/types/evidence.go
index 10907869..6313f43a 100644
--- a/vendor/github.com/tendermint/tendermint/types/evidence.go
+++ b/vendor/github.com/tendermint/tendermint/types/evidence.go
@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/tmlibs/merkle"
+	amino "github.com/tendermint/go-amino"
+
+	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/merkle"
 )
 
 // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid.
@@ -180,7 +181,7 @@ type EvidenceList []Evidence
 // Hash returns the simple merkle root hash of the EvidenceList.
 func (evl EvidenceList) Hash() []byte {
 	// Recursive impl.
-	// Copied from tmlibs/merkle to avoid allocations
+	// Copied from crypto/merkle to avoid allocations
 	switch len(evl) {
 	case 0:
 		return nil
diff --git a/vendor/github.com/tendermint/tendermint/types/genesis.go b/vendor/github.com/tendermint/tendermint/types/genesis.go
index aee8e076..220ee0e0 100644
--- a/vendor/github.com/tendermint/tendermint/types/genesis.go
+++ b/vendor/github.com/tendermint/tendermint/types/genesis.go
@@ -5,8 +5,8 @@ import (
 	"io/ioutil"
 	"time"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //------------------------------------------------------------
@@ -26,17 +26,7 @@ type GenesisDoc struct {
 	ConsensusParams *ConsensusParams   `json:"consensus_params,omitempty"`
 	Validators      []GenesisValidator `json:"validators"`
 	AppHash         cmn.HexBytes       `json:"app_hash"`
-	AppStateJSON    json.RawMessage    `json:"app_state,omitempty"`
-	AppOptions      json.RawMessage    `json:"app_options,omitempty"` // DEPRECATED
-}
-
-// AppState returns raw application state.
-// TODO: replace with AppState field during next breaking release (0.18)
-func (genDoc *GenesisDoc) AppState() json.RawMessage {
-	if len(genDoc.AppOptions) > 0 {
-		return genDoc.AppOptions
-	}
-	return genDoc.AppStateJSON
+	AppState        json.RawMessage    `json:"app_state,omitempty"`
 }
 
 // SaveAs is a utility method for saving GenensisDoc as a JSON file.
diff --git a/vendor/github.com/tendermint/tendermint/types/heartbeat.go b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
index 097dd22d..cebe2864 100644
--- a/vendor/github.com/tendermint/tendermint/types/heartbeat.go
+++ b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
@@ -3,8 +3,8 @@ package types
 import (
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Heartbeat is a simple vote-like structure so validators can
diff --git a/vendor/github.com/tendermint/tendermint/types/params.go b/vendor/github.com/tendermint/tendermint/types/params.go
index 2df092d6..3056c82a 100644
--- a/vendor/github.com/tendermint/tendermint/types/params.go
+++ b/vendor/github.com/tendermint/tendermint/types/params.go
@@ -1,12 +1,13 @@
 package types
 
 import (
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 const (
+	// MaxBlockSizeBytes is the maximum permitted size of the blocks.
 	MaxBlockSizeBytes = 104857600 // 100MB
 )
 
@@ -56,7 +57,7 @@ func DefaultConsensusParams() *ConsensusParams {
 func DefaultBlockSize() BlockSize {
 	return BlockSize{
 		MaxBytes: 22020096, // 21MB
-		MaxTxs:   100000,
+		MaxTxs:   10000,
 		MaxGas:   -1,
 	}
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/part_set.go b/vendor/github.com/tendermint/tendermint/types/part_set.go
index 18cfe802..f6d7f6b6 100644
--- a/vendor/github.com/tendermint/tendermint/types/part_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/part_set.go
@@ -7,10 +7,9 @@ import (
 	"io"
 	"sync"
 
-	"golang.org/x/crypto/ripemd160"
-
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
@@ -31,7 +30,7 @@ func (part *Part) Hash() []byte {
 	if part.hash != nil {
 		return part.hash
 	}
-	hasher := ripemd160.New()
+	hasher := tmhash.New()
 	hasher.Write(part.Bytes) // nolint: errcheck, gas
 	part.hash = hasher.Sum(nil)
 	return part.hash
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator.go b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
index 8759d3f9..85db65a4 100644
--- a/vendor/github.com/tendermint/tendermint/types/priv_validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 // PrivValidator defines the functionality of a local Tendermint validator
@@ -63,7 +63,10 @@ func (pv *MockPV) GetPubKey() crypto.PubKey {
 // Implements PrivValidator.
 func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
 	signBytes := vote.SignBytes(chainID)
-	sig := pv.privKey.Sign(signBytes)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	vote.Signature = sig
 	return nil
 }
@@ -71,14 +74,20 @@ func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
 // Implements PrivValidator.
 func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error {
 	signBytes := proposal.SignBytes(chainID)
-	sig := pv.privKey.Sign(signBytes)
+	sig, err := pv.privKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
 	proposal.Signature = sig
 	return nil
 }
 
 // signHeartbeat signs the heartbeat without any checking.
 func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
-	sig := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+	sig, err := pv.privKey.Sign(heartbeat.SignBytes(chainID))
+	if err != nil {
+		return err
+	}
 	heartbeat.Signature = sig
 	return nil
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/proposal.go b/vendor/github.com/tendermint/tendermint/types/proposal.go
index 95008897..52ce8756 100644
--- a/vendor/github.com/tendermint/tendermint/types/proposal.go
+++ b/vendor/github.com/tendermint/tendermint/types/proposal.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/types/protobuf.go b/vendor/github.com/tendermint/tendermint/types/protobuf.go
index eb684ae7..4fe44825 100644
--- a/vendor/github.com/tendermint/tendermint/types/protobuf.go
+++ b/vendor/github.com/tendermint/tendermint/types/protobuf.go
@@ -6,8 +6,8 @@ import (
 	"reflect"
 	"time"
 
-	abci "github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
+	abci "github.com/tendermint/tendermint/abci/types"
+	crypto "github.com/tendermint/tendermint/crypto"
 )
 
 //-------------------------------------------------------
@@ -58,7 +58,7 @@ func (tm2pb) Validator(val *Validator) abci.Validator {
 }
 
 // XXX: panics on nil or unknown pubkey type
-// TODO: add cases when new pubkey types are added to go-crypto
+// TODO: add cases when new pubkey types are added to crypto
 func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
 	switch pk := pubKey.(type) {
 	case crypto.PubKeyEd25519:
@@ -78,7 +78,7 @@ func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
 
 // XXX: panics on nil or unknown pubkey type
 func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator {
-	validators := make([]abci.Validator, len(vals.Validators))
+	validators := make([]abci.Validator, vals.Size())
 	for i, val := range vals.Validators {
 		validators[i] = TM2PB.Validator(val)
 	}
@@ -153,7 +153,7 @@ var PB2TM = pb2tm{}
 type pb2tm struct{}
 
 func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
-	// TODO: define these in go-crypto and use them
+	// TODO: define these in crypto and use them
 	sizeEd := 32
 	sizeSecp := 33
 	switch pubKey.Type {
diff --git a/vendor/github.com/tendermint/tendermint/types/results.go b/vendor/github.com/tendermint/tendermint/types/results.go
index 326cee48..17d5891c 100644
--- a/vendor/github.com/tendermint/tendermint/types/results.go
+++ b/vendor/github.com/tendermint/tendermint/types/results.go
@@ -1,9 +1,9 @@
 package types
 
 import (
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
@@ -24,15 +24,16 @@ func (a ABCIResult) Hash() []byte {
 // ABCIResults wraps the deliver tx results to return a proof
 type ABCIResults []ABCIResult
 
-// NewResults creates ABCIResults from ResponseDeliverTx
-func NewResults(del []*abci.ResponseDeliverTx) ABCIResults {
-	res := make(ABCIResults, len(del))
-	for i, d := range del {
+// NewResults creates ABCIResults from the list of ResponseDeliverTx.
+func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults {
+	res := make(ABCIResults, len(responses))
+	for i, d := range responses {
 		res[i] = NewResultFromResponse(d)
 	}
 	return res
 }
 
+// NewResultFromResponse creates ABCIResult from ResponseDeliverTx.
 func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
 	return ABCIResult{
 		Code: response.Code,
@@ -51,6 +52,8 @@ func (a ABCIResults) Bytes() []byte {
 
 // Hash returns a merkle hash of all results
 func (a ABCIResults) Hash() []byte {
+	// NOTE: we copy the impl of the merkle tree for txs -
+	// we should be consistent and either do it for both or not.
 	return merkle.SimpleHashFromHashers(a.toHashers())
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/types/tx.go b/vendor/github.com/tendermint/tendermint/types/tx.go
index e7247693..489f0b23 100644
--- a/vendor/github.com/tendermint/tendermint/types/tx.go
+++ b/vendor/github.com/tendermint/tendermint/types/tx.go
@@ -5,20 +5,20 @@ import (
 	"errors"
 	"fmt"
 
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Tx is an arbitrary byte array.
 // NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
-// Alternatively, it may make sense to add types here and let
-// []byte be type 0x1 so we can have versioned txs if need be in the future.
+// Might we want types here ?
 type Tx []byte
 
-// Hash computes the RIPEMD160 hash of the wire encoded transaction.
+// Hash computes the TMHASH hash of the wire encoded transaction.
 func (tx Tx) Hash() []byte {
-	return aminoHasher(tx).Hash()
+	return tmhash.Sum(tx)
 }
 
 // String returns the hex-encoded transaction as a string.
@@ -32,7 +32,7 @@ type Txs []Tx
 // Hash returns the simple Merkle root hash of the transactions.
 func (txs Txs) Hash() []byte {
 	// Recursive impl.
-	// Copied from tmlibs/merkle to avoid allocations
+	// Copied from tendermint/crypto/merkle to avoid allocations
 	switch len(txs) {
 	case 0:
 		return nil
diff --git a/vendor/github.com/tendermint/tendermint/types/validator.go b/vendor/github.com/tendermint/tendermint/types/validator.go
index 46dc61d0..e43acf09 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator.go
@@ -4,8 +4,8 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // Volatile state for each Validator
diff --git a/vendor/github.com/tendermint/tendermint/types/validator_set.go b/vendor/github.com/tendermint/tendermint/types/validator_set.go
index f2fac292..60fc2d83 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator_set.go
@@ -7,8 +7,8 @@ import (
 	"sort"
 	"strings"
 
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
+	"github.com/tendermint/tendermint/crypto/merkle"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // ValidatorSet represent a set of *Validator at a given height.
@@ -39,14 +39,15 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
 		Validators: validators,
 	}
 
-	if vals != nil {
+	if len(vals) > 0 {
 		vs.IncrementAccum(1)
 	}
 
 	return vs
 }
 
-// incrementAccum and update the proposer
+// IncrementAccum increments accum of each validator and updates the
+// proposer. Panics if validator set is empty.
 func (valSet *ValidatorSet) IncrementAccum(times int) {
 	// Add VotingPower * times to each validator and order into heap.
 	validatorsHeap := cmn.NewHeap()
diff --git a/vendor/github.com/tendermint/tendermint/types/vote.go b/vendor/github.com/tendermint/tendermint/types/vote.go
index e4ead612..ed4ebd73 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote.go
@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
+	crypto "github.com/tendermint/tendermint/crypto"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 var (
diff --git a/vendor/github.com/tendermint/tendermint/types/vote_set.go b/vendor/github.com/tendermint/tendermint/types/vote_set.go
index a60d95da..c5168105 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote_set.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/pkg/errors"
 
-	cmn "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // UNSTABLE
diff --git a/vendor/github.com/tendermint/tendermint/types/wire.go b/vendor/github.com/tendermint/tendermint/types/wire.go
index bd5c4497..6342d7eb 100644
--- a/vendor/github.com/tendermint/tendermint/types/wire.go
+++ b/vendor/github.com/tendermint/tendermint/types/wire.go
@@ -2,7 +2,7 @@ package types
 
 import (
 	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/crypto"
 )
 
 var cdc = amino.NewCodec()
diff --git a/vendor/github.com/tendermint/tendermint/version/version.go b/vendor/github.com/tendermint/tendermint/version/version.go
index df553115..165f2582 100644
--- a/vendor/github.com/tendermint/tendermint/version/version.go
+++ b/vendor/github.com/tendermint/tendermint/version/version.go
@@ -3,14 +3,14 @@ package version
 // Version components
 const (
 	Maj = "0"
-	Min = "21"
-	Fix = "0"
+	Min = "22"
+	Fix = "4"
 )
 
 var (
 	// Version is the current version of Tendermint
 	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.21.0"
+	Version = "0.22.4"
 
 	// GitCommit is the current HEAD set using ldflags.
 	GitCommit string
diff --git a/vendor/github.com/tendermint/tmlibs/common/array.go b/vendor/github.com/tendermint/tmlibs/common/array.go
deleted file mode 100644
index adedc42b..00000000
--- a/vendor/github.com/tendermint/tmlibs/common/array.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package common
-
-func Arr(items ...interface{}) []interface{} {
-	return items
-}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go b/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
deleted file mode 100644
index 9bdf52cb..00000000
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Computes a deterministic minimal height merkle tree hash.
-If the number of items is not a power of two, some leaves
-will be at different levels. Tries to keep both sides of
-the tree the same size, but the left may be one greater.
-
-Use this for short deterministic trees, such as the validator list.
-For larger datasets, use IAVLTree.
-
-                        *
-                       / \
-                     /     \
-                   /         \
-                 /             \
-                *               *
-               / \             / \
-              /   \           /   \
-             /     \         /     \
-            *       *       *       h6
-           / \     / \     / \
-          h0  h1  h2  h3  h4  h5
-
-*/
-
-package merkle
-
-import (
-	"golang.org/x/crypto/ripemd160"
-)
-
-func SimpleHashFromTwoHashes(left []byte, right []byte) []byte {
-	var hasher = ripemd160.New()
-	err := encodeByteSlice(hasher, left)
-	if err != nil {
-		panic(err)
-	}
-	err = encodeByteSlice(hasher, right)
-	if err != nil {
-		panic(err)
-	}
-	return hasher.Sum(nil)
-}
-
-func SimpleHashFromHashes(hashes [][]byte) []byte {
-	// Recursive impl.
-	switch len(hashes) {
-	case 0:
-		return nil
-	case 1:
-		return hashes[0]
-	default:
-		left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2])
-		right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:])
-		return SimpleHashFromTwoHashes(left, right)
-	}
-}
-
-// NOTE: Do not implement this, use SimpleHashFromByteslices instead.
-// type Byteser interface { Bytes() []byte }
-// func SimpleHashFromBytesers(items []Byteser) []byte { ... }
-
-func SimpleHashFromByteslices(bzs [][]byte) []byte {
-	hashes := make([][]byte, len(bzs))
-	for i, bz := range bzs {
-		hashes[i] = SimpleHashFromBytes(bz)
-	}
-	return SimpleHashFromHashes(hashes)
-}
-
-func SimpleHashFromBytes(bz []byte) []byte {
-	hasher := ripemd160.New()
-	hasher.Write(bz)
-	return hasher.Sum(nil)
-}
-
-func SimpleHashFromHashers(items []Hasher) []byte {
-	hashes := make([][]byte, len(items))
-	for i, item := range items {
-		hash := item.Hash()
-		hashes[i] = hash
-	}
-	return SimpleHashFromHashes(hashes)
-}
-
-func SimpleHashFromMap(m map[string]Hasher) []byte {
-	sm := NewSimpleMap()
-	for k, v := range m {
-		sm.Set(k, v)
-	}
-	return sm.Hash()
-}
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index bdf5652b..f4d9b5ec 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -52,9 +52,31 @@ const (
 	noDialOnMiss = false
 )
 
+// shouldTraceGetConn reports whether getClientConn should call any
+// ClientTrace.GetConn hook associated with the http.Request.
+//
+// This complexity is needed to avoid double calls of the GetConn hook
+// during the back-and-forth between net/http and x/net/http2 (when the
+// net/http.Transport is upgraded to also speak http2), as well as support
+// the case where x/net/http2 is being used directly.
+func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
+	// If our Transport wasn't made via ConfigureTransport, always
+	// trace the GetConn hook if provided, because that means the
+	// http2 package is being used directly and it's the one
+	// dialing, as opposed to net/http.
+	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
+		return true
+	}
+	// Otherwise, only use the GetConn hook if this connection has
+	// been used previously for other requests. For fresh
+	// connections, the net/http package does the dialing.
+	return !st.freshConn
+}
+
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
 	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
+		traceGetConn(req, addr)
 		const singleUse = true
 		cc, err := p.t.dialClientConn(addr, singleUse)
 		if err != nil {
@@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 	}
 	p.mu.Lock()
 	for _, cc := range p.conns[addr] {
-		if cc.CanTakeNewRequest() {
+		if st := cc.idleState(); st.canTakeNewRequest {
+			if p.shouldTraceGetConn(st) {
+				traceGetConn(req, addr)
+			}
 			p.mu.Unlock()
 			return cc, nil
 		}
@@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 		p.mu.Unlock()
 		return nil, ErrNoCachedConn
 	}
+	traceGetConn(req, addr)
 	call := p.getStartDialLocked(addr)
 	p.mu.Unlock()
 	<-call.done
diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go
new file mode 100644
index 00000000..e38ea290
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go111.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package http2
+
+func traceHasWroteHeaderField(trace *clientTrace) bool {
+	return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *clientTrace, k, v string) {
+	if trace != nil && trace.WroteHeaderField != nil {
+		trace.WroteHeaderField(k, []string{v})
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
index 47b7fae0..d957b7bc 100644
--- a/vendor/golang.org/x/net/http2/go17.go
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -18,6 +18,8 @@ type contextContext interface {
 	context.Context
 }
 
+var errCanceled = context.Canceled
+
 func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
 	ctx, cancel = context.WithCancel(context.Background())
 	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
@@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration {
 
 func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
 
+func traceGetConn(req *http.Request, hostPort string) {
+	trace := httptrace.ContextClientTrace(req.Context())
+	if trace == nil || trace.GetConn == nil {
+		return
+	}
+	trace.GetConn(hostPort)
+}
+
 func traceGotConn(req *http.Request, cc *ClientConn) {
 	trace := httptrace.ContextClientTrace(req.Context())
 	if trace == nil || trace.GotConn == nil {
@@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace {
 func (cc *ClientConn) Ping(ctx context.Context) error {
 	return cc.ping(ctx)
 }
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+	return cc.shutdown(ctx)
+}
diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go
new file mode 100644
index 00000000..d036b013
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go111.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package http2
+
+func traceHasWroteHeaderField(trace *clientTrace) bool { return false }
+
+func traceWroteHeaderField(trace *clientTrace, k, v string) {}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
index 140434a7..7ffb2504 100644
--- a/vendor/golang.org/x/net/http2/not_go17.go
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -8,6 +8,7 @@ package http2
 
 import (
 	"crypto/tls"
+	"errors"
 	"net"
 	"net/http"
 	"time"
@@ -18,6 +19,8 @@ type contextContext interface {
 	Err() error
 }
 
+var errCanceled = errors.New("canceled")
+
 type fakeContext struct{}
 
 func (fakeContext) Done() <-chan struct{} { return nil }
@@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) {
 type clientTrace struct{}
 
 func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGetConn(*http.Request, string)      {}
 func traceGotConn(*http.Request, *ClientConn) {}
 func traceFirstResponseByte(*clientTrace)     {}
 func traceWroteHeaders(*clientTrace)          {}
@@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error {
 	return cc.ping(ctx)
 }
 
+func (cc *ClientConn) Shutdown(ctx contextContext) error {
+	return cc.shutdown(ctx)
+}
+
 func (t *Transport) idleConnTimeout() time.Duration { return 0 }
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 79389916..e111019d 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1721,6 +1721,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 			// processing this frame.
 			return nil
 		}
+		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+		// this state, it MUST respond with a stream error (Section 5.4.2) of
+		// type STREAM_CLOSED.
+		if st.state == stateHalfClosedRemote {
+			return streamError(id, ErrCodeStreamClosed)
+		}
 		return st.processTrailerHeaders(f)
 	}
 
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index d23a2262..300b02fe 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -159,6 +159,7 @@ type ClientConn struct {
 	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
 	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
 	inflow          flow       // peer's conn-level flow control
+	closing         bool
 	closed          bool
 	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
 	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
@@ -630,12 +631,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
 	return cc.canTakeNewRequestLocked()
 }
 
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type clientConnIdleState struct {
+	canTakeNewRequest bool
+	freshConn         bool // whether it's unused by any previous request
+}
+
+func (cc *ClientConn) idleState() clientConnIdleState {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.idleStateLocked()
+}
+
+func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
 	if cc.singleUse && cc.nextStreamID > 1 {
-		return false
+		return
 	}
-	return cc.goAway == nil && !cc.closed &&
+	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
 		int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
+	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
+	return
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	st := cc.idleStateLocked()
+	return st.canTakeNewRequest
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -665,6 +686,88 @@ func (cc *ClientConn) closeIfIdle() {
 	cc.tconn.Close()
 }
 
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully close the client connection, waiting for running streams to complete.
+// Public implementation is in go17.go and not_go17.go
+func (cc *ClientConn) shutdown(ctx contextContext) error {
+	if err := cc.sendGoAway(); err != nil {
+		return err
+	}
+	// Wait for all in-flight streams to complete or connection to close
+	done := make(chan error, 1)
+	cancelled := false // guarded by cc.mu
+	go func() {
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		for {
+			if len(cc.streams) == 0 || cc.closed {
+				cc.closed = true
+				done <- cc.tconn.Close()
+				break
+			}
+			if cancelled {
+				break
+			}
+			cc.cond.Wait()
+		}
+	}()
+	shutdownEnterWaitStateHook()
+	select {
+	case err := <-done:
+		return err
+	case <-ctx.Done():
+		cc.mu.Lock()
+		// Free the goroutine above
+		cancelled = true
+		cc.cond.Broadcast()
+		cc.mu.Unlock()
+		return ctx.Err()
+	}
+}
+
+func (cc *ClientConn) sendGoAway() error {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if cc.closing {
+		// GOAWAY sent already
+		return nil
+	}
+	// Send a graceful shutdown frame to server
+	maxStreamID := cc.nextStreamID
+	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		return err
+	}
+	// Prevent new requests
+	cc.closing = true
+	return nil
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+	cc.mu.Lock()
+	defer cc.cond.Broadcast()
+	defer cc.mu.Unlock()
+	err := errors.New("http2: client connection force closed via ClientConn.Close")
+	for id, cs := range cc.streams {
+		select {
+		case cs.resc <- resAndError{err: err}:
+		default:
+		}
+		cs.bufPipe.CloseWithError(err)
+		delete(cc.streams, id)
+	}
+	cc.closed = true
+	return cc.tconn.Close()
+}
+
 const maxAllocFrameSize = 512 << 10
 
 // frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -747,7 +850,7 @@ func checkConnHeaders(req *http.Request) error {
 	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
 		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
 	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
 		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
 	}
 	return nil
@@ -1291,9 +1394,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 		return nil, errRequestHeaderListSize
 	}
 
+	trace := requestTrace(req)
+	traceHeaders := traceHasWroteHeaderField(trace)
+
 	// Header list size is ok. Write the headers.
 	enumerateHeaders(func(name, value string) {
-		cc.writeHeader(strings.ToLower(name), value)
+		name = strings.ToLower(name)
+		cc.writeHeader(name, value)
+		if traceHeaders {
+			traceWroteHeaderField(trace, name, value)
+		}
 	})
 
 	return cc.hbuf.Bytes(), nil
diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go
new file mode 100644
index 00000000..cee46e33
--- /dev/null
+++ b/vendor/golang.org/x/net/netutil/listen.go
@@ -0,0 +1,74 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netutil provides network utility functions, complementing the more
+// common ones in the net package.
+package netutil // import "golang.org/x/net/netutil"
+
+import (
+	"net"
+	"sync"
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+	return &limitListener{
+		Listener: l,
+		sem:      make(chan struct{}, n),
+		done:     make(chan struct{}),
+	}
+}
+
+type limitListener struct {
+	net.Listener
+	sem       chan struct{}
+	closeOnce sync.Once     // ensures the done chan is only closed once
+	done      chan struct{} // no values sent; closed when Close is called
+}
+
+// acquire acquires the limiting semaphore. Returns true if successfully
+// accquired, false if the listener is closed and the semaphore is not
+// acquired.
+func (l *limitListener) acquire() bool {
+	select {
+	case <-l.done:
+		return false
+	case l.sem <- struct{}{}:
+		return true
+	}
+}
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+	acquired := l.acquire()
+	// If the semaphore isn't acquired because the listener was closed, expect
+	// that this call to accept won't block, but immediately return an error.
+	c, err := l.Listener.Accept()
+	if err != nil {
+		if acquired {
+			l.release()
+		}
+		return nil, err
+	}
+	return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+func (l *limitListener) Close() error {
+	err := l.Listener.Close()
+	l.closeOnce.Do(func() { close(l.done) })
+	return err
+}
+
+type limitListenerConn struct {
+	net.Conn
+	releaseOnce sync.Once
+	release     func()
+}
+
+func (l *limitListenerConn) Close() error {
+	err := l.Conn.Close()
+	l.releaseOnce.Do(l.release)
+	return err
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 53fb8518..33c8b5f0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -206,7 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_LINK:
 		pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa))
@@ -286,7 +286,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 		Close(nfd)
 		return 0, nil, ECONNABORTED
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -306,7 +306,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 		rsa.Addr.Family = AF_UNIX
 		rsa.Addr.Len = SizeofSockaddrUnix
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
@@ -356,7 +356,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	recvflags = int(msg.Flags)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index b5072de2..e34abe29 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -87,7 +87,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index ba9df4ac..5561a3eb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -89,7 +89,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 9908030c..690c2c87 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -489,6 +489,47 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
 }
 
+// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets
+// using the RFCOMM protocol.
+//
+// Server example:
+//
+//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+//      _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
+//      	Channel: 1,
+//      	Addr:    [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
+//      })
+//      _ = Listen(fd, 1)
+//      nfd, sa, _ := Accept(fd)
+//      fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
+//      Read(nfd, buf)
+//
+// Client example:
+//
+//      fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
+//      _ = Connect(fd, &SockaddrRFCOMM{
+//      	Channel: 1,
+//      	Addr:    [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
+//      })
+//      Write(fd, []byte(`hello`))
+type SockaddrRFCOMM struct {
+	// Addr represents a bluetooth address, byte ordering is little-endian.
+	Addr [6]uint8
+
+	// Channel is a designated bluetooth channel, only 1-30 are available for use.
+	// Since Linux 2.6.7 and further zero value is the first available channel.
+	Channel uint8
+
+	raw RawSockaddrRFCOMM
+}
+
+func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
+	sa.raw.Family = AF_BLUETOOTH
+	sa.raw.Channel = sa.Channel
+	sa.raw.Bdaddr = sa.Addr
+	return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil
+}
+
 // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
 // The RxID and TxID fields are used for transport protocol addressing in
 // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
@@ -651,7 +692,7 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_NETLINK:
 		pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
@@ -728,6 +769,30 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
 			Port: pp.Port,
 		}
 		return sa, nil
+	case AF_BLUETOOTH:
+		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
+		if err != nil {
+			return nil, err
+		}
+		// only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections
+		switch proto {
+		case BTPROTO_L2CAP:
+			pp := (*RawSockaddrL2)(unsafe.Pointer(rsa))
+			sa := &SockaddrL2{
+				PSM:      pp.Psm,
+				CID:      pp.Cid,
+				Addr:     pp.Bdaddr,
+				AddrType: pp.Bdaddr_type,
+			}
+			return sa, nil
+		case BTPROTO_RFCOMM:
+			pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa))
+			sa := &SockaddrRFCOMM{
+				Channel: pp.Channel,
+				Addr:    pp.Bdaddr,
+			}
+			return sa, nil
+		}
 	}
 	return nil, EAFNOSUPPORT
 }
@@ -739,7 +804,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	if err != nil {
 		return
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -757,7 +822,7 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
 	if len > SizeofSockaddrAny {
 		panic("RawSockaddrAny too small")
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -771,7 +836,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	if err = getsockname(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
@@ -960,7 +1025,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	recvflags = int(msg.Flags)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 820ef77a..a05337d5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -112,7 +112,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	if err = getsockname(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 // GetsockoptString returns the string value of the socket option opt for the
@@ -360,7 +360,7 @@ func Futimes(fd int, tv []Timeval) error {
 	return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
 }
 
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_UNIX:
 		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
@@ -411,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	if nfd == -1 {
 		return
 	}
-	sa, err = anyToSockaddr(&rsa)
+	sa, err = anyToSockaddr(fd, &rsa)
 	if err != nil {
 		Close(nfd)
 		nfd = 0
@@ -448,7 +448,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	oobn = int(msg.Accrightslen)
 	// source address is only specified if the socket is unconnected
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index b835bad0..95b2180a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -219,7 +219,7 @@ func Getpeername(fd int) (sa Sockaddr, err error) {
 	if err = getpeername(fd, &rsa, &len); err != nil {
 		return
 	}
-	return anyToSockaddr(&rsa)
+	return anyToSockaddr(fd, &rsa)
 }
 
 func GetsockoptByte(fd, level, opt int) (value byte, err error) {
@@ -291,7 +291,7 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
 		return
 	}
 	if rsa.Addr.Family != AF_UNSPEC {
-		from, err = anyToSockaddr(&rsa)
+		from, err = anyToSockaddr(fd, &rsa)
 	}
 	return
 }
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 0ae2aa84..7cc1bfd1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -1474,8 +1474,13 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+func faccessat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func EpollCreate(size int) (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
-	fd = int(r0)
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func faccessat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index fa16c165..c3dcb381 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -1474,8 +1474,13 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Dup2(oldfd int, newfd int) (err error) {
-	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+func faccessat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func EpollCreate(size int) (fd int, err error) {
-	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
-	fd = int(r0)
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func faccessat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index e89bc6b3..4c250033 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -248,6 +248,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -401,6 +408,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index d95372ba..2e4d709b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -250,6 +250,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -405,6 +412,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 77875ba0..bf38e5e2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -404,6 +411,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 5a9df694..972c1b87 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index dcb239de..783e70e8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -249,6 +249,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -402,6 +409,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 9cf85f72..5c6ea719 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 6fd66e75..93effc8e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -251,6 +251,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -406,6 +413,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index faa5b3ef..cc5ca242 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -249,6 +249,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -402,6 +409,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index ad4c4524..712f6402 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -252,6 +252,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -407,6 +414,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 1fdb2f21..1be45320 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -252,6 +252,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -407,6 +414,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index d32079d1..932b655f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -250,6 +250,13 @@ type RawSockaddrL2 struct {
 	_           [1]byte
 }
 
+type RawSockaddrRFCOMM struct {
+	Family  uint16
+	Bdaddr  [6]uint8
+	Channel uint8
+	_       [1]byte
+}
+
 type RawSockaddrCAN struct {
 	Family  uint16
 	_       [2]byte
@@ -405,6 +412,7 @@ const (
 	SizeofSockaddrNetlink   = 0xc
 	SizeofSockaddrHCI       = 0x6
 	SizeofSockaddrL2        = 0xe
+	SizeofSockaddrRFCOMM    = 0xa
 	SizeofSockaddrCAN       = 0x10
 	SizeofSockaddrALG       = 0x58
 	SizeofSockaddrVM        = 0x10
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index b4e42478..7d2a6794 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -94,16 +94,29 @@ const (
 	FILE_APPEND_DATA      = 0x00000004
 	FILE_WRITE_ATTRIBUTES = 0x00000100
 
-	FILE_SHARE_READ              = 0x00000001
-	FILE_SHARE_WRITE             = 0x00000002
-	FILE_SHARE_DELETE            = 0x00000004
-	FILE_ATTRIBUTE_READONLY      = 0x00000001
-	FILE_ATTRIBUTE_HIDDEN        = 0x00000002
-	FILE_ATTRIBUTE_SYSTEM        = 0x00000004
-	FILE_ATTRIBUTE_DIRECTORY     = 0x00000010
-	FILE_ATTRIBUTE_ARCHIVE       = 0x00000020
-	FILE_ATTRIBUTE_NORMAL        = 0x00000080
-	FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+	FILE_SHARE_READ   = 0x00000001
+	FILE_SHARE_WRITE  = 0x00000002
+	FILE_SHARE_DELETE = 0x00000004
+
+	FILE_ATTRIBUTE_READONLY              = 0x00000001
+	FILE_ATTRIBUTE_HIDDEN                = 0x00000002
+	FILE_ATTRIBUTE_SYSTEM                = 0x00000004
+	FILE_ATTRIBUTE_DIRECTORY             = 0x00000010
+	FILE_ATTRIBUTE_ARCHIVE               = 0x00000020
+	FILE_ATTRIBUTE_DEVICE                = 0x00000040
+	FILE_ATTRIBUTE_NORMAL                = 0x00000080
+	FILE_ATTRIBUTE_TEMPORARY             = 0x00000100
+	FILE_ATTRIBUTE_SPARSE_FILE           = 0x00000200
+	FILE_ATTRIBUTE_REPARSE_POINT         = 0x00000400
+	FILE_ATTRIBUTE_COMPRESSED            = 0x00000800
+	FILE_ATTRIBUTE_OFFLINE               = 0x00001000
+	FILE_ATTRIBUTE_NOT_CONTENT_INDEXED   = 0x00002000
+	FILE_ATTRIBUTE_ENCRYPTED             = 0x00004000
+	FILE_ATTRIBUTE_INTEGRITY_STREAM      = 0x00008000
+	FILE_ATTRIBUTE_VIRTUAL               = 0x00010000
+	FILE_ATTRIBUTE_NO_SCRUB_DATA         = 0x00020000
+	FILE_ATTRIBUTE_RECALL_ON_OPEN        = 0x00040000
+	FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
 
 	INVALID_FILE_ATTRIBUTES = 0xffffffff
 
-- 
GitLab