From 9353744f360acdabe426776e3edb45ac2bd80e3a Mon Sep 17 00:00:00 2001
From: Silas Davis <silas@monax.io>
Date: Thu, 12 Apr 2018 10:15:47 -0400
Subject: [PATCH] Update vendor

Signed-off-by: Silas Davis <silas@monax.io>
---
 .../github.com/go-playground/locales/LICENSE  |   21 -
 .../locales/currency/currency.go              |  306 ----
 .../github.com/go-playground/locales/rules.go |  293 ---
 .../universal-translator/LICENSE              |   21 -
 .../universal-translator/errors.go            |  148 --
 .../universal-translator/import_export.go     |  274 ---
 .../universal-translator/translator.go        |  420 -----
 .../universal_translator.go                   |  113 --
 vendor/github.com/imdario/mergo/map.go        |   14 +-
 vendor/github.com/imdario/mergo/merge.go      |   60 +-
 vendor/github.com/rcrowley/go-metrics/ewma.go |   46 +-
 .../rcrowley/go-metrics/gauge_float64.go      |   16 +-
 .../github.com/rcrowley/go-metrics/meter.go   |   83 +-
 .../rcrowley/go-metrics/registry.go           |   19 +-
 vendor/github.com/spf13/cobra/args.go         |   25 +-
 .../spf13/cobra/bash_completions.go           |  138 +-
 vendor/github.com/spf13/cobra/cobra.go        |   12 +-
 vendor/github.com/spf13/cobra/command.go      |  120 +-
 .../github.com/syndtr/goleveldb/leveldb/db.go |    6 +
 .../syndtr/goleveldb/leveldb/session.go       |    4 +-
 .../syndtr/goleveldb/leveldb/storage.go       |   63 +
 .../leveldb/storage/file_storage_plan9.go     |    4 +-
 .../goleveldb/leveldb/storage/mem_storage.go  |    8 +-
 .../tendermint/abci/client/grpc_client.go     |    3 +-
 .../tendermint/abci/client/socket_client.go   |    6 +-
 .../tendermint/abci/example/code/code.go      |    2 -
 .../tendermint/abci/example/dummy/dummy.go    |   99 --
 .../example/{dummy => kvstore}/helpers.go     |   22 +-
 .../abci/example/kvstore/kvstore.go           |  126 ++
 .../persistent_kvstore.go}                    |   97 +-
 .../tendermint/abci/types/application.go      |    4 +-
 .../tendermint/abci/types/messages.go         |   59 +-
 .../tendermint/abci/types/result.go           |   37 +-
 .../tendermint/abci/types/types.pb.go         |  536 +++---
 .../github.com/tendermint/abci/types/util.go  |   33 +-
 .../tendermint/go-crypto/priv_key.go          |   10 +-
 .../tendermint/go-crypto/pub_key.go           |   34 +-
 .../github.com/tendermint/go-crypto/random.go |    5 +-
 .../tendermint/go-crypto/signature.go         |    4 +-
 .../tendermint/go-crypto/version.go           |    2 +-
 .../nowriter/tmlegacy/tm_encoder_legacy.go    |  176 --
 vendor/github.com/tendermint/go-wire/util.go  |   34 +
 .../github.com/tendermint/go-wire/version.go  |    2 +-
 vendor/github.com/tendermint/iavl/chunk.go    |  185 ++
 vendor/github.com/tendermint/iavl/doc.go      |    2 +-
 vendor/github.com/tendermint/iavl/logger.go   |   11 +
 vendor/github.com/tendermint/iavl/node.go     |  109 +-
 vendor/github.com/tendermint/iavl/nodedb.go   |  246 ++-
 .../tendermint/iavl/orphaning_tree.go         |   62 +-
 vendor/github.com/tendermint/iavl/path.go     |    6 +-
 vendor/github.com/tendermint/iavl/proof.go    |   93 +-
 .../github.com/tendermint/iavl/proof_key.go   |   72 +-
 .../github.com/tendermint/iavl/proof_range.go |   65 +-
 .../github.com/tendermint/iavl/serialize.go   |  200 +++
 vendor/github.com/tendermint/iavl/tree.go     |   76 +-
 vendor/github.com/tendermint/iavl/version.go  |    2 +-
 .../tendermint/iavl/versioned_tree.go         |  134 +-
 .../tendermint/tendermint/blockchain/pool.go  |  108 +-
 .../tendermint/blockchain/reactor.go          |  101 +-
 .../tendermint/tendermint/blockchain/store.go |   20 +-
 .../tendermint/tendermint/config/config.go    |  252 ++-
 .../tendermint/tendermint/config/toml.go      |  249 ++-
 .../tendermint/consensus/reactor.go           |  116 +-
 .../tendermint/tendermint/consensus/replay.go |   48 +-
 .../tendermint/consensus/replay_file.go       |   31 +-
 .../tendermint/tendermint/consensus/state.go  |  287 +--
 .../tendermint/tendermint/consensus/ticker.go |    2 +-
 .../consensus/types/height_vote_set.go        |   33 +-
 .../tendermint/consensus/types/state.go       |    7 +
 .../tendermint/tendermint/consensus/wal.go    |    2 +-
 .../tendermint/consensus/wal_generator.go     |    8 +-
 .../tendermint/tendermint/evidence/reactor.go |    8 +-
 .../tendermint/tendermint/evidence/store.go   |    4 +-
 .../tendermint/tendermint/mempool/mempool.go  |   36 +-
 .../tendermint/tendermint/mempool/reactor.go  |   37 +-
 .../tendermint/tendermint/node/node.go        |  165 +-
 .../tendermint/tendermint/p2p/base_reactor.go |   53 +
 .../tendermint/p2p/{ => conn}/conn_go110.go   |    4 +-
 .../p2p/{ => conn}/conn_notgo110.go           |    4 +-
 .../tendermint/p2p/{ => conn}/connection.go   |  127 +-
 .../p2p/{ => conn}/secret_connection.go       |  110 +-
 .../tendermint/tendermint/p2p/errors.go       |   20 +
 .../tendermint/tendermint/p2p/key.go          |  112 ++
 .../tendermint/tendermint/p2p/listener.go     |    2 +-
 .../tendermint/tendermint/p2p/netaddress.go   |  107 +-
 .../tendermint/tendermint/p2p/node_info.go    |  139 ++
 .../tendermint/tendermint/p2p/peer.go         |  368 ++--
 .../tendermint/tendermint/p2p/peer_set.go     |   27 +-
 .../tendermint/p2p/{ => pex}/addrbook.go      |  606 +++----
 .../tendermint/tendermint/p2p/pex/file.go     |   83 +
 .../tendermint/p2p/pex/known_address.go       |  142 ++
 .../tendermint/tendermint/p2p/pex/params.go   |   55 +
 .../tendermint/p2p/pex/pex_reactor.go         |  652 +++++++
 .../tendermint/tendermint/p2p/pex_reactor.go  |  356 ----
 .../tendermint/tendermint/p2p/switch.go       |  637 +++----
 .../tendermint/tendermint/p2p/test_util.go    |  152 ++
 .../tendermint/tendermint/p2p/trust/metric.go |    4 +-
 .../tendermint/tendermint/p2p/trust/store.go  |    2 +-
 .../tendermint/tendermint/p2p/trust/ticker.go |    2 +-
 .../tendermint/tendermint/p2p/types.go        |   79 +-
 .../tendermint/tendermint/p2p/upnp/upnp.go    |    2 +-
 .../tendermint/tendermint/p2p/util.go         |   15 -
 .../tendermint/tendermint/proxy/client.go     |   10 +-
 .../tendermint/tendermint/rpc/core/abci.go    |    4 +-
 .../tendermint/rpc/core/consensus.go          |    5 +-
 .../tendermint/tendermint/rpc/core/doc.go     |    4 +-
 .../tendermint/tendermint/rpc/core/events.go  |   75 +-
 .../tendermint/tendermint/rpc/core/health.go  |   31 +
 .../tendermint/tendermint/rpc/core/mempool.go |    4 +-
 .../tendermint/tendermint/rpc/core/net.go     |   25 +-
 .../tendermint/tendermint/rpc/core/pipe.go    |   12 +-
 .../tendermint/tendermint/rpc/core/routes.go  |    2 +
 .../tendermint/tendermint/rpc/core/status.go  |   55 +-
 .../tendermint/tendermint/rpc/core/tx.go      |   14 +-
 .../tendermint/rpc/core/types/responses.go    |   44 +-
 .../tendermint/tendermint/rpc/grpc/api.go     |    2 +-
 .../tendermint/rpc/lib/client/http_client.go  |    3 +-
 .../tendermint/rpc/lib/client/ws_client.go    |   36 +-
 .../tendermint/rpc/lib/server/handlers.go     |   12 +-
 .../tendermint/rpc/lib/server/http_server.go  |   45 +-
 .../tendermint/rpc/lib/types/types.go         |    3 +-
 .../tendermint/tendermint/state/execution.go  |   86 +-
 .../tendermint/tendermint/state/state.go      |    6 +-
 .../tendermint/tendermint/state/store.go      |   47 +-
 .../tendermint/state/txindex/kv/kv.go         |  202 ++-
 .../tendermint/tendermint/types/block.go      |  141 +-
 .../tendermint/types/canonical_json.go        |   30 +-
 .../tendermint/types/event_buffer.go          |    6 +-
 .../tendermint/tendermint/types/event_bus.go  |   11 +-
 .../tendermint/tendermint/types/evidence.go   |   14 +-
 .../tendermint/tendermint/types/genesis.go    |   15 +-
 .../tendermint/tendermint/types/heartbeat.go  |   18 +-
 .../tendermint/tendermint/types/params.go     |   14 +-
 .../tendermint/tendermint/types/part_set.go   |   46 +-
 .../tendermint/types/priv_validator.go        |  286 ++-
 .../tendermint/types/priv_validator/json.go   |  197 ++
 .../types/priv_validator/sign_info.go         |  238 +++
 .../tendermint/types/priv_validator/socket.go |  564 ++++++
 .../types/priv_validator/socket_tcp.go        |   66 +
 .../types/priv_validator/unencrypted.go       |   66 +
 .../types/priv_validator/upgrade.go           |   59 +
 .../tendermint/tendermint/types/proposal.go   |   15 +-
 .../tendermint/tendermint/types/protobuf.go   |   20 +-
 .../tendermint/tendermint/types/results.go    |   30 +-
 .../tendermint/tendermint/types/services.go   |    2 +
 .../tendermint/tendermint/types/signable.go   |   26 +-
 .../tendermint/tendermint/types/test_util.go  |    2 +-
 .../tendermint/tendermint/types/tx.go         |   16 +-
 .../tendermint/tendermint/types/validator.go  |   33 +-
 .../tendermint/types/validator_set.go         |  167 +-
 .../tendermint/tendermint/types/vote.go       |   23 +-
 .../tendermint/tendermint/types/vote_set.go   |   67 +-
 .../tendermint/tendermint/version/version.go  |    4 +-
 .../tendermint/tendermint/wire/wire.go        |   60 +
 .../tendermint/tmlibs/autofile/autofile.go    |    7 +-
 .../tendermint/tmlibs/autofile/group.go       |   48 +-
 .../tendermint/tmlibs/clist/clist.go          |   79 +-
 .../tendermint/tmlibs/common/async.go         |  157 +-
 .../tendermint/tmlibs/common/bit_array.go     |   22 +-
 .../tendermint/tmlibs/common/bytes.go         |   62 +
 .../tendermint/tmlibs/common/colors.go        |    3 +-
 .../tendermint/tmlibs/common/errors.go        |  238 ++-
 .../tendermint/tmlibs/common/heap.go          |   56 +-
 .../tendermint/tmlibs/common/http.go          |  153 --
 .../github.com/tendermint/tmlibs/common/io.go |    3 +-
 .../tendermint/tmlibs/common/kvpair.go        |   67 +
 .../tendermint/tmlibs/common/nil.go           |   29 +
 .../github.com/tendermint/tmlibs/common/os.go |   58 +-
 .../tendermint/tmlibs/common/random.go        |  274 ++-
 .../tendermint/tmlibs/common/repeat_timer.go  |   24 +-
 .../tendermint/tmlibs/common/service.go       |   67 +-
 .../tendermint/tmlibs/common/string.go        |   42 +-
 .../tendermint/tmlibs/common/types.pb.go      |  101 ++
 .../tendermint/tmlibs/common/word.go          |    3 +-
 .../tendermint/tmlibs/db/c_level_db.go        |  187 +-
 vendor/github.com/tendermint/tmlibs/db/db.go  |   54 +-
 .../tendermint/tmlibs/db/debug_db.go          |  216 +++
 .../github.com/tendermint/tmlibs/db/fsdb.go   |  254 +++
 .../tendermint/tmlibs/db/go_level_db.go       |  222 ++-
 .../tendermint/tmlibs/db/mem_batch.go         |   71 +
 .../github.com/tendermint/tmlibs/db/mem_db.go |  229 ++-
 .../tendermint/tmlibs/db/prefix_db.go         |  263 +++
 .../github.com/tendermint/tmlibs/db/types.go  |  134 ++
 .../github.com/tendermint/tmlibs/db/util.go   |   54 +
 .../tendermint/tmlibs/merkle/simple_map.go    |   84 +
 .../tendermint/tmlibs/merkle/simple_proof.go  |  130 ++
 .../tendermint/tmlibs/merkle/simple_tree.go   |  230 +--
 .../tendermint/tmlibs/merkle/types.go         |   26 +-
 .../tendermint/tmlibs/pubsub/pubsub.go        |   35 +-
 vendor/github.com/ugorji/go/codec/0doc.go     |    2 +-
 vendor/github.com/ugorji/go/codec/binc.go     |   82 +-
 vendor/github.com/ugorji/go/codec/cbor.go     |   54 +-
 vendor/github.com/ugorji/go/codec/decode.go   |   42 +-
 vendor/github.com/ugorji/go/codec/encode.go   |  122 +-
 .../ugorji/go/codec/gen.generated.go          |   51 +-
 vendor/github.com/ugorji/go/codec/gen.go      |   42 +-
 vendor/github.com/ugorji/go/codec/helper.go   |   15 +-
 .../ugorji/go/codec/helper_unsafe.go          |   25 +-
 vendor/github.com/ugorji/go/codec/json.go     |   23 +-
 vendor/github.com/ugorji/go/codec/msgpack.go  |  112 +-
 vendor/github.com/ugorji/go/codec/noop.go     |  219 ---
 vendor/github.com/ugorji/go/codec/rpc.go      |   11 +-
 vendor/github.com/ugorji/go/codec/simple.go   |   20 +-
 vendor/golang.org/x/crypto/ed25519/ed25519.go |   13 +-
 .../internal/edwards25519/edwards25519.go     |   22 +
 .../x/crypto/ripemd160/ripemd160block.go      |   64 +-
 vendor/golang.org/x/net/http2/hpack/encode.go |    2 +-
 vendor/golang.org/x/net/http2/http2.go        |    2 +-
 vendor/golang.org/x/net/http2/server.go       |    8 +-
 vendor/golang.org/x/net/trace/trace.go        |   59 +-
 .../x/sys/unix/{flock.go => fcntl.go}         |    6 +
 ...ck_linux_32bit.go => fcntl_linux_32bit.go} |    0
 vendor/golang.org/x/sys/unix/syscall.go       |   11 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go   |   41 -
 .../golang.org/x/sys/unix/syscall_darwin.go   |    1 +
 .../x/sys/unix/syscall_dragonfly.go           |    2 +
 .../golang.org/x/sys/unix/syscall_freebsd.go  |   15 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go |   34 -
 .../x/sys/unix/syscall_linux_arm64.go         |    8 +-
 .../x/sys/unix/syscall_linux_gccgo.go         |   21 +
 .../x/sys/unix/syscall_linux_mips64x.go       |    8 +-
 .../x/sys/unix/syscall_linux_mipsx.go         |    1 +
 .../x/sys/unix/syscall_linux_ppc64x.go        |    1 +
 .../x/sys/unix/syscall_linux_sparc64.go       |    1 +
 .../golang.org/x/sys/unix/syscall_netbsd.go   |    4 +-
 .../golang.org/x/sys/unix/syscall_openbsd.go  |    2 +
 .../golang.org/x/sys/unix/syscall_solaris.go  |   10 +-
 .../x/sys/unix/syscall_solaris_amd64.go       |    5 -
 vendor/golang.org/x/sys/unix/syscall_unix.go  |   74 +-
 vendor/golang.org/x/sys/unix/types_netbsd.go  |   11 +
 .../x/sys/unix/zerrors_dragonfly_amd64.go     |    3 +
 .../x/sys/unix/zerrors_linux_386.go           |   39 +-
 .../x/sys/unix/zerrors_linux_amd64.go         |   39 +-
 .../x/sys/unix/zerrors_linux_arm.go           |   42 +-
 .../x/sys/unix/zerrors_linux_arm64.go         |   39 +-
 .../x/sys/unix/zerrors_linux_mips.go          |   39 +-
 .../x/sys/unix/zerrors_linux_mips64.go        |   39 +-
 .../x/sys/unix/zerrors_linux_mips64le.go      |   39 +-
 .../x/sys/unix/zerrors_linux_mipsle.go        |   39 +-
 .../x/sys/unix/zerrors_linux_ppc64.go         |   39 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go       |   39 +-
 .../x/sys/unix/zerrors_linux_s390x.go         |   39 +-
 .../x/sys/unix/zerrors_netbsd_386.go          |    1 +
 .../x/sys/unix/zerrors_netbsd_amd64.go        |    1 +
 .../x/sys/unix/zerrors_netbsd_arm.go          |    1 +
 .../x/sys/unix/zerrors_openbsd_386.go         |    1 +
 .../x/sys/unix/zerrors_openbsd_amd64.go       |    1 +
 .../x/sys/unix/zerrors_openbsd_arm.go         |    1 +
 .../x/sys/unix/zsyscall_darwin_386.go         |   15 +
 .../x/sys/unix/zsyscall_darwin_amd64.go       |   15 +
 .../x/sys/unix/zsyscall_darwin_arm.go         |   15 +
 .../x/sys/unix/zsyscall_darwin_arm64.go       |   15 +
 .../x/sys/unix/zsyscall_dragonfly_amd64.go    |   30 +
 .../x/sys/unix/zsyscall_freebsd_386.go        |   15 +
 .../x/sys/unix/zsyscall_freebsd_amd64.go      |   15 +
 .../x/sys/unix/zsyscall_freebsd_arm.go        |   15 +
 .../x/sys/unix/zsyscall_linux_arm64.go        |   10 +
 .../x/sys/unix/zsyscall_linux_mips.go         |   10 +
 .../x/sys/unix/zsyscall_linux_mips64.go       |   10 +
 .../x/sys/unix/zsyscall_linux_mips64le.go     |   10 +
 .../x/sys/unix/zsyscall_linux_mipsle.go       |   10 +
 .../x/sys/unix/zsyscall_linux_ppc64.go        |   10 +
 .../x/sys/unix/zsyscall_linux_ppc64le.go      |   10 +
 .../x/sys/unix/zsyscall_linux_sparc64.go      |   10 +
 .../x/sys/unix/zsyscall_netbsd_386.go         |   40 +
 .../x/sys/unix/zsyscall_netbsd_amd64.go       |   40 +
 .../x/sys/unix/zsyscall_netbsd_arm.go         |   40 +
 .../x/sys/unix/zsyscall_openbsd_386.go        |   30 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go      |   30 +
 .../x/sys/unix/zsyscall_openbsd_arm.go        |   30 +
 .../x/sys/unix/zsyscall_solaris_amd64.go      |   28 +
 .../x/sys/unix/zsysnum_linux_ppc64.go         |    3 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go       |    3 +
 .../x/sys/unix/zsysnum_linux_s390x.go         |   45 +-
 .../x/sys/unix/ztypes_darwin_386.go           |  112 +-
 .../x/sys/unix/ztypes_darwin_amd64.go         |  158 +-
 .../x/sys/unix/ztypes_darwin_arm.go           |  112 +-
 .../x/sys/unix/ztypes_darwin_arm64.go         |  158 +-
 .../x/sys/unix/ztypes_dragonfly_amd64.go      |  100 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go |  208 ++-
 .../x/sys/unix/ztypes_linux_amd64.go          |  208 ++-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go |  208 ++-
 .../x/sys/unix/ztypes_linux_arm64.go          |  208 ++-
 .../x/sys/unix/ztypes_linux_mips.go           |  208 ++-
 .../x/sys/unix/ztypes_linux_mips64.go         |  208 ++-
 .../x/sys/unix/ztypes_linux_mips64le.go       |  208 ++-
 .../x/sys/unix/ztypes_linux_mipsle.go         |  208 ++-
 .../x/sys/unix/ztypes_linux_ppc64.go          |  208 ++-
 .../x/sys/unix/ztypes_linux_ppc64le.go        |  208 ++-
 .../x/sys/unix/ztypes_linux_s390x.go          |  208 ++-
 .../x/sys/unix/ztypes_linux_sparc64.go        |  208 ++-
 .../x/sys/unix/ztypes_netbsd_386.go           |    9 +
 .../x/sys/unix/ztypes_netbsd_amd64.go         |    9 +
 .../x/sys/unix/ztypes_netbsd_arm.go           |    9 +
 .../x/sys/unix/ztypes_solaris_amd64.go        |  174 +-
 vendor/google.golang.org/grpc/backoff.go      |   14 +-
 vendor/google.golang.org/grpc/balancer.go     |    3 +-
 .../grpc/balancer/balancer.go                 |   35 +-
 .../grpc/balancer/base/balancer.go            |  209 ---
 .../grpc/balancer/base/base.go                |   52 -
 .../grpc/balancer/roundrobin/roundrobin.go    |   79 -
 .../grpc/balancer_conn_wrappers.go            |   66 +-
 .../grpc/balancer_v1_wrapper.go               |   42 +-
 vendor/google.golang.org/grpc/call.go         |  189 +-
 vendor/google.golang.org/grpc/clientconn.go   |  701 +++-----
 vendor/google.golang.org/grpc/codec.go        |   34 +-
 .../grpc/codes/code_string.go                 |   66 +-
 vendor/google.golang.org/grpc/codes/codes.go  |   43 +-
 .../grpc/credentials/credentials.go           |   22 +-
 .../grpc/encoding/encoding.go                 |   61 -
 vendor/google.golang.org/grpc/go16.go         |   98 -
 vendor/google.golang.org/grpc/go17.go         |   99 --
 vendor/google.golang.org/grpc/grpclb.go       |  820 ++++++---
 .../google.golang.org/grpc/grpclb_picker.go   |  159 --
 .../grpc/grpclb_remote_balancer.go            |  254 ---
 vendor/google.golang.org/grpc/grpclb_util.go  |   90 -
 .../grpc/internal/internal.go                 |    7 +
 vendor/google.golang.org/grpc/naming/go17.go  |    2 +-
 .../google.golang.org/grpc/picker_wrapper.go  |    4 +-
 vendor/google.golang.org/grpc/pickfirst.go    |   17 +-
 vendor/google.golang.org/grpc/proxy.go        |    3 +-
 .../grpc/resolver/dns/dns_resolver.go         |  377 ----
 .../grpc/resolver/dns/go17.go                 |   35 -
 .../grpc/resolver/dns/go18.go                 |   29 -
 .../grpc/resolver/passthrough/passthrough.go  |   57 -
 .../grpc/resolver/resolver.go                 |   24 +-
 .../grpc/resolver_conn_wrapper.go             |   64 +-
 vendor/google.golang.org/grpc/rpc_util.go     |  271 +--
 vendor/google.golang.org/grpc/server.go       |  288 +--
 .../google.golang.org/grpc/service_config.go  |  226 ---
 .../google.golang.org/grpc/status/status.go   |   17 +-
 vendor/google.golang.org/grpc/stream.go       |  182 +-
 .../grpc/transport/bdp_estimator.go           |    9 +-
 .../grpc/transport/control.go                 |  113 +-
 .../google.golang.org/grpc/transport/go16.go  |   51 -
 .../google.golang.org/grpc/transport/go17.go  |   52 -
 .../grpc/transport/handler_server.go          |    6 +-
 .../grpc/transport/http2_client.go            |  281 ++-
 .../grpc/transport/http2_server.go            |  151 +-
 .../grpc/transport/transport.go               |  192 +-
 .../go-playground/validator.v9/LICENSE        |   22 -
 .../go-playground/validator.v9/baked_in.go    | 1579 -----------------
 .../go-playground/validator.v9/cache.go       |  337 ----
 .../go-playground/validator.v9/doc.go         |  907 ----------
 .../go-playground/validator.v9/errors.go      |  272 ---
 .../go-playground/validator.v9/field_level.go |   69 -
 .../go-playground/validator.v9/regexes.go     |   67 -
 .../validator.v9/struct_level.go              |  175 --
 .../validator.v9/translations.go              |   11 -
 .../go-playground/validator.v9/util.go        |  257 ---
 .../go-playground/validator.v9/validator.go   |  475 -----
 .../validator.v9/validator_instance.go        |  586 ------
 vendor/gopkg.in/yaml.v2/NOTICE                |   13 +
 vendor/gopkg.in/yaml.v2/apic.go               |   55 +-
 vendor/gopkg.in/yaml.v2/decode.go             |  238 ++-
 vendor/gopkg.in/yaml.v2/emitterc.go           |    5 +-
 vendor/gopkg.in/yaml.v2/encode.go             |  136 +-
 vendor/gopkg.in/yaml.v2/readerc.go            |   20 +-
 vendor/gopkg.in/yaml.v2/resolve.go            |   80 +-
 vendor/gopkg.in/yaml.v2/scannerc.go           |   29 +-
 vendor/gopkg.in/yaml.v2/sorter.go             |    9 +
 vendor/gopkg.in/yaml.v2/writerc.go            |   65 +-
 vendor/gopkg.in/yaml.v2/yaml.go               |  123 +-
 vendor/gopkg.in/yaml.v2/yamlh.go              |   30 +-
 364 files changed, 16173 insertions(+), 17296 deletions(-)
 delete mode 100644 vendor/github.com/go-playground/locales/LICENSE
 delete mode 100644 vendor/github.com/go-playground/locales/currency/currency.go
 delete mode 100644 vendor/github.com/go-playground/locales/rules.go
 delete mode 100644 vendor/github.com/go-playground/universal-translator/LICENSE
 delete mode 100644 vendor/github.com/go-playground/universal-translator/errors.go
 delete mode 100644 vendor/github.com/go-playground/universal-translator/import_export.go
 delete mode 100644 vendor/github.com/go-playground/universal-translator/translator.go
 delete mode 100644 vendor/github.com/go-playground/universal-translator/universal_translator.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage.go
 delete mode 100644 vendor/github.com/tendermint/abci/example/dummy/dummy.go
 rename vendor/github.com/tendermint/abci/example/{dummy => kvstore}/helpers.go (56%)
 create mode 100644 vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
 rename vendor/github.com/tendermint/abci/example/{dummy/persistent_dummy.go => kvstore/persistent_kvstore.go} (57%)
 delete mode 100644 vendor/github.com/tendermint/go-wire/nowriter/tmlegacy/tm_encoder_legacy.go
 create mode 100644 vendor/github.com/tendermint/iavl/chunk.go
 create mode 100644 vendor/github.com/tendermint/iavl/logger.go
 create mode 100644 vendor/github.com/tendermint/iavl/serialize.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
 rename vendor/github.com/tendermint/tendermint/p2p/{ => conn}/conn_go110.go (86%)
 rename vendor/github.com/tendermint/tendermint/p2p/{ => conn}/conn_notgo110.go (94%)
 rename vendor/github.com/tendermint/tendermint/p2p/{ => conn}/connection.go (86%)
 rename vendor/github.com/tendermint/tendermint/p2p/{ => conn}/secret_connection.go (79%)
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/errors.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/key.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/node_info.go
 rename vendor/github.com/tendermint/tendermint/p2p/{ => pex}/addrbook.go (56%)
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/pex/file.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/pex/known_address.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/pex/params.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
 delete mode 100644 vendor/github.com/tendermint/tendermint/p2p/pex_reactor.go
 create mode 100644 vendor/github.com/tendermint/tendermint/p2p/test_util.go
 delete mode 100644 vendor/github.com/tendermint/tendermint/p2p/util.go
 create mode 100644 vendor/github.com/tendermint/tendermint/rpc/core/health.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/json.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/sign_info.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/socket.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/socket_tcp.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/unencrypted.go
 create mode 100644 vendor/github.com/tendermint/tendermint/types/priv_validator/upgrade.go
 create mode 100644 vendor/github.com/tendermint/tendermint/wire/wire.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/common/bytes.go
 delete mode 100644 vendor/github.com/tendermint/tmlibs/common/http.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/common/kvpair.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/common/nil.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/common/types.pb.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/debug_db.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/fsdb.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/mem_batch.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/prefix_db.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/types.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/db/util.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
 create mode 100644 vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
 delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go
 rename vendor/golang.org/x/sys/unix/{flock.go => fcntl.go} (74%)
 rename vendor/golang.org/x/sys/unix/{flock_linux_32bit.go => fcntl_linux_32bit.go} (100%)
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go
 delete mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go
 delete mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go
 delete mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
 delete mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go
 delete mode 100644 vendor/google.golang.org/grpc/go16.go
 delete mode 100644 vendor/google.golang.org/grpc/go17.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb_picker.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb_remote_balancer.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb_util.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
 delete mode 100644 vendor/google.golang.org/grpc/service_config.go
 delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go
 delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/LICENSE
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/baked_in.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/cache.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/doc.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/errors.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/field_level.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/regexes.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/struct_level.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/translations.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/util.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/validator.go
 delete mode 100644 vendor/gopkg.in/go-playground/validator.v9/validator_instance.go
 create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE

diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE
deleted file mode 100644
index 75854ac4..00000000
--- a/vendor/github.com/go-playground/locales/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Go Playground
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go
deleted file mode 100644
index 6e75ec58..00000000
--- a/vendor/github.com/go-playground/locales/currency/currency.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package currency
-
-// Type is the currency type associated with the locales currency enum
-type Type int
-
-// locale currencies
-const (
-	ADP Type = iota
-	AED
-	AFA
-	AFN
-	ALK
-	ALL
-	AMD
-	ANG
-	AOA
-	AOK
-	AON
-	AOR
-	ARA
-	ARL
-	ARM
-	ARP
-	ARS
-	ATS
-	AUD
-	AWG
-	AZM
-	AZN
-	BAD
-	BAM
-	BAN
-	BBD
-	BDT
-	BEC
-	BEF
-	BEL
-	BGL
-	BGM
-	BGN
-	BGO
-	BHD
-	BIF
-	BMD
-	BND
-	BOB
-	BOL
-	BOP
-	BOV
-	BRB
-	BRC
-	BRE
-	BRL
-	BRN
-	BRR
-	BRZ
-	BSD
-	BTN
-	BUK
-	BWP
-	BYB
-	BYN
-	BYR
-	BZD
-	CAD
-	CDF
-	CHE
-	CHF
-	CHW
-	CLE
-	CLF
-	CLP
-	CNX
-	CNY
-	COP
-	COU
-	CRC
-	CSD
-	CSK
-	CUC
-	CUP
-	CVE
-	CYP
-	CZK
-	DDM
-	DEM
-	DJF
-	DKK
-	DOP
-	DZD
-	ECS
-	ECV
-	EEK
-	EGP
-	ERN
-	ESA
-	ESB
-	ESP
-	ETB
-	EUR
-	FIM
-	FJD
-	FKP
-	FRF
-	GBP
-	GEK
-	GEL
-	GHC
-	GHS
-	GIP
-	GMD
-	GNF
-	GNS
-	GQE
-	GRD
-	GTQ
-	GWE
-	GWP
-	GYD
-	HKD
-	HNL
-	HRD
-	HRK
-	HTG
-	HUF
-	IDR
-	IEP
-	ILP
-	ILR
-	ILS
-	INR
-	IQD
-	IRR
-	ISJ
-	ISK
-	ITL
-	JMD
-	JOD
-	JPY
-	KES
-	KGS
-	KHR
-	KMF
-	KPW
-	KRH
-	KRO
-	KRW
-	KWD
-	KYD
-	KZT
-	LAK
-	LBP
-	LKR
-	LRD
-	LSL
-	LTL
-	LTT
-	LUC
-	LUF
-	LUL
-	LVL
-	LVR
-	LYD
-	MAD
-	MAF
-	MCF
-	MDC
-	MDL
-	MGA
-	MGF
-	MKD
-	MKN
-	MLF
-	MMK
-	MNT
-	MOP
-	MRO
-	MTL
-	MTP
-	MUR
-	MVP
-	MVR
-	MWK
-	MXN
-	MXP
-	MXV
-	MYR
-	MZE
-	MZM
-	MZN
-	NAD
-	NGN
-	NIC
-	NIO
-	NLG
-	NOK
-	NPR
-	NZD
-	OMR
-	PAB
-	PEI
-	PEN
-	PES
-	PGK
-	PHP
-	PKR
-	PLN
-	PLZ
-	PTE
-	PYG
-	QAR
-	RHD
-	ROL
-	RON
-	RSD
-	RUB
-	RUR
-	RWF
-	SAR
-	SBD
-	SCR
-	SDD
-	SDG
-	SDP
-	SEK
-	SGD
-	SHP
-	SIT
-	SKK
-	SLL
-	SOS
-	SRD
-	SRG
-	SSP
-	STD
-	SUR
-	SVC
-	SYP
-	SZL
-	THB
-	TJR
-	TJS
-	TMM
-	TMT
-	TND
-	TOP
-	TPE
-	TRL
-	TRY
-	TTD
-	TWD
-	TZS
-	UAH
-	UAK
-	UGS
-	UGX
-	USD
-	USN
-	USS
-	UYI
-	UYP
-	UYU
-	UZS
-	VEB
-	VEF
-	VND
-	VNN
-	VUV
-	WST
-	XAF
-	XAG
-	XAU
-	XBA
-	XBB
-	XBC
-	XBD
-	XCD
-	XDR
-	XEU
-	XFO
-	XFU
-	XOF
-	XPD
-	XPF
-	XPT
-	XRE
-	XSU
-	XTS
-	XUA
-	XXX
-	YDD
-	YER
-	YUD
-	YUM
-	YUN
-	YUR
-	ZAL
-	ZAR
-	ZMK
-	ZMW
-	ZRN
-	ZRZ
-	ZWD
-	ZWL
-	ZWR
-)
diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go
deleted file mode 100644
index 92029001..00000000
--- a/vendor/github.com/go-playground/locales/rules.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package locales
-
-import (
-	"strconv"
-	"time"
-
-	"github.com/go-playground/locales/currency"
-)
-
-// // ErrBadNumberValue is returned when the number passed for
-// // plural rule determination cannot be parsed
-// type ErrBadNumberValue struct {
-// 	NumberValue string
-// 	InnerError  error
-// }
-
-// // Error returns ErrBadNumberValue error string
-// func (e *ErrBadNumberValue) Error() string {
-// 	return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
-// }
-
-// var _ error = new(ErrBadNumberValue)
-
-// PluralRule denotes the type of plural rules
-type PluralRule int
-
-// PluralRule's
-const (
-	PluralRuleUnknown PluralRule = iota
-	PluralRuleZero               // zero
-	PluralRuleOne                // one - singular
-	PluralRuleTwo                // two - dual
-	PluralRuleFew                // few - paucal
-	PluralRuleMany               // many - also used for fractions if they have a separate class
-	PluralRuleOther              // other - required—general plural form—also used if the language only has a single form
-)
-
-const (
-	pluralsString = "UnknownZeroOneTwoFewManyOther"
-)
-
-// Translator encapsulates an instance of a locale
-// NOTE: some values are returned as a []byte just in case the caller
-// wishes to add more and can help avoid allocations; otherwise just cast as string
-type Translator interface {
-
-	// The following Functions are for overriding, debugging or developing
-	// with a Translator Locale
-
-	// Locale returns the string value of the translator
-	Locale() string
-
-	// returns an array of cardinal plural rules associated
-	// with this translator
-	PluralsCardinal() []PluralRule
-
-	// returns an array of ordinal plural rules associated
-	// with this translator
-	PluralsOrdinal() []PluralRule
-
-	// returns an array of range plural rules associated
-	// with this translator
-	PluralsRange() []PluralRule
-
-	// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
-	CardinalPluralRule(num float64, v uint64) PluralRule
-
-	// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
-	OrdinalPluralRule(num float64, v uint64) PluralRule
-
-	// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
-	RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
-
-	// returns the locales abbreviated month given the 'month' provided
-	MonthAbbreviated(month time.Month) string
-
-	// returns the locales abbreviated months
-	MonthsAbbreviated() []string
-
-	// returns the locales narrow month given the 'month' provided
-	MonthNarrow(month time.Month) string
-
-	// returns the locales narrow months
-	MonthsNarrow() []string
-
-	// returns the locales wide month given the 'month' provided
-	MonthWide(month time.Month) string
-
-	// returns the locales wide months
-	MonthsWide() []string
-
-	// returns the locales abbreviated weekday given the 'weekday' provided
-	WeekdayAbbreviated(weekday time.Weekday) string
-
-	// returns the locales abbreviated weekdays
-	WeekdaysAbbreviated() []string
-
-	// returns the locales narrow weekday given the 'weekday' provided
-	WeekdayNarrow(weekday time.Weekday) string
-
-	// WeekdaysNarrowreturns the locales narrow weekdays
-	WeekdaysNarrow() []string
-
-	// returns the locales short weekday given the 'weekday' provided
-	WeekdayShort(weekday time.Weekday) string
-
-	// returns the locales short weekdays
-	WeekdaysShort() []string
-
-	// returns the locales wide weekday given the 'weekday' provided
-	WeekdayWide(weekday time.Weekday) string
-
-	// returns the locales wide weekdays
-	WeekdaysWide() []string
-
-	// The following Functions are common Formatting functionsfor the Translator's Locale
-
-	// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
-	FmtNumber(num float64, v uint64) string
-
-	// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
-	// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
-	FmtPercent(num float64, v uint64) string
-
-	// returns the currency representation of 'num' with digits/precision of 'v' for locale
-	FmtCurrency(num float64, v uint64, currency currency.Type) string
-
-	// returns the currency representation of 'num' with digits/precision of 'v' for locale
-	// in accounting notation.
-	FmtAccounting(num float64, v uint64, currency currency.Type) string
-
-	// returns the short date representation of 't' for locale
-	FmtDateShort(t time.Time) string
-
-	// returns the medium date representation of 't' for locale
-	FmtDateMedium(t time.Time) string
-
-	//  returns the long date representation of 't' for locale
-	FmtDateLong(t time.Time) string
-
-	// returns the full date representation of 't' for locale
-	FmtDateFull(t time.Time) string
-
-	// returns the short time representation of 't' for locale
-	FmtTimeShort(t time.Time) string
-
-	// returns the medium time representation of 't' for locale
-	FmtTimeMedium(t time.Time) string
-
-	// returns the long time representation of 't' for locale
-	FmtTimeLong(t time.Time) string
-
-	// returns the full time representation of 't' for locale
-	FmtTimeFull(t time.Time) string
-}
-
-// String returns the string value  of PluralRule
-func (p PluralRule) String() string {
-
-	switch p {
-	case PluralRuleZero:
-		return pluralsString[7:11]
-	case PluralRuleOne:
-		return pluralsString[11:14]
-	case PluralRuleTwo:
-		return pluralsString[14:17]
-	case PluralRuleFew:
-		return pluralsString[17:20]
-	case PluralRuleMany:
-		return pluralsString[20:24]
-	case PluralRuleOther:
-		return pluralsString[24:]
-	default:
-		return pluralsString[:7]
-	}
-}
-
-//
-// Precision Notes:
-//
-// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
-//
-// 	v := float64(3.141)
-// 	i := float64(int64(v))
-//
-// 	fmt.Println(v - i)
-//
-// 	or
-//
-// 	s := strconv.FormatFloat(v-i, 'f', -1, 64)
-// 	fmt.Println(s)
-//
-// these will not print what you'd expect: 0.14100000000000001
-// and so this library requires a precision to be specified, or
-// inaccurate plural rules could be applied.
-//
-//
-//
-// n - absolute value of the source number (integer and decimals).
-// i - integer digits of n.
-// v - number of visible fraction digits in n, with trailing zeros.
-// w - number of visible fraction digits in n, without trailing zeros.
-// f - visible fractional digits in n, with trailing zeros.
-// t - visible fractional digits in n, without trailing zeros.
-//
-//
-// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above.
-//
-// n := math.Abs(num)
-// i := int64(n)
-// v := v
-//
-//
-// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64)  // then parse backwards on string until no more zero's....
-// f := strconv.FormatFloat(n, 'f', int(v), 64) 			  // then turn everything after decimal into an int64
-// t := strconv.FormatFloat(n, 'f', int(v), 64) 			  // then parse backwards on string until no more zero's....
-//
-//
-//
-// General Inclusion Rules
-// - v will always be available inherently
-// - all require n
-// - w requires i
-//
-
-// W returns the number of visible fraction digits in N, without trailing zeros.
-func W(n float64, v uint64) (w int64) {
-
-	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// with either be '0' or '0.xxxx', so if 1 then w will be zero
-	// otherwise need to parse
-	if len(s) != 1 {
-
-		s = s[2:]
-		end := len(s) + 1
-
-		for i := end; i >= 0; i-- {
-			if s[i] != '0' {
-				end = i + 1
-				break
-			}
-		}
-
-		w = int64(len(s[:end]))
-	}
-
-	return
-}
-
-// F returns the visible fractional digits in N, with trailing zeros.
-func F(n float64, v uint64) (f int64) {
-
-	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// with either be '0' or '0.xxxx', so if 1 then f will be zero
-	// otherwise need to parse
-	if len(s) != 1 {
-
-		// ignoring error, because it can't fail as we generated
-		// the string internally from a real number
-		f, _ = strconv.ParseInt(s[2:], 10, 64)
-	}
-
-	return
-}
-
-// T returns the visible fractional digits in N, without trailing zeros.
-func T(n float64, v uint64) (t int64) {
-
-	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// with either be '0' or '0.xxxx', so if 1 then t will be zero
-	// otherwise need to parse
-	if len(s) != 1 {
-
-		s = s[2:]
-		end := len(s) + 1
-
-		for i := end; i >= 0; i-- {
-			if s[i] != '0' {
-				end = i + 1
-				break
-			}
-		}
-
-		// ignoring error, because it can't fail as we generated
-		// the string internally from a real number
-		t, _ = strconv.ParseInt(s[:end], 10, 64)
-	}
-
-	return
-}
diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE
deleted file mode 100644
index 8d8aba15..00000000
--- a/vendor/github.com/go-playground/universal-translator/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Go Playground
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go
deleted file mode 100644
index 38b163b6..00000000
--- a/vendor/github.com/go-playground/universal-translator/errors.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package ut
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/go-playground/locales"
-)
-
-var (
-	// ErrUnknowTranslation indicates the translation could not be found
-	ErrUnknowTranslation = errors.New("Unknown Translation")
-)
-
-var _ error = new(ErrConflictingTranslation)
-var _ error = new(ErrRangeTranslation)
-var _ error = new(ErrOrdinalTranslation)
-var _ error = new(ErrCardinalTranslation)
-var _ error = new(ErrMissingPluralTranslation)
-var _ error = new(ErrExistingTranslator)
-
-// ErrExistingTranslator is the error representing a conflicting translator
-type ErrExistingTranslator struct {
-	locale string
-}
-
-// Error returns ErrExistingTranslator's internal error text
-func (e *ErrExistingTranslator) Error() string {
-	return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
-}
-
-// ErrConflictingTranslation is the error representing a conflicting translation
-type ErrConflictingTranslation struct {
-	locale string
-	key    interface{}
-	rule   locales.PluralRule
-	text   string
-}
-
-// Error returns ErrConflictingTranslation's internal error text
-func (e *ErrConflictingTranslation) Error() string {
-
-	if _, ok := e.key.(string); !ok {
-		return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
-	}
-
-	return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
-}
-
-// ErrRangeTranslation is the error representing a range translation error
-type ErrRangeTranslation struct {
-	text string
-}
-
-// Error returns ErrRangeTranslation's internal error text
-func (e *ErrRangeTranslation) Error() string {
-	return e.text
-}
-
-// ErrOrdinalTranslation is the error representing an ordinal translation error
-type ErrOrdinalTranslation struct {
-	text string
-}
-
-// Error returns ErrOrdinalTranslation's internal error text
-func (e *ErrOrdinalTranslation) Error() string {
-	return e.text
-}
-
-// ErrCardinalTranslation is the error representing a cardinal translation error
-type ErrCardinalTranslation struct {
-	text string
-}
-
-// Error returns ErrCardinalTranslation's internal error text
-func (e *ErrCardinalTranslation) Error() string {
-	return e.text
-}
-
-// ErrMissingPluralTranslation is the error signifying a missing translation given
-// the locales plural rules.
-type ErrMissingPluralTranslation struct {
-	locale          string
-	key             interface{}
-	rule            locales.PluralRule
-	translationType string
-}
-
-// Error returns ErrMissingPluralTranslation's internal error text
-func (e *ErrMissingPluralTranslation) Error() string {
-
-	if _, ok := e.key.(string); !ok {
-		return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
-	}
-
-	return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
-}
-
-// ErrMissingBracket is the error representing a missing bracket in a translation
-// eg. This is a {0 <-- missing ending '}'
-type ErrMissingBracket struct {
-	locale string
-	key    interface{}
-	text   string
-}
-
-// Error returns ErrMissingBracket error message
-func (e *ErrMissingBracket) Error() string {
-	return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
-}
-
-// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
-// eg. This is a {must-be-int}
-type ErrBadParamSyntax struct {
-	locale string
-	param  string
-	key    interface{}
-	text   string
-}
-
-// Error returns ErrBadParamSyntax error message
-func (e *ErrBadParamSyntax) Error() string {
-	return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
-}
-
-// import/export errors
-
-// ErrMissingLocale is the error representing an expected locale that could
-// not be found aka locale not registered with the UniversalTranslator Instance
-type ErrMissingLocale struct {
-	locale string
-}
-
-// Error returns ErrMissingLocale's internal error text
-func (e *ErrMissingLocale) Error() string {
-	return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
-}
-
-// ErrBadPluralDefinition is the error representing an incorrect plural definition
-// usually found within translations defined within files during the import process.
-type ErrBadPluralDefinition struct {
-	tl translation
-}
-
-// Error returns ErrBadPluralDefinition's internal error text
-func (e *ErrBadPluralDefinition) Error() string {
-	return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
-}
diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go
deleted file mode 100644
index 7bd76f26..00000000
--- a/vendor/github.com/go-playground/universal-translator/import_export.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package ut
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"io"
-
-	"github.com/go-playground/locales"
-)
-
-type translation struct {
-	Locale           string      `json:"locale"`
-	Key              interface{} `json:"key"` // either string or integer
-	Translation      string      `json:"trans"`
-	PluralType       string      `json:"type,omitempty"`
-	PluralRule       string      `json:"rule,omitempty"`
-	OverrideExisting bool        `json:"override,omitempty"`
-}
-
-const (
-	cardinalType = "Cardinal"
-	ordinalType  = "Ordinal"
-	rangeType    = "Range"
-)
-
-// ImportExportFormat is the format of the file import or export
-type ImportExportFormat uint8
-
-// supported Export Formats
-const (
-	FormatJSON ImportExportFormat = iota
-)
-
-// Export writes the translations out to a file on disk.
-//
-// NOTE: this currently only works with string or int translations keys.
-func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
-
-	_, err := os.Stat(dirname)
-	fmt.Println(dirname, err, os.IsNotExist(err))
-	if err != nil {
-
-		if !os.IsNotExist(err) {
-			return err
-		}
-
-		if err = os.MkdirAll(dirname, 0744); err != nil {
-			return err
-		}
-	}
-
-	// build up translations
-	var trans []translation
-	var b []byte
-	var ext string
-
-	for _, locale := range t.translators {
-
-		for k, v := range locale.(*translator).translations {
-			trans = append(trans, translation{
-				Locale:      locale.Locale(),
-				Key:         k,
-				Translation: v.text,
-			})
-		}
-
-		for k, pluralTrans := range locale.(*translator).cardinalTanslations {
-
-			for i, plural := range pluralTrans {
-
-				// leave enough for all plural rules
-				// but not all are set for all languages.
-				if plural == nil {
-					continue
-				}
-
-				trans = append(trans, translation{
-					Locale:      locale.Locale(),
-					Key:         k.(string),
-					Translation: plural.text,
-					PluralType:  cardinalType,
-					PluralRule:  locales.PluralRule(i).String(),
-				})
-			}
-		}
-
-		for k, pluralTrans := range locale.(*translator).ordinalTanslations {
-
-			for i, plural := range pluralTrans {
-
-				// leave enough for all plural rules
-				// but not all are set for all languages.
-				if plural == nil {
-					continue
-				}
-
-				trans = append(trans, translation{
-					Locale:      locale.Locale(),
-					Key:         k.(string),
-					Translation: plural.text,
-					PluralType:  ordinalType,
-					PluralRule:  locales.PluralRule(i).String(),
-				})
-			}
-		}
-
-		for k, pluralTrans := range locale.(*translator).rangeTanslations {
-
-			for i, plural := range pluralTrans {
-
-				// leave enough for all plural rules
-				// but not all are set for all languages.
-				if plural == nil {
-					continue
-				}
-
-				trans = append(trans, translation{
-					Locale:      locale.Locale(),
-					Key:         k.(string),
-					Translation: plural.text,
-					PluralType:  rangeType,
-					PluralRule:  locales.PluralRule(i).String(),
-				})
-			}
-		}
-
-		switch format {
-		case FormatJSON:
-			b, err = json.MarshalIndent(trans, "", "    ")
-			ext = ".json"
-		}
-
-		if err != nil {
-			return err
-		}
-
-		err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
-		if err != nil {
-			return err
-		}
-
-		trans = trans[0:0]
-	}
-
-	return nil
-}
-
-// Import reads the translations out of a file or directory on disk.
-//
-// NOTE: this currently only works with string or int translations keys.
-func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
-
-	fi, err := os.Stat(dirnameOrFilename)
-	if err != nil {
-		return err
-	}
-
-	processFn := func(filename string) error {
-
-		f, err := os.Open(filename)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-
-		return t.ImportByReader(format, f)
-	}
-
-	if !fi.IsDir() {
-		return processFn(dirnameOrFilename)
-	}
-
-	// recursively go through directory
-	walker := func(path string, info os.FileInfo, err error) error {
-
-		if info.IsDir() {
-			return nil
-		}
-
-		switch format {
-		case FormatJSON:
-			// skip non JSON files
-			if filepath.Ext(info.Name()) != ".json" {
-				return nil
-			}
-		}
-
-		return processFn(path)
-	}
-
-	return filepath.Walk(dirnameOrFilename, walker)
-}
-
-// ImportByReader imports the the translations found within the contents read from the supplied reader.
-//
-// NOTE: generally used when assets have been embedded into the binary and are already in memory.
-func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
-
-	b, err := ioutil.ReadAll(reader)
-	if err != nil {
-		return err
-	}
-
-	var trans []translation
-
-	switch format {
-	case FormatJSON:
-		err = json.Unmarshal(b, &trans)
-	}
-
-	if err != nil {
-		return err
-	}
-
-	for _, tl := range trans {
-
-		locale, found := t.FindTranslator(tl.Locale)
-		if !found {
-			return &ErrMissingLocale{locale: tl.Locale}
-		}
-
-		pr := stringToPR(tl.PluralRule)
-
-		if pr == locales.PluralRuleUnknown {
-
-			err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
-			if err != nil {
-				return err
-			}
-
-			continue
-		}
-
-		switch tl.PluralType {
-		case cardinalType:
-			err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
-		case ordinalType:
-			err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
-		case rangeType:
-			err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
-		default:
-			return &ErrBadPluralDefinition{tl: tl}
-		}
-
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func stringToPR(s string) locales.PluralRule {
-
-	switch s {
-	case "One":
-		return locales.PluralRuleOne
-	case "Two":
-		return locales.PluralRuleTwo
-	case "Few":
-		return locales.PluralRuleFew
-	case "Many":
-		return locales.PluralRuleMany
-	case "Other":
-		return locales.PluralRuleOther
-	default:
-		return locales.PluralRuleUnknown
-	}
-
-}
diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go
deleted file mode 100644
index cfafce8a..00000000
--- a/vendor/github.com/go-playground/universal-translator/translator.go
+++ /dev/null
@@ -1,420 +0,0 @@
-package ut
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/go-playground/locales"
-)
-
-const (
-	paramZero          = "{0}"
-	paramOne           = "{1}"
-	unknownTranslation = ""
-)
-
-// Translator is universal translators
-// translator instance which is a thin wrapper
-// around locales.Translator instance providing
-// some extra functionality
-type Translator interface {
-	locales.Translator
-
-	// adds a normal translation for a particular language/locale
-	// {#} is the only replacement type accepted and are ad infinitum
-	// eg. one: '{0} day left' other: '{0} days left'
-	Add(key interface{}, text string, override bool) error
-
-	// adds a cardinal plural translation for a particular language/locale
-	// {0} is the only replacement type accepted and only one variable is accepted as
-	// multiple cannot be used for a plural rule determination, unless it is a range;
-	// see AddRange below.
-	// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
-	AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
-
-	// adds an ordinal plural translation for a particular language/locale
-	// {0} is the only replacement type accepted and only one variable is accepted as
-	// multiple cannot be used for a plural rule determination, unless it is a range;
-	// see AddRange below.
-	// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
-	// - 1st, 2nd, 3rd...
-	AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
-
-	// adds a range plural translation for a particular language/locale
-	// {0} and {1} are the only replacement types accepted and only these are accepted.
-	// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
-	AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
-
-	// creates the translation for the locale given the 'key' and params passed in
-	T(key interface{}, params ...string) (string, error)
-
-	// creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
-	//  and param passed in
-	C(key interface{}, num float64, digits uint64, param string) (string, error)
-
-	// creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
-	// and param passed in
-	O(key interface{}, num float64, digits uint64, param string) (string, error)
-
-	//  creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
-	//  'digit2' arguments and 'param1' and 'param2' passed in
-	R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
-
-	// VerifyTranslations checks to ensures that no plural rules have been
-	// missed within the translations.
-	VerifyTranslations() error
-}
-
-var _ Translator = new(translator)
-var _ locales.Translator = new(translator)
-
-type translator struct {
-	locales.Translator
-	translations        map[interface{}]*transText
-	cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
-	ordinalTanslations  map[interface{}][]*transText
-	rangeTanslations    map[interface{}][]*transText
-}
-
-type transText struct {
-	text    string
-	indexes []int
-}
-
-func newTranslator(trans locales.Translator) Translator {
-	return &translator{
-		Translator:          trans,
-		translations:        make(map[interface{}]*transText), // translation text broken up by byte index
-		cardinalTanslations: make(map[interface{}][]*transText),
-		ordinalTanslations:  make(map[interface{}][]*transText),
-		rangeTanslations:    make(map[interface{}][]*transText),
-	}
-}
-
-// Add adds a normal translation for a particular language/locale
-// {#} is the only replacement type accepted and are ad infinitum
-// eg. one: '{0} day left' other: '{0} days left'
-func (t *translator) Add(key interface{}, text string, override bool) error {
-
-	if _, ok := t.translations[key]; ok && !override {
-		return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
-	}
-
-	lb := strings.Count(text, "{")
-	rb := strings.Count(text, "}")
-
-	if lb != rb {
-		return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
-	}
-
-	trans := &transText{
-		text: text,
-	}
-
-	var idx int
-
-	for i := 0; i < lb; i++ {
-		s := "{" + strconv.Itoa(i) + "}"
-		idx = strings.Index(text, s)
-		if idx == -1 {
-			return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
-		}
-
-		trans.indexes = append(trans.indexes, idx)
-		trans.indexes = append(trans.indexes, idx+len(s))
-	}
-
-	t.translations[key] = trans
-
-	return nil
-}
-
-// AddCardinal adds a cardinal plural translation for a particular language/locale
-// {0} is the only replacement type accepted and only one variable is accepted as
-// multiple cannot be used for a plural rule determination, unless it is a range;
-// see AddRange below.
-// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
-func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
-	var verified bool
-
-	// verify plural rule exists for locale
-	for _, pr := range t.PluralsCardinal() {
-		if pr == rule {
-			verified = true
-			break
-		}
-	}
-
-	if !verified {
-		return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
-	}
-
-	tarr, ok := t.cardinalTanslations[key]
-	if ok {
-		// verify not adding a conflicting record
-		if len(tarr) > 0 && tarr[rule] != nil && !override {
-			return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
-		}
-
-	} else {
-		tarr = make([]*transText, 7, 7)
-		t.cardinalTanslations[key] = tarr
-	}
-
-	trans := &transText{
-		text:    text,
-		indexes: make([]int, 2, 2),
-	}
-
-	tarr[rule] = trans
-
-	idx := strings.Index(text, paramZero)
-	if idx == -1 {
-		tarr[rule] = nil
-		return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
-	}
-
-	trans.indexes[0] = idx
-	trans.indexes[1] = idx + len(paramZero)
-
-	return nil
-}
-
-// AddOrdinal adds an ordinal plural translation for a particular language/locale
-// {0} is the only replacement type accepted and only one variable is accepted as
-// multiple cannot be used for a plural rule determination, unless it is a range;
-// see AddRange below.
-// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
-func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
-	var verified bool
-
-	// verify plural rule exists for locale
-	for _, pr := range t.PluralsOrdinal() {
-		if pr == rule {
-			verified = true
-			break
-		}
-	}
-
-	if !verified {
-		return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
-	}
-
-	tarr, ok := t.ordinalTanslations[key]
-	if ok {
-		// verify not adding a conflicting record
-		if len(tarr) > 0 && tarr[rule] != nil && !override {
-			return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
-		}
-
-	} else {
-		tarr = make([]*transText, 7, 7)
-		t.ordinalTanslations[key] = tarr
-	}
-
-	trans := &transText{
-		text:    text,
-		indexes: make([]int, 2, 2),
-	}
-
-	tarr[rule] = trans
-
-	idx := strings.Index(text, paramZero)
-	if idx == -1 {
-		tarr[rule] = nil
-		return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
-	}
-
-	trans.indexes[0] = idx
-	trans.indexes[1] = idx + len(paramZero)
-
-	return nil
-}
-
-// AddRange adds a range plural translation for a particular language/locale
-// {0} and {1} are the only replacement types accepted and only these are accepted.
-// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
-func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
-	var verified bool
-
-	// verify plural rule exists for locale
-	for _, pr := range t.PluralsRange() {
-		if pr == rule {
-			verified = true
-			break
-		}
-	}
-
-	if !verified {
-		return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
-	}
-
-	tarr, ok := t.rangeTanslations[key]
-	if ok {
-		// verify not adding a conflicting record
-		if len(tarr) > 0 && tarr[rule] != nil && !override {
-			return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
-		}
-
-	} else {
-		tarr = make([]*transText, 7, 7)
-		t.rangeTanslations[key] = tarr
-	}
-
-	trans := &transText{
-		text:    text,
-		indexes: make([]int, 4, 4),
-	}
-
-	tarr[rule] = trans
-
-	idx := strings.Index(text, paramZero)
-	if idx == -1 {
-		tarr[rule] = nil
-		return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
-	}
-
-	trans.indexes[0] = idx
-	trans.indexes[1] = idx + len(paramZero)
-
-	idx = strings.Index(text, paramOne)
-	if idx == -1 {
-		tarr[rule] = nil
-		return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
-	}
-
-	trans.indexes[2] = idx
-	trans.indexes[3] = idx + len(paramOne)
-
-	return nil
-}
-
-// T creates the translation for the locale given the 'key' and params passed in
-func (t *translator) T(key interface{}, params ...string) (string, error) {
-
-	trans, ok := t.translations[key]
-	if !ok {
-		return unknownTranslation, ErrUnknowTranslation
-	}
-
-	b := make([]byte, 0, 64)
-
-	var start, end, count int
-
-	for i := 0; i < len(trans.indexes); i++ {
-		end = trans.indexes[i]
-		b = append(b, trans.text[start:end]...)
-		b = append(b, params[count]...)
-		i++
-		start = trans.indexes[i]
-		count++
-	}
-
-	b = append(b, trans.text[start:]...)
-
-	return string(b), nil
-}
-
-// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
-func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
-
-	tarr, ok := t.cardinalTanslations[key]
-	if !ok {
-		return unknownTranslation, ErrUnknowTranslation
-	}
-
-	rule := t.CardinalPluralRule(num, digits)
-
-	trans := tarr[rule]
-
-	b := make([]byte, 0, 64)
-	b = append(b, trans.text[:trans.indexes[0]]...)
-	b = append(b, param...)
-	b = append(b, trans.text[trans.indexes[1]:]...)
-
-	return string(b), nil
-}
-
-// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
-func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
-
-	tarr, ok := t.ordinalTanslations[key]
-	if !ok {
-		return unknownTranslation, ErrUnknowTranslation
-	}
-
-	rule := t.OrdinalPluralRule(num, digits)
-
-	trans := tarr[rule]
-
-	b := make([]byte, 0, 64)
-	b = append(b, trans.text[:trans.indexes[0]]...)
-	b = append(b, param...)
-	b = append(b, trans.text[trans.indexes[1]:]...)
-
-	return string(b), nil
-}
-
-// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
-// and 'param1' and 'param2' passed in
-func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
-
-	tarr, ok := t.rangeTanslations[key]
-	if !ok {
-		return unknownTranslation, ErrUnknowTranslation
-	}
-
-	rule := t.RangePluralRule(num1, digits1, num2, digits2)
-
-	trans := tarr[rule]
-
-	b := make([]byte, 0, 64)
-	b = append(b, trans.text[:trans.indexes[0]]...)
-	b = append(b, param1...)
-	b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
-	b = append(b, param2...)
-	b = append(b, trans.text[trans.indexes[3]:]...)
-
-	return string(b), nil
-}
-
-// VerifyTranslations checks to ensures that no plural rules have been
-// missed within the translations.
-func (t *translator) VerifyTranslations() error {
-
-	for k, v := range t.cardinalTanslations {
-
-		for _, rule := range t.PluralsCardinal() {
-
-			if v[rule] == nil {
-				return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
-			}
-		}
-	}
-
-	for k, v := range t.ordinalTanslations {
-
-		for _, rule := range t.PluralsOrdinal() {
-
-			if v[rule] == nil {
-				return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
-			}
-		}
-	}
-
-	for k, v := range t.rangeTanslations {
-
-		for _, rule := range t.PluralsRange() {
-
-			if v[rule] == nil {
-				return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go
deleted file mode 100644
index dbf707f5..00000000
--- a/vendor/github.com/go-playground/universal-translator/universal_translator.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package ut
-
-import (
-	"strings"
-
-	"github.com/go-playground/locales"
-)
-
-// UniversalTranslator holds all locale & translation data
-type UniversalTranslator struct {
-	translators map[string]Translator
-	fallback    Translator
-}
-
-// New returns a new UniversalTranslator instance set with
-// the fallback locale and locales it should support
-func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
-
-	t := &UniversalTranslator{
-		translators: make(map[string]Translator),
-	}
-
-	for _, v := range supportedLocales {
-
-		trans := newTranslator(v)
-		t.translators[strings.ToLower(trans.Locale())] = trans
-
-		if fallback.Locale() == v.Locale() {
-			t.fallback = trans
-		}
-	}
-
-	if t.fallback == nil && fallback != nil {
-		t.fallback = newTranslator(fallback)
-	}
-
-	return t
-}
-
-// FindTranslator trys to find a Translator based on an array of locales
-// and returns the first one it can find, otherwise returns the
-// fallback translator.
-func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
-
-	for _, locale := range locales {
-
-		if trans, found = t.translators[strings.ToLower(locale)]; found {
-			return
-		}
-	}
-
-	return t.fallback, false
-}
-
-// GetTranslator returns the specified translator for the given locale,
-// or fallback if not found
-func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
-
-	if trans, found = t.translators[strings.ToLower(locale)]; found {
-		return
-	}
-
-	return t.fallback, false
-}
-
-// GetFallback returns the fallback locale
-func (t *UniversalTranslator) GetFallback() Translator {
-	return t.fallback
-}
-
-// AddTranslator adds the supplied translator, if it already exists the override param
-// will be checked and if false an error will be returned, otherwise the translator will be
-// overridden; if the fallback matches the supplied translator it will be overridden as well
-// NOTE: this is normally only used when translator is embedded within a library
-func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
-
-	lc := strings.ToLower(translator.Locale())
-	_, ok := t.translators[lc]
-	if ok && !override {
-		return &ErrExistingTranslator{locale: translator.Locale()}
-	}
-
-	trans := newTranslator(translator)
-
-	if t.fallback.Locale() == translator.Locale() {
-
-		// because it's optional to have a fallback, I don't impose that limitation
-		// don't know why you wouldn't but...
-		if !override {
-			return &ErrExistingTranslator{locale: translator.Locale()}
-		}
-
-		t.fallback = trans
-	}
-
-	t.translators[lc] = trans
-
-	return nil
-}
-
-// VerifyTranslations runs through all locales and identifies any issues
-// eg. missing plural rules for a locale
-func (t *UniversalTranslator) VerifyTranslations() (err error) {
-
-	for _, trans := range t.translators {
-		err = trans.VerifyTranslations()
-		if err != nil {
-			return
-		}
-	}
-
-	return
-}
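
The two deletions above drop the vendored go-playground/universal-translator package entirely. For reference, a minimal sketch of how the removed API was typically consumed; the locale package import and the translation key are illustrative and assume the upstream packages are fetched directly rather than vendored here.

package main

import (
	"fmt"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	// Build a UniversalTranslator with English as the fallback locale.
	enLocale := en.New()
	uni := ut.New(enLocale, enLocale)

	// Look up the translator for a request locale, falling back if unknown.
	trans, _ := uni.GetTranslator("en")

	// Register a keyed translation with a single {0} parameter and render it.
	_ = trans.Add("days_left", "{0} days left", false)
	msg, _ := trans.T("days_left", "3")
	fmt.Println(msg) // 3 days left
}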
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
index 20981432..6ea38e63 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -31,8 +31,8 @@ func isExported(field reflect.StructField) bool {
 // Traverses recursively both values, assigning src's fields values to dst.
 // The map argument tracks comparisons that have already been seen, which allows
 // short circuiting on recursive types.
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
-	overwrite := config.overwrite
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
 	if dst.CanAddr() {
 		addr := dst.UnsafeAddr()
 		h := 17 * addr
@@ -128,23 +128,23 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 // doesn't apply if dst is a map.
 // This is separated method from Merge because it is cleaner and it keeps sane
 // semantics: merging equal types, mapping different (restricted) types.
-func Map(dst, src interface{}, opts ...func(*config)) error {
+func Map(dst, src interface{}, opts ...func(*Config)) error {
 	return _map(dst, src, opts...)
 }
 
-// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
 // non-empty src attribute values.
 // Deprecated: Use Map(…) with WithOverride
-func MapWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
 	return _map(dst, src, append(opts, WithOverride)...)
 }
 
-func _map(dst, src interface{}, opts ...func(*config)) error {
+func _map(dst, src interface{}, opts ...func(*Config)) error {
 	var (
 		vDst, vSrc reflect.Value
 		err        error
 	)
-	config := &config{}
+	config := &Config{}
 
 	for _, opt := range opts {
 		opt(config)
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 8ca10c91..f0e17924 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -8,7 +8,9 @@
 
 package mergo
 
-import "reflect"
+import (
+	"reflect"
+)
 
 func hasExportedField(dst reflect.Value) (exported bool) {
 	for i, n := 0, dst.NumField(); i < n; i++ {
@@ -22,20 +24,21 @@ func hasExportedField(dst reflect.Value) (exported bool) {
 	return
 }
 
-type config struct {
-	overwrite    bool
-	transformers transformers
+type Config struct {
+	Overwrite    bool
+	AppendSlice  bool
+	Transformers Transformers
 }
 
-type transformers interface {
+type Transformers interface {
 	Transformer(reflect.Type) func(dst, src reflect.Value) error
 }
 
 // Traverses recursively both values, assigning src's fields values to dst.
 // The map argument tracks comparisons that have already been seen, which allows
 // short circuiting on recursive types.
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
-	overwrite := config.overwrite
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
 
 	if !src.IsValid() {
 		return
@@ -54,8 +57,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 		visited[h] = &visit{addr, typ, seen}
 	}
 
-	if config.transformers != nil && !isEmptyValue(dst) {
-		if fn := config.transformers.Transformer(dst.Type()); fn != nil {
+	if config.Transformers != nil && !isEmptyValue(dst) {
+		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
 			err = fn(dst, src)
 			return
 		}
@@ -75,9 +78,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 		}
 	case reflect.Map:
-		if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 {
+		if dst.IsNil() && !src.IsNil() {
 			dst.Set(reflect.MakeMap(dst.Type()))
-			return
 		}
 		for _, key := range src.MapKeys() {
 			srcElement := src.MapIndex(key)
@@ -86,7 +88,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 			dstElement := dst.MapIndex(key)
 			switch srcElement.Kind() {
-			case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
 				if srcElement.IsNil() {
 					continue
 				}
@@ -122,7 +124,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 				continue
 			}
 
-			if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
+			if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
 				if dst.IsNil() {
 					dst.Set(reflect.MakeMap(dst.Type()))
 				}
@@ -130,7 +132,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 		}
 	case reflect.Slice:
-		dst.Set(reflect.AppendSlice(dst, src))
+		if !dst.CanSet() {
+			break
+		}
+		if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+			dst.Set(src)
+		} else {
+			dst.Set(reflect.AppendSlice(dst, src))
+		}
 	case reflect.Ptr:
 		fallthrough
 	case reflect.Interface:
@@ -174,36 +183,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 // src attributes if they themselves are not empty. dst and src must be valid same-type structs
 // and dst must be a pointer to struct.
 // It won't merge unexported (private) fields and will do recursively any exported field.
-func Merge(dst, src interface{}, opts ...func(*config)) error {
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
 	return merge(dst, src, opts...)
 }
 
 // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
 // non-empty src attribute values.
 // Deprecated: use Merge(…) with WithOverride
-func MergeWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
 	return merge(dst, src, append(opts, WithOverride)...)
 }
 
 // WithTransformers adds transformers to merge, allowing to customize the merging of some types.
-func WithTransformers(transformers transformers) func(*config) {
-	return func(config *config) {
-		config.transformers = transformers
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
 	}
 }
 
 // WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
-func WithOverride(config *config) {
-	config.overwrite = true
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
 }
 
-func merge(dst, src interface{}, opts ...func(*config)) error {
+func merge(dst, src interface{}, opts ...func(*Config)) error {
 	var (
 		vDst, vSrc reflect.Value
 		err        error
 	)
 
-	config := &config{}
+	config := &Config{}
 
 	for _, opt := range opts {
 		opt(config)
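
The mergo hunks above export the option configuration (config becomes Config with exported Overwrite and Transformers fields), add the WithAppendSlice option, and tighten the map and slice merge semantics. A minimal sketch of driving the updated option API; the settings struct and field values are illustrative, and the commented output reflects the patched slice handling.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type settings struct {
	Host string
	Tags []string
}

func main() {
	dst := settings{Host: "localhost", Tags: []string{"a"}}
	src := settings{Host: "example.com", Tags: []string{"b"}}

	// WithOverride lets non-empty src fields replace non-empty dst fields;
	// WithAppendSlice appends src slices onto dst instead of replacing them.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println(dst.Host, dst.Tags) // example.com [a b]
}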
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
index 694a1d03..a8183dd7 100644
--- a/vendor/github.com/rcrowley/go-metrics/ewma.go
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -79,16 +79,15 @@ func (NilEWMA) Update(n int64) {}
 type StandardEWMA struct {
 	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
 	alpha     float64
-	rate      float64
-	init      bool
+	rate      uint64
+	init      uint32
 	mutex     sync.Mutex
 }
 
 // Rate returns the moving average rate of events per second.
 func (a *StandardEWMA) Rate() float64 {
-	a.mutex.Lock()
-	defer a.mutex.Unlock()
-	return a.rate * float64(1e9)
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
 }
 
 // Snapshot returns a read-only copy of the EWMA.
@@ -99,17 +98,38 @@ func (a *StandardEWMA) Snapshot() EWMA {
 // Tick ticks the clock to update the moving average.  It assumes it is called
 // every five seconds.
 func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+			// but again, this section is only invoked on the first successful Tick() operation.
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
 	count := atomic.LoadInt64(&a.uncounted)
 	atomic.AddInt64(&a.uncounted, -count)
 	instantRate := float64(count) / float64(5e9)
-	a.mutex.Lock()
-	defer a.mutex.Unlock()
-	if a.init {
-		a.rate += a.alpha * (instantRate - a.rate)
-	} else {
-		a.init = true
-		a.rate = instantRate
-	}
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
 }
 
 // Update adds n uncounted events.
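
The EWMA rewrite above replaces the mutex-guarded float64 rate with the usual lock-free pattern of storing the IEEE-754 bit pattern in a uint64 and going through sync/atomic. A small self-contained sketch of that pattern; the atomicFloat64 type is illustrative and not part of go-metrics.

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 stores a float64 as its IEEE-754 bit pattern inside a uint64
// so it can be read and written with sync/atomic and no mutex.
type atomicFloat64 struct {
	bits uint64
}

func (f *atomicFloat64) Store(v float64) {
	atomic.StoreUint64(&f.bits, math.Float64bits(v))
}

func (f *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.bits))
}

func main() {
	var rate atomicFloat64
	rate.Store(2.5)
	fmt.Println(rate.Load()) // 2.5
}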
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
index 6f93920b..3962e6db 100644
--- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -1,6 +1,9 @@
 package metrics
 
-import "sync"
+import (
+	"math"
+	"sync/atomic"
+)
 
 // GaugeFloat64s hold a float64 value that can be set arbitrarily.
 type GaugeFloat64 interface {
@@ -85,8 +88,7 @@ func (NilGaugeFloat64) Value() float64 { return 0.0 }
 // StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
 // sync.Mutex to manage a single float64 value.
 type StandardGaugeFloat64 struct {
-	mutex sync.Mutex
-	value float64
+	value uint64
 }
 
 // Snapshot returns a read-only copy of the gauge.
@@ -96,16 +98,12 @@ func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
 
 // Update updates the gauge's value.
 func (g *StandardGaugeFloat64) Update(v float64) {
-	g.mutex.Lock()
-	defer g.mutex.Unlock()
-	g.value = v
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
 }
 
 // Value returns the gauge's current value.
 func (g *StandardGaugeFloat64) Value() float64 {
-	g.mutex.Lock()
-	defer g.mutex.Unlock()
-	return g.value
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
 }
 
 // FunctionalGaugeFloat64 returns value from given function
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
index 53ff329b..7807406a 100644
--- a/vendor/github.com/rcrowley/go-metrics/meter.go
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -1,7 +1,9 @@
 package metrics
 
 import (
+	"math"
 	"sync"
+	"sync/atomic"
 	"time"
 )
 
@@ -62,7 +64,7 @@ func NewRegisteredMeter(name string, r Registry) Meter {
 // MeterSnapshot is a read-only copy of another Meter.
 type MeterSnapshot struct {
 	count                          int64
-	rate1, rate5, rate15, rateMean float64
+	rate1, rate5, rate15, rateMean uint64
 }
 
 // Count returns the count of events at the time the snapshot was taken.
@@ -75,19 +77,19 @@ func (*MeterSnapshot) Mark(n int64) {
 
 // Rate1 returns the one-minute moving average rate of events per second at the
 // time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
 
 // Rate5 returns the five-minute moving average rate of events per second at
 // the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
 
 // Rate15 returns the fifteen-minute moving average rate of events per second
 // at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
 
 // RateMean returns the meter's mean rate of events per second at the time the
 // snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
 
 // Snapshot returns the snapshot.
 func (m *MeterSnapshot) Snapshot() Meter { return m }
@@ -124,11 +126,12 @@ func (NilMeter) Stop() {}
 
 // StandardMeter is the standard implementation of a Meter.
 type StandardMeter struct {
-	lock        sync.RWMutex
+	// Only used on stop.
+	lock        sync.Mutex
 	snapshot    *MeterSnapshot
 	a1, a5, a15 EWMA
 	startTime   time.Time
-	stopped     bool
+	stopped     uint32
 }
 
 func newStandardMeter() *StandardMeter {
@@ -145,9 +148,9 @@ func newStandardMeter() *StandardMeter {
 func (m *StandardMeter) Stop() {
 	m.lock.Lock()
 	stopped := m.stopped
-	m.stopped = true
+	m.stopped = 1
 	m.lock.Unlock()
-	if !stopped {
+	if stopped != 1 {
 		arbiter.Lock()
 		delete(arbiter.meters, m)
 		arbiter.Unlock()
@@ -156,20 +159,17 @@ func (m *StandardMeter) Stop() {
 
 // Count returns the number of events recorded.
 func (m *StandardMeter) Count() int64 {
-	m.lock.RLock()
-	count := m.snapshot.count
-	m.lock.RUnlock()
-	return count
+	return atomic.LoadInt64(&m.snapshot.count)
 }
 
 // Mark records the occurance of n events.
 func (m *StandardMeter) Mark(n int64) {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-	if m.stopped {
+	if atomic.LoadUint32(&m.stopped) == 1 {
 		return
 	}
-	m.snapshot.count += n
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
 	m.a1.Update(n)
 	m.a5.Update(n)
 	m.a15.Update(n)
@@ -178,56 +178,49 @@ func (m *StandardMeter) Mark(n int64) {
 
 // Rate1 returns the one-minute moving average rate of events per second.
 func (m *StandardMeter) Rate1() float64 {
-	m.lock.RLock()
-	rate1 := m.snapshot.rate1
-	m.lock.RUnlock()
-	return rate1
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
 }
 
 // Rate5 returns the five-minute moving average rate of events per second.
 func (m *StandardMeter) Rate5() float64 {
-	m.lock.RLock()
-	rate5 := m.snapshot.rate5
-	m.lock.RUnlock()
-	return rate5
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
 }
 
 // Rate15 returns the fifteen-minute moving average rate of events per second.
 func (m *StandardMeter) Rate15() float64 {
-	m.lock.RLock()
-	rate15 := m.snapshot.rate15
-	m.lock.RUnlock()
-	return rate15
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
 }
 
 // RateMean returns the meter's mean rate of events per second.
 func (m *StandardMeter) RateMean() float64 {
-	m.lock.RLock()
-	rateMean := m.snapshot.rateMean
-	m.lock.RUnlock()
-	return rateMean
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
 }
 
 // Snapshot returns a read-only copy of the meter.
 func (m *StandardMeter) Snapshot() Meter {
-	m.lock.RLock()
-	snapshot := *m.snapshot
-	m.lock.RUnlock()
-	return &snapshot
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
 }
 
 func (m *StandardMeter) updateSnapshot() {
-	// should run with write lock held on m.lock
-	snapshot := m.snapshot
-	snapshot.rate1 = m.a1.Rate()
-	snapshot.rate5 = m.a5.Rate()
-	snapshot.rate15 = m.a15.Rate()
-	snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
 }
 
 func (m *StandardMeter) tick() {
-	m.lock.Lock()
-	defer m.lock.Unlock()
 	m.a1.Tick()
 	m.a5.Tick()
 	m.a15.Tick()
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
index 6c0007b1..b3bab64e 100644
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -54,7 +54,7 @@ type Registry interface {
 // of names to metrics.
 type StandardRegistry struct {
 	metrics map[string]interface{}
-	mutex   sync.Mutex
+	mutex   sync.RWMutex
 }
 
 // Create a new registry.
@@ -71,8 +71,8 @@ func (r *StandardRegistry) Each(f func(string, interface{})) {
 
 // Get the metric by the given name or nil if none is registered.
 func (r *StandardRegistry) Get(name string) interface{} {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
 	return r.metrics[name]
 }
 
@@ -81,6 +81,15 @@ func (r *StandardRegistry) Get(name string) interface{} {
 // The interface can be the metric to register if not found in registry,
 // or a function returning the metric for lazy instantiation.
 func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// access the read lock first which should be re-entrant
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
 	if metric, ok := r.metrics[name]; ok {
@@ -103,8 +112,8 @@ func (r *StandardRegistry) Register(name string, i interface{}) error {
 
 // Run all registered healthchecks.
 func (r *StandardRegistry) RunHealthchecks() {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
 	for _, i := range r.metrics {
 		if h, ok := i.(Healthcheck); ok {
 			h.Check()
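
The registry change switches StandardRegistry to a sync.RWMutex and gives GetOrRegister a read-locked fast path that re-checks under the write lock before inserting. A minimal sketch of that double-checked locking shape; the cache type and getOrCreate helper are illustrative.

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	items map[string]int
}

// getOrCreate first checks under the cheap read lock; only if the key is
// missing does it take the write lock and check again before inserting.
func (c *cache) getOrCreate(key string, create func() int) int {
	c.mu.RLock()
	v, ok := c.items[key]
	c.mu.RUnlock()
	if ok {
		return v
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.items[key]; ok {
		// Another goroutine won the race between the two checks.
		return v
	}
	v = create()
	c.items[key] = v
	return v
}

func main() {
	c := &cache{items: make(map[string]int)}
	fmt.Println(c.getOrCreate("answer", func() int { return 42 })) // 42
}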
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index 94a6ca27..a5d8a927 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -16,14 +16,14 @@ func legacyArgs(cmd *Command, args []string) error {
 		return nil
 	}
 
-	// root command with subcommands, do subcommand checking
+	// root command with subcommands, do subcommand checking.
 	if !cmd.HasParent() && len(args) > 0 {
 		return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
 	}
 	return nil
 }
 
-// NoArgs returns an error if any args are included
+// NoArgs returns an error if any args are included.
 func NoArgs(cmd *Command, args []string) error {
 	if len(args) > 0 {
 		return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
@@ -31,7 +31,7 @@ func NoArgs(cmd *Command, args []string) error {
 	return nil
 }
 
-// OnlyValidArgs returns an error if any args are not in the list of ValidArgs
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
 func OnlyValidArgs(cmd *Command, args []string) error {
 	if len(cmd.ValidArgs) > 0 {
 		for _, v := range args {
@@ -43,21 +43,12 @@ func OnlyValidArgs(cmd *Command, args []string) error {
 	return nil
 }
 
-func stringInSlice(a string, list []string) bool {
-	for _, b := range list {
-		if b == a {
-			return true
-		}
-	}
-	return false
-}
-
-// ArbitraryArgs never returns an error
+// ArbitraryArgs never returns an error.
 func ArbitraryArgs(cmd *Command, args []string) error {
 	return nil
 }
 
-// MinimumNArgs returns an error if there is not at least N args
+// MinimumNArgs returns an error if there is not at least N args.
 func MinimumNArgs(n int) PositionalArgs {
 	return func(cmd *Command, args []string) error {
 		if len(args) < n {
@@ -67,7 +58,7 @@ func MinimumNArgs(n int) PositionalArgs {
 	}
 }
 
-// MaximumNArgs returns an error if there are more than N args
+// MaximumNArgs returns an error if there are more than N args.
 func MaximumNArgs(n int) PositionalArgs {
 	return func(cmd *Command, args []string) error {
 		if len(args) > n {
@@ -77,7 +68,7 @@ func MaximumNArgs(n int) PositionalArgs {
 	}
 }
 
-// ExactArgs returns an error if there are not exactly n args
+// ExactArgs returns an error if there are not exactly n args.
 func ExactArgs(n int) PositionalArgs {
 	return func(cmd *Command, args []string) error {
 		if len(args) != n {
@@ -87,7 +78,7 @@ func ExactArgs(n int) PositionalArgs {
 	}
 }
 
-// RangeArgs returns an error if the number of args is not within the expected range
+// RangeArgs returns an error if the number of args is not within the expected range.
 func RangeArgs(min int, max int) PositionalArgs {
 	return func(cmd *Command, args []string) error {
 		if len(args) < min || len(args) > max {
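
The args.go hunks are doc-comment polish on cobra's built-in positional-argument validators, with the stringInSlice helper relocated to cobra.go further below. A short sketch of wiring one of these validators into a command; the command name is illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: cobra.ExactArgs(1), // reject anything other than exactly one positional arg
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("hello,", args[0])
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}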
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index c19fe7a0..291eae7d 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -21,8 +21,8 @@ const (
 
 func writePreamble(buf *bytes.Buffer, name string) {
 	buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
-	buf.WriteString(`
-__debug()
+	buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
 {
     if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
         echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
@@ -31,13 +31,13 @@ __debug()
 
 # Homebrew on Macs have version 1.3 of bash-completion which doesn't include
 # _init_completion. This is a very minimal version of that function.
-__my_init_completion()
+__%[1]s_init_completion()
 {
     COMPREPLY=()
     _get_comp_words_by_ref "$@" cur prev words cword
 }
 
-__index_of_word()
+__%[1]s_index_of_word()
 {
     local w word=$1
     shift
@@ -49,7 +49,7 @@ __index_of_word()
     index=-1
 }
 
-__contains_word()
+__%[1]s_contains_word()
 {
     local w word=$1; shift
     for w in "$@"; do
@@ -58,9 +58,9 @@ __contains_word()
     return 1
 }
 
-__handle_reply()
+__%[1]s_handle_reply()
 {
-    __debug "${FUNCNAME[0]}"
+    __%[1]s_debug "${FUNCNAME[0]}"
     case $cur in
         -*)
             if [[ $(type -t compopt) = "builtin" ]]; then
@@ -85,7 +85,7 @@ __handle_reply()
 
                 local index flag
                 flag="${cur%%=*}"
-                __index_of_word "${flag}" "${flags_with_completion[@]}"
+                __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
                 COMPREPLY=()
                 if [[ ${index} -ge 0 ]]; then
                     PREFIX=""
@@ -103,7 +103,7 @@ __handle_reply()
 
     # check if we are handling a flag with special work handling
     local index
-    __index_of_word "${prev}" "${flags_with_completion[@]}"
+    __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
     if [[ ${index} -ge 0 ]]; then
         ${flags_completion[${index}]}
         return
@@ -136,24 +136,30 @@ __handle_reply()
     if declare -F __ltrim_colon_completions >/dev/null; then
         __ltrim_colon_completions "$cur"
     fi
+
+    # If there is only 1 completion and it is a flag with an = it will be completed
+    # but we don't want a space after the =
+    if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+       compopt -o nospace
+    fi
 }
 
 # The arguments should be in the form "ext1|ext2|extn"
-__handle_filename_extension_flag()
+__%[1]s_handle_filename_extension_flag()
 {
     local ext="$1"
     _filedir "@(${ext})"
 }
 
-__handle_subdirs_in_dir_flag()
+__%[1]s_handle_subdirs_in_dir_flag()
 {
     local dir="$1"
     pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
 }
 
-__handle_flag()
+__%[1]s_handle_flag()
 {
-    __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
 
     # if a command required a flag, and we found it, unset must_have_one_flag()
     local flagname=${words[c]}
@@ -164,27 +170,30 @@ __handle_flag()
         flagname=${flagname%%=*} # strip everything after the =
         flagname="${flagname}=" # but put the = back
     fi
-    __debug "${FUNCNAME[0]}: looking for ${flagname}"
-    if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+    if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
         must_have_one_flag=()
     fi
 
     # if you set a flag which only applies to this command, don't show subcommands
-    if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+    if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
       commands=()
     fi
 
     # keep flag value with flagname as flaghash
-    if [ -n "${flagvalue}" ] ; then
-        flaghash[${flagname}]=${flagvalue}
-    elif [ -n "${words[ $((c+1)) ]}" ] ; then
-        flaghash[${flagname}]=${words[ $((c+1)) ]}
-    else
-        flaghash[${flagname}]="true" # pad "true" for bool flag
+    # flaghash variable is an associative array which is only supported in bash > 3.
+    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+        if [ -n "${flagvalue}" ] ; then
+            flaghash[${flagname}]=${flagvalue}
+        elif [ -n "${words[ $((c+1)) ]}" ] ; then
+            flaghash[${flagname}]=${words[ $((c+1)) ]}
+        else
+            flaghash[${flagname}]="true" # pad "true" for bool flag
+        fi
     fi
 
     # skip the argument to a two word flag
-    if __contains_word "${words[c]}" "${two_word_flags[@]}"; then
+    if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
         c=$((c+1))
         # if we are looking for a flags value, don't show commands
         if [[ $c -eq $cword ]]; then
@@ -196,13 +205,13 @@ __handle_flag()
 
 }
 
-__handle_noun()
+__%[1]s_handle_noun()
 {
-    __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
 
-    if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+    if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
         must_have_one_noun=()
-    elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then
+    elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
         must_have_one_noun=()
     fi
 
@@ -210,45 +219,45 @@ __handle_noun()
     c=$((c+1))
 }
 
-__handle_command()
+__%[1]s_handle_command()
 {
-    __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
 
     local next_command
     if [[ -n ${last_command} ]]; then
         next_command="_${last_command}_${words[c]//:/__}"
     else
         if [[ $c -eq 0 ]]; then
-            next_command="_$(basename "${words[c]//:/__}")"
+            next_command="_%[1]s_root_command"
         else
             next_command="_${words[c]//:/__}"
         fi
     fi
     c=$((c+1))
-    __debug "${FUNCNAME[0]}: looking for ${next_command}"
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
     declare -F "$next_command" >/dev/null && $next_command
 }
 
-__handle_word()
+__%[1]s_handle_word()
 {
     if [[ $c -ge $cword ]]; then
-        __handle_reply
+        __%[1]s_handle_reply
         return
     fi
-    __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
     if [[ "${words[c]}" == -* ]]; then
-        __handle_flag
-    elif __contains_word "${words[c]}" "${commands[@]}"; then
-        __handle_command
-    elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then
-        __handle_command
+        __%[1]s_handle_flag
+    elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+        __%[1]s_handle_command
+    elif [[ $c -eq 0 ]]; then
+        __%[1]s_handle_command
     else
-        __handle_noun
+        __%[1]s_handle_noun
     fi
-    __handle_word
+    __%[1]s_handle_word
 }
 
-`)
+`, name))
 }
 
 func writePostscript(buf *bytes.Buffer, name string) {
@@ -260,7 +269,7 @@ func writePostscript(buf *bytes.Buffer, name string) {
     if declare -F _init_completion >/dev/null 2>&1; then
         _init_completion -s || return
     else
-        __my_init_completion -n "=" || return
+        __%[1]s_init_completion -n "=" || return
     fi
 
     local c=0
@@ -269,13 +278,13 @@ func writePostscript(buf *bytes.Buffer, name string) {
     local local_nonpersistent_flags=()
     local flags_with_completion=()
     local flags_completion=()
-    local commands=("%s")
+    local commands=("%[1]s")
     local must_have_one_flag=()
     local must_have_one_noun=()
     local last_command
     local nouns=()
 
-    __handle_word
+    __%[1]s_handle_word
 }
 
 `, name))
@@ -300,7 +309,7 @@ func writeCommands(buf *bytes.Buffer, cmd *Command) {
 	buf.WriteString("\n")
 }
 
-func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string) {
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
 	for key, value := range annotations {
 		switch key {
 		case BashCompFilenameExt:
@@ -308,7 +317,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s
 
 			var ext string
 			if len(value) > 0 {
-				ext = "__handle_filename_extension_flag " + strings.Join(value, "|")
+				ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
 			} else {
 				ext = "_filedir"
 			}
@@ -326,7 +335,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s
 
 			var ext string
 			if len(value) == 1 {
-				ext = "__handle_subdirs_in_dir_flag " + value[0]
+				ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
 			} else {
 				ext = "_filedir -d"
 			}
@@ -335,7 +344,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s
 	}
 }
 
-func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
 	name := flag.Shorthand
 	format := "    "
 	if len(flag.NoOptDefVal) == 0 {
@@ -343,10 +352,10 @@ func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) {
 	}
 	format += "flags+=(\"-%s\")\n"
 	buf.WriteString(fmt.Sprintf(format, name))
-	writeFlagHandler(buf, "-"+name, flag.Annotations)
+	writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
 }
 
-func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
 	name := flag.Name
 	format := "    flags+=(\"--%s"
 	if len(flag.NoOptDefVal) == 0 {
@@ -354,7 +363,7 @@ func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) {
 	}
 	format += "\")\n"
 	buf.WriteString(fmt.Sprintf(format, name))
-	writeFlagHandler(buf, "--"+name, flag.Annotations)
+	writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
 }
 
 func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
@@ -380,9 +389,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) {
 		if nonCompletableFlag(flag) {
 			return
 		}
-		writeFlag(buf, flag)
+		writeFlag(buf, flag, cmd)
 		if len(flag.Shorthand) > 0 {
-			writeShortFlag(buf, flag)
+			writeShortFlag(buf, flag, cmd)
 		}
 		if localNonPersistentFlags.Lookup(flag.Name) != nil {
 			writeLocalNonPersistentFlag(buf, flag)
@@ -392,9 +401,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) {
 		if nonCompletableFlag(flag) {
 			return
 		}
-		writeFlag(buf, flag)
+		writeFlag(buf, flag, cmd)
 		if len(flag.Shorthand) > 0 {
-			writeShortFlag(buf, flag)
+			writeShortFlag(buf, flag, cmd)
 		}
 	})
 
@@ -452,7 +461,13 @@ func gen(buf *bytes.Buffer, cmd *Command) {
 	commandName := cmd.CommandPath()
 	commandName = strings.Replace(commandName, " ", "_", -1)
 	commandName = strings.Replace(commandName, ":", "__", -1)
-	buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+
+	if cmd.Root() == cmd {
+		buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+	} else {
+		buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+	}
+
 	buf.WriteString(fmt.Sprintf("    last_command=%q\n", commandName))
 	writeCommands(buf, cmd)
 	writeFlags(buf, cmd)
@@ -491,17 +506,20 @@ func (c *Command) GenBashCompletionFile(filename string) error {
 	return c.GenBashCompletion(outFile)
 }
 
-// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists.
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
 func (c *Command) MarkFlagRequired(name string) error {
 	return MarkFlagRequired(c.Flags(), name)
 }
 
-// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists.
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
 func (c *Command) MarkPersistentFlagRequired(name string) error {
 	return MarkFlagRequired(c.PersistentFlags(), name)
 }
 
-// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists.
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
 func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
 	return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
 }
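
The bash_completions.go change prefixes every generated helper with the root command's name (for example __myapp_handle_word instead of __handle_word), so completion scripts for different programs no longer clobber each other's shell functions. A minimal sketch of generating such a script; the program and file names are illustrative.

package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "myapp"}

	// Writes a bash completion script whose helper functions are prefixed
	// with __myapp_ under the patched generator.
	if err := root.GenBashCompletionFile("myapp_completion.sh"); err != nil {
		log.Fatal(err)
	}
}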
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index 8928cefc..7010fd15 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -70,7 +70,8 @@ func AddTemplateFuncs(tmplFuncs template.FuncMap) {
 	}
 }
 
-// OnInitialize takes a series of func() arguments and appends them to a slice of func().
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
 func OnInitialize(y ...func()) {
 	initializers = append(initializers, y...)
 }
@@ -188,3 +189,12 @@ func ld(s, t string, ignoreCase bool) int {
 	}
 	return d[len(s)][len(t)]
 }
+
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 58e6ceb0..15b81127 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -75,6 +75,11 @@ type Command struct {
 	// group commands.
 	Annotations map[string]string
 
+	// Version defines the version for this command. If this value is non-empty and the command does not
+	// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+	// will print content of the "Version" variable.
+	Version string
+
 	// The *Run functions are executed in the following order:
 	//   * PersistentPreRun()
 	//   * PreRun()
@@ -118,6 +123,10 @@ type Command struct {
 	// will be printed by generating docs for this command.
 	DisableAutoGenTag bool
 
+	// DisableFlagsInUseLine will disable the addition of [flags] to the usage
+	// line of a command when printing help or generating docs
+	DisableFlagsInUseLine bool
+
 	// DisableSuggestions disables the suggestions based on Levenshtein distance
 	// that go along with 'unknown command' messages.
 	DisableSuggestions bool
@@ -138,6 +147,11 @@ type Command struct {
 	commandsMaxNameLen        int
 	// commandsAreSorted defines, if command slice are sorted or not.
 	commandsAreSorted bool
+	// commandCalledAs is the name or alias value used to call this command.
+	commandCalledAs struct {
+		name   string
+		called bool
+	}
 
 	// args is actual args parsed from flags.
 	args []string
@@ -173,6 +187,8 @@ type Command struct {
 	// helpCommand is command with usage 'help'. If it's not defined by user,
 	// cobra uses default help command.
 	helpCommand *Command
+	// versionTemplate is the version template defined by user.
+	versionTemplate string
 }
 
 // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
@@ -218,6 +234,11 @@ func (c *Command) SetHelpTemplate(s string) {
 	c.helpTemplate = s
 }
 
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+	c.versionTemplate = s
+}
+
 // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
 // The user should not have a cyclic dependency on commands.
 func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
@@ -407,6 +428,19 @@ func (c *Command) HelpTemplate() string {
 {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
 }
 
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+	if c.versionTemplate != "" {
+		return c.versionTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.VersionTemplate()
+	}
+	return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
 func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
 	flag := fs.Lookup(name)
 	if flag == nil {
@@ -441,6 +475,9 @@ Loop:
 		s := args[0]
 		args = args[1:]
 		switch {
+		case s == "--":
+			// "--" terminates the flags
+			break Loop
 		case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
 			// If '--flag arg' then
 			// delete arg from args.
@@ -528,6 +565,7 @@ func (c *Command) findNext(next string) *Command {
 	matches := make([]*Command, 0)
 	for _, cmd := range c.commands {
 		if cmd.Name() == next || cmd.HasAlias(next) {
+			cmd.commandCalledAs.name = next
 			return cmd
 		}
 		if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
@@ -538,6 +576,7 @@ func (c *Command) findNext(next string) *Command {
 	if len(matches) == 1 {
 		return matches[0]
 	}
+
 	return nil
 }
 
@@ -621,10 +660,8 @@ func (c *Command) Root() *Command {
 	return c
 }
 
-// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
-// found during arg parsing. This allows your program to know which args were
-// before the -- and which came after. (Description from
-// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash).
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
 func (c *Command) ArgsLenAtDash() int {
 	return c.Flags().ArgsLenAtDash()
 }
@@ -638,9 +675,10 @@ func (c *Command) execute(a []string) (err error) {
 		c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
 	}
 
-	// initialize help flag as the last point possible to allow for user
+	// initialize help and version flag at the last point possible to allow for user
 	// overriding
 	c.InitDefaultHelpFlag()
+	c.InitDefaultVersionFlag()
 
 	err = c.ParseFlags(a)
 	if err != nil {
@@ -657,7 +695,27 @@ func (c *Command) execute(a []string) (err error) {
 		return err
 	}
 
-	if helpVal || !c.Runnable() {
+	if helpVal {
+		return flag.ErrHelp
+	}
+
+	// for back-compat, only add version flag behavior if version is defined
+	if c.Version != "" {
+		versionVal, err := c.Flags().GetBool("version")
+		if err != nil {
+			c.Println("\"version\" flag declared as non-bool. Please correct your code")
+			return err
+		}
+		if versionVal {
+			err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+			if err != nil {
+				c.Println(err)
+			}
+			return err
+		}
+	}
+
+	if !c.Runnable() {
 		return flag.ErrHelp
 	}
 
@@ -780,6 +838,11 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
 		return c, err
 	}
 
+	cmd.commandCalledAs.called = true
+	if cmd.commandCalledAs.name == "" {
+		cmd.commandCalledAs.name = cmd.Name()
+	}
+
 	err = cmd.execute(flags)
 	if err != nil {
 		// Always show help if requested, even if SilenceErrors is in
@@ -825,7 +888,7 @@ func (c *Command) validateRequiredFlags() error {
 	})
 
 	if len(missingFlagNames) > 0 {
-		return fmt.Errorf(`Required flag(s) "%s" have/has not been set`, strings.Join(missingFlagNames, `", "`))
+		return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
 	}
 	return nil
 }
@@ -846,6 +909,27 @@ func (c *Command) InitDefaultHelpFlag() {
 	}
 }
 
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically by executing the c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+	if c.Version == "" {
+		return
+	}
+
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("version") == nil {
+		usage := "version for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().Bool("version", false, usage)
+	}
+}
+
 // InitDefaultHelpCmd adds default help command to c.
 // It is called automatically by executing the c or by calling help and usage.
 // If c already has help command or c has no subcommands, it will do nothing.
@@ -877,7 +961,7 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`,
 	c.AddCommand(c.helpCommand)
 }
 
-// ResetCommands used for testing.
+// ResetCommands delete parent, subcommand and help command from c.
 func (c *Command) ResetCommands() {
 	c.parent = nil
 	c.commands = nil
@@ -996,6 +1080,9 @@ func (c *Command) UseLine() string {
 	} else {
 		useline = c.Use
 	}
+	if c.DisableFlagsInUseLine {
+		return useline
+	}
 	if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
 		useline += " [flags]"
 	}
@@ -1063,14 +1150,25 @@ func (c *Command) HasAlias(s string) bool {
 	return false
 }
 
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+	if c.commandCalledAs.called {
+		return c.commandCalledAs.name
+	}
+	return ""
+}
+
 // hasNameOrAliasPrefix returns true if the Name or any of aliases start
 // with prefix
 func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
 	if strings.HasPrefix(c.Name(), prefix) {
+		c.commandCalledAs.name = c.Name()
 		return true
 	}
 	for _, alias := range c.Aliases {
 		if strings.HasPrefix(alias, prefix) {
+			c.commandCalledAs.name = alias
 			return true
 		}
 	}
@@ -1163,7 +1261,7 @@ func (c *Command) HasAvailableSubCommands() bool {
 		}
 	}
 
-	// the command either has no sub comamnds, or no available (non deprecated/help/hidden)
+	// the command either has no sub commands, or no available (non deprecated/help/hidden)
 	// sub commands
 	return false
 }
@@ -1173,7 +1271,7 @@ func (c *Command) HasParent() bool {
 	return c.parent != nil
 }
 
-// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists.
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
 func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
 	return c.globNormFunc
 }
@@ -1273,7 +1371,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet {
 	return c.pflags
 }
 
-// ResetFlags is used in testing.
+// ResetFlags deletes all flags from the command.
 func (c *Command) ResetFlags() {
 	c.flagErrorBuf = new(bytes.Buffer)
 	c.flagErrorBuf.Reset()
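
For context, a minimal sketch of how the new Version field and --version flag from the cobra changes above would be used by a caller; the command name and version string are illustrative and not part of the vendored code:

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    )

    func main() {
    	cmd := &cobra.Command{
    		Use:     "demo",
    		Version: "0.1.0", // a non-empty Version triggers InitDefaultVersionFlag above
    		Run: func(cmd *cobra.Command, args []string) {
    			// CalledAs reports the name or alias used to invoke the command.
    			fmt.Println("ran as", cmd.CalledAs())
    		},
    	}
    	// "demo --version" renders the version template and returns before Run.
    	if err := cmd.Execute(); err != nil {
    		fmt.Println(err)
    	}
    }
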
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
index ea5595eb..3655418a 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -906,6 +906,8 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
 //		Returns the number of files at level 'n'.
 //	leveldb.stats
 //		Returns statistics of the underlying DB.
+//	leveldb.iostats
+//	Returns statistics of effective disk reads and writes.
 //	leveldb.writedelay
 //		Returns cumulative write delay caused by compaction.
 //	leveldb.sstables
@@ -959,6 +961,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
 				float64(read)/1048576.0, float64(write)/1048576.0)
 		}
+	case p == "iostats":
+		value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
+			float64(db.s.stor.reads())/1048576.0,
+			float64(db.s.stor.writes())/1048576.0)
 	case p == "writedelay":
 		writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
 		value = fmt.Sprintf("DelayN:%d Delay:%s", writeDelayN, writeDelay)
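
The new "leveldb.iostats" property is read through the existing DB.GetProperty API; a rough usage sketch (the database path and printed values are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
    	db, err := leveldb.OpenFile("/tmp/example.ldb", nil)
    	if err != nil {
    		panic(err)
    	}
    	defer db.Close()

    	// Cumulative bytes moved through the new iStorage wrapper, reported in MB.
    	iostats, err := db.GetProperty("leveldb.iostats")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(iostats) // e.g. "Read(MB):0.00123 Write(MB):0.00045"
    }
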
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
index ad68a870..3f391f93 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/session.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
@@ -42,7 +42,7 @@ type session struct {
 	stTempFileNum    int64
 	stSeqNum         uint64 // last mem compacted seq; need external synchronization
 
-	stor     storage.Storage
+	stor     *iStorage
 	storLock storage.Locker
 	o        *cachedOptions
 	icmp     *iComparer
@@ -68,7 +68,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
 		return
 	}
 	s = &session{
-		stor:     stor,
+		stor:     newIStorage(stor),
 		storLock: storLock,
 		fileRef:  make(map[int64]int),
 	}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
new file mode 100644
index 00000000..d45fb5df
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
@@ -0,0 +1,63 @@
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"sync/atomic"
+)
+
+type iStorage struct {
+	storage.Storage
+	read  uint64
+	write uint64
+}
+
+func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) {
+	r, err := c.Storage.Open(fd)
+	return &iStorageReader{r, c}, err
+}
+
+func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
+	w, err := c.Storage.Create(fd)
+	return &iStorageWriter{w, c}, err
+}
+
+func (c *iStorage) reads() uint64 {
+	return atomic.LoadUint64(&c.read)
+}
+
+func (c *iStorage) writes() uint64 {
+	return atomic.LoadUint64(&c.write)
+}
+
+// newIStorage returns the given storage wrapped by iStorage.
+func newIStorage(s storage.Storage) *iStorage {
+	return &iStorage{s, 0, 0}
+}
+
+type iStorageReader struct {
+	storage.Reader
+	c *iStorage
+}
+
+func (r *iStorageReader) Read(p []byte) (n int, err error) {
+	n, err = r.Reader.Read(p)
+	atomic.AddUint64(&r.c.read, uint64(n))
+	return n, err
+}
+
+func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) {
+	n, err = r.Reader.ReadAt(p, off)
+	atomic.AddUint64(&r.c.read, uint64(n))
+	return n, err
+}
+
+type iStorageWriter struct {
+	storage.Writer
+	c *iStorage
+}
+
+func (w *iStorageWriter) Write(p []byte) (n int, err error) {
+	n, err = w.Writer.Write(p)
+	atomic.AddUint64(&w.c.write, uint64(n))
+	return n, err
+}
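
The file above is a counting decorator around storage.Storage; the same pattern reduced to a dependency-free io.Reader, purely for illustration (all names below are hypothetical):

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    	"sync/atomic"
    )

    // countingReader embeds an io.Reader and tallies bytes read, the same way
    // iStorageReader above feeds iStorage.read.
    type countingReader struct {
    	io.Reader
    	n uint64
    }

    func (r *countingReader) Read(p []byte) (int, error) {
    	n, err := r.Reader.Read(p)
    	atomic.AddUint64(&r.n, uint64(n))
    	return n, err
    }

    func main() {
    	cr := &countingReader{Reader: strings.NewReader("hello leveldb")}
    	io.Copy(io.Discard, cr)
    	fmt.Println("bytes read:", atomic.LoadUint64(&cr.n))
    }
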
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
index bab62bfc..b8297980 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -8,7 +8,6 @@ package storage
 
 import (
 	"os"
-	"path/filepath"
 )
 
 type plan9FileLock struct {
@@ -48,8 +47,7 @@ func rename(oldpath, newpath string) error {
 		}
 	}
 
-	_, fname := filepath.Split(newpath)
-	return os.Rename(oldpath, fname)
+	return os.Rename(oldpath, newpath)
 }
 
 func syncDir(name string) error {
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
index 9b0421f0..838f1bee 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -12,7 +12,11 @@ import (
 	"sync"
 )
 
-const typeShift = 3
+const typeShift = 4
+
+// Verify at compile-time that typeShift is large enough to cover all FileType
+// values: TypeAll >> typeShift must evaluate to 0 for the zero-length array
+// assignment below to compile.
+var _ [0]struct{} = [TypeAll >> typeShift]struct{}{}
 
 type memStorageLock struct {
 	ms *memStorage
@@ -143,7 +147,7 @@ func (ms *memStorage) Remove(fd FileDesc) error {
 }
 
 func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
-	if FileDescOk(oldfd) || FileDescOk(newfd) {
+	if !FileDescOk(oldfd) || !FileDescOk(newfd) {
 		return ErrInvalidFile
 	}
 	if oldfd == newfd {
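
A standalone sketch of the compile-time assertion idiom used in the typeShift change above; the constants stand in for TypeAll and typeShift:

    package main

    const typeAll = 15 // stand-in for storage.TypeAll
    const shift = 4    // stand-in for typeShift

    // Compiles only when typeAll>>shift == 0; otherwise the array lengths
    // differ and the build fails, which is the point of the check.
    var _ [0]struct{} = [typeAll >> shift]struct{}{}

    func main() {}
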
diff --git a/vendor/github.com/tendermint/abci/client/grpc_client.go b/vendor/github.com/tendermint/abci/client/grpc_client.go
index f277e1d7..0f405a9c 100644
--- a/vendor/github.com/tendermint/abci/client/grpc_client.go
+++ b/vendor/github.com/tendermint/abci/client/grpc_client.go
@@ -6,7 +6,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	context "golang.org/x/net/context"
 	grpc "google.golang.org/grpc"
 
@@ -105,7 +104,7 @@ func (cli *grpcClient) StopForError(err error) {
 func (cli *grpcClient) Error() error {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
-	return errors.Wrap(cli.err, "grpc client error")
+	return cli.err
 }
 
 // Set listener for all responses
diff --git a/vendor/github.com/tendermint/abci/client/socket_client.go b/vendor/github.com/tendermint/abci/client/socket_client.go
index ecdc3694..5c010168 100644
--- a/vendor/github.com/tendermint/abci/client/socket_client.go
+++ b/vendor/github.com/tendermint/abci/client/socket_client.go
@@ -3,13 +3,13 @@ package abcicli
 import (
 	"bufio"
 	"container/list"
+	"errors"
 	"fmt"
 	"net"
 	"reflect"
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/tendermint/abci/types"
 	cmn "github.com/tendermint/tmlibs/common"
 )
@@ -111,7 +111,7 @@ func (cli *socketClient) StopForError(err error) {
 func (cli *socketClient) Error() error {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
-	return errors.Wrap(cli.err, "socket client error")
+	return cli.err
 }
 
 // Set listener for all responses
@@ -135,7 +135,7 @@ func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
 			default:
 				// Probably will fill the buffer, or retry later.
 			}
-		case <-cli.BaseService.Quit:
+		case <-cli.Quit():
 			return
 		case reqres := <-cli.reqQueue:
 			cli.willSendReq(reqres)
diff --git a/vendor/github.com/tendermint/abci/example/code/code.go b/vendor/github.com/tendermint/abci/example/code/code.go
index b7e37d36..94e9d015 100644
--- a/vendor/github.com/tendermint/abci/example/code/code.go
+++ b/vendor/github.com/tendermint/abci/example/code/code.go
@@ -6,6 +6,4 @@ const (
 	CodeTypeEncodingError uint32 = 1
 	CodeTypeBadNonce      uint32 = 2
 	CodeTypeUnauthorized  uint32 = 3
-
-	CodeTypeBadOption uint32 = 101
 )
diff --git a/vendor/github.com/tendermint/abci/example/dummy/dummy.go b/vendor/github.com/tendermint/abci/example/dummy/dummy.go
deleted file mode 100644
index fdb4851c..00000000
--- a/vendor/github.com/tendermint/abci/example/dummy/dummy.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package dummy
-
-import (
-	"bytes"
-	"fmt"
-
-	"github.com/tendermint/abci/example/code"
-	"github.com/tendermint/abci/types"
-	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/iavl"
-	dbm "github.com/tendermint/tmlibs/db"
-)
-
-var _ types.Application = (*DummyApplication)(nil)
-
-type DummyApplication struct {
-	types.BaseApplication
-
-	state *iavl.VersionedTree
-}
-
-func NewDummyApplication() *DummyApplication {
-	state := iavl.NewVersionedTree(0, dbm.NewMemDB())
-	return &DummyApplication{state: state}
-}
-
-func (app *DummyApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
-	return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size())}
-}
-
-// tx is either "key=value" or just arbitrary bytes
-func (app *DummyApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
-	var key, value []byte
-	parts := bytes.Split(tx, []byte("="))
-	if len(parts) == 2 {
-		key, value = parts[0], parts[1]
-	} else {
-		key, value = tx, tx
-	}
-	app.state.Set(key, value)
-
-	tags := []*types.KVPair{
-		{Key: "app.creator", ValueType: types.KVPair_STRING, ValueString: "jae"},
-		{Key: "app.key", ValueType: types.KVPair_STRING, ValueString: string(key)},
-	}
-	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
-}
-
-func (app *DummyApplication) CheckTx(tx []byte) types.ResponseCheckTx {
-	return types.ResponseCheckTx{Code: code.CodeTypeOK}
-}
-
-func (app *DummyApplication) Commit() types.ResponseCommit {
-	// Save a new version
-	var hash []byte
-	var err error
-
-	if app.state.Size() > 0 {
-		// just add one more to height (kind of arbitrarily stupid)
-		height := app.state.LatestVersion() + 1
-		hash, err = app.state.SaveVersion(height)
-		if err != nil {
-			// if this wasn't a dummy app, we'd do something smarter
-			panic(err)
-		}
-	}
-
-	return types.ResponseCommit{Code: code.CodeTypeOK, Data: hash}
-}
-
-func (app *DummyApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
-	if reqQuery.Prove {
-		value, proof, err := app.state.GetWithProof(reqQuery.Data)
-		// if this wasn't a dummy app, we'd do something smarter
-		if err != nil {
-			panic(err)
-		}
-		resQuery.Index = -1 // TODO make Proof return index
-		resQuery.Key = reqQuery.Data
-		resQuery.Value = value
-		resQuery.Proof = wire.BinaryBytes(proof)
-		if value != nil {
-			resQuery.Log = "exists"
-		} else {
-			resQuery.Log = "does not exist"
-		}
-		return
-	} else {
-		index, value := app.state.Get(reqQuery.Data)
-		resQuery.Index = int64(index)
-		resQuery.Value = value
-		if value != nil {
-			resQuery.Log = "exists"
-		} else {
-			resQuery.Log = "does not exist"
-		}
-		return
-	}
-}
diff --git a/vendor/github.com/tendermint/abci/example/dummy/helpers.go b/vendor/github.com/tendermint/abci/example/kvstore/helpers.go
similarity index 56%
rename from vendor/github.com/tendermint/abci/example/dummy/helpers.go
rename to vendor/github.com/tendermint/abci/example/kvstore/helpers.go
index d6b4338c..c71e371a 100644
--- a/vendor/github.com/tendermint/abci/example/dummy/helpers.go
+++ b/vendor/github.com/tendermint/abci/example/kvstore/helpers.go
@@ -1,34 +1,36 @@
-package dummy
+package kvstore
 
 import (
 	"github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // RandVal creates one random validator, with a key derived
 // from the input value
-func RandVal(i int) *types.Validator {
-	pubkey := crypto.GenPrivKeyEd25519FromSecret([]byte(cmn.Fmt("test%d", i))).PubKey().Bytes()
+func RandVal(i int) types.Validator {
+	pubkey := cmn.RandBytes(33)
 	power := cmn.RandUint16() + 1
-	return &types.Validator{pubkey, int64(power)}
+	return types.Validator{pubkey, int64(power)}
 }
 
 // RandVals returns a list of cnt validators for initializing
 // the application. Note that the keys are deterministically
 // derived from the index in the array, while the power is
 // random (Change this if not desired)
-func RandVals(cnt int) []*types.Validator {
-	res := make([]*types.Validator, cnt)
+func RandVals(cnt int) []types.Validator {
+	res := make([]types.Validator, cnt)
 	for i := 0; i < cnt; i++ {
 		res[i] = RandVal(i)
 	}
 	return res
 }
 
-// InitDummy initializes the dummy app with some data,
+// InitKVStore initializes the kvstore app with some data,
 // which allows tests to pass and is fine as long as you
 // don't make any tx that modify the validator state
-func InitDummy(app *PersistentDummyApplication) {
-	app.InitChain(types.RequestInitChain{RandVals(1)})
+func InitKVStore(app *PersistentKVStoreApplication) {
+	app.InitChain(types.RequestInitChain{
+		Validators:    RandVals(1),
+		AppStateBytes: []byte("[]"),
+	})
 }
diff --git a/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go b/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
new file mode 100644
index 00000000..4ccbc56b
--- /dev/null
+++ b/vendor/github.com/tendermint/abci/example/kvstore/kvstore.go
@@ -0,0 +1,126 @@
+package kvstore
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+
+	"github.com/tendermint/abci/example/code"
+	"github.com/tendermint/abci/types"
+	cmn "github.com/tendermint/tmlibs/common"
+	dbm "github.com/tendermint/tmlibs/db"
+)
+
+var (
+	stateKey        = []byte("stateKey")
+	kvPairPrefixKey = []byte("kvPairKey:")
+)
+
+type State struct {
+	db      dbm.DB
+	Size    int64  `json:"size"`
+	Height  int64  `json:"height"`
+	AppHash []byte `json:"app_hash"`
+}
+
+func loadState(db dbm.DB) State {
+	stateBytes := db.Get(stateKey)
+	var state State
+	if len(stateBytes) != 0 {
+		err := json.Unmarshal(stateBytes, &state)
+		if err != nil {
+			panic(err)
+		}
+	}
+	state.db = db
+	return state
+}
+
+func saveState(state State) {
+	stateBytes, err := json.Marshal(state)
+	if err != nil {
+		panic(err)
+	}
+	state.db.Set(stateKey, stateBytes)
+}
+
+func prefixKey(key []byte) []byte {
+	return append(kvPairPrefixKey, key...)
+}
+
+//---------------------------------------------------
+
+var _ types.Application = (*KVStoreApplication)(nil)
+
+type KVStoreApplication struct {
+	types.BaseApplication
+
+	state State
+}
+
+func NewKVStoreApplication() *KVStoreApplication {
+	state := loadState(dbm.NewMemDB())
+	return &KVStoreApplication{state: state}
+}
+
+func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
+	return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size)}
+}
+
+// tx is either "key=value" or just arbitrary bytes
+func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+	var key, value []byte
+	parts := bytes.Split(tx, []byte("="))
+	if len(parts) == 2 {
+		key, value = parts[0], parts[1]
+	} else {
+		key, value = tx, tx
+	}
+	app.state.db.Set(prefixKey(key), value)
+	app.state.Size += 1
+
+	tags := []cmn.KVPair{
+		{[]byte("app.creator"), []byte("jae")},
+		{[]byte("app.key"), key},
+	}
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
+}
+
+func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
+	return types.ResponseCheckTx{Code: code.CodeTypeOK}
+}
+
+func (app *KVStoreApplication) Commit() types.ResponseCommit {
+	// Using a memdb - just return a varint encoding of the db size
+	appHash := make([]byte, 8)
+	binary.PutVarint(appHash, app.state.Size)
+	app.state.AppHash = appHash
+	app.state.Height += 1
+	saveState(app.state)
+	return types.ResponseCommit{Data: appHash}
+}
+
+func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+	if reqQuery.Prove {
+		value := app.state.db.Get(prefixKey(reqQuery.Data))
+		resQuery.Index = -1 // TODO make Proof return index
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		if value != nil {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	} else {
+		value := app.state.db.Get(prefixKey(reqQuery.Data))
+		resQuery.Value = value
+		if value != nil {
+			resQuery.Log = "exists"
+		} else {
+			resQuery.Log = "does not exist"
+		}
+		return
+	}
+}
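
A rough usage sketch for the new in-memory KVStore application (import paths match this vendor tree; the key/value pair is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/tendermint/abci/example/kvstore"
    	"github.com/tendermint/abci/types"
    )

    func main() {
    	app := kvstore.NewKVStoreApplication()

    	// "key=value" txs are split and stored under the kvPairKey: prefix.
    	app.DeliverTx([]byte("name=satoshi"))

    	// Commit encodes the db size as the app hash and bumps the height.
    	commit := app.Commit()
    	fmt.Printf("app hash: %X\n", commit.Data)

    	res := app.Query(types.RequestQuery{Data: []byte("name")})
    	fmt.Println(string(res.Value), res.Log) // satoshi exists
    }
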
diff --git a/vendor/github.com/tendermint/abci/example/dummy/persistent_dummy.go b/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
similarity index 57%
rename from vendor/github.com/tendermint/abci/example/dummy/persistent_dummy.go
rename to vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
index 4165b9f7..888258ff 100644
--- a/vendor/github.com/tendermint/abci/example/dummy/persistent_dummy.go
+++ b/vendor/github.com/tendermint/abci/example/kvstore/persistent_kvstore.go
@@ -1,4 +1,4 @@
-package dummy
+package kvstore
 
 import (
 	"bytes"
@@ -9,8 +9,6 @@ import (
 
 	"github.com/tendermint/abci/example/code"
 	"github.com/tendermint/abci/types"
-	crypto "github.com/tendermint/go-crypto"
-	"github.com/tendermint/iavl"
 	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
 	"github.com/tendermint/tmlibs/log"
@@ -22,51 +20,49 @@ const (
 
 //-----------------------------------------
 
-var _ types.Application = (*PersistentDummyApplication)(nil)
+var _ types.Application = (*PersistentKVStoreApplication)(nil)
 
-type PersistentDummyApplication struct {
-	app *DummyApplication
+type PersistentKVStoreApplication struct {
+	app *KVStoreApplication
 
 	// validator set
-	ValUpdates []*types.Validator
+	ValUpdates []types.Validator
 
 	logger log.Logger
 }
 
-func NewPersistentDummyApplication(dbDir string) *PersistentDummyApplication {
-	name := "dummy"
+func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
+	name := "kvstore"
 	db, err := dbm.NewGoLevelDB(name, dbDir)
 	if err != nil {
 		panic(err)
 	}
 
-	stateTree := iavl.NewVersionedTree(500, db)
-	stateTree.Load()
+	state := loadState(db)
 
-	return &PersistentDummyApplication{
-		app:    &DummyApplication{state: stateTree},
+	return &PersistentKVStoreApplication{
+		app:    &KVStoreApplication{state: state},
 		logger: log.NewNopLogger(),
 	}
 }
 
-func (app *PersistentDummyApplication) SetLogger(l log.Logger) {
+func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
 	app.logger = l
 }
 
-func (app *PersistentDummyApplication) Info(req types.RequestInfo) types.ResponseInfo {
+func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
 	res := app.app.Info(req)
-	var latestVersion uint64 = app.app.state.LatestVersion() // TODO: change to int64
-	res.LastBlockHeight = int64(latestVersion)
-	res.LastBlockAppHash = app.app.state.Hash()
+	res.LastBlockHeight = app.app.state.Height
+	res.LastBlockAppHash = app.app.state.AppHash
 	return res
 }
 
-func (app *PersistentDummyApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
+func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
 	return app.app.SetOption(req)
 }
 
 // tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
-func (app *PersistentDummyApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
 	// if it starts with "val:", update the validator set
 	// format is "val:pubkey/power"
 	if isValidatorTx(tx) {
@@ -79,34 +75,21 @@ func (app *PersistentDummyApplication) DeliverTx(tx []byte) types.ResponseDelive
 	return app.app.DeliverTx(tx)
 }
 
-func (app *PersistentDummyApplication) CheckTx(tx []byte) types.ResponseCheckTx {
+func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
 	return app.app.CheckTx(tx)
 }
 
 // Commit will panic if InitChain was not called
-func (app *PersistentDummyApplication) Commit() types.ResponseCommit {
-
-	// Save a new version for next height
-	height := app.app.state.LatestVersion() + 1
-	var appHash []byte
-	var err error
-
-	appHash, err = app.app.state.SaveVersion(height)
-	if err != nil {
-		// if this wasn't a dummy app, we'd do something smarter
-		panic(err)
-	}
-
-	app.logger.Info("Commit block", "height", height, "root", appHash)
-	return types.ResponseCommit{Code: code.CodeTypeOK, Data: appHash}
+func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
+	return app.app.Commit()
 }
 
-func (app *PersistentDummyApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
+func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
 	return app.app.Query(reqQuery)
 }
 
 // Save the validators in the merkle tree
-func (app *PersistentDummyApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
+func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
 	for _, v := range req.Validators {
 		r := app.updateValidator(v)
 		if r.IsErr() {
@@ -117,32 +100,32 @@ func (app *PersistentDummyApplication) InitChain(req types.RequestInitChain) typ
 }
 
 // Track the block hash and header information
-func (app *PersistentDummyApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	// reset valset changes
-	app.ValUpdates = make([]*types.Validator, 0)
+	app.ValUpdates = make([]types.Validator, 0)
 	return types.ResponseBeginBlock{}
 }
 
 // Update the validator set
-func (app *PersistentDummyApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
+func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
 	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
 }
 
 //---------------------------------------------
 // update validators
 
-func (app *PersistentDummyApplication) Validators() (validators []*types.Validator) {
-	app.app.state.Iterate(func(key, value []byte) bool {
-		if isValidatorTx(key) {
+func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) {
+	itr := app.app.state.db.Iterator(nil, nil)
+	for ; itr.Valid(); itr.Next() {
+		if isValidatorTx(itr.Key()) {
 			validator := new(types.Validator)
-			err := types.ReadMessage(bytes.NewBuffer(value), validator)
+			err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
 			if err != nil {
 				panic(err)
 			}
-			validators = append(validators, validator)
+			validators = append(validators, *validator)
 		}
-		return false
-	})
+	}
 	return
 }
 
@@ -155,7 +138,7 @@ func isValidatorTx(tx []byte) bool {
 }
 
 // format is "val:pubkey1/power1,addr2/power2,addr3/power3"tx
-func (app *PersistentDummyApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
 	tx = tx[len(ValidatorSetChangePrefix):]
 
 	//get the pubkey and power
@@ -174,12 +157,12 @@ func (app *PersistentDummyApplication) execValidatorTx(tx []byte) types.Response
 			Code: code.CodeTypeEncodingError,
 			Log:  fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
 	}
-	_, err = crypto.PubKeyFromBytes(pubkey)
+	/*_, err = crypto.PubKeyFromBytes(pubkey)
 	if err != nil {
 		return types.ResponseDeliverTx{
 			Code: code.CodeTypeEncodingError,
 			Log:  fmt.Sprintf("Pubkey (%X) is invalid go-crypto encoded", pubkey)}
-	}
+	}*/
 
 	// decode the power
 	power, err := strconv.ParseInt(powerS, 10, 64)
@@ -190,29 +173,29 @@ func (app *PersistentDummyApplication) execValidatorTx(tx []byte) types.Response
 	}
 
 	// update
-	return app.updateValidator(&types.Validator{pubkey, power})
+	return app.updateValidator(types.Validator{pubkey, power})
 }
 
 // add, update, or remove a validator
-func (app *PersistentDummyApplication) updateValidator(v *types.Validator) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx {
 	key := []byte("val:" + string(v.PubKey))
 	if v.Power == 0 {
 		// remove validator
-		if !app.app.state.Has(key) {
+		if !app.app.state.db.Has(key) {
 			return types.ResponseDeliverTx{
 				Code: code.CodeTypeUnauthorized,
 				Log:  fmt.Sprintf("Cannot remove non-existent validator %X", key)}
 		}
-		app.app.state.Remove(key)
+		app.app.state.db.Delete(key)
 	} else {
 		// add or update validator
 		value := bytes.NewBuffer(make([]byte, 0))
-		if err := types.WriteMessage(v, value); err != nil {
+		if err := types.WriteMessage(&v, value); err != nil {
 			return types.ResponseDeliverTx{
 				Code: code.CodeTypeEncodingError,
 				Log:  fmt.Sprintf("Error encoding validator: %v", err)}
 		}
-		app.app.state.Set(key, value.Bytes())
+		app.app.state.db.Set(key, value.Bytes())
 	}
 
 	// we only update the changes array if we successfully updated the tree
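
For the validator-change path above, a hedged sketch of submitting a "val:<hex pubkey>/<power>" tx to the persistent variant; the db directory and power are arbitrary:

    package main

    import (
    	"fmt"

    	"github.com/tendermint/abci/example/kvstore"
    	cmn "github.com/tendermint/tmlibs/common"
    )

    func main() {
    	app := kvstore.NewPersistentKVStoreApplication("/tmp/kvstore-demo")

    	// Matches RandVal above: 33 random bytes stand in for a pubkey.
    	pubKey := cmn.RandBytes(33)
    	tx := []byte(fmt.Sprintf("val:%X/%d", pubKey, 10))

    	res := app.DeliverTx(tx)
    	fmt.Println(res.Code, res.Log)
    	fmt.Println("validators:", len(app.Validators()))
    }
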
diff --git a/vendor/github.com/tendermint/abci/types/application.go b/vendor/github.com/tendermint/abci/types/application.go
index c8ea19c3..ef1bc92e 100644
--- a/vendor/github.com/tendermint/abci/types/application.go
+++ b/vendor/github.com/tendermint/abci/types/application.go
@@ -42,7 +42,7 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
 }
 
 func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
-	return ResponseSetOption{Code: CodeTypeOK}
+	return ResponseSetOption{}
 }
 
 func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
@@ -54,7 +54,7 @@ func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
 }
 
 func (BaseApplication) Commit() ResponseCommit {
-	return ResponseCommit{Code: CodeTypeOK}
+	return ResponseCommit{}
 }
 
 func (BaseApplication) Query(req RequestQuery) ResponseQuery {
diff --git a/vendor/github.com/tendermint/abci/types/messages.go b/vendor/github.com/tendermint/abci/types/messages.go
index 5ce234cf..52e4b675 100644
--- a/vendor/github.com/tendermint/abci/types/messages.go
+++ b/vendor/github.com/tendermint/abci/types/messages.go
@@ -1,33 +1,70 @@
 package types
 
 import (
+	"bufio"
+	"encoding/binary"
 	"io"
 
 	"github.com/gogo/protobuf/proto"
-	wire "github.com/tendermint/go-wire"
 )
 
-// WriteMessage writes a length-delimited protobuf message.
+const (
+	maxMsgSize = 104857600 // 100MB
+)
+
+// WriteMessage writes a varint length-delimited protobuf message.
 func WriteMessage(msg proto.Message, w io.Writer) error {
 	bz, err := proto.Marshal(msg)
 	if err != nil {
 		return err
 	}
-	var n int
-	wire.WriteByteSlice(bz, w, &n, &err)
-	return err
+	return encodeByteSlice(w, bz)
 }
 
-// ReadMessage reads a length delimited protobuf message.
+// ReadMessage reads a varint length-delimited protobuf message.
 func ReadMessage(r io.Reader, msg proto.Message) error {
-	var n int
-	var err error
-	bz := wire.ReadByteSlice(r, 0, &n, &err) //XXX: no max
+	return readProtoMsg(r, msg, maxMsgSize)
+}
+
+func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
+	// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
+	reader, ok := r.(*bufio.Reader)
+	if !ok {
+		reader = bufio.NewReader(r)
+	}
+	length64, err := binary.ReadVarint(reader)
 	if err != nil {
 		return err
 	}
-	err = proto.Unmarshal(bz, msg)
-	return err
+	length := int(length64)
+	if length < 0 || length > maxSize {
+		return io.ErrShortBuffer
+	}
+	buf := make([]byte, length)
+	if _, err := io.ReadFull(reader, buf); err != nil {
+		return err
+	}
+	return proto.Unmarshal(buf, msg)
+}
+
+//-----------------------------------------------------------------------
+// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keeping
+// go-wire as a dep
+
+func encodeByteSlice(w io.Writer, bz []byte) (err error) {
+	err = encodeVarint(w, int64(len(bz)))
+	if err != nil {
+		return
+	}
+	_, err = w.Write(bz)
+	return
+}
+
+func encodeVarint(w io.Writer, i int64) (err error) {
+	var buf [10]byte
+	n := binary.PutVarint(buf[:], i)
+	_, err = w.Write(buf[0:n])
+	return
 }
 
 //----------------------------------------
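
The varint length-delimited framing introduced above (encodeVarint + encodeByteSlice on write, binary.ReadVarint + io.ReadFull on read) round-trips like this; the sketch uses plain byte slices instead of protobuf messages to stay dependency-free:

    package main

    import (
    	"bufio"
    	"bytes"
    	"encoding/binary"
    	"fmt"
    	"io"
    )

    // writeFrame mirrors encodeByteSlice: varint length, then the payload.
    func writeFrame(w io.Writer, bz []byte) error {
    	var lenBuf [10]byte
    	n := binary.PutVarint(lenBuf[:], int64(len(bz)))
    	if _, err := w.Write(lenBuf[:n]); err != nil {
    		return err
    	}
    	_, err := w.Write(bz)
    	return err
    }

    // readFrame mirrors readProtoMsg minus the proto.Unmarshal step.
    func readFrame(r *bufio.Reader) ([]byte, error) {
    	length, err := binary.ReadVarint(r)
    	if err != nil {
    		return nil, err
    	}
    	buf := make([]byte, length)
    	_, err = io.ReadFull(r, buf)
    	return buf, err
    }

    func main() {
    	var b bytes.Buffer
    	if err := writeFrame(&b, []byte("abci message")); err != nil {
    		panic(err)
    	}
    	msg, err := readFrame(bufio.NewReader(&b))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(msg))
    }
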
diff --git a/vendor/github.com/tendermint/abci/types/result.go b/vendor/github.com/tendermint/abci/types/result.go
index d094b78d..dbf409f4 100644
--- a/vendor/github.com/tendermint/abci/types/result.go
+++ b/vendor/github.com/tendermint/abci/types/result.go
@@ -3,7 +3,6 @@ package types
 import (
 	"bytes"
 	"encoding/json"
-	"fmt"
 
 	"github.com/gogo/protobuf/jsonpb"
 )
@@ -22,11 +21,6 @@ func (r ResponseCheckTx) IsErr() bool {
 	return r.Code != CodeTypeOK
 }
 
-// Error implements error interface by formatting response as string.
-func (r ResponseCheckTx) Error() string {
-	return fmtError(r.Code, r.Log)
-}
-
 // IsOK returns true if Code is OK.
 func (r ResponseDeliverTx) IsOK() bool {
 	return r.Code == CodeTypeOK
@@ -37,26 +31,6 @@ func (r ResponseDeliverTx) IsErr() bool {
 	return r.Code != CodeTypeOK
 }
 
-// Error implements error interface by formatting response as string.
-func (r ResponseDeliverTx) Error() string {
-	return fmtError(r.Code, r.Log)
-}
-
-// IsOK returns true if Code is OK.
-func (r ResponseCommit) IsOK() bool {
-	return r.Code == CodeTypeOK
-}
-
-// IsErr returns true if Code is something other than OK.
-func (r ResponseCommit) IsErr() bool {
-	return r.Code != CodeTypeOK
-}
-
-// Error implements error interface by formatting response as string.
-func (r ResponseCommit) Error() string {
-	return fmtError(r.Code, r.Log)
-}
-
 // IsOK returns true if Code is OK.
 func (r ResponseQuery) IsOK() bool {
 	return r.Code == CodeTypeOK
@@ -67,15 +41,6 @@ func (r ResponseQuery) IsErr() bool {
 	return r.Code != CodeTypeOK
 }
 
-// Error implements error interface by formatting response as string.
-func (r ResponseQuery) Error() string {
-	return fmtError(r.Code, r.Log)
-}
-
-func fmtError(code uint32, log string) string {
-	return fmt.Sprintf("Error code (%d): %s", code, log)
-}
-
 //---------------------------------------------------------------------------
 // override JSON marshalling so we dont emit defaults (ie. disable omitempty)
 // note we need Unmarshal functions too because protobuf had the bright idea
@@ -84,7 +49,7 @@ func fmtError(code uint32, log string) string {
 var (
 	jsonpbMarshaller = jsonpb.Marshaler{
 		EnumsAsInts:  true,
-		EmitDefaults: true,
+		EmitDefaults: false,
 	}
 	jsonpbUnmarshaller = jsonpb.Unmarshaler{}
 )
diff --git a/vendor/github.com/tendermint/abci/types/types.pb.go b/vendor/github.com/tendermint/abci/types/types.pb.go
index 7ab18fe4..92cbe7d8 100644
--- a/vendor/github.com/tendermint/abci/types/types.pb.go
+++ b/vendor/github.com/tendermint/abci/types/types.pb.go
@@ -42,7 +42,6 @@ It has these top-level messages:
 	PartSetHeader
 	Validator
 	Evidence
-	KVPair
 */
 //nolint: gas
 package types
@@ -51,8 +50,7 @@ import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
-
-import github_com_tendermint_go_wire_data "github.com/tendermint/go-wire/data"
+import common "github.com/tendermint/tmlibs/common"
 
 import context "golang.org/x/net/context"
 import grpc "google.golang.org/grpc"
@@ -68,27 +66,6 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
-type KVPair_Type int32
-
-const (
-	KVPair_STRING KVPair_Type = 0
-	KVPair_INT    KVPair_Type = 1
-)
-
-var KVPair_Type_name = map[int32]string{
-	0: "STRING",
-	1: "INT",
-}
-var KVPair_Type_value = map[string]int32{
-	"STRING": 0,
-	"INT":    1,
-}
-
-func (x KVPair_Type) String() string {
-	return proto.EnumName(KVPair_Type_name, int32(x))
-}
-func (KVPair_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34, 0} }
-
 type Request struct {
 	// Types that are valid to be assigned to Value:
 	//	*Request_Echo
@@ -529,6 +506,7 @@ func (m *RequestInfo) GetVersion() string {
 	return ""
 }
 
+// nondeterministic
 type RequestSetOption struct {
 	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
 	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@@ -554,7 +532,8 @@ func (m *RequestSetOption) GetValue() string {
 }
 
 type RequestInitChain struct {
-	Validators []*Validator `protobuf:"bytes,1,rep,name=validators" json:"validators,omitempty"`
+	Validators    []Validator `protobuf:"bytes,1,rep,name=validators" json:"validators"`
+	AppStateBytes []byte      `protobuf:"bytes,2,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"`
 }
 
 func (m *RequestInitChain) Reset()                    { *m = RequestInitChain{} }
@@ -562,13 +541,20 @@ func (m *RequestInitChain) String() string            { return proto.CompactText
 func (*RequestInitChain) ProtoMessage()               {}
 func (*RequestInitChain) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} }
 
-func (m *RequestInitChain) GetValidators() []*Validator {
+func (m *RequestInitChain) GetValidators() []Validator {
 	if m != nil {
 		return m.Validators
 	}
 	return nil
 }
 
+func (m *RequestInitChain) GetAppStateBytes() []byte {
+	if m != nil {
+		return m.AppStateBytes
+	}
+	return nil
+}
+
 type RequestQuery struct {
 	Data   []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
 	Path   string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
@@ -610,10 +596,10 @@ func (m *RequestQuery) GetProve() bool {
 }
 
 type RequestBeginBlock struct {
-	Hash                []byte      `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
-	Header              *Header     `protobuf:"bytes,2,opt,name=header" json:"header,omitempty"`
-	AbsentValidators    []int32     `protobuf:"varint,3,rep,packed,name=absent_validators,json=absentValidators" json:"absent_validators,omitempty"`
-	ByzantineValidators []*Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators" json:"byzantine_validators,omitempty"`
+	Hash                []byte     `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+	Header              Header     `protobuf:"bytes,2,opt,name=header" json:"header"`
+	AbsentValidators    []int32    `protobuf:"varint,3,rep,packed,name=absent_validators,json=absentValidators" json:"absent_validators,omitempty"`
+	ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators" json:"byzantine_validators"`
 }
 
 func (m *RequestBeginBlock) Reset()                    { *m = RequestBeginBlock{} }
@@ -628,11 +614,11 @@ func (m *RequestBeginBlock) GetHash() []byte {
 	return nil
 }
 
-func (m *RequestBeginBlock) GetHeader() *Header {
+func (m *RequestBeginBlock) GetHeader() Header {
 	if m != nil {
 		return m.Header
 	}
-	return nil
+	return Header{}
 }
 
 func (m *RequestBeginBlock) GetAbsentValidators() []int32 {
@@ -642,7 +628,7 @@ func (m *RequestBeginBlock) GetAbsentValidators() []int32 {
 	return nil
 }
 
-func (m *RequestBeginBlock) GetByzantineValidators() []*Evidence {
+func (m *RequestBeginBlock) GetByzantineValidators() []Evidence {
 	if m != nil {
 		return m.ByzantineValidators
 	}
@@ -1136,6 +1122,7 @@ func _Response_OneofSizer(msg proto.Message) (n int) {
 	return n
 }
 
+// nondeterministic
 type ResponseException struct {
 	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
 }
@@ -1216,9 +1203,12 @@ func (m *ResponseInfo) GetLastBlockAppHash() []byte {
 	return nil
 }
 
+// nondeterministic
 type ResponseSetOption struct {
 	Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
-	Log  string `protobuf:"bytes,2,opt,name=log,proto3" json:"log,omitempty"`
+	// bytes data = 2;
+	Log  string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
 }
 
 func (m *ResponseSetOption) Reset()                    { *m = ResponseSetOption{} }
@@ -1240,6 +1230,13 @@ func (m *ResponseSetOption) GetLog() string {
 	return ""
 }
 
+func (m *ResponseSetOption) GetInfo() string {
+	if m != nil {
+		return m.Info
+	}
+	return ""
+}
+
 type ResponseInitChain struct {
 }
 
@@ -1249,13 +1246,15 @@ func (*ResponseInitChain) ProtoMessage()               {}
 func (*ResponseInitChain) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} }
 
 type ResponseQuery struct {
-	Code   uint32                                   `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
-	Index  int64                                    `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"`
-	Key    github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,3,opt,name=key,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"key"`
-	Value  github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,4,opt,name=value,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"value"`
-	Proof  github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,5,opt,name=proof,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"proof"`
-	Height int64                                    `protobuf:"varint,6,opt,name=height,proto3" json:"height,omitempty"`
-	Log    string                                   `protobuf:"bytes,7,opt,name=log,proto3" json:"log,omitempty"`
+	Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// bytes data = 2; // use "value" instead.
+	Log    string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	Info   string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
+	Index  int64  `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"`
+	Key    []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"`
+	Value  []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"`
+	Proof  []byte `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"`
+	Height int64  `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"`
 }
 
 func (m *ResponseQuery) Reset()                    { *m = ResponseQuery{} }
@@ -1270,6 +1269,20 @@ func (m *ResponseQuery) GetCode() uint32 {
 	return 0
 }
 
+func (m *ResponseQuery) GetLog() string {
+	if m != nil {
+		return m.Log
+	}
+	return ""
+}
+
+func (m *ResponseQuery) GetInfo() string {
+	if m != nil {
+		return m.Info
+	}
+	return ""
+}
+
 func (m *ResponseQuery) GetIndex() int64 {
 	if m != nil {
 		return m.Index
@@ -1277,18 +1290,32 @@ func (m *ResponseQuery) GetIndex() int64 {
 	return 0
 }
 
-func (m *ResponseQuery) GetHeight() int64 {
+func (m *ResponseQuery) GetKey() []byte {
 	if m != nil {
-		return m.Height
+		return m.Key
 	}
-	return 0
+	return nil
 }
 
-func (m *ResponseQuery) GetLog() string {
+func (m *ResponseQuery) GetValue() []byte {
 	if m != nil {
-		return m.Log
+		return m.Value
 	}
-	return ""
+	return nil
+}
+
+func (m *ResponseQuery) GetProof() []byte {
+	if m != nil {
+		return m.Proof
+	}
+	return nil
+}
+
+func (m *ResponseQuery) GetHeight() int64 {
+	if m != nil {
+		return m.Height
+	}
+	return 0
 }
 
 type ResponseBeginBlock struct {
@@ -1300,11 +1327,14 @@ func (*ResponseBeginBlock) ProtoMessage()               {}
 func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} }
 
 type ResponseCheckTx struct {
-	Code uint32                                   `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
-	Data github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,2,opt,name=data,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"data"`
-	Log  string                                   `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
-	Gas  int64                                    `protobuf:"varint,4,opt,name=gas,proto3" json:"gas,omitempty"`
-	Fee  int64                                    `protobuf:"varint,5,opt,name=fee,proto3" json:"fee,omitempty"`
+	Code      uint32          `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	Data      []byte          `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	Log       string          `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	Info      string          `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
+	GasWanted int64           `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"`
+	GasUsed   int64           `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
+	Tags      []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"`
+	Fee       common.KI64Pair `protobuf:"bytes,8,opt,name=fee" json:"fee"`
 }
 
 func (m *ResponseCheckTx) Reset()                    { *m = ResponseCheckTx{} }
@@ -1319,6 +1349,13 @@ func (m *ResponseCheckTx) GetCode() uint32 {
 	return 0
 }
 
+func (m *ResponseCheckTx) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
 func (m *ResponseCheckTx) GetLog() string {
 	if m != nil {
 		return m.Log
@@ -1326,25 +1363,50 @@ func (m *ResponseCheckTx) GetLog() string {
 	return ""
 }
 
-func (m *ResponseCheckTx) GetGas() int64 {
+func (m *ResponseCheckTx) GetInfo() string {
 	if m != nil {
-		return m.Gas
+		return m.Info
+	}
+	return ""
+}
+
+func (m *ResponseCheckTx) GetGasWanted() int64 {
+	if m != nil {
+		return m.GasWanted
 	}
 	return 0
 }
 
-func (m *ResponseCheckTx) GetFee() int64 {
+func (m *ResponseCheckTx) GetGasUsed() int64 {
 	if m != nil {
-		return m.Fee
+		return m.GasUsed
 	}
 	return 0
 }
 
+func (m *ResponseCheckTx) GetTags() []common.KVPair {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
+func (m *ResponseCheckTx) GetFee() common.KI64Pair {
+	if m != nil {
+		return m.Fee
+	}
+	return common.KI64Pair{}
+}
+
 type ResponseDeliverTx struct {
-	Code uint32                                   `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
-	Data github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,2,opt,name=data,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"data"`
-	Log  string                                   `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
-	Tags []*KVPair                                `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
+	Code      uint32          `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	Data      []byte          `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	Log       string          `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	Info      string          `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
+	GasWanted int64           `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"`
+	GasUsed   int64           `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
+	Tags      []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"`
+	Fee       common.KI64Pair `protobuf:"bytes,8,opt,name=fee" json:"fee"`
 }
 
 func (m *ResponseDeliverTx) Reset()                    { *m = ResponseDeliverTx{} }
@@ -1359,6 +1421,13 @@ func (m *ResponseDeliverTx) GetCode() uint32 {
 	return 0
 }
 
+func (m *ResponseDeliverTx) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
 func (m *ResponseDeliverTx) GetLog() string {
 	if m != nil {
 		return m.Log
@@ -1366,15 +1435,43 @@ func (m *ResponseDeliverTx) GetLog() string {
 	return ""
 }
 
-func (m *ResponseDeliverTx) GetTags() []*KVPair {
+func (m *ResponseDeliverTx) GetInfo() string {
+	if m != nil {
+		return m.Info
+	}
+	return ""
+}
+
+func (m *ResponseDeliverTx) GetGasWanted() int64 {
+	if m != nil {
+		return m.GasWanted
+	}
+	return 0
+}
+
+func (m *ResponseDeliverTx) GetGasUsed() int64 {
+	if m != nil {
+		return m.GasUsed
+	}
+	return 0
+}
+
+func (m *ResponseDeliverTx) GetTags() []common.KVPair {
 	if m != nil {
 		return m.Tags
 	}
 	return nil
 }
 
+func (m *ResponseDeliverTx) GetFee() common.KI64Pair {
+	if m != nil {
+		return m.Fee
+	}
+	return common.KI64Pair{}
+}
+
 type ResponseEndBlock struct {
-	ValidatorUpdates      []*Validator     `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates,omitempty"`
+	ValidatorUpdates      []Validator      `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"`
 	ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"`
 }
 
@@ -1383,7 +1480,7 @@ func (m *ResponseEndBlock) String() string            { return proto.CompactText
 func (*ResponseEndBlock) ProtoMessage()               {}
 func (*ResponseEndBlock) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
 
-func (m *ResponseEndBlock) GetValidatorUpdates() []*Validator {
+func (m *ResponseEndBlock) GetValidatorUpdates() []Validator {
 	if m != nil {
 		return m.ValidatorUpdates
 	}
@@ -1398,9 +1495,8 @@ func (m *ResponseEndBlock) GetConsensusParamUpdates() *ConsensusParams {
 }
 
 type ResponseCommit struct {
-	Code uint32                                   `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
-	Data github_com_tendermint_go_wire_data.Bytes `protobuf:"bytes,2,opt,name=data,proto3,customtype=github.com/tendermint/go-wire/data.Bytes" json:"data"`
-	Log  string                                   `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"`
+	// reserve 1
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
 }
 
 func (m *ResponseCommit) Reset()                    { *m = ResponseCommit{} }
@@ -1408,18 +1504,11 @@ func (m *ResponseCommit) String() string            { return proto.CompactTextSt
 func (*ResponseCommit) ProtoMessage()               {}
 func (*ResponseCommit) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
 
-func (m *ResponseCommit) GetCode() uint32 {
+func (m *ResponseCommit) GetData() []byte {
 	if m != nil {
-		return m.Code
-	}
-	return 0
-}
-
-func (m *ResponseCommit) GetLog() string {
-	if m != nil {
-		return m.Log
+		return m.Data
 	}
-	return ""
+	return nil
 }
 
 // ConsensusParams contains all consensus-relevant parameters
@@ -1534,15 +1623,15 @@ func (m *BlockGossip) GetBlockPartSizeBytes() int32 {
 }
 
 type Header struct {
-	ChainID        string   `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
-	Height         int64    `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
-	Time           int64    `protobuf:"varint,3,opt,name=time,proto3" json:"time,omitempty"`
-	NumTxs         int32    `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"`
-	LastBlockID    *BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId" json:"last_block_id,omitempty"`
-	LastCommitHash []byte   `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"`
-	DataHash       []byte   `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"`
-	ValidatorsHash []byte   `protobuf:"bytes,8,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"`
-	AppHash        []byte   `protobuf:"bytes,9,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"`
+	ChainID        string  `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+	Height         int64   `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
+	Time           int64   `protobuf:"varint,3,opt,name=time,proto3" json:"time,omitempty"`
+	NumTxs         int32   `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"`
+	LastBlockID    BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"`
+	LastCommitHash []byte  `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"`
+	DataHash       []byte  `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"`
+	ValidatorsHash []byte  `protobuf:"bytes,8,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"`
+	AppHash        []byte  `protobuf:"bytes,9,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"`
 }
 
 func (m *Header) Reset()                    { *m = Header{} }
@@ -1578,11 +1667,11 @@ func (m *Header) GetNumTxs() int32 {
 	return 0
 }
 
-func (m *Header) GetLastBlockID() *BlockID {
+func (m *Header) GetLastBlockID() BlockID {
 	if m != nil {
 		return m.LastBlockID
 	}
-	return nil
+	return BlockID{}
 }
 
 func (m *Header) GetLastCommitHash() []byte {
@@ -1614,8 +1703,8 @@ func (m *Header) GetAppHash() []byte {
 }
 
 type BlockID struct {
-	Hash  []byte         `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
-	Parts *PartSetHeader `protobuf:"bytes,2,opt,name=parts" json:"parts,omitempty"`
+	Hash  []byte        `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+	Parts PartSetHeader `protobuf:"bytes,2,opt,name=parts" json:"parts"`
 }
 
 func (m *BlockID) Reset()                    { *m = BlockID{} }
@@ -1630,11 +1719,11 @@ func (m *BlockID) GetHash() []byte {
 	return nil
 }
 
-func (m *BlockID) GetParts() *PartSetHeader {
+func (m *BlockID) GetParts() PartSetHeader {
 	if m != nil {
 		return m.Parts
 	}
-	return nil
+	return PartSetHeader{}
 }
 
 type PartSetHeader struct {
@@ -1709,46 +1798,6 @@ func (m *Evidence) GetHeight() int64 {
 	return 0
 }
 
-type KVPair struct {
-	Key         string      `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	ValueType   KVPair_Type `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=types.KVPair_Type" json:"value_type,omitempty"`
-	ValueString string      `protobuf:"bytes,3,opt,name=value_string,json=valueString,proto3" json:"value_string,omitempty"`
-	ValueInt    int64       `protobuf:"varint,4,opt,name=value_int,json=valueInt,proto3" json:"value_int,omitempty"`
-}
-
-func (m *KVPair) Reset()                    { *m = KVPair{} }
-func (m *KVPair) String() string            { return proto.CompactTextString(m) }
-func (*KVPair) ProtoMessage()               {}
-func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} }
-
-func (m *KVPair) GetKey() string {
-	if m != nil {
-		return m.Key
-	}
-	return ""
-}
-
-func (m *KVPair) GetValueType() KVPair_Type {
-	if m != nil {
-		return m.ValueType
-	}
-	return KVPair_STRING
-}
-
-func (m *KVPair) GetValueString() string {
-	if m != nil {
-		return m.ValueString
-	}
-	return ""
-}
-
-func (m *KVPair) GetValueInt() int64 {
-	if m != nil {
-		return m.ValueInt
-	}
-	return 0
-}
-
 func init() {
 	proto.RegisterType((*Request)(nil), "types.Request")
 	proto.RegisterType((*RequestEcho)(nil), "types.RequestEcho")
@@ -1784,8 +1833,6 @@ func init() {
 	proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader")
 	proto.RegisterType((*Validator)(nil), "types.Validator")
 	proto.RegisterType((*Evidence)(nil), "types.Evidence")
-	proto.RegisterType((*KVPair)(nil), "types.KVPair")
-	proto.RegisterEnum("types.KVPair_Type", KVPair_Type_name, KVPair_Type_value)
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -2193,116 +2240,115 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{
 func init() { proto.RegisterFile("types/types.proto", fileDescriptorTypes) }
 
 var fileDescriptorTypes = []byte{
-	// 1766 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x72, 0x1b, 0xc7,
-	0x11, 0x26, 0xfe, 0xb1, 0x0d, 0xfe, 0x80, 0x43, 0x4a, 0x82, 0xa1, 0x83, 0xe8, 0xad, 0x8a, 0x0d,
-	0xd9, 0x16, 0x69, 0xd3, 0xa5, 0x94, 0x68, 0x27, 0xae, 0x08, 0xa4, 0x2c, 0xa2, 0x9c, 0x52, 0x94,
-	0x15, 0xe3, 0x43, 0x2e, 0xa8, 0x01, 0x76, 0x08, 0x6c, 0x09, 0xd8, 0x5d, 0xef, 0x0e, 0x68, 0x50,
-	0x95, 0x47, 0xf0, 0x3d, 0xe7, 0xe4, 0x98, 0x17, 0xc8, 0x31, 0xa7, 0xa4, 0xf2, 0x0c, 0x39, 0xe8,
-	0xe0, 0x27, 0x49, 0x75, 0xcf, 0xec, 0x2f, 0x76, 0x53, 0x29, 0x1d, 0x74, 0x01, 0x66, 0xa6, 0x7f,
-	0xb6, 0xbb, 0xa7, 0xe7, 0xeb, 0x9e, 0x81, 0x7d, 0x79, 0xeb, 0x8b, 0xf0, 0x84, 0x7e, 0x8f, 0xfd,
-	0xc0, 0x93, 0x1e, 0x6b, 0xd0, 0xa4, 0xff, 0x68, 0xe6, 0xc8, 0xf9, 0x6a, 0x72, 0x3c, 0xf5, 0x96,
-	0x27, 0x33, 0x6f, 0xe6, 0x9d, 0x10, 0x75, 0xb2, 0xba, 0xa6, 0x19, 0x4d, 0x68, 0xa4, 0xa4, 0xcc,
-	0x7f, 0xd5, 0xa1, 0x65, 0x89, 0x1f, 0x56, 0x22, 0x94, 0x6c, 0x00, 0x75, 0x31, 0x9d, 0x7b, 0xbd,
-	0xea, 0x51, 0x65, 0xd0, 0x39, 0x65, 0xc7, 0x4a, 0xbb, 0xa6, 0x3e, 0x9b, 0xce, 0xbd, 0xcb, 0x2d,
-	0x8b, 0x38, 0xd8, 0xa7, 0xd0, 0xb8, 0x5e, 0xac, 0xc2, 0x79, 0xaf, 0x46, 0xac, 0x07, 0x59, 0xd6,
-	0x6f, 0x91, 0x74, 0xb9, 0x65, 0x29, 0x1e, 0x54, 0xeb, 0xb8, 0xd7, 0x5e, 0xaf, 0x5e, 0xa4, 0x76,
-	0xe4, 0x5e, 0x93, 0x5a, 0xe4, 0x60, 0x4f, 0x00, 0x42, 0x21, 0xc7, 0x9e, 0x2f, 0x1d, 0xcf, 0xed,
-	0x35, 0x88, 0xff, 0x5e, 0x96, 0xff, 0x95, 0x90, 0xbf, 0x23, 0xf2, 0xe5, 0x96, 0x65, 0x84, 0xd1,
-	0x04, 0x25, 0x1d, 0xd7, 0x91, 0xe3, 0xe9, 0x9c, 0x3b, 0x6e, 0xaf, 0x59, 0x24, 0x39, 0x72, 0x1d,
-	0x79, 0x8e, 0x64, 0x94, 0x74, 0xa2, 0x09, 0xba, 0xf2, 0xc3, 0x4a, 0x04, 0xb7, 0xbd, 0x56, 0x91,
-	0x2b, 0xbf, 0x47, 0x12, 0xba, 0x42, 0x3c, 0xec, 0x6b, 0xe8, 0x4c, 0xc4, 0xcc, 0x71, 0xc7, 0x93,
-	0x85, 0x37, 0x7d, 0xdd, 0x6b, 0x93, 0x48, 0x2f, 0x2b, 0x32, 0x44, 0x86, 0x21, 0xd2, 0x2f, 0xb7,
-	0x2c, 0x98, 0xc4, 0x33, 0x76, 0x0a, 0xed, 0xe9, 0x5c, 0x4c, 0x5f, 0x8f, 0xe5, 0xba, 0x67, 0x90,
-	0xe4, 0x9d, 0xac, 0xe4, 0x39, 0x52, 0xaf, 0xd6, 0x97, 0x5b, 0x56, 0x6b, 0xaa, 0x86, 0xe8, 0x97,
-	0x2d, 0x16, 0xce, 0x8d, 0x08, 0x50, 0xea, 0xa0, 0xc8, 0xaf, 0x0b, 0x45, 0x27, 0x39, 0xc3, 0x8e,
-	0x26, 0xec, 0x31, 0x18, 0xc2, 0xb5, 0xb5, 0xa1, 0x1d, 0x12, 0xbc, 0x9b, 0xdb, 0x51, 0xd7, 0x8e,
-	0xcc, 0x6c, 0x0b, 0x3d, 0x66, 0xc7, 0xd0, 0x9c, 0x7a, 0xcb, 0xa5, 0x23, 0x7b, 0xdb, 0x24, 0x73,
-	0x98, 0x33, 0x91, 0x68, 0x97, 0x5b, 0x96, 0xe6, 0x1a, 0xb6, 0xa0, 0x71, 0xc3, 0x17, 0x2b, 0x61,
-	0x7e, 0x0c, 0x9d, 0x54, 0xa6, 0xb0, 0x1e, 0xb4, 0x96, 0x22, 0x0c, 0xf9, 0x4c, 0xf4, 0x2a, 0x47,
-	0x95, 0x81, 0x61, 0x45, 0x53, 0x73, 0x17, 0xb6, 0xd3, 0x79, 0x92, 0x12, 0xc4, 0x5c, 0x40, 0xc1,
-	0x1b, 0x11, 0x84, 0x98, 0x00, 0x5a, 0x50, 0x4f, 0xcd, 0xaf, 0xa0, 0x9b, 0x4f, 0x02, 0xd6, 0x85,
-	0xda, 0x6b, 0x71, 0xab, 0x39, 0x71, 0xc8, 0x0e, 0xb5, 0x41, 0x94, 0xc5, 0x86, 0xa5, 0xad, 0xbb,
-	0x88, 0x65, 0xe3, 0x34, 0x60, 0x9f, 0x03, 0xdc, 0xf0, 0x85, 0x63, 0x73, 0xe9, 0x05, 0x61, 0xaf,
-	0x72, 0x54, 0x1b, 0x74, 0x4e, 0xbb, 0xda, 0xdd, 0xef, 0x23, 0x82, 0x95, 0xe2, 0x31, 0xed, 0xd8,
-	0x74, 0xca, 0x0b, 0xc6, 0xa0, 0x6e, 0x73, 0xc9, 0xe9, 0xf3, 0xdb, 0x16, 0x8d, 0x71, 0xcd, 0xe7,
-	0x72, 0xae, 0x3f, 0x4f, 0x63, 0x76, 0x17, 0x9a, 0x73, 0xe1, 0xcc, 0xe6, 0x92, 0xce, 0x4b, 0xcd,
-	0xd2, 0x33, 0xb4, 0xd5, 0x0f, 0xbc, 0x1b, 0x41, 0x47, 0xa3, 0x6d, 0xa9, 0x89, 0xf9, 0x8f, 0x0a,
-	0xec, 0x6f, 0xe4, 0x12, 0xea, 0x9d, 0xf3, 0x70, 0x1e, 0x7d, 0x0b, 0xc7, 0xec, 0x17, 0xa8, 0x97,
-	0xdb, 0x22, 0xd0, 0x47, 0x76, 0x47, 0x5b, 0x7f, 0x49, 0x8b, 0x96, 0x26, 0xb2, 0x4f, 0x61, 0x9f,
-	0x4f, 0x42, 0xe1, 0xca, 0x71, 0xca, 0xdf, 0xda, 0x51, 0x6d, 0xd0, 0xb0, 0xba, 0x8a, 0x10, 0xbb,
-	0x1b, 0xb2, 0x21, 0x1c, 0x4e, 0x6e, 0xdf, 0x70, 0x57, 0x3a, 0xae, 0x48, 0xf3, 0xd7, 0x29, 0x3e,
-	0x7b, 0xfa, 0x0b, 0xcf, 0x6e, 0x1c, 0x5b, 0xb8, 0x53, 0x61, 0x1d, 0xc4, 0xcc, 0x89, 0x0e, 0xf3,
-	0x08, 0x76, 0xb3, 0x29, 0xcd, 0x76, 0xa1, 0x2a, 0xd7, 0xda, 0xf6, 0xaa, 0x5c, 0x9b, 0x66, 0xbc,
-	0x1f, 0x71, 0xfa, 0x6e, 0xf0, 0x3c, 0x84, 0xbd, 0x5c, 0xa6, 0xa6, 0x02, 0x59, 0x49, 0x07, 0xd2,
-	0xdc, 0x83, 0x9d, 0x4c, 0x82, 0x9a, 0x3f, 0x35, 0xa0, 0x6d, 0x89, 0xd0, 0xf7, 0xdc, 0x50, 0xb0,
-	0x27, 0x60, 0x88, 0xf5, 0x54, 0x28, 0x54, 0xa9, 0xe4, 0xce, 0xac, 0xe2, 0x79, 0x16, 0xd1, 0xf1,
-	0x10, 0xc5, 0xcc, 0xec, 0x61, 0x06, 0x11, 0x0f, 0xf2, 0x42, 0x69, 0x48, 0xfc, 0x2c, 0x0b, 0x89,
-	0x87, 0x39, 0xde, 0x1c, 0x26, 0x3e, 0xcc, 0x60, 0x62, 0x5e, 0x71, 0x06, 0x14, 0xcf, 0x0a, 0x40,
-	0x31, 0x6f, 0x7e, 0x09, 0x2a, 0x9e, 0x15, 0xa0, 0x62, 0x6f, 0xe3, 0x5b, 0x85, 0xb0, 0xf8, 0x59,
-	0x16, 0x16, 0xf3, 0xee, 0xe4, 0x70, 0xf1, 0x57, 0x45, 0xb8, 0xf8, 0x41, 0x4e, 0xa6, 0x14, 0x18,
-	0xbf, 0xdc, 0x00, 0xc6, 0xbb, 0x39, 0xd1, 0x02, 0x64, 0x3c, 0xcb, 0x20, 0x23, 0x14, 0xfa, 0x56,
-	0x02, 0x8d, 0xbf, 0xdc, 0x84, 0xc6, 0x7b, 0xf9, 0xad, 0x2d, 0xc2, 0xc6, 0x93, 0x1c, 0x36, 0xde,
-	0xc9, 0x5b, 0x59, 0x0a, 0x8e, 0x0f, 0xf1, 0x44, 0xe7, 0x32, 0x0d, 0x4f, 0xbf, 0x08, 0x02, 0x2f,
-	0xd0, 0xe8, 0xa5, 0x26, 0xe6, 0x00, 0x31, 0x26, 0xc9, 0xaf, 0xff, 0x01, 0xa4, 0x94, 0xf4, 0xa9,
-	0xec, 0x32, 0xff, 0x5c, 0x49, 0x64, 0x09, 0x4b, 0xd3, 0xf8, 0x64, 0x68, 0x7c, 0x4a, 0xe1, 0x6b,
-	0x35, 0x83, 0xaf, 0xec, 0x13, 0xd8, 0x5f, 0xf0, 0x50, 0xaa, 0xb8, 0x8c, 0x33, 0x80, 0xb5, 0x87,
-	0x04, 0x15, 0x10, 0x85, 0x5c, 0x8f, 0xe0, 0x20, 0xc5, 0xcb, 0x7d, 0x7f, 0x4c, 0xe0, 0x54, 0xa7,
-	0xc3, 0xdb, 0x8d, 0xb9, 0x9f, 0xfa, 0xfe, 0x25, 0x0f, 0xe7, 0xe6, 0x59, 0xe2, 0x7f, 0x82, 0xdd,
-	0x0c, 0xea, 0x53, 0xcf, 0x56, 0x6e, 0xed, 0x58, 0x34, 0x46, 0x3c, 0x5f, 0x78, 0x33, 0x6d, 0x19,
-	0x0e, 0xcd, 0x83, 0x44, 0x34, 0x4e, 0x55, 0xf3, 0xef, 0xd5, 0xc4, 0xf7, 0x18, 0x8a, 0x37, 0x94,
-	0x1d, 0x42, 0xc3, 0x71, 0x6d, 0xb1, 0x26, 0x75, 0x35, 0x4b, 0x4d, 0xd8, 0x50, 0x95, 0x0c, 0x74,
-	0x6c, 0x7b, 0xf8, 0xf9, 0xbf, 0xdf, 0x3e, 0xd8, 0xfa, 0xcf, 0xdb, 0x07, 0x83, 0x54, 0xd7, 0x24,
-	0x85, 0x6b, 0x8b, 0x60, 0xe9, 0xb8, 0xf2, 0x64, 0xe6, 0x3d, 0xfa, 0xd1, 0x09, 0xc4, 0x09, 0x46,
-	0xee, 0x78, 0x78, 0x2b, 0x45, 0xa8, 0x8a, 0xcc, 0xb7, 0x51, 0x91, 0xa9, 0xbf, 0xa3, 0x16, 0x25,
-	0x8e, 0x7a, 0xfc, 0xc0, 0xf3, 0xae, 0xe9, 0x58, 0xbf, 0x93, 0x1e, 0x12, 0x4f, 0xe1, 0x62, 0x33,
-	0x53, 0x60, 0x74, 0x38, 0x5b, 0x49, 0x38, 0x0f, 0x81, 0x6d, 0x9e, 0x47, 0xf3, 0x2f, 0x15, 0xc4,
-	0xda, 0xcc, 0x59, 0x2b, 0x8c, 0xe8, 0x85, 0x4e, 0xa8, 0xea, 0x3b, 0x9a, 0xab, 0x52, 0x50, 0x5b,
-	0x55, 0x8b, 0xad, 0xc2, 0x95, 0x19, 0x0f, 0x29, 0x9a, 0x35, 0x0b, 0x87, 0xb8, 0x72, 0x2d, 0x04,
-	0xc5, 0xa5, 0x66, 0xe1, 0xd0, 0xfc, 0x6b, 0x25, 0xc9, 0x84, 0xa4, 0x68, 0xbc, 0x4f, 0x2b, 0x3f,
-	0x84, 0xba, 0xe4, 0xb3, 0xa8, 0x14, 0x46, 0xc5, 0xf6, 0xbb, 0xef, 0x5f, 0x72, 0x27, 0xb0, 0x88,
-	0x84, 0x81, 0xec, 0xe6, 0x31, 0x84, 0xfd, 0x1a, 0xf6, 0xe3, 0x42, 0x3a, 0x5e, 0xf9, 0x36, 0x97,
-	0xa2, 0xbc, 0xdf, 0xe8, 0xc6, 0xac, 0x7f, 0x50, 0x9c, 0xec, 0x05, 0xdc, 0x9b, 0xa2, 0x3e, 0x37,
-	0x5c, 0x85, 0x63, 0x9f, 0x07, 0x7c, 0x19, 0x2b, 0xa9, 0x66, 0xd0, 0xf2, 0x3c, 0xe2, 0x7a, 0x89,
-	0x4c, 0xa1, 0x75, 0x67, 0x9a, 0x59, 0xd0, 0xfa, 0xcc, 0x3f, 0x61, 0x75, 0x4e, 0x23, 0xd6, 0xfb,
-	0x0c, 0x22, 0xa5, 0x5a, 0xce, 0x50, 0x76, 0x02, 0xa0, 0x80, 0x24, 0x74, 0xde, 0x08, 0x5d, 0xa1,
-	0xa3, 0xc8, 0x50, 0x08, 0x5f, 0x39, 0x6f, 0x84, 0x65, 0x4c, 0xa2, 0x21, 0xfb, 0x08, 0x5a, 0x72,
-	0xad, 0xb8, 0xb3, 0x9d, 0xcf, 0xd5, 0x9a, 0x58, 0x9b, 0x92, 0xfe, 0xd9, 0x63, 0xd8, 0x56, 0x8a,
-	0x67, 0x5e, 0x18, 0x3a, 0xbe, 0xae, 0xcd, 0x2c, 0xad, 0xfa, 0x39, 0x51, 0xac, 0xce, 0x24, 0x99,
-	0x98, 0x7f, 0x04, 0x23, 0xfe, 0x2c, 0xbb, 0x0f, 0xc6, 0x92, 0xaf, 0xc7, 0x93, 0x5b, 0xb5, 0x6b,
-	0x95, 0x41, 0xc3, 0x6a, 0x2f, 0xf9, 0x9a, 0xbc, 0x64, 0xf7, 0xa0, 0x85, 0x44, 0xb9, 0x56, 0x7b,
-	0xd1, 0xb0, 0x9a, 0x4b, 0xbe, 0xbe, 0x5a, 0xc7, 0x04, 0xcc, 0x6a, 0xdd, 0xf3, 0x2d, 0xf9, 0xfa,
-	0x39, 0x0f, 0xcd, 0x6f, 0xa0, 0xa9, 0x8c, 0xfc, 0xbf, 0x14, 0xa3, 0x7c, 0x35, 0x23, 0xff, 0x1b,
-	0xe8, 0xa4, 0xec, 0x66, 0x5f, 0xc0, 0x1d, 0xe5, 0xa1, 0xcf, 0x03, 0x49, 0x11, 0xc9, 0x28, 0x64,
-	0x44, 0x7c, 0xc9, 0x03, 0x89, 0x9f, 0x24, 0xd5, 0xe6, 0x3f, 0xab, 0xd0, 0x54, 0x1d, 0x22, 0xfb,
-	0x08, 0x2b, 0x2f, 0x77, 0xdc, 0xb1, 0x63, 0xab, 0x22, 0x31, 0xec, 0xfc, 0xfc, 0xf6, 0x41, 0x8b,
-	0x40, 0x76, 0x74, 0x81, 0xc5, 0x16, 0x07, 0x76, 0x0a, 0x5f, 0xaa, 0x19, 0x7c, 0x61, 0x50, 0x97,
-	0xce, 0x52, 0x68, 0x17, 0x69, 0x8c, 0x96, 0xbb, 0xab, 0x25, 0x85, 0xa4, 0xae, 0x42, 0xe2, 0xae,
-	0x96, 0x18, 0x92, 0x73, 0xd8, 0x49, 0xd5, 0x0c, 0xc7, 0xd6, 0xbd, 0xcc, 0x6e, 0x7a, 0x37, 0x46,
-	0x17, 0xc3, 0xbd, 0x9f, 0xdf, 0x3e, 0xe8, 0xfc, 0x36, 0xaa, 0x20, 0xa3, 0x0b, 0xab, 0x13, 0x97,
-	0x93, 0x91, 0xcd, 0x06, 0x40, 0xd5, 0x65, 0xac, 0x2a, 0xac, 0xaa, 0x3a, 0x4d, 0xaa, 0x3a, 0xbb,
-	0xb8, 0xae, 0x4b, 0x30, 0x36, 0xc7, 0xf7, 0xc1, 0xc0, 0x14, 0x54, 0x2c, 0x2d, 0x62, 0x69, 0xe3,
-	0x02, 0x11, 0x3f, 0x86, 0xbd, 0xa4, 0xb7, 0x55, 0x2c, 0x6d, 0xa5, 0x25, 0x59, 0x26, 0xc6, 0x0f,
-	0xa0, 0x1d, 0x57, 0x37, 0x83, 0x38, 0x5a, 0x5c, 0x17, 0xb5, 0x11, 0xb4, 0xb4, 0x89, 0x85, 0xcd,
-	0xf9, 0x27, 0xd0, 0xc0, 0x3d, 0x89, 0x0e, 0x69, 0xd4, 0x41, 0xd1, 0x5e, 0x08, 0xa9, 0x5b, 0x74,
-	0xc5, 0x62, 0x9e, 0xc1, 0x4e, 0x66, 0x1d, 0x4b, 0x97, 0xf4, 0x24, 0x5f, 0xe8, 0x6d, 0x54, 0x93,
-	0xf8, 0x33, 0xd5, 0xe4, 0x33, 0xe6, 0x57, 0x60, 0xc4, 0xe0, 0x81, 0xb1, 0xf7, 0x57, 0x93, 0x71,
-	0x74, 0x25, 0xda, 0xb6, 0x9a, 0xfe, 0x6a, 0xf2, 0x9d, 0xba, 0x15, 0xf9, 0xde, 0x8f, 0xfa, 0xa2,
-	0x50, 0xb3, 0xd4, 0xc4, 0xfc, 0x1a, 0xda, 0x51, 0x23, 0x5f, 0x2e, 0x5a, 0xb2, 0xf7, 0xe6, 0xdf,
-	0x2a, 0xd0, 0x54, 0xd8, 0x57, 0x70, 0x0b, 0xfb, 0x82, 0xee, 0x56, 0x2b, 0x31, 0x46, 0xa7, 0x49,
-	0x70, 0x37, 0x3e, 0x76, 0x4a, 0xe8, 0xf8, 0xea, 0xd6, 0x17, 0x96, 0x41, 0x5c, 0x38, 0x64, 0x1f,
-	0xc2, 0xb6, 0x12, 0x09, 0x65, 0xe0, 0xb8, 0x11, 0x66, 0x74, 0x68, 0xed, 0x15, 0x2d, 0xe1, 0x96,
-	0x2a, 0x16, 0xc7, 0x95, 0xba, 0x58, 0xb4, 0x69, 0x61, 0xe4, 0x4a, 0xf3, 0x3e, 0xd4, 0x49, 0x0f,
-	0x40, 0xf3, 0xd5, 0x95, 0x35, 0x7a, 0xf1, 0xbc, 0xbb, 0xc5, 0x5a, 0x50, 0x1b, 0xbd, 0xb8, 0xea,
-	0x56, 0x4e, 0x7f, 0x6a, 0xc0, 0xde, 0xd3, 0xe1, 0xf9, 0xe8, 0xa9, 0xef, 0x2f, 0x9c, 0x29, 0xa7,
-	0xfe, 0xe3, 0x04, 0xea, 0xd4, 0x61, 0x15, 0x3c, 0x74, 0xf4, 0x8b, 0x5a, 0x7d, 0x76, 0x0a, 0x0d,
-	0x6a, 0xb4, 0x58, 0xd1, 0x7b, 0x47, 0xbf, 0xb0, 0xe3, 0xc7, 0x8f, 0xa8, 0x56, 0x6c, 0xf3, 0xd9,
-	0xa3, 0x5f, 0xd4, 0xf6, 0xb3, 0x6f, 0xc0, 0x48, 0x5a, 0xa4, 0xb2, 0xc7, 0x8f, 0x7e, 0xe9, 0x05,
-	0x00, 0xe5, 0x93, 0xea, 0x58, 0xf6, 0x54, 0xd0, 0x2f, 0xed, 0x94, 0xd9, 0x13, 0x68, 0x45, 0x1d,
-	0x40, 0xf1, 0xf3, 0x44, 0xbf, 0xa4, 0x39, 0xc7, 0xf0, 0xa8, 0x5e, 0xac, 0xe8, 0x0d, 0xa5, 0x5f,
-	0x78, 0x83, 0x60, 0x8f, 0xa1, 0xa9, 0x6b, 0x50, 0xe1, 0x43, 0x43, 0xbf, 0xb8, 0xc5, 0x46, 0x27,
-	0x93, 0x7b, 0x7c, 0xd9, 0x3b, 0x4f, 0xbf, 0xf4, 0xaa, 0xc3, 0x9e, 0x02, 0xa4, 0xae, 0xd6, 0xa5,
-	0x0f, 0x38, 0xfd, 0xf2, 0x2b, 0x0c, 0xc3, 0xb3, 0x13, 0x5f, 0x4b, 0x8b, 0x1f, 0x56, 0xfa, 0x65,
-	0xb7, 0x8a, 0x49, 0x93, 0x1e, 0xdf, 0xbe, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xb9,
-	0xeb, 0x12, 0xc7, 0x13, 0x00, 0x00,
+	// 1757 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0xcd, 0x6e, 0x1b, 0xc9,
+	0x11, 0x16, 0xff, 0x39, 0x45, 0x89, 0x94, 0x5a, 0xb2, 0x4d, 0x73, 0x11, 0x58, 0x18, 0x04, 0x5e,
+	0x3a, 0xf6, 0x8a, 0x89, 0x36, 0x36, 0x6c, 0x6f, 0xb0, 0x88, 0x29, 0x39, 0x26, 0xb1, 0x49, 0xd6,
+	0x19, 0x7b, 0x1d, 0x20, 0x17, 0xa2, 0xc9, 0x69, 0x91, 0x03, 0x73, 0x7e, 0x76, 0xba, 0xa9, 0xa5,
+	0x7c, 0xcb, 0x7d, 0xef, 0x39, 0xe7, 0x94, 0x27, 0xc8, 0x2b, 0x04, 0x09, 0xf2, 0x0e, 0x3a, 0xec,
+	0x31, 0x2f, 0x91, 0xa0, 0xba, 0x7b, 0x7e, 0x35, 0xb3, 0x58, 0xe4, 0xba, 0x17, 0xb2, 0xab, 0xeb,
+	0xab, 0xee, 0xae, 0xee, 0xea, 0xaf, 0x6a, 0x1a, 0x0e, 0xc4, 0x55, 0xc0, 0xf8, 0x48, 0xfe, 0x9e,
+	0x04, 0xa1, 0x2f, 0x7c, 0xd2, 0x90, 0xc2, 0xe0, 0x93, 0xa5, 0x23, 0x56, 0x9b, 0xf9, 0xc9, 0xc2,
+	0x77, 0x47, 0x4b, 0x7f, 0xe9, 0x8f, 0xa4, 0x76, 0xbe, 0xb9, 0x90, 0x92, 0x14, 0x64, 0x4b, 0x59,
+	0x0d, 0x46, 0x29, 0xb8, 0x60, 0x9e, 0xcd, 0x42, 0xd7, 0xf1, 0xc4, 0x48, 0xb8, 0x6b, 0x67, 0xce,
+	0x47, 0x0b, 0xdf, 0x75, 0x7d, 0x2f, 0x3d, 0x8d, 0xf9, 0x8f, 0x3a, 0xb4, 0x2c, 0xf6, 0xf5, 0x86,
+	0x71, 0x41, 0x86, 0x50, 0x67, 0x8b, 0x95, 0xdf, 0xaf, 0x1e, 0x57, 0x86, 0x9d, 0x53, 0x72, 0xa2,
+	0x70, 0x5a, 0xfb, 0x72, 0xb1, 0xf2, 0x27, 0x3b, 0x96, 0x44, 0x90, 0x87, 0xd0, 0xb8, 0x58, 0x6f,
+	0xf8, 0xaa, 0x5f, 0x93, 0xd0, 0xc3, 0x2c, 0xf4, 0x37, 0xa8, 0x9a, 0xec, 0x58, 0x0a, 0x83, 0xc3,
+	0x3a, 0xde, 0x85, 0xdf, 0xaf, 0x17, 0x0d, 0x3b, 0xf5, 0x2e, 0xe4, 0xb0, 0x88, 0x20, 0x4f, 0x01,
+	0x38, 0x13, 0x33, 0x3f, 0x10, 0x8e, 0xef, 0xf5, 0x1b, 0x12, 0x7f, 0x27, 0x8b, 0x7f, 0xc3, 0xc4,
+	0x97, 0x52, 0x3d, 0xd9, 0xb1, 0x0c, 0x1e, 0x09, 0x68, 0xe9, 0x78, 0x8e, 0x98, 0x2d, 0x56, 0xd4,
+	0xf1, 0xfa, 0xcd, 0x22, 0xcb, 0xa9, 0xe7, 0x88, 0x33, 0x54, 0xa3, 0xa5, 0x13, 0x09, 0xe8, 0xca,
+	0xd7, 0x1b, 0x16, 0x5e, 0xf5, 0x5b, 0x45, 0xae, 0xfc, 0x01, 0x55, 0xe8, 0x8a, 0xc4, 0x90, 0xcf,
+	0xa0, 0x33, 0x67, 0x4b, 0xc7, 0x9b, 0xcd, 0xd7, 0xfe, 0xe2, 0x7d, 0xbf, 0x2d, 0x4d, 0xfa, 0x59,
+	0x93, 0x31, 0x02, 0xc6, 0xa8, 0x9f, 0xec, 0x58, 0x30, 0x8f, 0x25, 0x72, 0x0a, 0xed, 0xc5, 0x8a,
+	0x2d, 0xde, 0xcf, 0xc4, 0xb6, 0x6f, 0x48, 0xcb, 0x5b, 0x59, 0xcb, 0x33, 0xd4, 0xbe, 0xdd, 0x4e,
+	0x76, 0xac, 0xd6, 0x42, 0x35, 0xd1, 0x2f, 0x9b, 0xad, 0x9d, 0x4b, 0x16, 0xa2, 0xd5, 0x61, 0x91,
+	0x5f, 0xe7, 0x4a, 0x2f, 0xed, 0x0c, 0x3b, 0x12, 0xc8, 0x63, 0x30, 0x98, 0x67, 0xeb, 0x85, 0x76,
+	0xa4, 0xe1, 0xed, 0xdc, 0x89, 0x7a, 0x76, 0xb4, 0xcc, 0x36, 0xd3, 0x6d, 0x72, 0x02, 0x4d, 0x8c,
+	0x12, 0x47, 0xf4, 0x77, 0xa5, 0xcd, 0x51, 0x6e, 0x89, 0x52, 0x37, 0xd9, 0xb1, 0x34, 0x6a, 0xdc,
+	0x82, 0xc6, 0x25, 0x5d, 0x6f, 0x98, 0xf9, 0x31, 0x74, 0x52, 0x91, 0x42, 0xfa, 0xd0, 0x72, 0x19,
+	0xe7, 0x74, 0xc9, 0xfa, 0x95, 0xe3, 0xca, 0xd0, 0xb0, 0x22, 0xd1, 0xec, 0xc2, 0x6e, 0x3a, 0x4e,
+	0x52, 0x86, 0x18, 0x0b, 0x68, 0x78, 0xc9, 0x42, 0x8e, 0x01, 0xa0, 0x0d, 0xb5, 0x68, 0x3e, 0x87,
+	0xfd, 0x7c, 0x10, 0x90, 0x7d, 0xa8, 0xbd, 0x67, 0x57, 0x1a, 0x89, 0x4d, 0x72, 0xa4, 0x17, 0x24,
+	0xa3, 0xd8, 0xb0, 0xf4, 0xea, 0xc2, 0xd8, 0x36, 0x0e, 0x03, 0xf2, 0x04, 0xe0, 0x92, 0xae, 0x1d,
+	0x9b, 0x0a, 0x3f, 0xe4, 0xfd, 0xca, 0x71, 0x6d, 0xd8, 0x39, 0xdd, 0xd7, 0xee, 0xbe, 0x8b, 0x14,
+	0xe3, 0xfa, 0x3f, 0xaf, 0xef, 0xed, 0x58, 0x29, 0x24, 0xb9, 0x0f, 0x3d, 0x1a, 0x04, 0x33, 0x2e,
+	0xa8, 0x60, 0xb3, 0xf9, 0x95, 0x60, 0x5c, 0xce, 0xb5, 0x6b, 0xed, 0xd1, 0x20, 0x78, 0x83, 0xbd,
+	0x63, 0xec, 0x34, 0xed, 0xd8, 0x51, 0x19, 0x45, 0x84, 0x40, 0xdd, 0xa6, 0x82, 0xca, 0xc5, 0xee,
+	0x5a, 0xb2, 0x8d, 0x7d, 0x01, 0x15, 0x2b, 0xbd, 0x58, 0xd9, 0x26, 0xb7, 0xa1, 0xb9, 0x62, 0xce,
+	0x72, 0x25, 0xe4, 0xed, 0xaa, 0x59, 0x5a, 0x42, 0xcf, 0x82, 0xd0, 0xbf, 0x64, 0xf2, 0x22, 0xb5,
+	0x2d, 0x25, 0x98, 0xff, 0xae, 0xc0, 0xc1, 0x8d, 0xc8, 0xc3, 0x71, 0x57, 0x94, 0xaf, 0xa2, 0xb9,
+	0xb0, 0x4d, 0x1e, 0xe2, 0xb8, 0xd4, 0x66, 0xa1, 0xbe, 0xe0, 0x7b, 0xda, 0xd7, 0x89, 0xec, 0xd4,
+	0x8e, 0x6a, 0x08, 0x79, 0x08, 0x07, 0x74, 0xce, 0x99, 0x27, 0x66, 0xa9, 0x3d, 0xaa, 0x1d, 0xd7,
+	0x86, 0x0d, 0x6b, 0x5f, 0x29, 0xde, 0x25, 0x3b, 0x32, 0x81, 0xa3, 0xf9, 0xd5, 0x07, 0xea, 0x09,
+	0xc7, 0x63, 0x69, 0x7c, 0x5d, 0xee, 0x69, 0x4f, 0xcf, 0xf3, 0xf2, 0xd2, 0xb1, 0x99, 0xb7, 0x60,
+	0x7a, 0xa6, 0xc3, 0xd8, 0x24, 0x19, 0xc9, 0x3c, 0x86, 0x6e, 0xf6, 0x32, 0x90, 0x2e, 0x54, 0xc5,
+	0x56, 0xfb, 0x51, 0x15, 0x5b, 0xd3, 0x8c, 0x4f, 0x32, 0x0e, 0xfc, 0x1b, 0x98, 0x07, 0xd0, 0xcb,
+	0xc5, 0x78, 0x6a, 0x53, 0x2b, 0xe9, 0x4d, 0x35, 0x7b, 0xb0, 0x97, 0x09, 0x6d, 0xf3, 0xdb, 0x06,
+	0xb4, 0x2d, 0xc6, 0x03, 0xdf, 0xe3, 0x8c, 0x3c, 0x05, 0x83, 0x6d, 0x17, 0x4c, 0xf1, 0x51, 0x25,
+	0x77, 0xdb, 0x15, 0xe6, 0x65, 0xa4, 0xc7, 0xeb, 0x17, 0x83, 0xc9, 0x83, 0x0c, 0x97, 0x1e, 0xe6,
+	0x8d, 0xd2, 0x64, 0xfa, 0x28, 0x4b, 0xa6, 0x47, 0x39, 0x6c, 0x8e, 0x4d, 0x1f, 0x64, 0xd8, 0x34,
+	0x3f, 0x70, 0x86, 0x4e, 0x9f, 0x15, 0xd0, 0x69, 0x7e, 0xf9, 0x25, 0x7c, 0xfa, 0xac, 0x80, 0x4f,
+	0xfb, 0x37, 0xe6, 0x2a, 0x24, 0xd4, 0x47, 0x59, 0x42, 0xcd, 0xbb, 0x93, 0x63, 0xd4, 0x5f, 0x15,
+	0x31, 0xea, 0xdd, 0x9c, 0x4d, 0x29, 0xa5, 0x7e, 0x7a, 0x83, 0x52, 0x6f, 0xe7, 0x4c, 0x0b, 0x38,
+	0xf5, 0x59, 0x86, 0x53, 0xa1, 0xd0, 0xb7, 0x12, 0x52, 0x7d, 0x72, 0x93, 0x54, 0xef, 0xe4, 0x8f,
+	0xb6, 0x88, 0x55, 0x47, 0x39, 0x56, 0xbd, 0x95, 0x5f, 0x65, 0x29, 0xad, 0x3e, 0xc0, 0xdb, 0x9d,
+	0x8b, 0x34, 0x64, 0x02, 0x16, 0x86, 0x7e, 0xa8, 0x79, 0x4f, 0x09, 0xe6, 0x10, 0xf9, 0x26, 0x89,
+	0xaf, 0xef, 0xa1, 0x60, 0x19, 0xf4, 0xa9, 0xe8, 0x32, 0xff, 0x52, 0x49, 0x6c, 0x25, 0x0b, 0xa7,
+	0xb9, 0xca, 0xd0, 0x5c, 0x95, 0x62, 0xe6, 0x6a, 0x86, 0x99, 0xc9, 0xcf, 0xe0, 0x60, 0x4d, 0xb9,
+	0x50, 0xfb, 0x32, 0xcb, 0x90, 0x57, 0x0f, 0x15, 0x6a, 0x43, 0x14, 0x8b, 0x7d, 0x02, 0x87, 0x29,
+	0x2c, 0x12, 0xa9, 0x24, 0xaa, 0xba, 0xbc, 0xbc, 0xfb, 0x31, 0xfa, 0x45, 0x10, 0x4c, 0x28, 0x5f,
+	0x99, 0xbf, 0x4b, 0xfc, 0x4f, 0x58, 0x9f, 0x40, 0x7d, 0xe1, 0xdb, 0xca, 0xad, 0x3d, 0x4b, 0xb6,
+	0x31, 0x13, 0xac, 0xfd, 0xa5, 0x9c, 0xd5, 0xb0, 0xb0, 0x89, 0xa8, 0xf8, 0xa6, 0x18, 0xea, 0x4a,
+	0x98, 0x87, 0xc9, 0x70, 0x71, 0xf8, 0x9a, 0x7f, 0xaf, 0x24, 0xfb, 0x11, 0x53, 0xf5, 0xff, 0x37,
+	0x01, 0x1e, 0x8d, 0xe3, 0xd9, 0x6c, 0x2b, 0xaf, 0x5b, 0xcd, 0x52, 0x42, 0x94, 0xa6, 0x9a, 0xd2,
+	0xc9, 0x6c, 0x9a, 0x6a, 0xc9, 0x3e, 0x25, 0x68, 0x8a, 0xf7, 0x2f, 0xe4, 0x3d, 0xd8, 0xb5, 0x94,
+	0x90, 0xe2, 0x2e, 0x23, 0xc3, 0x5d, 0x47, 0x40, 0x6e, 0xde, 0x10, 0xf3, 0xbf, 0x15, 0x64, 0xbf,
+	0x4c, 0xf4, 0x17, 0xfa, 0x13, 0x1d, 0x71, 0x35, 0x95, 0x8e, 0x7e, 0x98, 0x8f, 0x3f, 0x01, 0x58,
+	0x52, 0x3e, 0xfb, 0x86, 0x7a, 0x82, 0xd9, 0xda, 0x51, 0x63, 0x49, 0xf9, 0x1f, 0x65, 0x07, 0xb9,
+	0x0b, 0x6d, 0x54, 0x6f, 0x38, 0xb3, 0xa5, 0xc7, 0x35, 0xab, 0xb5, 0xa4, 0xfc, 0x2b, 0xce, 0x6c,
+	0xf2, 0x1c, 0xea, 0x82, 0x2e, 0x79, 0xbf, 0x25, 0x13, 0x43, 0xf7, 0x44, 0x15, 0xa4, 0x27, 0x5f,
+	0xbc, 0x7b, 0x4d, 0x9d, 0x70, 0x7c, 0x1b, 0xf3, 0xc2, 0x7f, 0xae, 0xef, 0x75, 0x11, 0xf3, 0xc8,
+	0x77, 0x1d, 0xc1, 0xdc, 0x40, 0x5c, 0x59, 0xd2, 0x86, 0x0c, 0xa1, 0x76, 0xc1, 0x98, 0x66, 0x88,
+	0xfd, 0xd8, 0x74, 0xfa, 0xe4, 0x97, 0xd2, 0x58, 0x25, 0x15, 0x84, 0x98, 0x7f, 0xae, 0x26, 0xa7,
+	0x9c, 0x24, 0x89, 0x1f, 0xd7, 0x1e, 0xfc, 0xad, 0x82, 0x79, 0x32, 0x4b, 0x49, 0xe4, 0x0c, 0x0e,
+	0xe2, 0xec, 0x3c, 0xdb, 0x04, 0x36, 0xc5, 0xda, 0xe5, 0xfb, 0x0b, 0x9f, 0xfd, 0xd8, 0xe0, 0x2b,
+	0x85, 0x27, 0xbf, 0x87, 0x3b, 0x0b, 0x1c, 0xd5, 0xe3, 0x1b, 0x3e, 0x0b, 0x68, 0x48, 0xdd, 0x78,
+	0xa8, 0x6a, 0x86, 0x82, 0xcf, 0x22, 0xd4, 0x6b, 0x04, 0x71, 0xeb, 0xd6, 0x22, 0xd3, 0xa1, 0xc7,
+	0x33, 0x7f, 0x8a, 0x29, 0x3f, 0x4d, 0x83, 0x45, 0xa7, 0x62, 0xfe, 0xb5, 0x02, 0xbd, 0xdc, 0x80,
+	0x64, 0x04, 0xa0, 0x58, 0x84, 0x3b, 0x1f, 0x98, 0x4e, 0xcf, 0x91, 0x1f, 0xd2, 0xe1, 0x37, 0xce,
+	0x07, 0x66, 0x19, 0xf3, 0xa8, 0x49, 0xee, 0x43, 0x4b, 0x6c, 0x15, 0x3a, 0x5b, 0x02, 0xbd, 0xdd,
+	0x4a, 0x68, 0x53, 0xc8, 0x7f, 0xf2, 0x18, 0x76, 0xd5, 0xc0, 0x4b, 0x9f, 0x73, 0x27, 0xd0, 0x89,
+	0x99, 0xa4, 0x87, 0x7e, 0x25, 0x35, 0x56, 0x67, 0x9e, 0x08, 0xe6, 0x9f, 0xc0, 0x88, 0xa7, 0x25,
+	0x1f, 0x81, 0xe1, 0xd2, 0xad, 0xae, 0x0f, 0x71, 0x6d, 0x0d, 0xab, 0xed, 0xd2, 0xad, 0x2c, 0x0d,
+	0xc9, 0x1d, 0x68, 0xa1, 0x52, 0x6c, 0xd5, 0x9e, 0x35, 0xac, 0xa6, 0x4b, 0xb7, 0x6f, 0xb7, 0xb1,
+	0x62, 0x49, 0x79, 0x54, 0xfc, 0xb9, 0x74, 0xfb, 0x8a, 0x72, 0xf3, 0x73, 0x68, 0xaa, 0x45, 0xfe,
+	0xa0, 0x81, 0xd1, 0xbe, 0x9a, 0xb1, 0xff, 0x35, 0x74, 0x52, 0xeb, 0x26, 0xbf, 0x80, 0x5b, 0xca,
+	0xc3, 0x80, 0x86, 0x42, 0xee, 0x48, 0x66, 0x40, 0x22, 0x95, 0xaf, 0x69, 0x28, 0x70, 0x4a, 0x55,
+	0xce, 0xfe, 0xab, 0x0a, 0x4d, 0x55, 0x2a, 0x92, 0xfb, 0x98, 0x76, 0xa9, 0xe3, 0xcd, 0x1c, 0x5b,
+	0x65, 0x88, 0x71, 0xe7, 0xbb, 0xeb, 0x7b, 0x2d, 0xc9, 0xa6, 0xd3, 0x73, 0xcc, 0xb4, 0xd8, 0xb0,
+	0x53, 0xc4, 0x55, 0xcd, 0x54, 0xb2, 0x04, 0xea, 0xc2, 0x71, 0x99, 0x76, 0x51, 0xb6, 0x71, 0xe5,
+	0xde, 0xc6, 0x95, 0x5b, 0x52, 0x57, 0x5b, 0xe2, 0x6d, 0x5c, 0xdc, 0x92, 0x57, 0xb0, 0x97, 0x4a,
+	0x18, 0x8e, 0xad, 0x0b, 0x99, 0x6e, 0xfa, 0x34, 0xa6, 0xe7, 0xe3, 0x43, 0x0c, 0xd7, 0xef, 0xae,
+	0xef, 0x75, 0x7e, 0x1b, 0xa5, 0x90, 0xe9, 0xb9, 0xd5, 0x89, 0xf3, 0xc9, 0xd4, 0x26, 0x43, 0x90,
+	0xe9, 0x65, 0xa6, 0x52, 0xac, 0x4a, 0x3b, 0x8a, 0x91, 0xbb, 0xd8, 0xaf, 0x73, 0x30, 0x56, 0xca,
+	0x1f, 0x81, 0x81, 0x41, 0xa7, 0x20, 0x8a, 0xa0, 0xdb, 0xd8, 0x21, 0x95, 0x1f, 0x43, 0x2f, 0x29,
+	0x71, 0x15, 0x44, 0xb1, 0x75, 0x37, 0xe9, 0x96, 0xc0, 0xbb, 0xd0, 0x8e, 0xd3, 0x9b, 0x21, 0x11,
+	0x2d, 0xaa, 0xb3, 0xda, 0x97, 0xd0, 0xd2, 0x4b, 0x2c, 0xac, 0xd4, 0x7f, 0x0e, 0x0d, 0x3c, 0x97,
+	0xe8, 0x42, 0x45, 0x25, 0x94, 0x3c, 0x0f, 0x26, 0x32, 0xf5, 0xba, 0x02, 0x9a, 0xcf, 0x60, 0x2f,
+	0xa3, 0xc5, 0x4c, 0x22, 0x7c, 0x41, 0xd7, 0xfa, 0x40, 0x95, 0x10, 0x4f, 0x56, 0x4d, 0x26, 0x33,
+	0x9f, 0x83, 0x11, 0x5f, 0x7a, 0x3c, 0x85, 0x60, 0x33, 0x9f, 0x45, 0xdf, 0x54, 0xbb, 0x56, 0x33,
+	0xd8, 0xcc, 0xbf, 0x50, 0xf9, 0x2a, 0xf0, 0xbf, 0xd1, 0xdf, 0x0e, 0x35, 0x4b, 0x09, 0xe6, 0x67,
+	0xd0, 0x8e, 0xaa, 0xfa, 0x72, 0xd3, 0x92, 0x28, 0x38, 0xfd, 0xb6, 0x01, 0xbd, 0x17, 0xe3, 0xb3,
+	0xe9, 0x8b, 0x20, 0x58, 0x3b, 0x0b, 0x2a, 0x33, 0xfb, 0x08, 0xea, 0xb2, 0x76, 0x29, 0x78, 0x7c,
+	0x18, 0x14, 0x15, 0xd1, 0xe4, 0x14, 0x1a, 0xb2, 0x84, 0x21, 0x45, 0x6f, 0x10, 0x83, 0xc2, 0x5a,
+	0x1a, 0x27, 0x51, 0x45, 0xce, 0xcd, 0xa7, 0x88, 0x41, 0x51, 0x41, 0x4d, 0x3e, 0x07, 0x23, 0x29,
+	0x3e, 0xca, 0x1e, 0x24, 0x06, 0xa5, 0xa5, 0x35, 0xda, 0x27, 0x79, 0xa8, 0xec, 0xf3, 0x7d, 0x50,
+	0x5a, 0x83, 0x92, 0xa7, 0xd0, 0x8a, 0x32, 0x79, 0xf1, 0x93, 0xc1, 0xa0, 0xa4, 0xec, 0xc5, 0xed,
+	0x51, 0x15, 0x4d, 0xd1, 0xbb, 0xc6, 0xa0, 0xb0, 0x36, 0x27, 0x8f, 0xa1, 0xa9, 0x89, 0xb8, 0xf0,
+	0xe3, 0x7f, 0x50, 0x5c, 0xbc, 0xa2, 0x93, 0xc9, 0xb7, 0x75, 0xd9, 0xdb, 0xcb, 0xa0, 0xf4, 0x23,
+	0x82, 0xbc, 0x00, 0x48, 0x7d, 0xc0, 0x96, 0x3e, 0xaa, 0x0c, 0xca, 0x3f, 0x0e, 0x08, 0x86, 0x63,
+	0xfc, 0xc1, 0x57, 0xfc, 0xd8, 0x31, 0x28, 0xab, 0xd7, 0xe7, 0x4d, 0xf9, 0x20, 0xf6, 0xe9, 0xff,
+	0x02, 0x00, 0x00, 0xff, 0xff, 0x46, 0xbe, 0x48, 0x9c, 0x8c, 0x13, 0x00, 0x00,
 }
diff --git a/vendor/github.com/tendermint/abci/types/util.go b/vendor/github.com/tendermint/abci/types/util.go
index 17c53f65..39a24e02 100644
--- a/vendor/github.com/tendermint/abci/types/util.go
+++ b/vendor/github.com/tendermint/abci/types/util.go
@@ -3,15 +3,12 @@ package types
 import (
 	"bytes"
 	"encoding/json"
-
-	"github.com/tendermint/go-wire/data"
-	cmn "github.com/tendermint/tmlibs/common"
 )
 
 //------------------------------------------------------------------------------
 
 // Validators is a list of validators that implements the Sort interface
-type Validators []*Validator
+type Validators []Validator
 
 func (v Validators) Len() int {
 	return len(v)
@@ -31,36 +28,16 @@ func (v Validators) Swap(i, j int) {
 func ValidatorsString(vs Validators) string {
 	s := make([]validatorPretty, len(vs))
 	for i, v := range vs {
-		s[i] = validatorPretty{v.PubKey, v.Power}
+		s[i] = validatorPretty(v)
 	}
 	b, err := json.Marshal(s)
 	if err != nil {
-		cmn.PanicSanity(err.Error())
+		panic(err.Error())
 	}
 	return string(b)
 }
 
 type validatorPretty struct {
-	PubKey data.Bytes `json:"pub_key"`
-	Power  int64      `json:"power"`
-}
-
-//------------------------------------------------------------------------------
-
-// KVPairInt is a helper method to build KV pair with an integer value.
-func KVPairInt(key string, val int64) *KVPair {
-	return &KVPair{
-		Key:       key,
-		ValueInt:  val,
-		ValueType: KVPair_INT,
-	}
-}
-
-// KVPairString is a helper method to build KV pair with a string value.
-func KVPairString(key, val string) *KVPair {
-	return &KVPair{
-		Key:         key,
-		ValueString: val,
-		ValueType:   KVPair_STRING,
-	}
+	PubKey []byte `json:"pub_key"`
+	Power  int64  `json:"power"`
 }
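// Illustrative sketch, not part of the patch: with Validators now holding
// values rather than pointers, entries can be built with struct literals and
// passed straight to sort.Sort (Validators implements sort.Interface, as the
// doc comment above says). The Validator field names are assumed from the
// validatorPretty(v) conversion.
package main

import (
	"fmt"
	"sort"

	"github.com/tendermint/abci/types"
)

func main() {
	vals := types.Validators{
		{PubKey: []byte{0xAA}, Power: 5},
		{PubKey: []byte{0xBB}, Power: 10},
	}
	sort.Sort(vals) // uses the Len/Less/Swap methods on Validators
	fmt.Println(types.ValidatorsString(vals))
}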
diff --git a/vendor/github.com/tendermint/go-crypto/priv_key.go b/vendor/github.com/tendermint/go-crypto/priv_key.go
index e6e7ac03..11dcb686 100644
--- a/vendor/github.com/tendermint/go-crypto/priv_key.go
+++ b/vendor/github.com/tendermint/go-crypto/priv_key.go
@@ -1,7 +1,7 @@
 package crypto
 
 import (
-	"bytes"
+	"crypto/subtle"
 
 	secp256k1 "github.com/btcsuite/btcd/btcec"
 	"github.com/tendermint/ed25519"
@@ -69,9 +69,11 @@ func (privKey PrivKeyEd25519) PubKey() PubKey {
 	return PubKeyEd25519(pubBytes).Wrap()
 }
 
+// Equals - you probably don't need to use this.
+// Runs in constant time based on length of the keys.
 func (privKey PrivKeyEd25519) Equals(other PrivKey) bool {
 	if otherEd, ok := other.Unwrap().(PrivKeyEd25519); ok {
-		return bytes.Equal(privKey[:], otherEd[:])
+		return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
 	} else {
 		return false
 	}
@@ -156,9 +158,11 @@ func (privKey PrivKeySecp256k1) PubKey() PubKey {
 	return pub.Wrap()
 }
 
+// Equals - you probably don't need to use this.
+// Runs in constant time based on length of the keys.
 func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool {
 	if otherSecp, ok := other.Unwrap().(PrivKeySecp256k1); ok {
-		return bytes.Equal(privKey[:], otherSecp[:])
+		return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1
 	} else {
 		return false
 	}
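// A minimal stand-alone sketch of the constant-time comparison pattern the
// hunks above switch to; the names here are illustrative, not from the patch.
package main

import (
	"crypto/subtle"
	"fmt"
)

// equalConstantTime reports whether a and b are equal without leaking, via
// timing, how long a matching prefix is. ConstantTimeCompare returns 1 only
// when both the lengths and the contents match.
func equalConstantTime(a, b []byte) bool {
	return subtle.ConstantTimeCompare(a, b) == 1
}

func main() {
	fmt.Println(equalConstantTime([]byte("secret"), []byte("secret"))) // true
	fmt.Println(equalConstantTime([]byte("secret"), []byte("secreT"))) // false
}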
diff --git a/vendor/github.com/tendermint/go-crypto/pub_key.go b/vendor/github.com/tendermint/go-crypto/pub_key.go
index 4d5c31b2..32c0b323 100644
--- a/vendor/github.com/tendermint/go-crypto/pub_key.go
+++ b/vendor/github.com/tendermint/go-crypto/pub_key.go
@@ -3,19 +3,27 @@ package crypto
 import (
 	"bytes"
 	"crypto/sha256"
+	"fmt"
 
 	secp256k1 "github.com/btcsuite/btcd/btcec"
 	"github.com/tendermint/ed25519"
 	"github.com/tendermint/ed25519/extra25519"
 	"github.com/tendermint/go-wire"
 	data "github.com/tendermint/go-wire/data"
-	. "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tmlibs/common"
 	"golang.org/x/crypto/ripemd160"
 )
 
+// An address is a []byte, but hex-encoded even in JSON.
+// []byte leaves us the option to change the address length.
+// Use an alias so Unmarshal methods (with ptr receivers) are available too.
+type Address = cmn.HexBytes
+
 func PubKeyFromBytes(pubKeyBytes []byte) (pubKey PubKey, err error) {
-	err = wire.ReadBinaryBytes(pubKeyBytes, &pubKey)
-	return
+	if err := wire.ReadBinaryBytes(pubKeyBytes, &pubKey); err != nil {
+		return PubKey{}, err
+	}
+	return pubKey, nil
 }
 
 //----------------------------------------
@@ -25,7 +33,7 @@ func PubKeyFromBytes(pubKeyBytes []byte) (pubKey PubKey, err error) {
 // +gen wrapper:"PubKey,Impl[PubKeyEd25519,PubKeySecp256k1],ed25519,secp256k1"
 type PubKeyInner interface {
 	AssertIsPubKeyInner()
-	Address() []byte
+	Address() Address
 	Bytes() []byte
 	KeyString() string
 	VerifyBytes(msg []byte, sig Signature) bool
@@ -42,17 +50,17 @@ type PubKeyEd25519 [32]byte
 
 func (pubKey PubKeyEd25519) AssertIsPubKeyInner() {}
 
-func (pubKey PubKeyEd25519) Address() []byte {
+func (pubKey PubKeyEd25519) Address() Address {
 	w, n, err := new(bytes.Buffer), new(int), new(error)
 	wire.WriteBinary(pubKey[:], w, n, err)
 	if *err != nil {
-		PanicCrisis(*err)
+		panic(*err)
 	}
 	// append type byte
 	encodedPubkey := append([]byte{TypeEd25519}, w.Bytes()...)
 	hasher := ripemd160.New()
 	hasher.Write(encodedPubkey) // does not error
-	return hasher.Sum(nil)
+	return Address(hasher.Sum(nil))
 }
 
 func (pubKey PubKeyEd25519) Bytes() []byte {
@@ -93,13 +101,13 @@ func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {
 }
 
 func (pubKey PubKeyEd25519) String() string {
-	return Fmt("PubKeyEd25519{%X}", pubKey[:])
+	return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:])
 }
 
 // Must return the full bytes in hex.
 // Used for map keying, etc.
 func (pubKey PubKeyEd25519) KeyString() string {
-	return Fmt("%X", pubKey[:])
+	return fmt.Sprintf("%X", pubKey[:])
 }
 
 func (pubKey PubKeyEd25519) Equals(other PubKey) bool {
@@ -122,14 +130,14 @@ type PubKeySecp256k1 [33]byte
 func (pubKey PubKeySecp256k1) AssertIsPubKeyInner() {}
 
 // Implements Bitcoin style addresses: RIPEMD160(SHA256(pubkey))
-func (pubKey PubKeySecp256k1) Address() []byte {
+func (pubKey PubKeySecp256k1) Address() Address {
 	hasherSHA256 := sha256.New()
 	hasherSHA256.Write(pubKey[:]) // does not error
 	sha := hasherSHA256.Sum(nil)
 
 	hasherRIPEMD160 := ripemd160.New()
 	hasherRIPEMD160.Write(sha) // does not error
-	return hasherRIPEMD160.Sum(nil)
+	return Address(hasherRIPEMD160.Sum(nil))
 }
 
 func (pubKey PubKeySecp256k1) Bytes() []byte {
@@ -166,13 +174,13 @@ func (p *PubKeySecp256k1) UnmarshalJSON(enc []byte) error {
 }
 
 func (pubKey PubKeySecp256k1) String() string {
-	return Fmt("PubKeySecp256k1{%X}", pubKey[:])
+	return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:])
 }
 
 // Must return the full bytes in hex.
 // Used for map keying, etc.
 func (pubKey PubKeySecp256k1) KeyString() string {
-	return Fmt("%X", pubKey[:])
+	return fmt.Sprintf("%X", pubKey[:])
 }
 
 func (pubKey PubKeySecp256k1) Equals(other PubKey) bool {
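// Rough illustration of why Address is aliased to cmn.HexBytes above: the
// alias keeps []byte semantics while making addresses JSON-encode as hex
// strings instead of base64. Assumes the vendored tmlibs package is importable
// as shown; the exact output casing is tmlibs' choice.
package main

import (
	"encoding/json"
	"fmt"

	cmn "github.com/tendermint/tmlibs/common"
)

func main() {
	addr := cmn.HexBytes{0xDE, 0xAD, 0xBE, 0xEF}
	bz, err := json.Marshal(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // a quoted hex string, e.g. "DEADBEEF"
}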
diff --git a/vendor/github.com/tendermint/go-crypto/random.go b/vendor/github.com/tendermint/go-crypto/random.go
index 40cbcf8f..46754219 100644
--- a/vendor/github.com/tendermint/go-crypto/random.go
+++ b/vendor/github.com/tendermint/go-crypto/random.go
@@ -44,7 +44,10 @@ func CRandBytes(numBytes int) []byte {
 	return b
 }
 
-// RandHex(24) gives 96 bits of randomness, strong enough for most purposes.
+// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long.
+//
+// Note: CRandHex(24) gives 96 bits of randomness that
+// are usually strong enough for most purposes.
 func CRandHex(numDigits int) string {
 	return hex.EncodeToString(CRandBytes(numDigits / 2))
 }
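// Worked example of the floor(numDigits/2)*2 note above: odd digit counts are
// rounded down because the implementation draws numDigits/2 random bytes.
//
//	crypto.CRandHex(24) // 12 random bytes -> 24 hex characters (96 bits)
//	crypto.CRandHex(7)  // 3 random bytes  -> 6 hex characters, not 7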
diff --git a/vendor/github.com/tendermint/go-crypto/signature.go b/vendor/github.com/tendermint/go-crypto/signature.go
index d2ea4513..cd40331c 100644
--- a/vendor/github.com/tendermint/go-crypto/signature.go
+++ b/vendor/github.com/tendermint/go-crypto/signature.go
@@ -87,8 +87,8 @@ func (sig SignatureSecp256k1) IsZero() bool { return len(sig) == 0 }
 func (sig SignatureSecp256k1) String() string { return fmt.Sprintf("/%X.../", Fingerprint(sig[:])) }
 
 func (sig SignatureSecp256k1) Equals(other Signature) bool {
-	if otherEd, ok := other.Unwrap().(SignatureSecp256k1); ok {
-		return bytes.Equal(sig[:], otherEd[:])
+	if otherSecp, ok := other.Unwrap().(SignatureSecp256k1); ok {
+		return bytes.Equal(sig[:], otherSecp[:])
 	} else {
 		return false
 	}
diff --git a/vendor/github.com/tendermint/go-crypto/version.go b/vendor/github.com/tendermint/go-crypto/version.go
index c39dd627..0281a5ea 100644
--- a/vendor/github.com/tendermint/go-crypto/version.go
+++ b/vendor/github.com/tendermint/go-crypto/version.go
@@ -1,3 +1,3 @@
 package crypto
 
-const Version = "0.4.1"
+const Version = "0.5.0"
diff --git a/vendor/github.com/tendermint/go-wire/nowriter/tmlegacy/tm_encoder_legacy.go b/vendor/github.com/tendermint/go-wire/nowriter/tmlegacy/tm_encoder_legacy.go
deleted file mode 100644
index 2b862b7a..00000000
--- a/vendor/github.com/tendermint/go-wire/nowriter/tmlegacy/tm_encoder_legacy.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package tmlegacy
-
-import (
-	"encoding/binary"
-	cmn "github.com/tendermint/tmlibs/common"
-	"io"
-	"math"
-	"time"
-)
-
-// Implementation of the legacy (`TMEncoderFastIOWriter`) interface
-type TMEncoderLegacy struct {
-}
-
-var Legacy *TMEncoderLegacy = &TMEncoderLegacy{} // convenience
-
-// Does not use builder pattern to encourage migration away from this struct
-func (e *TMEncoderLegacy) WriteBool(b bool, w io.Writer, n *int, err *error) {
-	var bb byte
-	if b {
-		bb = 0x01
-	} else {
-		bb = 0x00
-	}
-	e.WriteTo([]byte{bb}, w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteFloat32(f float32, w io.Writer, n *int, err *error) {
-	e.WriteUint32(math.Float32bits(f), w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteFloat64(f float64, w io.Writer, n *int, err *error) {
-	e.WriteUint64(math.Float64bits(f), w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteInt8(i int8, w io.Writer, n *int, err *error) {
-	e.WriteOctet(byte(i), w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteInt16(i int16, w io.Writer, n *int, err *error) {
-	var buf [2]byte
-	binary.BigEndian.PutUint16(buf[:], uint16(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteInt32(i int32, w io.Writer, n *int, err *error) {
-	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], uint32(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-func (e *TMEncoderLegacy) WriteInt64(i int64, w io.Writer, n *int, err *error) {
-	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], uint64(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteOctet(b byte, w io.Writer, n *int, err *error) {
-	e.WriteTo([]byte{b}, w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteTime(t time.Time, w io.Writer, n *int, err *error) {
-	nanosecs := t.UnixNano()
-	millisecs := nanosecs / 1000000
-	if nanosecs < 0 {
-		cmn.PanicSanity("can't encode times below 1970")
-	} else {
-		e.WriteInt64(millisecs*1000000, w, n, err)
-	}
-}
-
-func (e *TMEncoderLegacy) WriteUint8(i uint8, w io.Writer, n *int, err *error) {
-	e.WriteOctet(byte(i), w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteUint16(i uint16, w io.Writer, n *int, err *error) {
-	var buf [2]byte
-	binary.BigEndian.PutUint16(buf[:], uint16(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteUint16s(iz []uint16, w io.Writer, n *int, err *error) {
-	e.WriteUint32(uint32(len(iz)), w, n, err)
-	for _, i := range iz {
-		e.WriteUint16(i, w, n, err)
-		if *err != nil {
-			return
-		}
-	}
-}
-func (e *TMEncoderLegacy) WriteUint32(i uint32, w io.Writer, n *int, err *error) {
-	var buf [4]byte
-	binary.BigEndian.PutUint32(buf[:], uint32(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteUint64(i uint64, w io.Writer, n *int, err *error) {
-	var buf [8]byte
-	binary.BigEndian.PutUint64(buf[:], uint64(i))
-	e.WriteTo(buf[:], w, n, err)
-}
-
-func (e *TMEncoderLegacy) WriteUvarint(i uint, w io.Writer, n *int, err *error) {
-	var size = uvarintSize(uint64(i))
-	e.WriteUint8(uint8(size), w, n, err)
-	if size > 0 {
-		var buf [8]byte
-		binary.BigEndian.PutUint64(buf[:], uint64(i))
-		e.WriteTo(buf[(8-size):], w, n, err)
-	}
-}
-
-func (e *TMEncoderLegacy) WriteVarint(i int, w io.Writer, n *int, err *error) {
-	var negate = false
-	if i < 0 {
-		negate = true
-		i = -i
-	}
-	var size = uvarintSize(uint64(i))
-	if negate {
-		// e.g. 0xF1 for a single negative byte
-		e.WriteUint8(uint8(size+0xF0), w, n, err)
-	} else {
-		e.WriteUint8(uint8(size), w, n, err)
-	}
-	if size > 0 {
-		var buf [8]byte
-		binary.BigEndian.PutUint64(buf[:], uint64(i))
-		e.WriteTo(buf[(8-size):], w, n, err)
-	}
-}
-
-// Write all of bz to w
-// Increment n and set err accordingly.
-func (e *TMEncoderLegacy) WriteTo(bz []byte, w io.Writer, n *int, err *error) {
-	if *err != nil {
-		return
-	}
-	n_, err_ := w.Write(bz)
-	*n += n_
-	*err = err_
-}
-
-func uvarintSize(i uint64) int {
-	if i == 0 {
-		return 0
-	}
-	if i < 1<<8 {
-		return 1
-	}
-	if i < 1<<16 {
-		return 2
-	}
-	if i < 1<<24 {
-		return 3
-	}
-	if i < 1<<32 {
-		return 4
-	}
-	if i < 1<<40 {
-		return 5
-	}
-	if i < 1<<48 {
-		return 6
-	}
-	if i < 1<<56 {
-		return 7
-	}
-	return 8
-}
-
-func (e *TMEncoderLegacy) WriteOctetSlice(bz []byte, w io.Writer, n *int, err *error) {
-	e.WriteVarint(len(bz), w, n, err)
-	if len(bz) > 0 {
-		e.WriteTo(bz, w, n, err)
-	}
-}
diff --git a/vendor/github.com/tendermint/go-wire/util.go b/vendor/github.com/tendermint/go-wire/util.go
index 4e206197..907dc183 100644
--- a/vendor/github.com/tendermint/go-wire/util.go
+++ b/vendor/github.com/tendermint/go-wire/util.go
@@ -10,6 +10,40 @@ import (
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
+//-------------------------------------------------------
+// New go-wire API
+
+func MarshalBinary(o interface{}) ([]byte, error) {
+	w, n, err := new(bytes.Buffer), new(int), new(error)
+	WriteBinary(o, w, n, err)
+	if *err != nil {
+		return nil, *err
+	}
+	return w.Bytes(), nil
+}
+
+func UnmarshalBinary(bz []byte, ptr interface{}) error {
+	r, n, err := bytes.NewBuffer(bz), new(int), new(error)
+	ReadBinaryPtr(ptr, r, len(bz), n, err)
+	return *err
+}
+
+func MarshalJSON(o interface{}) ([]byte, error) {
+	w, n, err := new(bytes.Buffer), new(int), new(error)
+	WriteJSON(o, w, n, err)
+	if *err != nil {
+		return nil, *err
+	}
+	return w.Bytes(), nil
+}
+
+func UnmarshalJSON(bz []byte, ptr interface{}) (err error) {
+	ReadJSONPtr(ptr, bz, &err)
+	return
+}
+
+//-------------------------------------------------------
+
 func BinaryBytes(o interface{}) []byte {
 	w, n, err := new(bytes.Buffer), new(int), new(error)
 	WriteBinary(o, w, n, err)
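// A small round-trip sketch for the MarshalBinary/UnmarshalBinary helpers
// added above; the Point type and values are illustrative only.
package main

import (
	"fmt"

	wire "github.com/tendermint/go-wire"
)

type Point struct {
	X int64
	Y int64
}

func main() {
	bz, err := wire.MarshalBinary(Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	var p Point
	if err := wire.UnmarshalBinary(bz, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {X:1 Y:2}
}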
diff --git a/vendor/github.com/tendermint/go-wire/version.go b/vendor/github.com/tendermint/go-wire/version.go
index ca9e78c7..463d2015 100644
--- a/vendor/github.com/tendermint/go-wire/version.go
+++ b/vendor/github.com/tendermint/go-wire/version.go
@@ -1,3 +1,3 @@
 package wire
 
-const Version = "0.7.2"
+const Version = "0.7.3"
diff --git a/vendor/github.com/tendermint/iavl/chunk.go b/vendor/github.com/tendermint/iavl/chunk.go
new file mode 100644
index 00000000..b1cbdd22
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/chunk.go
@@ -0,0 +1,185 @@
+package iavl
+
+import (
+	"sort"
+
+	"github.com/pkg/errors"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+// Chunk is a list of ordered nodes.
+// It can be sorted, merged, exported from a tree and
+// used to generate a new tree.
+type Chunk []OrderedNodeData
+
+// OrderedNodeData is the data to recreate a leaf node,
+// along with a SortOrder to define a BFS insertion order.
+type OrderedNodeData struct {
+	SortOrder uint64
+	NodeData
+}
+
+// NewOrderedNode creates the data from a leaf node.
+func NewOrderedNode(leaf *Node, prefix uint64) OrderedNodeData {
+	return OrderedNodeData{
+		SortOrder: prefix,
+		NodeData: NodeData{
+			Key:   leaf.key,
+			Value: leaf.value,
+		},
+	}
+}
+
+// getChunkHashes returns all the "checksum" hashes for
+// the chunks that will be sent.
+func getChunkHashes(tree *Tree, depth uint) ([][]byte, [][]byte, uint, error) {
+	maxDepth := uint(tree.root.height / 2)
+	if depth > maxDepth {
+		return nil, nil, 0, errors.New("depth exceeds maximum allowed")
+	}
+
+	nodes := getNodes(tree, depth)
+	hashes := make([][]byte, len(nodes))
+	keys := make([][]byte, len(nodes))
+	for i, n := range nodes {
+		hashes[i] = n.hash
+		keys[i] = n.key
+	}
+	return hashes, keys, depth, nil
+}
+
+// GetChunkHashesWithProofs takes a tree and returns the list of chunks with
+// proofs that can be used to synchronize a tree across the network.
+func GetChunkHashesWithProofs(tree *Tree) ([][]byte, []*InnerKeyProof, uint) {
+	hashes, keys, depth, err := getChunkHashes(tree, uint(tree.root.height/2))
+	if err != nil {
+		cmn.PanicSanity(cmn.Fmt("GetChunkHashes: %s", err))
+	}
+	proofs := make([]*InnerKeyProof, len(keys))
+
+	for i, k := range keys {
+		proof, err := tree.getInnerWithProof(k)
+		if err != nil {
+			cmn.PanicSanity(cmn.Fmt("Error getting inner key proof: %s", err))
+		}
+		proofs[i] = proof
+	}
+	return hashes, proofs, depth
+}
+
+// getNodes returns an array of nodes at the given depth.
+func getNodes(tree *Tree, depth uint) []*Node {
+	nodes := make([]*Node, 0, 1<<depth)
+	tree.root.traverseDepth(tree, depth, func(node *Node) {
+		nodes = append(nodes, node)
+	})
+	return nodes
+}
+
+// traverseDepth calls cb for every node exactly depth levels below this node,
+// descending depth-first so callbacks arrive in tree order.

+func (node *Node) traverseDepth(t *Tree, depth uint, cb func(*Node)) {
+	// base case
+	if depth == 0 {
+		cb(node)
+		return
+	}
+	if node.isLeaf() {
+		return
+	}
+
+	// otherwise, descend one more level
+	node.getLeftNode(t).traverseDepth(t, depth-1, cb)
+	node.getRightNode(t).traverseDepth(t, depth-1, cb)
+}
+
+// positionToKey calculates the appropriate sort order
+// for the count-th node at a given depth, assuming a full
+// tree above this height.
+func positionToKey(depth, count uint) (key uint64) {
+	for d := depth; d > 0; d-- {
+		// lowest digit of count * 2^(d-1)
+		key += uint64((count & 1) << (d - 1))
+		count = count >> 1
+	}
+	return
+}
+
+// GetChunk finds the count-th subtree at depth and
+// generates a Chunk for that data.
+func GetChunk(tree *Tree, depth, count uint) Chunk {
+	node := getNodes(tree, depth)[count]
+	prefix := positionToKey(depth, count)
+	return getChunk(tree, node, prefix, depth)
+}
+
+// getChunk takes a node and serializes all nodes below it
+//
+// As it is part of a larger tree, prefix defines the path
+// up to this point, and depth the current depth
+// (which defines where we add to the prefix)
+//
+// TODO: make this more efficient, *Chunk as arg???
+func getChunk(t *Tree, node *Node, prefix uint64, depth uint) Chunk {
+	if node.isLeaf() {
+		return Chunk{NewOrderedNode(node, prefix)}
+	}
+	res := make(Chunk, 0, node.size)
+	if node.leftNode != nil {
+		left := getChunk(t, node.getLeftNode(t), prefix, depth+1)
+		res = append(res, left...)
+	}
+	if node.rightNode != nil {
+		offset := prefix + 1<<depth
+		right := getChunk(t, node.getRightNode(t), offset, depth+1)
+		res = append(res, right...)
+	}
+	return res
+}
+
+// Sort sorts the chunk in place by ascending SortOrder.
+func (c Chunk) Sort() {
+	sort.Slice(c, func(i, j int) bool {
+		return c[i].SortOrder < c[j].SortOrder
+	})
+}
+
+// MergeChunks does a merge sort of the two Chunks,
+// assuming they were already in sorted order.
+func MergeChunks(left, right Chunk) Chunk {
+	size, i, j := len(left)+len(right), 0, 0
+	slice := make([]OrderedNodeData, size)
+
+	for k := 0; k < size; k++ {
+		if i > len(left)-1 && j <= len(right)-1 {
+			slice[k] = right[j]
+			j++
+		} else if j > len(right)-1 && i <= len(left)-1 {
+			slice[k] = left[i]
+			i++
+		} else if left[i].SortOrder < right[j].SortOrder {
+			slice[k] = left[i]
+			i++
+		} else {
+			slice[k] = right[j]
+			j++
+		}
+	}
+	return Chunk(slice)
+}
+
+// CalculateRoot creates a temporary in-memory
+// iavl tree to calculate the root hash of inserting
+// all the nodes.
+func (c Chunk) CalculateRoot() []byte {
+	test := NewTree(nil, 2*len(c))
+	c.PopulateTree(test)
+	return test.Hash()
+}
+
+// PopulateTree adds all the chunks in order to the given tree.
+func (c Chunk) PopulateTree(empty *Tree) {
+	for _, data := range c {
+		empty.Set(data.Key, data.Value)
+	}
+}
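// Rough usage sketch for the new chunking API defined in this file. The tree
// contents and the depth of 1 are made up; a real syncing flow would also use
// GetChunkHashesWithProofs to verify each chunk before applying it.
package main

import (
	"fmt"

	"github.com/tendermint/iavl"
)

func main() {
	// Build a small in-memory tree (a nil DB keeps everything in memory,
	// the same way CalculateRoot does above).
	tree := iavl.NewTree(nil, 0)
	for i := byte(0); i < 8; i++ {
		tree.Set([]byte{i}, []byte{i})
	}

	// Export the two subtrees one level below the root as chunks and merge
	// them back into a single chunk ordered by SortOrder.
	merged := iavl.MergeChunks(
		iavl.GetChunk(tree, 1, 0),
		iavl.GetChunk(tree, 1, 1),
	)
	fmt.Println(len(merged)) // 8: one OrderedNodeData per leaf

	// Replay the merged chunk into a fresh tree; CalculateRoot is the
	// one-call convenience for hashing such a replayed tree.
	rebuilt := iavl.NewTree(nil, 0)
	merged.PopulateTree(rebuilt)
	fmt.Println(rebuilt.Size()) // 8
}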
diff --git a/vendor/github.com/tendermint/iavl/doc.go b/vendor/github.com/tendermint/iavl/doc.go
index b347fc65..d6c38c6c 100644
--- a/vendor/github.com/tendermint/iavl/doc.go
+++ b/vendor/github.com/tendermint/iavl/doc.go
@@ -4,7 +4,7 @@
 //  import "github.com/tendermint/tmlibs/db"
 //  ...
 //
-//  tree := iavl.NewVersionedTree(128, db.NewMemDB())
+//  tree := iavl.NewVersionedTree(db.NewMemDB(), 128)
 //
 //  tree.IsEmpty() // true
 //
diff --git a/vendor/github.com/tendermint/iavl/logger.go b/vendor/github.com/tendermint/iavl/logger.go
new file mode 100644
index 00000000..b53ce34e
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/logger.go
@@ -0,0 +1,11 @@
+package iavl
+
+import (
+	"fmt"
+)
+
+func debug(format string, args ...interface{}) {
+	if false {
+		fmt.Printf(format, args...)
+	}
+}
diff --git a/vendor/github.com/tendermint/iavl/node.go b/vendor/github.com/tendermint/iavl/node.go
index a785feba..afd92d4c 100644
--- a/vendor/github.com/tendermint/iavl/node.go
+++ b/vendor/github.com/tendermint/iavl/node.go
@@ -1,5 +1,8 @@
 package iavl
 
+// NOTE: This file favors int64 as opposed to int for size/counts.
+// The Tree on the other hand favors int.  This is intentional.
+
 import (
 	"bytes"
 	"fmt"
@@ -8,16 +11,15 @@ import (
 	"golang.org/x/crypto/ripemd160"
 
 	"github.com/tendermint/go-wire"
-	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // Node represents a node in a Tree.
 type Node struct {
 	key       []byte
 	value     []byte
-	version   uint64
+	version   int64
 	height    int8
-	size      int
+	size      int64
 	hash      []byte
 	leftHash  []byte
 	leftNode  *Node
@@ -26,14 +28,14 @@ type Node struct {
 	persisted bool
 }
 
-// NewNode returns a new node from a key and value.
-func NewNode(key []byte, value []byte) *Node {
+// NewNode returns a new node from a key, value and version.
+func NewNode(key []byte, value []byte, version int64) *Node {
 	return &Node{
 		key:     key,
 		value:   value,
 		height:  0,
 		size:    1,
-		version: 0,
+		version: version,
 	}
 }
 
@@ -51,11 +53,11 @@ func MakeNode(buf []byte) (node *Node, err error) {
 	n := 1 // Keeps track of bytes read.
 	buf = buf[n:]
 
-	node.size, n, err = wire.GetVarint(buf)
-	if err != nil {
-		return nil, err
-	}
-	buf = buf[n:]
+	node.size = wire.GetInt64(buf)
+	buf = buf[8:]
+
+	node.version = wire.GetInt64(buf)
+	buf = buf[8:]
 
 	node.key, n, err = wire.GetByteSlice(buf)
 	if err != nil {
@@ -63,9 +65,6 @@ func MakeNode(buf []byte) (node *Node, err error) {
 	}
 	buf = buf[n:]
 
-	node.version = wire.GetUint64(buf)
-	buf = buf[8:]
-
 	// Read node body.
 
 	if node.isLeaf() {
@@ -100,14 +99,14 @@ func (node *Node) String() string {
 }
 
 // clone creates a shallow copy of a node with its hash set to nil.
-func (node *Node) clone() *Node {
+func (node *Node) clone(version int64) *Node {
 	if node.isLeaf() {
-		cmn.PanicSanity("Attempt to copy a leaf node")
+		panic("Attempt to copy a leaf node")
 	}
 	return &Node{
 		key:       node.key,
 		height:    node.height,
-		version:   node.version,
+		version:   version,
 		size:      node.size,
 		hash:      nil,
 		leftHash:  node.leftHash,
@@ -138,7 +137,7 @@ func (node *Node) has(t *Tree, key []byte) (has bool) {
 }
 
 // Get a key under the node.
-func (node *Node) get(t *Tree, key []byte) (index int, value []byte) {
+func (node *Node) get(t *Tree, key []byte) (index int64, value []byte) {
 	if node.isLeaf() {
 		switch bytes.Compare(node.key, key) {
 		case -1:
@@ -160,7 +159,7 @@ func (node *Node) get(t *Tree, key []byte) (index int, value []byte) {
 	}
 }
 
-func (node *Node) getByIndex(t *Tree, index int) (key []byte, value []byte) {
+func (node *Node) getByIndex(t *Tree, index int64) (key []byte, value []byte) {
 	if node.isLeaf() {
 		if index == 0 {
 			return node.key, node.value
@@ -189,7 +188,7 @@ func (node *Node) _hash() []byte {
 	hasher := ripemd160.New()
 	buf := new(bytes.Buffer)
 	if _, err := node.writeHashBytes(buf); err != nil {
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
 	hasher.Write(buf.Bytes())
 	node.hash = hasher.Sum(nil)
@@ -199,7 +198,7 @@ func (node *Node) _hash() []byte {
 
 // Hash the node and its descendants recursively. This usually mutates all
 // descendant nodes. Returns the node hash and number of nodes hashed.
-func (node *Node) hashWithCount() ([]byte, int) {
+func (node *Node) hashWithCount() ([]byte, int64) {
 	if node.hash != nil {
 		return node.hash, 0
 	}
@@ -208,7 +207,7 @@ func (node *Node) hashWithCount() ([]byte, int) {
 	buf := new(bytes.Buffer)
 	_, hashCount, err := node.writeHashBytesRecursively(buf)
 	if err != nil {
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
 	hasher.Write(buf.Bytes())
 	node.hash = hasher.Sum(nil)
@@ -220,17 +219,17 @@ func (node *Node) hashWithCount() ([]byte, int) {
 // child hashes to be already set.
 func (node *Node) writeHashBytes(w io.Writer) (n int, err error) {
 	wire.WriteInt8(node.height, w, &n, &err)
-	wire.WriteVarint(node.size, w, &n, &err)
+	wire.WriteInt64(node.size, w, &n, &err)
+	wire.WriteInt64(node.version, w, &n, &err)
 
 	// Key is not written for inner nodes, unlike writeBytes.
 
 	if node.isLeaf() {
 		wire.WriteByteSlice(node.key, w, &n, &err)
 		wire.WriteByteSlice(node.value, w, &n, &err)
-		wire.WriteUint64(node.version, w, &n, &err)
 	} else {
 		if node.leftHash == nil || node.rightHash == nil {
-			cmn.PanicSanity("Found an empty child hash")
+			panic("Found an empty child hash")
 		}
 		wire.WriteByteSlice(node.leftHash, w, &n, &err)
 		wire.WriteByteSlice(node.rightHash, w, &n, &err)
@@ -240,7 +239,7 @@ func (node *Node) writeHashBytes(w io.Writer) (n int, err error) {
 
 // Writes the node's hash to the given io.Writer.
 // This function has the side-effect of calling hashWithCount.
-func (node *Node) writeHashBytesRecursively(w io.Writer) (n int, hashCount int, err error) {
+func (node *Node) writeHashBytesRecursively(w io.Writer) (n int, hashCount int64, err error) {
 	if node.leftNode != nil {
 		leftHash, leftCount := node.leftNode.hashWithCount()
 		node.leftHash = leftHash
@@ -259,22 +258,22 @@ func (node *Node) writeHashBytesRecursively(w io.Writer) (n int, hashCount int,
 // Writes the node as a serialized byte slice to the supplied io.Writer.
 func (node *Node) writeBytes(w io.Writer) (n int, err error) {
 	wire.WriteInt8(node.height, w, &n, &err)
-	wire.WriteVarint(node.size, w, &n, &err)
+	wire.WriteInt64(node.size, w, &n, &err)
+	wire.WriteInt64(node.version, w, &n, &err)
 
 	// Unlike writeHashBytes, key is written for inner nodes.
 	wire.WriteByteSlice(node.key, w, &n, &err)
-	wire.WriteUint64(node.version, w, &n, &err)
 
 	if node.isLeaf() {
 		wire.WriteByteSlice(node.value, w, &n, &err)
 	} else {
 		if node.leftHash == nil {
-			cmn.PanicSanity("node.leftHash was nil in writeBytes")
+			panic("node.leftHash was nil in writeBytes")
 		}
 		wire.WriteByteSlice(node.leftHash, w, &n, &err)
 
 		if node.rightHash == nil {
-			cmn.PanicSanity("node.rightHash was nil in writeBytes")
+			panic("node.rightHash was nil in writeBytes")
 		}
 		wire.WriteByteSlice(node.rightHash, w, &n, &err)
 	}
@@ -284,6 +283,8 @@ func (node *Node) writeBytes(w io.Writer) (n int, err error) {
 func (node *Node) set(t *Tree, key []byte, value []byte) (
 	newSelf *Node, updated bool, orphaned []*Node,
 ) {
+	version := t.version + 1
+
 	if node.isLeaf() {
 		switch bytes.Compare(key, node.key) {
 		case -1:
@@ -291,8 +292,9 @@ func (node *Node) set(t *Tree, key []byte, value []byte) (
 				key:       node.key,
 				height:    1,
 				size:      2,
-				leftNode:  NewNode(key, value),
+				leftNode:  NewNode(key, value, version),
 				rightNode: node,
+				version:   version,
 			}, false, []*Node{}
 		case 1:
 			return &Node{
@@ -300,14 +302,15 @@ func (node *Node) set(t *Tree, key []byte, value []byte) (
 				height:    1,
 				size:      2,
 				leftNode:  node,
-				rightNode: NewNode(key, value),
+				rightNode: NewNode(key, value, version),
+				version:   version,
 			}, false, []*Node{}
 		default:
-			return NewNode(key, value), true, []*Node{node}
+			return NewNode(key, value, version), true, []*Node{node}
 		}
 	} else {
 		orphaned = append(orphaned, node)
-		node = node.clone()
+		node = node.clone(version)
 
 		if bytes.Compare(key, node.key) < 0 {
 			var leftOrphaned []*Node
@@ -337,6 +340,8 @@ func (node *Node) set(t *Tree, key []byte, value []byte) (
 func (node *Node) remove(t *Tree, key []byte) (
 	newHash []byte, newNode *Node, newKey []byte, value []byte, orphaned []*Node,
 ) {
+	version := t.version + 1
+
 	if node.isLeaf() {
 		if bytes.Equal(key, node.key) {
 			return nil, nil, nil, node.value, []*Node{node}
@@ -358,7 +363,7 @@ func (node *Node) remove(t *Tree, key []byte) (
 		}
 		orphaned = append(orphaned, node)
 
-		newNode := node.clone()
+		newNode := node.clone(version)
 		newNode.leftHash, newNode.leftNode = newLeftHash, newLeftNode
 		newNode.calcHeightAndSize(t)
 		newNode, balanceOrphaned := newNode.balance(t)
@@ -378,7 +383,7 @@ func (node *Node) remove(t *Tree, key []byte) (
 		}
 		orphaned = append(orphaned, node)
 
-		newNode := node.clone()
+		newNode := node.clone(version)
 		newNode.rightHash, newNode.rightNode = newRightHash, newRightNode
 		if newKey != nil {
 			newNode.key = newKey
@@ -406,10 +411,12 @@ func (node *Node) getRightNode(t *Tree) *Node {
 
 // Rotate right and return the new node and orphan.
 func (node *Node) rotateRight(t *Tree) (newNode *Node, orphan *Node) {
+	version := t.version + 1
+
 	// TODO: optimize balance & rotate.
-	node = node.clone()
+	node = node.clone(version)
 	l := node.getLeftNode(t)
-	_l := l.clone()
+	_l := l.clone(version)
 
 	_lrHash, _lrCached := _l.rightHash, _l.rightNode
 	_l.rightHash, _l.rightNode = node.hash, node
@@ -423,10 +430,12 @@ func (node *Node) rotateRight(t *Tree) (newNode *Node, orphan *Node) {
 
 // Rotate left and return the new node and orphan.
 func (node *Node) rotateLeft(t *Tree) (newNode *Node, orphan *Node) {
+	version := t.version + 1
+
 	// TODO: optimize balance & rotate.
-	node = node.clone()
+	node = node.clone(version)
 	r := node.getRightNode(t)
-	_r := r.clone()
+	_r := r.clone(version)
 
 	_rlHash, _rlCached := _r.leftHash, _r.leftNode
 	_r.leftHash, _r.leftNode = node.hash, node
@@ -496,10 +505,16 @@ func (node *Node) balance(t *Tree) (newSelf *Node, orphaned []*Node) {
 
 // traverse is a wrapper over traverseInRange when we want the whole tree
 func (node *Node) traverse(t *Tree, ascending bool, cb func(*Node) bool) bool {
-	return node.traverseInRange(t, nil, nil, ascending, false, cb)
+	return node.traverseInRange(t, nil, nil, ascending, false, 0, func(node *Node, depth uint8) bool {
+		return cb(node)
+	})
+}
+
+func (node *Node) traverseWithDepth(t *Tree, ascending bool, cb func(*Node, uint8) bool) bool {
+	return node.traverseInRange(t, nil, nil, ascending, false, 0, cb)
 }
 
-func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, inclusive bool, cb func(*Node) bool) bool {
+func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, inclusive bool, depth uint8, cb func(*Node, uint8) bool) bool {
 	afterStart := start == nil || bytes.Compare(start, node.key) <= 0
 	beforeEnd := end == nil || bytes.Compare(node.key, end) < 0
 	if inclusive {
@@ -509,7 +524,7 @@ func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, in
 	stop := false
 	if afterStart && beforeEnd {
 		// IterateRange ignores this if not leaf
-		stop = cb(node)
+		stop = cb(node, depth)
 	}
 	if stop {
 		return stop
@@ -521,24 +536,24 @@ func (node *Node) traverseInRange(t *Tree, start, end []byte, ascending bool, in
 	if ascending {
 		// check lower nodes, then higher
 		if afterStart {
-			stop = node.getLeftNode(t).traverseInRange(t, start, end, ascending, inclusive, cb)
+			stop = node.getLeftNode(t).traverseInRange(t, start, end, ascending, inclusive, depth+1, cb)
 		}
 		if stop {
 			return stop
 		}
 		if beforeEnd {
-			stop = node.getRightNode(t).traverseInRange(t, start, end, ascending, inclusive, cb)
+			stop = node.getRightNode(t).traverseInRange(t, start, end, ascending, inclusive, depth+1, cb)
 		}
 	} else {
 		// check the higher nodes first
 		if beforeEnd {
-			stop = node.getRightNode(t).traverseInRange(t, start, end, ascending, inclusive, cb)
+			stop = node.getRightNode(t).traverseInRange(t, start, end, ascending, inclusive, depth+1, cb)
 		}
 		if stop {
 			return stop
 		}
 		if afterStart {
-			stop = node.getLeftNode(t).traverseInRange(t, start, end, ascending, inclusive, cb)
+			stop = node.getLeftNode(t).traverseInRange(t, start, end, ascending, inclusive, depth+1, cb)
 		}
 	}
 
diff --git a/vendor/github.com/tendermint/iavl/nodedb.go b/vendor/github.com/tendermint/iavl/nodedb.go
index 79ac4b3f..fce58a10 100644
--- a/vendor/github.com/tendermint/iavl/nodedb.go
+++ b/vendor/github.com/tendermint/iavl/nodedb.go
@@ -3,37 +3,31 @@ package iavl
 import (
 	"bytes"
 	"container/list"
-	"errors"
 	"fmt"
 	"sort"
 	"sync"
 
-	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
 )
 
 var (
 	// All node keys are prefixed with this. This ensures no collision is
 	// possible with the other keys, and makes them easier to traverse.
-	nodesPrefix = "n/"
-	nodesKeyFmt = "n/%x"
+	nodePrefix = "n/"
+	nodeKeyFmt = "n/%x"
 
 	// Orphans are keyed in the database by their expected lifetime.
 	// The first number represents the *last* version at which the orphan needs
 	// to exist, while the second number represents the *earliest* version at
 	// which it is expected to exist - which starts out by being the version
 	// of the node being orphaned.
-	orphansPrefix    = "o/"
-	orphansPrefixFmt = "o/%d/"      // o/<version>/
-	orphansKeyFmt    = "o/%d/%d/%x" // o/<version>/<version>/<hash>
-
-	// These keys are used for the orphan reverse-lookups by node hash.
-	orphansIndexPrefix = "O/"
-	orphansIndexKeyFmt = "O/%x"
+	orphanPrefix    = "o/"
+	orphanPrefixFmt = "o/%d/"      // o/<last-version>/
+	orphanKeyFmt    = "o/%d/%d/%x" // o/<last-version>/<first-version>/<hash>
 
 	// r/<version>
-	rootsPrefix    = "r/"
-	rootsPrefixFmt = "r/%d"
+	rootPrefix    = "r/"
+	rootPrefixFmt = "r/%d"
 )
 
 type nodeDB struct {
@@ -41,22 +35,23 @@ type nodeDB struct {
 	db    dbm.DB     // Persistent node storage.
 	batch dbm.Batch  // Batched writing buffer.
 
-	versionCache  map[uint64][]byte // Cache of tree (root) versions.
-	latestVersion uint64            // Latest root version.
+	versionCache  map[int64][]byte // Cache of tree (root) versions.
+	latestVersion int64            // Latest root version.
 
 	nodeCache      map[string]*list.Element // Node cache.
 	nodeCacheSize  int                      // Node cache size limit in elements.
 	nodeCacheQueue *list.List               // LRU queue of cache elements. Used for deletion.
 }
 
-func newNodeDB(cacheSize int, db dbm.DB) *nodeDB {
+func newNodeDB(db dbm.DB, cacheSize int) *nodeDB {
 	ndb := &nodeDB{
+		db:             db,
+		batch:          db.NewBatch(),
+		versionCache:   map[int64][]byte{},
+		latestVersion:  0, // initially invalid
 		nodeCache:      make(map[string]*list.Element),
 		nodeCacheSize:  cacheSize,
 		nodeCacheQueue: list.New(),
-		db:             db,
-		batch:          db.NewBatch(),
-		versionCache:   map[uint64][]byte{},
 	}
 	return ndb
 }
@@ -67,6 +62,10 @@ func (ndb *nodeDB) GetNode(hash []byte) *Node {
 	ndb.mtx.Lock()
 	defer ndb.mtx.Unlock()
 
+	if len(hash) == 0 {
+		panic("nodeDB.GetNode() requires hash")
+	}
+
 	// Check the cache.
 	if elem, ok := ndb.nodeCache[string(hash)]; ok {
 		// Already exists. Move to back of nodeCacheQueue.
@@ -77,12 +76,12 @@ func (ndb *nodeDB) GetNode(hash []byte) *Node {
 	// Doesn't exist, load.
 	buf := ndb.db.Get(ndb.nodeKey(hash))
 	if buf == nil {
-		cmn.PanicSanity(cmn.Fmt("Value missing for key %x", hash))
+		panic(fmt.Sprintf("Value missing for hash %x corresponding to nodeKey %s", hash, ndb.nodeKey(hash)))
 	}
 
 	node, err := MakeNode(buf)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading Node. bytes: %x, error: %v", buf, err))
+		panic(fmt.Sprintf("Error reading Node. bytes: %x, error: %v", buf, err))
 	}
 
 	node.hash = hash
@@ -98,18 +97,19 @@ func (ndb *nodeDB) SaveNode(node *Node) {
 	defer ndb.mtx.Unlock()
 
 	if node.hash == nil {
-		cmn.PanicSanity("Expected to find node.hash, but none found.")
+		panic("Expected to find node.hash, but none found.")
 	}
 	if node.persisted {
-		cmn.PanicSanity("Shouldn't be calling save on an already persisted node.")
+		panic("Shouldn't be calling save on an already persisted node.")
 	}
 
 	// Save node bytes to db.
 	buf := new(bytes.Buffer)
 	if _, err := node.writeBytes(buf); err != nil {
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
 	ndb.batch.Set(ndb.nodeKey(node.hash), buf.Bytes())
+	debug("BATCH SAVE %X %p\n", node.hash, node)
 
 	node.persisted = true
 	ndb.cacheNode(node)
@@ -122,33 +122,27 @@ func (ndb *nodeDB) Has(hash []byte) bool {
 	if ldb, ok := ndb.db.(*dbm.GoLevelDB); ok {
 		exists, err := ldb.DB().Has(key, nil)
 		if err != nil {
-			cmn.PanicSanity("Got error from leveldb: " + err.Error())
+			panic("Got error from leveldb: " + err.Error())
 		}
 		return exists
 	}
 	return ndb.db.Get(key) != nil
 }
 
-// SaveBranch saves the given node and all of its descendants. For each node
-// about to be saved, the supplied callback is called and the returned node is
-// is saved. You may pass nil as the callback as a pass-through.
-//
-// Note that this function clears leftNode/rigthNode recursively and calls
-// hashWithCount on the given node.
-func (ndb *nodeDB) SaveBranch(node *Node, cb func(*Node)) []byte {
+// SaveBranch saves the given node and all of its descendants.
+// NOTE: This function clears leftNode/rightNode recursively and
+// calls _hash() on the given node.
+// TODO refactor, maybe use hashWithCount() but provide a callback.
+func (ndb *nodeDB) SaveBranch(node *Node) []byte {
 	if node.persisted {
 		return node.hash
 	}
 
 	if node.leftNode != nil {
-		node.leftHash = ndb.SaveBranch(node.leftNode, cb)
+		node.leftHash = ndb.SaveBranch(node.leftNode)
 	}
 	if node.rightNode != nil {
-		node.rightHash = ndb.SaveBranch(node.rightNode, cb)
-	}
-
-	if cb != nil {
-		cb(node)
+		node.rightHash = ndb.SaveBranch(node.rightNode)
 	}
 
 	node._hash()
@@ -161,7 +155,7 @@ func (ndb *nodeDB) SaveBranch(node *Node, cb func(*Node)) []byte {
 }
 
 // DeleteVersion deletes a tree version from disk.
-func (ndb *nodeDB) DeleteVersion(version uint64) {
+func (ndb *nodeDB) DeleteVersion(version int64) {
 	ndb.mtx.Lock()
 	defer ndb.mtx.Unlock()
 
@@ -169,61 +163,46 @@ func (ndb *nodeDB) DeleteVersion(version uint64) {
 	ndb.deleteRoot(version)
 }
 
-// Unorphan deletes the orphan entry from disk, but not the node it points to.
-func (ndb *nodeDB) Unorphan(hash []byte) {
-	ndb.mtx.Lock()
-	defer ndb.mtx.Unlock()
-
-	indexKey := ndb.orphanIndexKey(hash)
-
-	if orphansKey := ndb.db.Get(indexKey); len(orphansKey) > 0 {
-		ndb.batch.Delete(orphansKey)
-		ndb.batch.Delete(indexKey)
-	}
-}
-
 // Saves orphaned nodes to disk under a special prefix.
-func (ndb *nodeDB) SaveOrphans(version uint64, orphans map[string]uint64) {
+// version: the new version being saved.
+// orphans: the orphan nodes created since version-1
+func (ndb *nodeDB) SaveOrphans(version int64, orphans map[string]int64) {
 	ndb.mtx.Lock()
 	defer ndb.mtx.Unlock()
 
 	toVersion := ndb.getPreviousVersion(version)
 
 	for hash, fromVersion := range orphans {
+		debug("SAVEORPHAN %v-%v %X\n", fromVersion, toVersion, hash)
 		ndb.saveOrphan([]byte(hash), fromVersion, toVersion)
 	}
 }
 
 // Saves a single orphan to disk.
-func (ndb *nodeDB) saveOrphan(hash []byte, fromVersion, toVersion uint64) {
+func (ndb *nodeDB) saveOrphan(hash []byte, fromVersion, toVersion int64) {
 	if fromVersion > toVersion {
-		cmn.PanicSanity("Orphan expires before it comes alive")
+		panic(fmt.Sprintf("Orphan expires before it comes alive.  %d > %d", fromVersion, toVersion))
 	}
 	key := ndb.orphanKey(fromVersion, toVersion, hash)
 	ndb.batch.Set(key, hash)
-
-	// Set reverse-lookup index.
-	indexKey := ndb.orphanIndexKey(hash)
-	ndb.batch.Set(indexKey, key)
 }
 
 // deleteOrphans deletes orphaned nodes from disk, and the associated orphan
 // entries.
-func (ndb *nodeDB) deleteOrphans(version uint64) {
+func (ndb *nodeDB) deleteOrphans(version int64) {
 	// Will be zero if there is no previous version.
 	predecessor := ndb.getPreviousVersion(version)
 
 	// Traverse orphans with a lifetime ending at the version specified.
 	ndb.traverseOrphansVersion(version, func(key, hash []byte) {
-		var fromVersion, toVersion uint64
+		var fromVersion, toVersion int64
 
-		// See comment on `orphansKeyFmt`. Note that here, `version` and
+		// See comment on `orphanKeyFmt`. Note that here, `version` and
 		// `toVersion` are always equal.
-		fmt.Sscanf(string(key), orphansKeyFmt, &toVersion, &fromVersion)
+		fmt.Sscanf(string(key), orphanKeyFmt, &toVersion, &fromVersion)
 
 		// Delete orphan key and reverse-lookup key.
 		ndb.batch.Delete(key)
-		ndb.batch.Delete(ndb.orphanIndexKey(hash))
 
 		// If there is no predecessor, or the predecessor is earlier than the
 		// beginning of the lifetime (ie: negative lifetime), or the lifetime
@@ -231,49 +210,47 @@ func (ndb *nodeDB) deleteOrphans(version uint64) {
 		// can delete the orphan.  Otherwise, we shorten its lifetime, by
 		// moving its endpoint to the previous version.
 		if predecessor < fromVersion || fromVersion == toVersion {
+			debug("DELETE predecessor:%v fromVersion:%v toVersion:%v %X\n", predecessor, fromVersion, toVersion, hash)
 			ndb.batch.Delete(ndb.nodeKey(hash))
 			ndb.uncacheNode(hash)
 		} else {
+			debug("MOVE predecessor:%v fromVersion:%v toVersion:%v %X\n", predecessor, fromVersion, toVersion, hash)
 			ndb.saveOrphan(hash, fromVersion, predecessor)
 		}
 	})
 }
 
 func (ndb *nodeDB) nodeKey(hash []byte) []byte {
-	return []byte(fmt.Sprintf(nodesKeyFmt, hash))
+	return []byte(fmt.Sprintf(nodeKeyFmt, hash))
 }
 
-func (ndb *nodeDB) orphanIndexKey(hash []byte) []byte {
-	return []byte(fmt.Sprintf(orphansIndexKeyFmt, hash))
+func (ndb *nodeDB) orphanKey(fromVersion, toVersion int64, hash []byte) []byte {
+	return []byte(fmt.Sprintf(orphanKeyFmt, toVersion, fromVersion, hash))
 }
 
-func (ndb *nodeDB) orphanKey(fromVersion, toVersion uint64, hash []byte) []byte {
-	return []byte(fmt.Sprintf(orphansKeyFmt, toVersion, fromVersion, hash))
+func (ndb *nodeDB) rootKey(version int64) []byte {
+	return []byte(fmt.Sprintf(rootPrefixFmt, version))
 }
 
-func (ndb *nodeDB) rootKey(version uint64) []byte {
-	return []byte(fmt.Sprintf(rootsPrefixFmt, version))
-}
-
-func (ndb *nodeDB) getLatestVersion() uint64 {
+func (ndb *nodeDB) getLatestVersion() int64 {
 	if ndb.latestVersion == 0 {
 		ndb.getVersions()
 	}
 	return ndb.latestVersion
 }
 
-func (ndb *nodeDB) getVersions() map[uint64][]byte {
+func (ndb *nodeDB) getVersions() map[int64][]byte {
 	if len(ndb.versionCache) == 0 {
-		ndb.traversePrefix([]byte(rootsPrefix), func(k, hash []byte) {
-			var version uint64
-			fmt.Sscanf(string(k), rootsPrefixFmt, &version)
+		ndb.traversePrefix([]byte(rootPrefix), func(k, hash []byte) {
+			var version int64
+			fmt.Sscanf(string(k), rootPrefixFmt, &version)
 			ndb.cacheVersion(version, hash)
 		})
 	}
 	return ndb.versionCache
 }
 
-func (ndb *nodeDB) cacheVersion(version uint64, hash []byte) {
+func (ndb *nodeDB) cacheVersion(version int64, hash []byte) {
 	ndb.versionCache[version] = hash
 
 	if version > ndb.getLatestVersion() {
@@ -281,8 +258,8 @@ func (ndb *nodeDB) cacheVersion(version uint64, hash []byte) {
 	}
 }
 
-func (ndb *nodeDB) getPreviousVersion(version uint64) uint64 {
-	var result uint64
+func (ndb *nodeDB) getPreviousVersion(version int64) int64 {
+	var result int64
 	for v := range ndb.getVersions() {
 		if v < version && v > result {
 			result = v
@@ -292,50 +269,43 @@ func (ndb *nodeDB) getPreviousVersion(version uint64) uint64 {
 }
 
 // deleteRoot deletes the root entry from disk, but not the node it points to.
-func (ndb *nodeDB) deleteRoot(version uint64) {
+func (ndb *nodeDB) deleteRoot(version int64) {
+	if version == ndb.getLatestVersion() {
+		panic("Tried to delete latest version")
+	}
+
 	key := ndb.rootKey(version)
 	ndb.batch.Delete(key)
-
 	delete(ndb.versionCache, version)
-
-	if version == ndb.getLatestVersion() {
-		cmn.PanicSanity("Tried to delete latest version")
-	}
 }
 
 func (ndb *nodeDB) traverseOrphans(fn func(k, v []byte)) {
-	ndb.traversePrefix([]byte(orphansPrefix), fn)
+	ndb.traversePrefix([]byte(orphanPrefix), fn)
 }
 
 // Traverse orphans ending at a certain version.
-func (ndb *nodeDB) traverseOrphansVersion(version uint64, fn func(k, v []byte)) {
-	prefix := fmt.Sprintf(orphansPrefixFmt, version)
+func (ndb *nodeDB) traverseOrphansVersion(version int64, fn func(k, v []byte)) {
+	prefix := fmt.Sprintf(orphanPrefixFmt, version)
 	ndb.traversePrefix([]byte(prefix), fn)
 }
 
 // Traverse all keys.
 func (ndb *nodeDB) traverse(fn func(key, value []byte)) {
-	it := ndb.db.Iterator()
-	defer it.Release()
+	itr := ndb.db.Iterator(nil, nil)
+	defer itr.Close()
 
-	for it.Next() {
-		fn(it.Key(), it.Value())
-	}
-	if err := it.Error(); err != nil {
-		cmn.PanicSanity(err.Error())
+	for ; itr.Valid(); itr.Next() {
+		fn(itr.Key(), itr.Value())
 	}
 }
 
 // Traverse all keys with a certain prefix.
 func (ndb *nodeDB) traversePrefix(prefix []byte, fn func(k, v []byte)) {
-	it := ndb.db.IteratorPrefix(prefix)
-	defer it.Release()
+	itr := dbm.IteratePrefix(ndb.db, prefix)
+	defer itr.Close()
 
-	for it.Next() {
-		fn(it.Key(), it.Value())
-	}
-	if err := it.Error(); err != nil {
-		cmn.PanicSanity(err.Error())
+	for ; itr.Valid(); itr.Next() {
+		fn(itr.Key(), itr.Value())
 	}
 }
 
@@ -368,12 +338,12 @@ func (ndb *nodeDB) Commit() {
 	ndb.batch = ndb.db.NewBatch()
 }
 
-func (ndb *nodeDB) getRoots() (map[uint64][]byte, error) {
-	roots := map[uint64][]byte{}
+func (ndb *nodeDB) getRoots() (map[int64][]byte, error) {
+	roots := map[int64][]byte{}
 
-	ndb.traversePrefix([]byte(rootsPrefix), func(k, v []byte) {
-		var version uint64
-		fmt.Sscanf(string(k), rootsPrefixFmt, &version)
+	ndb.traversePrefix([]byte(rootPrefix), func(k, v []byte) {
+		var version int64
+		fmt.Sscanf(string(k), rootPrefixFmt, &version)
 		roots[version] = v
 	})
 	return roots, nil
@@ -381,23 +351,29 @@ func (ndb *nodeDB) getRoots() (map[uint64][]byte, error) {
 
 // SaveRoot creates an entry on disk for the given root, so that it can be
 // loaded later.
-func (ndb *nodeDB) SaveRoot(root *Node, version uint64) error {
+func (ndb *nodeDB) SaveRoot(root *Node, version int64) error {
+	if len(root.hash) == 0 {
+		panic("Hash should not be empty")
+	}
+	return ndb.saveRoot(root.hash, version)
+}
+
+// SaveEmptyRoot creates an entry on disk for an empty root.
+func (ndb *nodeDB) SaveEmptyRoot(version int64) error {
+	return ndb.saveRoot([]byte{}, version)
+}
+
+func (ndb *nodeDB) saveRoot(hash []byte, version int64) error {
 	ndb.mtx.Lock()
 	defer ndb.mtx.Unlock()
 
-	if len(root.hash) == 0 {
-		cmn.PanicSanity("Hash should not be empty")
-	}
-	if version <= ndb.getLatestVersion() {
-		return errors.New("can't save root with lower or equal version than latest")
+	if version != ndb.getLatestVersion()+1 {
+		return fmt.Errorf("Must save consecutive versions. Expected %d, got %d", ndb.getLatestVersion()+1, version)
 	}
 
-	// Note that we don't use the version attribute of the root. This is
-	// because we might be saving an old root at a new version in the case
-	// where the tree wasn't modified between versions.
 	key := ndb.rootKey(version)
-	ndb.batch.Set(key, root.hash)
-	ndb.cacheVersion(version, root.hash)
+	ndb.batch.Set(key, hash)
+	ndb.cacheVersion(version, hash)
 
 	return nil
 }
@@ -433,30 +409,31 @@ func (ndb *nodeDB) orphans() [][]byte {
 	return orphans
 }
 
-func (ndb *nodeDB) roots() map[uint64][]byte {
+func (ndb *nodeDB) roots() map[int64][]byte {
 	roots, _ := ndb.getRoots()
 	return roots
 }
 
+// Not efficient.
+// NOTE: DB cannot implement Size() because
+// mutations are not always synchronous.
 func (ndb *nodeDB) size() int {
-	it := ndb.db.Iterator()
 	size := 0
-
-	for it.Next() {
+	ndb.traverse(func(k, v []byte) {
 		size++
-	}
+	})
 	return size
 }
 
 func (ndb *nodeDB) traverseNodes(fn func(hash []byte, node *Node)) {
 	nodes := []*Node{}
 
-	ndb.traversePrefix([]byte(nodesPrefix), func(key, value []byte) {
+	ndb.traversePrefix([]byte(nodePrefix), func(key, value []byte) {
 		node, err := MakeNode(value)
 		if err != nil {
-			cmn.PanicSanity("Couldn't decode node from database")
+			panic("Couldn't decode node from database")
 		}
-		fmt.Sscanf(string(key), nodesKeyFmt, &node.hash)
+		fmt.Sscanf(string(key), nodeKeyFmt, &node.hash)
 		nodes = append(nodes, node)
 	})
 
@@ -473,7 +450,7 @@ func (ndb *nodeDB) String() string {
 	var str string
 	index := 0
 
-	ndb.traversePrefix([]byte(rootsPrefix), func(key, value []byte) {
+	ndb.traversePrefix([]byte(rootPrefix), func(key, value []byte) {
 		str += fmt.Sprintf("%s: %x\n", string(key), value)
 	})
 	str += "\n"
@@ -483,20 +460,15 @@ func (ndb *nodeDB) String() string {
 	})
 	str += "\n"
 
-	ndb.traversePrefix([]byte(orphansIndexPrefix), func(key, value []byte) {
-		str += fmt.Sprintf("%s: %s\n", string(key), value)
-	})
-	str += "\n"
-
 	ndb.traverseNodes(func(hash []byte, node *Node) {
 		if len(hash) == 0 {
 			str += fmt.Sprintf("<nil>\n")
 		} else if node == nil {
-			str += fmt.Sprintf("%s%40x: <nil>\n", nodesPrefix, hash)
+			str += fmt.Sprintf("%s%40x: <nil>\n", nodePrefix, hash)
 		} else if node.value == nil && node.height > 0 {
-			str += fmt.Sprintf("%s%40x: %s   %-16s h=%d version=%d\n", nodesPrefix, hash, node.key, "", node.height, node.version)
+			str += fmt.Sprintf("%s%40x: %s   %-16s h=%d version=%d\n", nodePrefix, hash, node.key, "", node.height, node.version)
 		} else {
-			str += fmt.Sprintf("%s%40x: %s = %-16s h=%d version=%d\n", nodesPrefix, hash, node.key, node.value, node.height, node.version)
+			str += fmt.Sprintf("%s%40x: %s = %-16s h=%d version=%d\n", nodePrefix, hash, node.key, node.value, node.height, node.version)
 		}
 		index++
 	})
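
The orphan handling in this file now comes down to a single rule in deleteOrphans: when a version is deleted, an orphan whose lifetime ends there is either dropped outright (no earlier surviving version references it) or has its lifetime shortened to the previous surviving version. A minimal, self-contained sketch of just that rule; the orphan type and pruneOrphan helper below are hypothetical, not part of the vendored package:

```go
package main

import "fmt"

// orphan records the version range over which a node is still referenced
// by historical versions (fromVersion..toVersion, inclusive).
type orphan struct {
	hash        string
	fromVersion int64
	toVersion   int64
}

// pruneOrphan mirrors the branch in deleteOrphans: if the previous surviving
// version (predecessor) is earlier than the start of the orphan's lifetime,
// or the lifetime spans a single version, the node itself can be deleted;
// otherwise the orphan's lifetime is shortened to end at predecessor.
func pruneOrphan(o orphan, predecessor int64) (deleteNode bool, shortened orphan) {
	if predecessor < o.fromVersion || o.fromVersion == o.toVersion {
		return true, orphan{}
	}
	o.toVersion = predecessor
	return false, o
}

func main() {
	// Deleting version 5 when version 3 is the previous surviving version.
	fmt.Println(pruneOrphan(orphan{"ab", 4, 5}, 3)) // node dropped
	fmt.Println(pruneOrphan(orphan{"cd", 2, 5}, 3)) // lifetime shortened to 2..3
}
```
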
diff --git a/vendor/github.com/tendermint/iavl/orphaning_tree.go b/vendor/github.com/tendermint/iavl/orphaning_tree.go
index 432cb4b0..893afeaf 100644
--- a/vendor/github.com/tendermint/iavl/orphaning_tree.go
+++ b/vendor/github.com/tendermint/iavl/orphaning_tree.go
@@ -1,7 +1,7 @@
 package iavl
 
 import (
-	cmn "github.com/tendermint/tmlibs/common"
+	"fmt"
 )
 
 // orphaningTree is a tree which keeps track of orphaned nodes.
@@ -11,14 +11,14 @@ type orphaningTree struct {
 	// A map of orphan hash to orphan version.
 	// The version stored here is the one at which the orphan's lifetime
 	// begins.
-	orphans map[string]uint64
+	orphans map[string]int64
 }
 
 // newOrphaningTree creates a new orphaning tree from the given *Tree.
 func newOrphaningTree(t *Tree) *orphaningTree {
 	return &orphaningTree{
 		Tree:    t,
-		orphans: map[string]uint64{},
+		orphans: map[string]int64{},
 	}
 }
 
@@ -36,31 +36,26 @@ func (tree *orphaningTree) Remove(key []byte) ([]byte, bool) {
 	return val, removed
 }
 
-// Unorphan undoes the orphaning of a node, removing the orphan entry on disk
-// if necessary.
-func (tree *orphaningTree) unorphan(hash []byte) {
-	tree.deleteOrphan(hash)
-	tree.ndb.Unorphan(hash)
-}
-
-// Save the underlying Tree. Saves orphans too.
-func (tree *orphaningTree) SaveVersion(version uint64) {
-	// Save the current tree at the given version. For each saved node, we
-	// delete any existing orphan entries in the previous trees.
-	// This is necessary because sometimes tree re-balancing causes nodes to be
-	// incorrectly marked as orphaned, since tree patterns after a re-balance
-	// may mirror previous tree patterns, with matching hashes.
-	tree.ndb.SaveBranch(tree.root, func(node *Node) {
-		// The node version is set here since it isn't known until we save.
-		// Note that we only want to set the version for inner nodes the first
-		// time, as they represent the beginning of the lifetime of that node.
-		// So unless it's a leaf node, we only update version when it's 0.
-		if node.version == 0 || node.isLeaf() {
-			node.version = version
-		}
-		tree.unorphan(node._hash())
-	})
-	tree.ndb.SaveOrphans(version, tree.orphans)
+// SaveAs saves the underlying Tree and assigns it a new version.
+// Saves orphans too.
+func (tree *orphaningTree) SaveAs(version int64) {
+	if version != tree.version+1 {
+		panic(fmt.Sprintf("Expected to save version %d but tried to save %d", tree.version+1, version))
+	}
+	if tree.root == nil {
+		// There can still be orphans, for example if the root is the node being
+		// removed.
+		tree.ndb.SaveOrphans(version, tree.orphans)
+		tree.ndb.SaveEmptyRoot(version)
+	} else {
+		debug("SAVE TREE %v\n", version)
+		// Save the current tree.
+		tree.ndb.SaveBranch(tree.root)
+		tree.ndb.SaveOrphans(version, tree.orphans)
+		tree.ndb.SaveRoot(tree.root, version)
+	}
+	tree.ndb.Commit()
+	tree.version = version
 }
 
 // Add orphans to the orphan list. Doesn't write to disk.
@@ -71,17 +66,8 @@ func (tree *orphaningTree) addOrphans(orphans []*Node) {
 			continue
 		}
 		if len(node.hash) == 0 {
-			cmn.PanicSanity("Expected to find node hash, but was empty")
+			panic("Expected to find node hash, but was empty")
 		}
 		tree.orphans[string(node.hash)] = node.version
 	}
 }
-
-// Delete an orphan from the orphan list. Doesn't write to disk.
-func (tree *orphaningTree) deleteOrphan(hash []byte) (version uint64, deleted bool) {
-	if version, ok := tree.orphans[string(hash)]; ok {
-		delete(tree.orphans, string(hash))
-		return version, true
-	}
-	return 0, false
-}
diff --git a/vendor/github.com/tendermint/iavl/path.go b/vendor/github.com/tendermint/iavl/path.go
index 3351fbef..7e3bb01d 100644
--- a/vendor/github.com/tendermint/iavl/path.go
+++ b/vendor/github.com/tendermint/iavl/path.go
@@ -23,8 +23,8 @@ func (p *PathToKey) String() string {
 
 // verify check that the leafNode's hash matches the path's LeafHash and that
 // the root is the merkle hash of all the inner nodes.
-func (p *PathToKey) verify(leafNode proofLeafNode, root []byte) error {
-	hash := leafNode.Hash()
+func (p *PathToKey) verify(leafHash []byte, root []byte) error {
+	hash := leafHash
 	for _, branch := range p.InnerNodes {
 		hash = branch.Hash(hash)
 	}
@@ -92,7 +92,7 @@ type pathWithNode struct {
 }
 
 func (p *pathWithNode) verify(root []byte) error {
-	return p.Path.verify(p.Node, root)
+	return p.Path.verify(p.Node.Hash(), root)
 }
 
 // verifyPaths verifies the left and right paths individually, and makes sure
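
With the change above, PathToKey.verify starts from a pre-computed leaf hash and folds each inner node's hash up to the root. A self-contained sketch of that fold shape, using SHA-256 and a simplified inner-node layout rather than the vendored go-wire/RIPEMD-160 encoding:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

// innerNode is a simplified stand-in for proofInnerNode: the child hash is
// combined with the sibling hash that sits on the opposite side.
type innerNode struct {
	left, right []byte // exactly one side is set; the child fills the other
}

func (n innerNode) hash(child []byte) []byte {
	h := sha256.New()
	if len(n.left) == 0 {
		h.Write(child)
		h.Write(n.right)
	} else {
		h.Write(n.left)
		h.Write(child)
	}
	return h.Sum(nil)
}

// verifyPath folds a leaf hash through the inner nodes and compares the
// result against the expected root hash, mirroring PathToKey.verify.
func verifyPath(leafHash []byte, path []innerNode, root []byte) error {
	hash := leafHash
	for _, branch := range path {
		hash = branch.hash(hash)
	}
	if !bytes.Equal(hash, root) {
		return errors.New("path does not match root hash")
	}
	return nil
}

func main() {
	leaf := sha256.Sum256([]byte("key=value"))
	sibling := sha256.Sum256([]byte("sibling"))
	branch := innerNode{right: sibling[:]}

	root := branch.hash(leaf[:])
	fmt.Println(verifyPath(leaf[:], []innerNode{branch}, root)) // <nil>
}
```
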
diff --git a/vendor/github.com/tendermint/iavl/proof.go b/vendor/github.com/tendermint/iavl/proof.go
index 5c87eae8..9335a285 100644
--- a/vendor/github.com/tendermint/iavl/proof.go
+++ b/vendor/github.com/tendermint/iavl/proof.go
@@ -8,7 +8,6 @@ import (
 	"golang.org/x/crypto/ripemd160"
 
 	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -27,14 +26,15 @@ var (
 )
 
 type proofInnerNode struct {
-	Height int8
-	Size   int
-	Left   []byte
-	Right  []byte
+	Height  int8
+	Size    int64
+	Version int64
+	Left    []byte
+	Right   []byte
 }
 
 func (n *proofInnerNode) String() string {
-	return fmt.Sprintf("proofInnerNode[height=%d, %x / %x]", n.Height, n.Left, n.Right)
+	return fmt.Sprintf("proofInnerNode[height=%d, ver=%d %x / %x]", n.Height, n.Version, n.Left, n.Right)
 }
 
 func (branch proofInnerNode) Hash(childHash []byte) []byte {
@@ -43,7 +43,8 @@ func (branch proofInnerNode) Hash(childHash []byte) []byte {
 	n, err := int(0), error(nil)
 
 	wire.WriteInt8(branch.Height, buf, &n, &err)
-	wire.WriteVarint(branch.Size, buf, &n, &err)
+	wire.WriteInt64(branch.Size, buf, &n, &err)
+	wire.WriteInt64(branch.Version, buf, &n, &err)
 
 	if len(branch.Left) == 0 {
 		wire.WriteByteSlice(childHash, buf, &n, &err)
@@ -53,7 +54,7 @@ func (branch proofInnerNode) Hash(childHash []byte) []byte {
 		wire.WriteByteSlice(childHash, buf, &n, &err)
 	}
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Failed to hash proofInnerNode: %v", err))
+		panic(fmt.Sprintf("Failed to hash proofInnerNode: %v", err))
 	}
 	hasher.Write(buf.Bytes())
 
@@ -61,9 +62,9 @@ func (branch proofInnerNode) Hash(childHash []byte) []byte {
 }
 
 type proofLeafNode struct {
-	KeyBytes   data.Bytes `json:"key"`
-	ValueBytes data.Bytes `json:"value"`
-	Version    uint64     `json:"version"`
+	KeyBytes   cmn.HexBytes `json:"key"`
+	ValueBytes cmn.HexBytes `json:"value"`
+	Version    int64        `json:"version"`
 }
 
 func (leaf proofLeafNode) Hash() []byte {
@@ -72,13 +73,13 @@ func (leaf proofLeafNode) Hash() []byte {
 	n, err := int(0), error(nil)
 
 	wire.WriteInt8(0, buf, &n, &err)
-	wire.WriteVarint(1, buf, &n, &err)
+	wire.WriteInt64(1, buf, &n, &err)
+	wire.WriteInt64(leaf.Version, buf, &n, &err)
 	wire.WriteByteSlice(leaf.KeyBytes, buf, &n, &err)
 	wire.WriteByteSlice(leaf.ValueBytes, buf, &n, &err)
-	wire.WriteUint64(leaf.Version, buf, &n, &err)
 
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Failed to hash proofLeafNode: %v", err))
+		panic(fmt.Sprintf("Failed to hash proofLeafNode: %v", err))
 	}
 	hasher.Write(buf.Bytes())
 
@@ -93,42 +94,52 @@ func (leaf proofLeafNode) isGreaterThan(key []byte) bool {
 	return bytes.Compare(leaf.KeyBytes, key) == 1
 }
 
+func (node *Node) pathToInnerKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
+	path := &PathToKey{}
+	val, err := node._pathToKey(t, key, false, path)
+	return path, val, err
+}
+
 func (node *Node) pathToKey(t *Tree, key []byte) (*PathToKey, *Node, error) {
 	path := &PathToKey{}
-	val, err := node._pathToKey(t, key, path)
+	val, err := node._pathToKey(t, key, true, path)
 	return path, val, err
 }
-func (node *Node) _pathToKey(t *Tree, key []byte, path *PathToKey) (*Node, error) {
+func (node *Node) _pathToKey(t *Tree, key []byte, skipInner bool, path *PathToKey) (*Node, error) {
 	if node.height == 0 {
 		if bytes.Equal(node.key, key) {
 			return node, nil
 		}
 		return nil, errors.New("key does not exist")
+	} else if !skipInner && bytes.Equal(node.key, key) {
+		return node, nil
 	}
 
 	if bytes.Compare(key, node.key) < 0 {
-		if n, err := node.getLeftNode(t)._pathToKey(t, key, path); err != nil {
+		if n, err := node.getLeftNode(t)._pathToKey(t, key, skipInner, path); err != nil {
 			return nil, err
 		} else {
 			branch := proofInnerNode{
-				Height: node.height,
-				Size:   node.size,
-				Left:   nil,
-				Right:  node.getRightNode(t).hash,
+				Height:  node.height,
+				Size:    node.size,
+				Version: node.version,
+				Left:    nil,
+				Right:   node.getRightNode(t).hash,
 			}
 			path.InnerNodes = append(path.InnerNodes, branch)
 			return n, nil
 		}
 	}
 
-	if n, err := node.getRightNode(t)._pathToKey(t, key, path); err != nil {
+	if n, err := node.getRightNode(t)._pathToKey(t, key, skipInner, path); err != nil {
 		return nil, err
 	} else {
 		branch := proofInnerNode{
-			Height: node.height,
-			Size:   node.size,
-			Left:   node.getLeftNode(t).hash,
-			Right:  nil,
+			Height:  node.height,
+			Size:    node.size,
+			Version: node.version,
+			Left:    node.getLeftNode(t).hash,
+			Right:   nil,
 		}
 		path.InnerNodes = append(path.InnerNodes, branch)
 		return n, nil
@@ -137,7 +148,7 @@ func (node *Node) _pathToKey(t *Tree, key []byte, path *PathToKey) (*Node, error
 
 func (t *Tree) constructKeyAbsentProof(key []byte, proof *KeyAbsentProof) error {
 	// Get the index of the first key greater than the requested key, if the key doesn't exist.
-	idx, val := t.Get(key)
+	idx, val := t.Get64(key)
 	if val != nil {
 		return errors.Errorf("couldn't construct non-existence proof: key 0x%x exists", key)
 	}
@@ -147,10 +158,10 @@ func (t *Tree) constructKeyAbsentProof(key []byte, proof *KeyAbsentProof) error
 		rkey, rval []byte
 	)
 	if idx > 0 {
-		lkey, lval = t.GetByIndex(idx - 1)
+		lkey, lval = t.GetByIndex64(idx - 1)
 	}
-	if idx <= t.Size()-1 {
-		rkey, rval = t.GetByIndex(idx)
+	if idx <= t.Size64()-1 {
+		rkey, rval = t.GetByIndex64(idx)
 	}
 
 	if lkey == nil && rkey == nil {
@@ -194,6 +205,27 @@ func (t *Tree) getWithProof(key []byte) (value []byte, proof *KeyExistsProof, er
 	return node.value, proof, nil
 }
 
+func (t *Tree) getInnerWithProof(key []byte) (proof *InnerKeyProof, err error) {
+	if t.root == nil {
+		return nil, errors.WithStack(ErrNilRoot)
+	}
+	t.root.hashWithCount() // Ensure that all hashes are calculated.
+
+	path, node, err := t.root.pathToInnerKey(t, key)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not construct path to key")
+	}
+
+	proof = &InnerKeyProof{
+		&KeyExistsProof{
+			RootHash:  t.root.hash,
+			PathToKey: path,
+			Version:   node.version,
+		},
+	}
+	return proof, nil
+}
+
 func (t *Tree) keyAbsentProof(key []byte) (*KeyAbsentProof, error) {
 	if t.root == nil {
 		return nil, errors.WithStack(ErrNilRoot)
@@ -202,7 +234,6 @@ func (t *Tree) keyAbsentProof(key []byte) (*KeyAbsentProof, error) {
 
 	proof := &KeyAbsentProof{
 		RootHash: t.root.hash,
-		Version:  t.root.version,
 	}
 	if err := t.constructKeyAbsentProof(key, proof); err != nil {
 		return nil, errors.Wrap(err, "could not construct proof of non-existence")
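
The leaf hashing change above moves the version into the hash preimage right after the leaf's height and size, ahead of the key and value. A rough sketch of that field ordering; the fixed-width encoding here is a stand-in for go-wire, so the resulting bytes and hashes will not match the vendored implementation:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// leafPreimage lays out the fields hashed for a leaf in the order used by
// proofLeafNode.Hash after the change: height (0), size (1), version, then
// the length-prefixed key and value. The encoding itself is simplified.
func leafPreimage(key, value []byte, version int64) []byte {
	buf := new(bytes.Buffer)
	buf.WriteByte(0)                              // height of a leaf
	binary.Write(buf, binary.BigEndian, int64(1)) // size of a leaf
	binary.Write(buf, binary.BigEndian, version)  // version comes before key/value
	binary.Write(buf, binary.BigEndian, int32(len(key)))
	buf.Write(key)
	binary.Write(buf, binary.BigEndian, int32(len(value)))
	buf.Write(value)
	return buf.Bytes()
}

func main() {
	pre := leafPreimage([]byte("key"), []byte("value"), 7)
	fmt.Printf("preimage: %x\nhash:     %x\n", pre, sha256.Sum256(pre))
}
```
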
diff --git a/vendor/github.com/tendermint/iavl/proof_key.go b/vendor/github.com/tendermint/iavl/proof_key.go
index a09c93fc..646508fb 100644
--- a/vendor/github.com/tendermint/iavl/proof_key.go
+++ b/vendor/github.com/tendermint/iavl/proof_key.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // KeyProof represents a proof of existence or absence of a single key.
@@ -22,10 +22,16 @@ type KeyProof interface {
 	Bytes() []byte
 }
 
+const (
+	// Used for serialization of proofs.
+	keyExistsMagicNumber = 0x50
+	keyAbsentMagicNumber = 0x51
+)
+
 // KeyExistsProof represents a proof of existence of a single key.
 type KeyExistsProof struct {
-	RootHash data.Bytes `json:"root_hash"`
-	Version  uint64     `json:"version"`
+	RootHash cmn.HexBytes `json:"root_hash"`
+	Version  int64        `json:"version"`
 
 	*PathToKey `json:"path"`
 }
@@ -42,25 +48,26 @@ func (proof *KeyExistsProof) Verify(key []byte, value []byte, root []byte) error
 	if key == nil || value == nil {
 		return errors.WithStack(ErrInvalidInputs)
 	}
-	return proof.PathToKey.verify(proofLeafNode{key, value, proof.Version}, root)
+	return proof.PathToKey.verify(proofLeafNode{key, value, proof.Version}.Hash(), root)
 }
 
 // Bytes returns a go-wire binary serialization
 func (proof *KeyExistsProof) Bytes() []byte {
-	return wire.BinaryBytes(proof)
+	return append([]byte{keyExistsMagicNumber}, wire.BinaryBytes(proof)...)
 }
 
-// ReadKeyExistsProof will deserialize a KeyExistsProof from bytes.
-func ReadKeyExistsProof(data []byte) (*KeyExistsProof, error) {
+// readKeyExistsProof will deserialize a KeyExistsProof from bytes.
+func readKeyExistsProof(data []byte) (*KeyExistsProof, error) {
 	proof := new(KeyExistsProof)
 	err := wire.ReadBinaryBytes(data, &proof)
 	return proof, err
 }
 
+///////////////////////////////////////////////////////////////////////////////
+
 // KeyAbsentProof represents a proof of the absence of a single key.
 type KeyAbsentProof struct {
-	RootHash data.Bytes `json:"root_hash"`
-	Version  uint64     `json:"version"`
+	RootHash cmn.HexBytes `json:"root_hash"`
 
 	Left  *pathWithNode `json:"left"`
 	Right *pathWithNode `json:"right"`
@@ -95,12 +102,53 @@ func (proof *KeyAbsentProof) Verify(key, value []byte, root []byte) error {
 
 // Bytes returns a go-wire binary serialization
 func (proof *KeyAbsentProof) Bytes() []byte {
-	return wire.BinaryBytes(proof)
+	return append([]byte{keyAbsentMagicNumber}, wire.BinaryBytes(proof)...)
 }
 
-// ReadKeyAbsentProof will deserialize a KeyAbsentProof from bytes.
-func ReadKeyAbsentProof(data []byte) (*KeyAbsentProof, error) {
+// readKeyAbsentProof will deserialize a KeyAbsentProof from bytes.
+func readKeyAbsentProof(data []byte) (*KeyAbsentProof, error) {
 	proof := new(KeyAbsentProof)
 	err := wire.ReadBinaryBytes(data, &proof)
 	return proof, err
 }
+
+// ReadKeyProof reads a KeyProof from a byte-slice.
+func ReadKeyProof(data []byte) (KeyProof, error) {
+	if len(data) == 0 {
+		return nil, errors.New("proof bytes are empty")
+	}
+	b, val := data[0], data[1:]
+
+	switch b {
+	case keyExistsMagicNumber:
+		return readKeyExistsProof(val)
+	case keyAbsentMagicNumber:
+		return readKeyAbsentProof(val)
+	}
+	return nil, errors.New("unrecognized proof")
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// InnerKeyProof represents a proof of existence of an inner node key.
+type InnerKeyProof struct {
+	*KeyExistsProof
+}
+
+// Verify verifies the proof is valid and returns an error if it isn't.
+func (proof *InnerKeyProof) Verify(hash []byte, value []byte, root []byte) error {
+	if !bytes.Equal(proof.RootHash, root) {
+		return errors.WithStack(ErrInvalidRoot)
+	}
+	if hash == nil || value != nil {
+		return errors.WithStack(ErrInvalidInputs)
+	}
+	return proof.PathToKey.verify(hash, root)
+}
+
+// ReadInnerKeyProof will deserialize an InnerKeyProof from bytes.
+func ReadInnerKeyProof(data []byte) (*InnerKeyProof, error) {
+	proof := new(InnerKeyProof)
+	err := wire.ReadBinaryBytes(data, &proof)
+	return proof, err
+}
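
ReadKeyProof above dispatches on a leading magic byte (0x50 for existence, 0x51 for absence) before handing the payload to the matching reader. A small stand-alone sketch of the same tag-then-dispatch shape; the decoders here only report the payload length instead of running go-wire deserialization:

```go
package main

import (
	"errors"
	"fmt"
)

const (
	tagExists byte = 0x50 // keyExistsMagicNumber
	tagAbsent byte = 0x51 // keyAbsentMagicNumber
)

// decodeProof mimics the shape of ReadKeyProof: peel off the leading magic
// byte, then hand the rest to the matching decoder.
func decodeProof(data []byte) (string, error) {
	if len(data) == 0 {
		return "", errors.New("proof bytes are empty")
	}
	tag, payload := data[0], data[1:]
	switch tag {
	case tagExists:
		return fmt.Sprintf("existence proof, %d payload bytes", len(payload)), nil
	case tagAbsent:
		return fmt.Sprintf("absence proof, %d payload bytes", len(payload)), nil
	}
	return "", errors.New("unrecognized proof")
}

func main() {
	fmt.Println(decodeProof(append([]byte{tagExists}, 0x01, 0x02, 0x03)))
	fmt.Println(decodeProof([]byte{0x99}))
}
```
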
diff --git a/vendor/github.com/tendermint/iavl/proof_range.go b/vendor/github.com/tendermint/iavl/proof_range.go
index 07cac742..40374055 100644
--- a/vendor/github.com/tendermint/iavl/proof_range.go
+++ b/vendor/github.com/tendermint/iavl/proof_range.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
-	"github.com/tendermint/go-wire/data"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // KeyInRangeProof is an interface which covers both first-in-range and last-in-range proofs.
@@ -120,8 +120,8 @@ func (proof *KeyLastInRangeProof) Verify(startKey, endKey, key, value []byte, ro
 
 // KeyRangeProof is proof that a range of keys does or does not exist.
 type KeyRangeProof struct {
-	RootHash   data.Bytes   `json:"root_hash"`
-	Version    uint64       `json:"version"`
+	RootHash   cmn.HexBytes `json:"root_hash"`
+	Versions   []int64      `json:"versions"`
 	PathToKeys []*PathToKey `json:"paths"`
 
 	Left  *pathWithNode `json:"left"`
@@ -134,11 +134,11 @@ type KeyRangeProof struct {
 func (proof *KeyRangeProof) Verify(
 	startKey, endKey []byte, limit int, keys, values [][]byte, root []byte,
 ) error {
-	if len(proof.PathToKeys) != len(keys) || len(values) != len(keys) {
-		return ErrInvalidInputs
+	if len(proof.PathToKeys) != len(keys) || len(values) != len(keys) || len(proof.Versions) != len(keys) {
+		return errors.WithStack(ErrInvalidInputs)
 	}
 	if limit > 0 && len(keys) > limit {
-		return ErrInvalidInputs
+		return errors.WithStack(ErrInvalidInputs)
 	}
 
 	// If startKey > endKey, reverse the keys and values, since our proofs are
@@ -177,8 +177,12 @@ func (proof *KeyRangeProof) Verify(
 	// If we've reached this point, it means our range isn't empty, and we have
 	// a list of keys.
 	for i, path := range proof.PathToKeys {
-		leafNode := proofLeafNode{KeyBytes: keys[i], ValueBytes: values[i]}
-		if err := path.verify(leafNode, root); err != nil {
+		leafNode := proofLeafNode{
+			KeyBytes:   keys[i],
+			ValueBytes: values[i],
+			Version:    proof.Versions[i],
+		}
+		if err := path.verify(leafNode.Hash(), root); err != nil {
 			return errors.WithStack(err)
 		}
 	}
@@ -238,20 +242,25 @@ func (t *Tree) getRangeWithProof(keyStart, keyEnd []byte, limit int) (
 		rangeStart, rangeEnd = rangeEnd, rangeStart
 	}
 
-	limited := t.IterateRangeInclusive(rangeStart, rangeEnd, ascending, func(k, v []byte) bool {
+	versions := []int64{}
+	limited := t.IterateRangeInclusive(rangeStart, rangeEnd, ascending, func(k, v []byte, version int64) bool {
 		keys = append(keys, k)
 		values = append(values, v)
+		versions = append(versions, version)
 		return len(keys) == limit
 	})
 
 	// Construct the paths such that they are always in ascending order.
 	rangeProof.PathToKeys = make([]*PathToKey, len(keys))
+	rangeProof.Versions = make([]int64, len(keys))
 	for i, k := range keys {
 		path, _, _ := t.root.pathToKey(t, k)
 		if ascending {
 			rangeProof.PathToKeys[i] = path
+			rangeProof.Versions[i] = versions[i]
 		} else {
 			rangeProof.PathToKeys[len(keys)-i-1] = path
+			rangeProof.Versions[len(keys)-i-1] = versions[i]
 		}
 	}
 
@@ -294,12 +303,12 @@ func (t *Tree) getRangeWithProof(keyStart, keyEnd []byte, limit int) (
 	if needsLeft {
 		// Find index of first key to the left, and include proof if it isn't the
 		// leftmost key.
-		if idx, _ := t.Get(rangeStart); idx > 0 {
-			lkey, lval := t.GetByIndex(idx - 1)
-			path, _, _ := t.root.pathToKey(t, lkey)
+		if idx, _ := t.Get64(rangeStart); idx > 0 {
+			lkey, lval := t.GetByIndex64(idx - 1)
+			path, node, _ := t.root.pathToKey(t, lkey)
 			rangeProof.Left = &pathWithNode{
 				Path: path,
-				Node: proofLeafNode{KeyBytes: lkey, ValueBytes: lval},
+				Node: proofLeafNode{lkey, lval, node.version},
 			}
 		}
 	}
@@ -310,12 +319,12 @@ func (t *Tree) getRangeWithProof(keyStart, keyEnd []byte, limit int) (
 	if needsRight {
 		// Find index of first key to the right, and include proof if it isn't the
 		// rightmost key.
-		if idx, _ := t.Get(rangeEnd); idx <= t.Size()-1 {
-			rkey, rval := t.GetByIndex(idx)
-			path, _, _ := t.root.pathToKey(t, rkey)
+		if idx, _ := t.Get64(rangeEnd); idx <= t.Size64()-1 {
+			rkey, rval := t.GetByIndex64(idx)
+			path, node, _ := t.root.pathToKey(t, rkey)
 			rangeProof.Right = &pathWithNode{
 				Path: path,
-				Node: proofLeafNode{KeyBytes: rkey, ValueBytes: rval},
+				Node: proofLeafNode{rkey, rval, node.version},
 			}
 		}
 	}
@@ -332,9 +341,10 @@ func (t *Tree) getFirstInRangeWithProof(keyStart, keyEnd []byte) (
 	t.root.hashWithCount() // Ensure that all hashes are calculated.
 	proof = &KeyFirstInRangeProof{}
 	proof.RootHash = t.root.hash
+	proof.Version = t.root.version
 
 	// Get the first value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, true, func(k, v []byte) bool {
+	t.IterateRangeInclusive(keyStart, keyEnd, true, func(k, v []byte, _ int64) bool {
 		key, value = k, v
 		return true
 	})
@@ -344,8 +354,8 @@ func (t *Tree) getFirstInRangeWithProof(keyStart, keyEnd []byte) (
 	}
 
 	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get(keyStart); idx-1 >= 0 && idx-1 <= t.Size()-1 {
-			k, v := t.GetByIndex(idx - 1)
+		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
+			k, v := t.GetByIndex64(idx - 1)
 			path, node, _ := t.root.pathToKey(t, k)
 			proof.Left = &pathWithNode{
 				Path: path,
@@ -355,8 +365,8 @@ func (t *Tree) getFirstInRangeWithProof(keyStart, keyEnd []byte) (
 	}
 
 	if !bytes.Equal(key, keyEnd) {
-		if idx, val := t.Get(keyEnd); idx <= t.Size()-1 && val == nil {
-			k, v := t.GetByIndex(idx)
+		if idx, val := t.Get64(keyEnd); idx <= t.Size64()-1 && val == nil {
+			k, v := t.GetByIndex64(idx)
 			path, node, _ := t.root.pathToKey(t, k)
 			proof.Right = &pathWithNode{
 				Path: path,
@@ -378,9 +388,10 @@ func (t *Tree) getLastInRangeWithProof(keyStart, keyEnd []byte) (
 
 	proof = &KeyLastInRangeProof{}
 	proof.RootHash = t.root.hash
+	proof.Version = t.root.version
 
 	// Get the last value in the range.
-	t.IterateRangeInclusive(keyStart, keyEnd, false, func(k, v []byte) bool {
+	t.IterateRangeInclusive(keyStart, keyEnd, false, func(k, v []byte, _ int64) bool {
 		key, value = k, v
 		return true
 	})
@@ -390,8 +401,8 @@ func (t *Tree) getLastInRangeWithProof(keyStart, keyEnd []byte) (
 	}
 
 	if !bytes.Equal(key, keyEnd) {
-		if idx, _ := t.Get(keyEnd); idx <= t.Size()-1 {
-			k, v := t.GetByIndex(idx)
+		if idx, _ := t.Get64(keyEnd); idx <= t.Size64()-1 {
+			k, v := t.GetByIndex64(idx)
 			path, node, _ := t.root.pathToKey(t, k)
 			proof.Right = &pathWithNode{
 				Path: path,
@@ -401,8 +412,8 @@ func (t *Tree) getLastInRangeWithProof(keyStart, keyEnd []byte) (
 	}
 
 	if !bytes.Equal(key, keyStart) {
-		if idx, _ := t.Get(keyStart); idx-1 >= 0 && idx-1 <= t.Size()-1 {
-			k, v := t.GetByIndex(idx - 1)
+		if idx, _ := t.Get64(keyStart); idx-1 >= 0 && idx-1 <= t.Size64()-1 {
+			k, v := t.GetByIndex64(idx - 1)
 			path, node, _ := t.root.pathToKey(t, k)
 			proof.Left = &pathWithNode{
 				Path: path,
diff --git a/vendor/github.com/tendermint/iavl/serialize.go b/vendor/github.com/tendermint/iavl/serialize.go
new file mode 100644
index 00000000..3b856478
--- /dev/null
+++ b/vendor/github.com/tendermint/iavl/serialize.go
@@ -0,0 +1,200 @@
+package iavl
+
+// NodeData groups together a key, value and depth.
+type NodeData struct {
+	Key   []byte
+	Value []byte
+	Depth uint8
+}
+
+// SerializeFunc is any implementation that can serialize
+// an iavl Node and its descendants.
+type SerializeFunc func(*Tree, *Node) []NodeData
+
+// RestoreFunc is an implementation that can restore an iavl tree from
+// NodeData.
+type RestoreFunc func(*Tree, []NodeData)
+
+// Restore will take an (empty) tree and restore it
+// from the keys returned from a SerializeFunc.
+func Restore(empty *Tree, kvs []NodeData) {
+	for _, kv := range kvs {
+		empty.Set(kv.Key, kv.Value)
+	}
+	empty.Hash()
+}
+
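+// RestoreUsingDepth restores an (empty) tree from NodeData entries, using the
+// recorded Depth of each leaf to rebuild the original tree shape.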
+func RestoreUsingDepth(empty *Tree, kvs []NodeData) {
+	// Create an array of arrays of nodes. We're going to store each depth in
+	// here, forming a kind of pyramid.
+	depths := [][]*Node{}
+
+	// Go through all the leaf nodes, grouping them in pairs and creating their
+	// parents recursively.
+	for _, kv := range kvs {
+		var (
+			// Left and right nodes.
+			l     *Node = nil
+			r     *Node = NewNode(kv.Key, kv.Value, 1)
+			depth uint8 = kv.Depth
+		)
+		// Create depths as needed.
+		for len(depths) < int(depth)+1 {
+			depths = append(depths, []*Node{})
+		}
+		depths[depth] = append(depths[depth], r) // Add the leaf node to this depth.
+
+		// If the nodes at this level are uneven after adding a node to it, it
+		// means we have to wait for another node to be appended before we have
+		// a pair. If we do have a pair, go up the tree until we don't.
+		for d := depth; len(depths[d])%2 == 0; d-- {
+			nodes := depths[d] // List of nodes at this depth.
+
+			l = nodes[len(nodes)-1-1]
+			r = nodes[len(nodes)-1]
+
+			depths[d-1] = append(depths[d-1], &Node{
+				key:       leftmost(r).Key,
+				height:    maxInt8(l.height, r.height) + 1,
+				size:      l.size + r.size,
+				leftNode:  l,
+				rightNode: r,
+				version:   1,
+			})
+		}
+	}
+	empty.root = depths[0][0]
+	empty.Hash()
+}
+
+// InOrderSerialize returns all key-values in the
+// key order (as stored). May be nice to read, but
+// when recovering, it will create a tree with a different shape.
+func InOrderSerialize(t *Tree, root *Node) []NodeData {
+	res := make([]NodeData, 0, root.size)
+	root.traverseWithDepth(t, true, func(node *Node, depth uint8) bool {
+		if node.height == 0 {
+			kv := NodeData{Key: node.key, Value: node.value, Depth: depth}
+			res = append(res, kv)
+		}
+		return false
+	})
+	return res
+}
+
+// StableSerializeBFS serializes the tree in a breadth-first manner.
+func StableSerializeBFS(t *Tree, root *Node) []NodeData {
+	if root == nil {
+		return nil
+	}
+
+	size := root.size
+	visited := map[string][]byte{}
+	keys := make([][]byte, 0, size)
+	numKeys := -1
+
+	// Breadth-first search. At every depth, add keys in search order. Keep
+	// going as long as we find keys at that depth. When we reach a leaf, set
+	// its value in the visited map.
+	// Since we have an AVL+ tree, the inner nodes contain only keys and not
+	// values, while the leaves contain both. Note also that there are N-1 inner
+	// nodes for N keys, so one of the leaf keys is only set once we reach the leaves
+	// of the tree.
+	for depth := uint(0); len(keys) > numKeys; depth++ {
+		numKeys = len(keys)
+		root.traverseDepth(t, depth, func(node *Node) {
+			if _, ok := visited[string(node.key)]; !ok {
+				keys = append(keys, node.key)
+				visited[string(node.key)] = nil
+			}
+			if node.isLeaf() {
+				visited[string(node.key)] = node.value
+			}
+		})
+	}
+
+	nds := make([]NodeData, size)
+	for i, k := range keys {
+		nds[i] = NodeData{k, visited[string(k)], 0}
+	}
+	return nds
+}
+
+// StableSerializeFrey exports the key-value pairs of the tree
+// in an order such that when Restored from those keys, the
+// new tree would have the same structure (and thus same
+// shape) as the original tree.
+//
+// The algorithm is basically this: take the leftmost node
+// of the left half and the leftmost node of the right half.
+// Then go down a level...
+// each time adding the leftmost node of the right side
+// (breadth-first search).
+//
+// Imagine 8 nodes in a balanced tree, split in half each time
+// 1
+// 1, 5
+// 1, 5, 3, 7
+// 1, 5, 3, 7, 2, 4, 6, 8
+func StableSerializeFrey(t *Tree, top *Node) []NodeData {
+	if top == nil {
+		return nil
+	}
+	size := top.size
+
+	// store all pending nodes for breadth-first search
+	queue := make([]*Node, 0, size)
+	queue = append(queue, top)
+
+	// to store all results - starts with the leftmost node of the whole tree
+	res := make([]NodeData, 0, size)
+	left := leftmost(top)
+	if left != nil {
+		res = append(res, *left)
+	}
+
+	var n *Node
+	for len(queue) > 0 {
+		// pop
+		n, queue = queue[0], queue[1:]
+
+		// l := n.getLeftNode(tree)
+		l := n.leftNode
+		if isInner(l) {
+			queue = append(queue, l)
+		}
+
+		// r := n.getRightNode(tree)
+		r := n.rightNode
+		if isInner(r) {
+			queue = append(queue, r)
+			left = leftmost(r)
+			if left != nil {
+				res = append(res, *left)
+			}
+		} else if isLeaf(r) {
+			kv := NodeData{Key: r.key, Value: r.value}
+			res = append(res, kv)
+		}
+	}
+
+	return res
+}
+
+func isInner(n *Node) bool {
+	return n != nil && !n.isLeaf()
+}
+
+func isLeaf(n *Node) bool {
+	return n != nil && n.isLeaf()
+}
+
+func leftmost(node *Node) *NodeData {
+	for isInner(node) {
+		node = node.leftNode
+	}
+	if node == nil {
+		return nil
+	}
+	return &NodeData{Key: node.key, Value: node.value}
+}
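
The doc comment for StableSerializeFrey gives the ordering for eight balanced keys as 1, 5, 3, 7, 2, 4, 6, 8. The sketch below reproduces that ordering over a sorted key slice, assuming a perfectly balanced tree whose inner nodes split their keys at the midpoint; spans over the slice stand in for subtrees, so this illustrates the ordering only, not the vendored traversal:

```go
package main

import "fmt"

// span stands in for a subtree over a sorted key slice: it covers
// keys[lo:hi) and is an inner node whenever it holds more than one key.
type span struct{ lo, hi int }

func (s span) inner() bool { return s.hi-s.lo > 1 }

// freyOrder emits the leftmost key of the whole tree, then walks
// breadth-first, emitting the leftmost key of every right-hand subtree
// (or the right-hand leaf itself) as it is discovered.
func freyOrder(keys []int) []int {
	if len(keys) == 0 {
		return nil
	}
	res := []int{keys[0]}
	queue := []span{{0, len(keys)}}
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]

		mid := (n.lo + n.hi) / 2
		left, right := span{n.lo, mid}, span{mid, n.hi}

		if left.inner() {
			queue = append(queue, left) // its leftmost key was already emitted
		}
		res = append(res, keys[right.lo]) // leftmost key of the right half
		if right.inner() {
			queue = append(queue, right)
		}
	}
	return res
}

func main() {
	fmt.Println(freyOrder([]int{1, 2, 3, 4, 5, 6, 7, 8}))
	// [1 5 3 7 2 4 6 8], matching the example in the doc comment above.
}
```
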
diff --git a/vendor/github.com/tendermint/iavl/tree.go b/vendor/github.com/tendermint/iavl/tree.go
index cd4d5f04..85cf6e21 100644
--- a/vendor/github.com/tendermint/iavl/tree.go
+++ b/vendor/github.com/tendermint/iavl/tree.go
@@ -4,27 +4,29 @@ import (
 	"fmt"
 	"strings"
 
-	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
 
 	"github.com/pkg/errors"
 )
 
-// Tree is an immutable AVL+ Tree. Note that this tree is not thread-safe.
+// Tree is a container for an immutable AVL+ Tree. Changes are performed by
+// swapping the internal root with a new one, while the container is mutable.
+// Note that this tree is not thread-safe.
 type Tree struct {
-	root *Node
-	ndb  *nodeDB
+	root    *Node
+	ndb     *nodeDB
+	version int64
 }
 
-// NewTree creates both im-memory and persistent instances
-func NewTree(cacheSize int, db dbm.DB) *Tree {
+// NewTree creates either an in-memory or a persistent instance, depending on whether db is nil.
+func NewTree(db dbm.DB, cacheSize int) *Tree {
 	if db == nil {
 		// In-memory Tree.
 		return &Tree{}
 	}
 	return &Tree{
 		// NodeDB-backed Tree.
-		ndb: newNodeDB(cacheSize, db),
+		ndb: newNodeDB(db, cacheSize),
 	}
 }
 
@@ -40,14 +42,31 @@ func (t *Tree) String() string {
 
 // Size returns the number of leaf nodes in the tree.
 func (t *Tree) Size() int {
+	return int(t.Size64())
+}
+
+func (t *Tree) Size64() int64 {
 	if t.root == nil {
 		return 0
 	}
 	return t.root.size
 }
 
+// Version returns the version of the tree.
+func (t *Tree) Version() int {
+	return int(t.Version64())
+}
+
+func (t *Tree) Version64() int64 {
+	return t.version
+}
+
 // Height returns the height of the tree.
-func (t *Tree) Height() int8 {
+func (t *Tree) Height() int {
+	return int(t.Height8())
+}
+
+func (t *Tree) Height8() int8 {
 	if t.root == nil {
 		return 0
 	}
@@ -70,10 +89,10 @@ func (t *Tree) Set(key []byte, value []byte) (updated bool) {
 
 func (t *Tree) set(key []byte, value []byte) (orphaned []*Node, updated bool) {
 	if value == nil {
-		cmn.PanicSanity(cmn.Fmt("Attempt to store nil value at key '%s'", key))
+		panic(fmt.Sprintf("Attempt to store nil value at key '%s'", key))
 	}
 	if t.root == nil {
-		t.root = NewNode(key, value)
+		t.root = NewNode(key, value, t.version+1)
 		return nil, false
 	}
 	t.root, updated, orphaned = t.root.set(t, key, value)
@@ -91,7 +110,7 @@ func (t *Tree) Hash() []byte {
 }
 
 // hashWithCount returns the root hash and hash count.
-func (t *Tree) hashWithCount() ([]byte, int) {
+func (t *Tree) hashWithCount() ([]byte, int64) {
 	if t.root == nil {
 		return nil, 0
 	}
@@ -101,6 +120,11 @@ func (t *Tree) hashWithCount() ([]byte, int) {
 // Get returns the index and value of the specified key if it exists, or nil
 // and the next index, if it doesn't.
 func (t *Tree) Get(key []byte) (index int, value []byte) {
+	index64, value := t.Get64(key)
+	return int(index64), value
+}
+
+func (t *Tree) Get64(key []byte) (index int64, value []byte) {
 	if t.root == nil {
 		return 0, nil
 	}
@@ -109,6 +133,10 @@ func (t *Tree) Get(key []byte) (index int, value []byte) {
 
 // GetByIndex gets the key and value at the specified index.
 func (t *Tree) GetByIndex(index int) (key []byte, value []byte) {
+	return t.GetByIndex64(int64(index))
+}
+
+func (t *Tree) GetByIndex64(index int64) (key []byte, value []byte) {
 	if t.root == nil {
 		return nil, nil
 	}
@@ -194,7 +222,7 @@ func (t *Tree) IterateRange(start, end []byte, ascending bool, fn func(key []byt
 	if t.root == nil {
 		return false
 	}
-	return t.root.traverseInRange(t, start, end, ascending, false, func(node *Node) bool {
+	return t.root.traverseInRange(t, start, end, ascending, false, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
 			return fn(node.key, node.value)
 		} else {
@@ -205,35 +233,27 @@ func (t *Tree) IterateRange(start, end []byte, ascending bool, fn func(key []byt
 
 // IterateRangeInclusive makes a callback for all nodes with key between start and end inclusive.
 // If either are nil, then it is open on that side (nil, nil is the same as Iterate)
-func (t *Tree) IterateRangeInclusive(start, end []byte, ascending bool, fn func(key []byte, value []byte) bool) (stopped bool) {
+func (t *Tree) IterateRangeInclusive(start, end []byte, ascending bool, fn func(key, value []byte, version int64) bool) (stopped bool) {
 	if t.root == nil {
 		return false
 	}
-	return t.root.traverseInRange(t, start, end, ascending, true, func(node *Node) bool {
+	return t.root.traverseInRange(t, start, end, ascending, true, 0, func(node *Node, _ uint8) bool {
 		if node.height == 0 {
-			return fn(node.key, node.value)
+			return fn(node.key, node.value, node.version)
 		} else {
 			return false
 		}
 	})
 }
 
-// Clone creates a clone of the tree. Used internally by VersionedTree.
+// Clone creates a clone of the tree.
+// Used internally by VersionedTree.
 func (tree *Tree) clone() *Tree {
 	return &Tree{
-		root: tree.root,
-		ndb:  tree.ndb,
-	}
-}
-
-// Load the tree from disk, from the given root hash, including all orphans.
-// Used internally by VersionedTree.
-func (tree *Tree) load(root []byte) {
-	if len(root) == 0 {
-		tree.root = nil
-		return
+		root:    tree.root,
+		ndb:     tree.ndb,
+		version: tree.version,
 	}
-	tree.root = tree.ndb.GetNode(root)
 }
 
 // nodeSize is like Size, but includes inner nodes too.
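
IterateRangeInclusive now passes the node's version to the callback alongside the key and value, and callers such as getRangeWithProof collect all three and stop once a limit is hit. A small sketch of that callback pattern over plain slices; the iterate helper is a stand-in for the tree traversal:

```go
package main

import "fmt"

type entry struct {
	key, value string
	version    int64
}

// iterate is a stand-in for Tree.IterateRangeInclusive: it calls fn for each
// entry in order and stops early as soon as fn returns true.
func iterate(entries []entry, fn func(key, value string, version int64) bool) (stopped bool) {
	for _, e := range entries {
		if fn(e.key, e.value, e.version) {
			return true
		}
	}
	return false
}

func main() {
	data := []entry{{"a", "1", 3}, {"b", "2", 5}, {"c", "3", 5}}

	limit := 2
	var keys, values []string
	var versions []int64
	limited := iterate(data, func(k, v string, version int64) bool {
		keys = append(keys, k)
		values = append(values, v)
		versions = append(versions, version)
		return len(keys) == limit // stop once the limit is reached
	})
	fmt.Println(keys, values, versions, limited) // [a b] [1 2] [3 5] true
}
```
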
diff --git a/vendor/github.com/tendermint/iavl/version.go b/vendor/github.com/tendermint/iavl/version.go
index 24e0790d..34df9474 100644
--- a/vendor/github.com/tendermint/iavl/version.go
+++ b/vendor/github.com/tendermint/iavl/version.go
@@ -1,3 +1,3 @@
 package iavl
 
-const Version = "0.5.0"
+const Version = "0.7.0"
diff --git a/vendor/github.com/tendermint/iavl/versioned_tree.go b/vendor/github.com/tendermint/iavl/versioned_tree.go
index 607b10c0..9fef5a15 100644
--- a/vendor/github.com/tendermint/iavl/versioned_tree.go
+++ b/vendor/github.com/tendermint/iavl/versioned_tree.go
@@ -1,6 +1,7 @@
 package iavl
 
 import (
+	"bytes"
 	"fmt"
 
 	"github.com/pkg/errors"
@@ -11,29 +12,23 @@ var ErrVersionDoesNotExist = fmt.Errorf("version does not exist")
 
 // VersionedTree is a persistent tree which keeps track of versions.
 type VersionedTree struct {
-	*orphaningTree                  // The current, working tree.
-	versions       map[uint64]*Tree // The previous, saved versions of the tree.
-	latestVersion  uint64           // The latest saved version.
+	*orphaningTree                 // The current, working tree.
+	versions       map[int64]*Tree // The previous, saved versions of the tree.
 	ndb            *nodeDB
 }
 
 // NewVersionedTree returns a new tree with the specified cache size and datastore.
-func NewVersionedTree(cacheSize int, db dbm.DB) *VersionedTree {
-	ndb := newNodeDB(cacheSize, db)
+func NewVersionedTree(db dbm.DB, cacheSize int) *VersionedTree {
+	ndb := newNodeDB(db, cacheSize)
 	head := &Tree{ndb: ndb}
 
 	return &VersionedTree{
 		orphaningTree: newOrphaningTree(head),
-		versions:      map[uint64]*Tree{},
+		versions:      map[int64]*Tree{},
 		ndb:           ndb,
 	}
 }
 
-// LatestVersion returns the latest saved version of the tree.
-func (tree *VersionedTree) LatestVersion() uint64 {
-	return tree.latestVersion
-}
-
 // IsEmpty returns whether or not the tree has any keys. Only trees that are
 // not empty can be saved.
 func (tree *VersionedTree) IsEmpty() bool {
@@ -41,7 +36,7 @@ func (tree *VersionedTree) IsEmpty() bool {
 }
 
 // VersionExists returns whether or not a version exists.
-func (tree *VersionedTree) VersionExists(version uint64) bool {
+func (tree *VersionedTree) VersionExists(version int64) bool {
 	_, ok := tree.versions[version]
 	return ok
 }
@@ -54,8 +49,8 @@ func (tree *VersionedTree) Tree() *Tree {
 // Hash returns the hash of the latest saved version of the tree, as returned
 // by SaveVersion. If no versions have been saved, Hash returns nil.
 func (tree *VersionedTree) Hash() []byte {
-	if tree.latestVersion > 0 {
-		return tree.versions[tree.latestVersion].Hash()
+	if tree.version > 0 {
+		return tree.versions[tree.version].Hash()
 	}
 	return nil
 }
@@ -75,38 +70,75 @@ func (tree *VersionedTree) Remove(key []byte) ([]byte, bool) {
 	return tree.orphaningTree.Remove(key)
 }
 
-// Load a versioned tree from disk. All tree versions are loaded automatically.
-func (tree *VersionedTree) Load() error {
+// Load the latest versioned tree from disk.
+//
+// Returns the version number of the latest version found
+func (tree *VersionedTree) Load() (int64, error) {
+	return tree.LoadVersion(0)
+}
+
+// Load a versioned tree from disk.
+//
+// If version is 0, the latest version is loaded.
+//
+// Returns the version number of the latest version found
+func (tree *VersionedTree) LoadVersion(targetVersion int64) (int64, error) {
 	roots, err := tree.ndb.getRoots()
 	if err != nil {
-		return err
+		return 0, err
 	}
 	if len(roots) == 0 {
-		return nil
+		return 0, nil
 	}
 
 	// Load all roots from the database.
+	latestVersion := int64(0)
 	for version, root := range roots {
-		t := &Tree{ndb: tree.ndb}
-		t.load(root)
 
+		// Construct a tree manually.
+		t := &Tree{}
+		t.ndb = tree.ndb
+		t.version = version
+		if len(root) != 0 {
+			t.root = tree.ndb.GetNode(root)
+		}
 		tree.versions[version] = t
 
-		if version > tree.latestVersion {
-			tree.latestVersion = version
+		if version > latestVersion &&
+			(targetVersion == 0 || version <= targetVersion) {
+
+			latestVersion = version
 		}
 	}
 
+	// Validate latestVersion
+	if !(targetVersion == 0 || latestVersion == targetVersion) {
+		return latestVersion, fmt.Errorf("Wanted to load target %v but only found up to %v",
+			targetVersion, latestVersion)
+	}
+
 	// Set the working tree to a copy of the latest.
 	tree.orphaningTree = newOrphaningTree(
-		tree.versions[tree.latestVersion].clone(),
+		tree.versions[latestVersion].clone(),
 	)
 
-	return nil
+	return latestVersion, nil
+}
+
+// Rollback resets the working tree to the latest saved version, discarding
+// any unsaved modifications.
+func (tree *VersionedTree) Rollback() {
+	if tree.version > 0 {
+		tree.orphaningTree = newOrphaningTree(
+			tree.versions[tree.version].clone(),
+		)
+	} else {
+		tree.orphaningTree = newOrphaningTree(&Tree{ndb: tree.ndb, version: 0})
+	}
 }
 
 // GetVersioned gets the value at the specified key and version.
-func (tree *VersionedTree) GetVersioned(key []byte, version uint64) (
+func (tree *VersionedTree) GetVersioned(key []byte, version int64) (
 	index int, value []byte,
 ) {
 	if t, ok := tree.versions[version]; ok {
@@ -116,43 +148,39 @@ func (tree *VersionedTree) GetVersioned(key []byte, version uint64) (
 }
 
 // SaveVersion saves a new tree version to disk, based on the current state of
-// the tree. Multiple calls to SaveVersion with the same version are not allowed.
-func (tree *VersionedTree) SaveVersion(version uint64) ([]byte, error) {
+// the tree. Returns the hash and new version number.
+func (tree *VersionedTree) SaveVersion() ([]byte, int64, error) {
+	version := tree.version + 1
+
 	if _, ok := tree.versions[version]; ok {
-		return nil, errors.Errorf("version %d was already saved", version)
-	}
-	if tree.root == nil {
-		return nil, ErrNilRoot
-	}
-	if version == 0 {
-		return nil, errors.New("version must be greater than zero")
-	}
-	if version <= tree.latestVersion {
-		return nil, errors.Errorf("version must be greater than latest (%d <= %d)",
-			version, tree.latestVersion)
+		// Same hash means idempotent.  Return success.
+		var existingHash = tree.versions[version].Hash()
+		var newHash = tree.orphaningTree.Hash()
+		if bytes.Equal(existingHash, newHash) {
+			tree.orphaningTree = newOrphaningTree(tree.versions[version].clone())
+			return existingHash, version, nil
+		}
+		return nil, version, errors.Errorf("version %d was already saved to different hash %X (existing hash %X)",
+			version, newHash, existingHash)
 	}
 
-	tree.latestVersion = version
+	// Persist version and stash to .versions.
+	tree.orphaningTree.SaveAs(version)
 	tree.versions[version] = tree.orphaningTree.Tree
 
-	tree.orphaningTree.SaveVersion(version)
-	tree.orphaningTree = newOrphaningTree(
-		tree.versions[version].clone(),
-	)
-
-	tree.ndb.SaveRoot(tree.root, version)
-	tree.ndb.Commit()
+	// Set new working tree.
+	tree.orphaningTree = newOrphaningTree(tree.orphaningTree.clone())
 
-	return tree.root.hash, nil
+	return tree.Hash(), version, nil
 }
 
 // DeleteVersion deletes a tree version from disk. The version can then no
 // longer be accessed.
-func (tree *VersionedTree) DeleteVersion(version uint64) error {
+func (tree *VersionedTree) DeleteVersion(version int64) error {
 	if version == 0 {
 		return errors.New("version must be greater than 0")
 	}
-	if version == tree.latestVersion {
+	if version == tree.version {
 		return errors.Errorf("cannot delete latest saved version (%d)", version)
 	}
 	if _, ok := tree.versions[version]; !ok {
@@ -170,7 +198,7 @@ func (tree *VersionedTree) DeleteVersion(version uint64) error {
 // GetVersionedWithProof gets the value under the key at the specified version
 // if it exists, or returns nil.  A proof of existence or absence is returned
 // alongside the value.
-func (tree *VersionedTree) GetVersionedWithProof(key []byte, version uint64) ([]byte, KeyProof, error) {
+func (tree *VersionedTree) GetVersionedWithProof(key []byte, version int64) ([]byte, KeyProof, error) {
 	if t, ok := tree.versions[version]; ok {
 		return t.GetWithProof(key)
 	}
@@ -181,7 +209,7 @@ func (tree *VersionedTree) GetVersionedWithProof(key []byte, version uint64) ([]
 // and limit. To specify a descending range, swap the start and end keys.
 //
 // Returns a list of keys, a list of values and a proof.
-func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version uint64) ([][]byte, [][]byte, *KeyRangeProof, error) {
+func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) ([][]byte, [][]byte, *KeyRangeProof, error) {
 	if t, ok := tree.versions[version]; ok {
 		return t.GetRangeWithProof(startKey, endKey, limit)
 	}
@@ -190,7 +218,7 @@ func (tree *VersionedTree) GetVersionedRangeWithProof(startKey, endKey []byte, l
 
 // GetVersionedFirstInRangeWithProof gets the first key/value pair in the
 // specified range, with a proof.
-func (tree *VersionedTree) GetVersionedFirstInRangeWithProof(startKey, endKey []byte, version uint64) ([]byte, []byte, *KeyFirstInRangeProof, error) {
+func (tree *VersionedTree) GetVersionedFirstInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyFirstInRangeProof, error) {
 	if t, ok := tree.versions[version]; ok {
 		return t.GetFirstInRangeWithProof(startKey, endKey)
 	}
@@ -199,7 +227,7 @@ func (tree *VersionedTree) GetVersionedFirstInRangeWithProof(startKey, endKey []
 
 // GetVersionedLastInRangeWithProof gets the last key/value pair in the
 // specified range, with a proof.
-func (tree *VersionedTree) GetVersionedLastInRangeWithProof(startKey, endKey []byte, version uint64) ([]byte, []byte, *KeyLastInRangeProof, error) {
+func (tree *VersionedTree) GetVersionedLastInRangeWithProof(startKey, endKey []byte, version int64) ([]byte, []byte, *KeyLastInRangeProof, error) {
 	if t, ok := tree.versions[version]; ok {
 		return t.GetLastInRangeWithProof(startKey, endKey)
 	}
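
SaveVersion above no longer takes a version argument: it always saves the current version plus one, and re-saving the same hash at that version is treated as an idempotent success. A toy sketch of that contract, with string hashes standing in for real tree hashes:

```go
package main

import "fmt"

// versionStore is a toy stand-in for VersionedTree's version bookkeeping.
type versionStore struct {
	version int64
	saved   map[int64]string // version -> hash
}

// saveVersion mirrors the new SaveVersion contract: the next version is
// always current+1; re-saving the same hash at an already-saved version is
// an idempotent success, while a different hash is an error.
func (s *versionStore) saveVersion(hash string) (string, int64, error) {
	version := s.version + 1
	if existing, ok := s.saved[version]; ok {
		if existing == hash {
			s.version = version // already saved; just catch up
			return existing, version, nil
		}
		return "", version, fmt.Errorf("version %d was already saved to different hash %s", version, existing)
	}
	s.saved[version] = hash
	s.version = version
	return hash, version, nil
}

func main() {
	// Version 1 already exists on disk, e.g. found while loading at startup.
	s := &versionStore{saved: map[int64]string{1: "a1"}}
	fmt.Println(s.saveVersion("a1")) // a1 1 <nil>  (idempotent re-save)
	fmt.Println(s.saveVersion("b2")) // b2 2 <nil>
}
```
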
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/pool.go b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
index e39749dc..603b4bf2 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/pool.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/pool.go
@@ -1,18 +1,21 @@
 package blockchain
 
 import (
+	"errors"
+	"fmt"
 	"math"
 	"sync"
 	"time"
 
-	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tmlibs/common"
 	flow "github.com/tendermint/tmlibs/flowrate"
 	"github.com/tendermint/tmlibs/log"
+
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
 )
 
 /*
-
 eg, L = latency = 0.1s
 	P = num peers = 10
 	FN = num full nodes
@@ -22,7 +25,6 @@ eg, L = latency = 0.1s
 	B/S = CB/P/BS = 12.8 blocks/s
 
 	12.8 * 0.1 = 1.28 blocks on conn
-
 */
 
 const (
@@ -30,10 +32,20 @@ const (
 	maxTotalRequesters        = 1000
 	maxPendingRequests        = maxTotalRequesters
 	maxPendingRequestsPerPeer = 50
-	minRecvRate               = 10240 // 10Kb/s
+
+	// Minimum recv rate to ensure we're receiving blocks from a peer fast
+	// enough. If a peer is not sending us data at least at that rate, we
+	// consider them to have timed out and we disconnect.
+	//
+	// Assuming a DSL connection (not a good choice) with 128 Kbps upload
+	// (~ 15 KB/s), sending data across the Atlantic gives ~ 7.5 KB/s.
+	minRecvRate = 7680
+
+	// Maximum difference between current and new block's height.
+	maxDiffBetweenCurrentAndReceivedBlockHeight = 100
 )
 
-var peerTimeoutSeconds = time.Duration(15) // not const so we can override with tests
+var peerTimeout = 15 * time.Second // not const so we can override with tests
 
 /*
 	Peers self report their heights when we join the block pool.
@@ -56,23 +68,23 @@ type BlockPool struct {
 	height     int64 // the lowest key in requesters.
 	numPending int32 // number of requests pending assignment or block response
 	// peers
-	peers         map[string]*bpPeer
+	peers         map[p2p.ID]*bpPeer
 	maxPeerHeight int64
 
 	requestsCh chan<- BlockRequest
-	timeoutsCh chan<- string
+	errorsCh   chan<- peerError
 }
 
-func NewBlockPool(start int64, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool {
+func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
 	bp := &BlockPool{
-		peers: make(map[string]*bpPeer),
+		peers: make(map[p2p.ID]*bpPeer),
 
 		requesters: make(map[int64]*bpRequester),
 		height:     start,
 		numPending: 0,
 
 		requestsCh: requestsCh,
-		timeoutsCh: timeoutsCh,
+		errorsCh:   errorsCh,
 	}
 	bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp)
 	return bp
@@ -88,7 +100,6 @@ func (pool *BlockPool) OnStop() {}
 
 // Run spawns requesters as needed.
 func (pool *BlockPool) makeRequestersRoutine() {
-
 	for {
 		if !pool.IsRunning() {
 			break
@@ -119,10 +130,14 @@ func (pool *BlockPool) removeTimedoutPeers() {
 	for _, peer := range pool.peers {
 		if !peer.didTimeout && peer.numPending > 0 {
 			curRate := peer.recvMonitor.Status().CurRate
-			// XXX remove curRate != 0
+			// curRate can be 0 on start
 			if curRate != 0 && curRate < minRecvRate {
-				pool.sendTimeout(peer.id)
-				pool.Logger.Error("SendTimeout", "peer", peer.id, "reason", "curRate too low")
+				err := errors.New("peer is not sending us data fast enough")
+				pool.sendError(err, peer.id)
+				pool.Logger.Error("SendTimeout", "peer", peer.id,
+					"reason", err,
+					"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
+					"minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024))
 				peer.didTimeout = true
 			}
 		}
@@ -189,35 +204,43 @@ func (pool *BlockPool) PopRequest() {
 		delete(pool.requesters, pool.height)
 		pool.height++
 	} else {
-		cmn.PanicSanity(cmn.Fmt("Expected requester to pop, got nothing at height %v", pool.height))
+		panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
 	}
 }
 
 // Invalidates the block at pool.height,
 // Remove the peer and redo request from others.
-func (pool *BlockPool) RedoRequest(height int64) {
+// Returns the ID of the removed peer.
+func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
 
 	request := pool.requesters[height]
 
 	if request.block == nil {
-		cmn.PanicSanity("Expected block to be non-nil")
+		panic("Expected block to be non-nil")
 	}
+
 	// RemovePeer will redo all requesters associated with this peer.
-	// TODO: record this malfeasance
 	pool.removePeer(request.peerID)
+	return request.peerID
 }
 
 // TODO: ensure that blocks come in order for each peer.
-func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int) {
+func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
 
 	requester := pool.requesters[block.Height]
 	if requester == nil {
-		// a block we didn't expect.
-		// TODO:if height is too far ahead, punish peer
+		pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
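+		// if the unexpected block is too far ahead/behind our current height,
+		// report the peer so it can be disconnected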
+		diff := pool.height - block.Height
+		if diff < 0 {
+			diff *= -1
+		}
+		if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
+			pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
+		}
 		return
 	}
 
@@ -240,7 +263,7 @@ func (pool *BlockPool) MaxPeerHeight() int64 {
 }
 
 // Sets the peer's alleged blockchain height.
-func (pool *BlockPool) SetPeerHeight(peerID string, height int64) {
+func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
 
@@ -258,14 +281,14 @@ func (pool *BlockPool) SetPeerHeight(peerID string, height int64) {
 	}
 }
 
-func (pool *BlockPool) RemovePeer(peerID string) {
+func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
 
 	pool.removePeer(peerID)
 }
 
-func (pool *BlockPool) removePeer(peerID string) {
+func (pool *BlockPool) removePeer(peerID p2p.ID) {
 	for _, requester := range pool.requesters {
 		if requester.getPeerID() == peerID {
 			if requester.getBlock() != nil {
@@ -321,18 +344,18 @@ func (pool *BlockPool) requestersLen() int64 {
 	return int64(len(pool.requesters))
 }
 
-func (pool *BlockPool) sendRequest(height int64, peerID string) {
+func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
 	if !pool.IsRunning() {
 		return
 	}
 	pool.requestsCh <- BlockRequest{height, peerID}
 }
 
-func (pool *BlockPool) sendTimeout(peerID string) {
+func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
 	if !pool.IsRunning() {
 		return
 	}
-	pool.timeoutsCh <- peerID
+	pool.errorsCh <- peerError{err, peerID}
 }
 
 // unused by tendermint; left for debugging purposes
@@ -357,7 +380,7 @@ func (pool *BlockPool) debug() string {
 
 type bpPeer struct {
 	pool        *BlockPool
-	id          string
+	id          p2p.ID
 	recvMonitor *flow.Monitor
 
 	height     int64
@@ -368,7 +391,7 @@ type bpPeer struct {
 	logger log.Logger
 }
 
-func newBPPeer(pool *BlockPool, peerID string, height int64) *bpPeer {
+func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer {
 	peer := &bpPeer{
 		pool:       pool,
 		id:         peerID,
@@ -391,9 +414,9 @@ func (peer *bpPeer) resetMonitor() {
 
 func (peer *bpPeer) resetTimeout() {
 	if peer.timeout == nil {
-		peer.timeout = time.AfterFunc(time.Second*peerTimeoutSeconds, peer.onTimeout)
+		peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout)
 	} else {
-		peer.timeout.Reset(time.Second * peerTimeoutSeconds)
+		peer.timeout.Reset(peerTimeout)
 	}
 }
 
@@ -419,8 +442,9 @@ func (peer *bpPeer) onTimeout() {
 	peer.pool.mtx.Lock()
 	defer peer.pool.mtx.Unlock()
 
-	peer.pool.sendTimeout(peer.id)
-	peer.logger.Error("SendTimeout", "reason", "onTimeout")
+	err := errors.New("peer did not send us anything")
+	peer.pool.sendError(err, peer.id)
+	peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout)
 	peer.didTimeout = true
 }
 
@@ -434,7 +458,7 @@ type bpRequester struct {
 	redoCh     chan struct{}
 
 	mtx    sync.Mutex
-	peerID string
+	peerID p2p.ID
 	block  *types.Block
 }
 
@@ -458,7 +482,7 @@ func (bpr *bpRequester) OnStart() error {
 }
 
 // Returns true if the peer matches
-func (bpr *bpRequester) setBlock(block *types.Block, peerID string) bool {
+func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
 	bpr.mtx.Lock()
 	if bpr.block != nil || bpr.peerID != peerID {
 		bpr.mtx.Unlock()
@@ -477,7 +501,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
 	return bpr.block
 }
 
-func (bpr *bpRequester) getPeerID() string {
+func (bpr *bpRequester) getPeerID() p2p.ID {
 	bpr.mtx.Lock()
 	defer bpr.mtx.Unlock()
 	return bpr.peerID
@@ -502,7 +526,7 @@ func (bpr *bpRequester) requestRoutine() {
 OUTER_LOOP:
 	for {
 		// Pick a peer to send request to.
-		var peer *bpPeer = nil
+		var peer *bpPeer
 	PICK_PEER_LOOP:
 		for {
 			if !bpr.IsRunning() || !bpr.pool.IsRunning() {
@@ -523,10 +547,10 @@ OUTER_LOOP:
 		// Send request and wait.
 		bpr.pool.sendRequest(bpr.height, peer.id)
 		select {
-		case <-bpr.pool.Quit:
+		case <-bpr.pool.Quit():
 			bpr.Stop()
 			return
-		case <-bpr.Quit:
+		case <-bpr.Quit():
 			return
 		case <-bpr.redoCh:
 			bpr.reset()
@@ -534,10 +558,10 @@ OUTER_LOOP:
 		case <-bpr.gotBlockCh:
 			// We got the block, now see if it's good.
 			select {
-			case <-bpr.pool.Quit:
+			case <-bpr.pool.Quit():
 				bpr.Stop()
 				return
-			case <-bpr.Quit:
+			case <-bpr.Quit():
 				return
 			case <-bpr.redoCh:
 				bpr.reset()
@@ -551,5 +575,5 @@ OUTER_LOOP:
 
 type BlockRequest struct {
 	Height int64
-	PeerID string
+	PeerID p2p.ID
 }
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
index d4b803dd..3c25eed2 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/reactor.go
@@ -3,24 +3,26 @@ package blockchain
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"reflect"
 	"sync"
 	"time"
 
 	wire "github.com/tendermint/go-wire"
+
+	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tmlibs/log"
+
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
 )
 
 const (
 	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
 	BlockchainChannel = byte(0x40)
 
-	defaultChannelCapacity = 1000
-	trySyncIntervalMS      = 50
+	trySyncIntervalMS = 50
 	// stop syncing when last block's time is
 	// within this much of the system time.
 	// stopSyncingDurationMinutes = 10
@@ -37,6 +39,15 @@ type consensusReactor interface {
 	SwitchToConsensus(sm.State, int)
 }
 
+type peerError struct {
+	err    error
+	peerID p2p.ID
+}
+
+func (e peerError) Error() string {
+	return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
+}
+
 // BlockchainReactor handles long-term catchup syncing.
 type BlockchainReactor struct {
 	p2p.BaseReactor
@@ -47,27 +58,34 @@ type BlockchainReactor struct {
 	// immutable
 	initialState sm.State
 
-	blockExec  *sm.BlockExecutor
-	store      *BlockStore
-	pool       *BlockPool
-	fastSync   bool
-	requestsCh chan BlockRequest
-	timeoutsCh chan string
+	blockExec *sm.BlockExecutor
+	store     *BlockStore
+	pool      *BlockPool
+	fastSync  bool
+
+	requestsCh <-chan BlockRequest
+	errorsCh   <-chan peerError
 }
 
 // NewBlockchainReactor returns new reactor instance.
-func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore, fastSync bool) *BlockchainReactor {
+func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore,
+	fastSync bool) *BlockchainReactor {
+
 	if state.LastBlockHeight != store.Height() {
-		cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
+		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
+			store.Height()))
 	}
 
-	requestsCh := make(chan BlockRequest, defaultChannelCapacity)
-	timeoutsCh := make(chan string, defaultChannelCapacity)
+	const capacity = 1000 // must be bigger than peers count
+	requestsCh := make(chan BlockRequest, capacity)
+	errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
+
 	pool := NewBlockPool(
 		store.Height()+1,
 		requestsCh,
-		timeoutsCh,
+		errorsCh,
 	)
+
 	bcR := &BlockchainReactor{
 		params:       state.ConsensusParams,
 		initialState: state,
@@ -76,7 +94,7 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
 		pool:         pool,
 		fastSync:     fastSync,
 		requestsCh:   requestsCh,
-		timeoutsCh:   timeoutsCh,
+		errorsCh:     errorsCh,
 	}
 	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
 	return bcR
@@ -122,7 +140,8 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
-	if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
+	if !peer.Send(BlockchainChannel,
+		struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
 		// doing nothing, will try later in `poolRoutine`
 	}
 	// peer is added to the pool once we receive the first
@@ -131,14 +150,16 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
 
 // RemovePeer implements Reactor by removing peer from the pool.
 func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
-	bcR.pool.RemovePeer(peer.Key())
+	bcR.pool.RemovePeer(peer.ID())
 }
 
 // respondToPeer loads a block and sends it to the requesting peer,
 // if we have it. Otherwise, we'll respond saying we don't have it.
 // According to the Tendermint spec, if all nodes are honest,
 // no node should be requesting for a block that's non-existent.
-func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, src p2p.Peer) (queued bool) {
+func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
+	src p2p.Peer) (queued bool) {
+
 	block := bcR.store.LoadBlock(msg.Height)
 	if block != nil {
 		msg := &bcBlockResponseMessage{Block: block}
@@ -156,13 +177,13 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, src p2p.
 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize())
 	if err != nil {
-		bcR.Logger.Error("Error decoding message", "err", err)
+		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		bcR.Switch.StopPeerForError(src, err)
 		return
 	}
 
 	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
 
-	// TODO: improve logic to satisfy megacheck
 	switch msg := msg.(type) {
 	case *bcBlockRequestMessage:
 		if queued := bcR.respondToPeer(msg, src); !queued {
@@ -170,16 +191,17 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		}
 	case *bcBlockResponseMessage:
 		// Got a block.
-		bcR.pool.AddBlock(src.Key(), msg.Block, len(msgBytes))
+		bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
 	case *bcStatusRequestMessage:
 		// Send peer our state.
-		queued := src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
+		queued := src.TrySend(BlockchainChannel,
+			struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
 		if !queued {
 			// sorry
 		}
 	case *bcStatusResponseMessage:
 		// Got a peer status. Unverified.
-		bcR.pool.SetPeerHeight(src.Key(), msg.Height)
+		bcR.pool.SetPeerHeight(src.ID(), msg.Height)
 	default:
 		bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
 	}
@@ -220,7 +242,7 @@ func (bcR *BlockchainReactor) poolRoutine() {
 FOR_LOOP:
 	for {
 		select {
-		case request := <-bcR.requestsCh: // chan BlockRequest
+		case request := <-bcR.requestsCh:
 			peer := bcR.Switch.Peers().Get(request.PeerID)
 			if peer == nil {
 				continue FOR_LOOP // Peer has since been disconnected.
@@ -232,11 +254,10 @@ FOR_LOOP:
 				// The pool handles timeouts, just let it go.
 				continue FOR_LOOP
 			}
-		case peerID := <-bcR.timeoutsCh: // chan string
-			// Peer timed out.
-			peer := bcR.Switch.Peers().Get(peerID)
+		case err := <-bcR.errorsCh:
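+			// the pool reported a problem with this peer (e.g. a timeout or an
+			// unexpected block); stop the peer if it is still connected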
+			peer := bcR.Switch.Peers().Get(err.peerID)
 			if peer != nil {
-				bcR.Switch.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
+				bcR.Switch.StopPeerForError(peer, err)
 			}
 		case <-statusUpdateTicker.C:
 			// ask for status updates
@@ -277,23 +298,28 @@ FOR_LOOP:
 					chainID, firstID, first.Height, second.LastCommit)
 				if err != nil {
 					bcR.Logger.Error("Error in validation", "err", err)
-					bcR.pool.RedoRequest(first.Height)
+					peerID := bcR.pool.RedoRequest(first.Height)
+					peer := bcR.Switch.Peers().Get(peerID)
+					if peer != nil {
+						bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
+					}
 					break SYNC_LOOP
 				} else {
 					bcR.pool.PopRequest()
 
+					// TODO: batch saves so we dont persist to disk every block
 					bcR.store.SaveBlock(first, firstParts, second.LastCommit)
 
-					// NOTE: we could improve performance if we
-					// didn't make the app commit to disk every block
-					// ... but we would need a way to get the hash without it persisting
+					// TODO: same thing for app - but we would need a way to
+					// get the hash without persisting the state
 					var err error
 					state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
 					if err != nil {
 						// TODO This is bad, are we zombie?
-						cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
+						cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
+							first.Height, first.Hash(), err))
 					}
-					blocksSynced += 1
+					blocksSynced++
 
 					// update the consensus params
 					bcR.updateConsensusParams(state.ConsensusParams)
@@ -307,7 +333,7 @@ FOR_LOOP:
 				}
 			}
 			continue FOR_LOOP
-		case <-bcR.Quit:
+		case <-bcR.Quit():
 			break FOR_LOOP
 		}
 	}
@@ -315,7 +341,8 @@ FOR_LOOP:
 
 // BroadcastStatusRequest broadcasts `BlockStore` height.
 func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
-	bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
+	bcR.Switch.Broadcast(BlockchainChannel,
+		struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
 	return nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/blockchain/store.go b/vendor/github.com/tendermint/tendermint/blockchain/store.go
index 1033999f..b949bc90 100644
--- a/vendor/github.com/tendermint/tendermint/blockchain/store.go
+++ b/vendor/github.com/tendermint/tendermint/blockchain/store.go
@@ -8,9 +8,11 @@ import (
 	"sync"
 
 	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/tendermint/types"
+
 	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
+
+	"github.com/tendermint/tendermint/types"
 )
 
 /*
@@ -74,7 +76,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
 	}
 	blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
+		panic(fmt.Sprintf("Error reading block meta: %v", err))
 	}
 	bytez := []byte{}
 	for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
@@ -83,7 +85,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
 	}
 	block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
+		panic(fmt.Sprintf("Error reading block: %v", err))
 	}
 	return block
 }
@@ -100,7 +102,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
 	}
 	part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
+		panic(fmt.Sprintf("Error reading block part: %v", err))
 	}
 	return part
 }
@@ -116,7 +118,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
 	}
 	blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
+		panic(fmt.Sprintf("Error reading block meta: %v", err))
 	}
 	return blockMeta
 }
@@ -134,7 +136,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
 	}
 	commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
+		panic(fmt.Sprintf("Error reading commit: %v", err))
 	}
 	return commit
 }
@@ -151,7 +153,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
 	}
 	commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
+		panic(fmt.Sprintf("Error reading commit: %v", err))
 	}
 	return commit
 }
@@ -252,7 +254,7 @@ func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
 // If no BlockStoreStateJSON was previously persisted, it returns the zero value.
 func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
 	bytes := db.Get(blockStoreKey)
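+	// a nil or an empty value both mean no state was persisted yet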
-	if bytes == nil {
+	if len(bytes) == 0 {
 		return BlockStoreStateJSON{
 			Height: 0,
 		}
@@ -260,7 +262,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
 	bsj := BlockStoreStateJSON{}
 	err := json.Unmarshal(bytes, &bsj)
 	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
+		panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
 	}
 	return bsj
 }
diff --git a/vendor/github.com/tendermint/tendermint/config/config.go b/vendor/github.com/tendermint/tendermint/config/config.go
index 5d4a8ef6..c87d56b3 100644
--- a/vendor/github.com/tendermint/tendermint/config/config.go
+++ b/vendor/github.com/tendermint/tendermint/config/config.go
@@ -7,6 +7,31 @@ import (
 	"time"
 )
 
+// NOTE: Most of the structs & relevant comments + the
+// default configuration options were used to manually
+// generate the config.toml. Please reflect any changes
+// made here in the defaultConfigTemplate constant in
+// config/toml.go
+// NOTE: tmlibs/cli must know to look in the config dir!
+var (
+	DefaultTendermintDir = ".tendermint"
+	defaultConfigDir     = "config"
+	defaultDataDir       = "data"
+
+	defaultConfigFileName  = "config.toml"
+	defaultGenesisJSONName = "genesis.json"
+
+	defaultPrivValName  = "priv_validator.json"
+	defaultNodeKeyName  = "node_key.json"
+	defaultAddrBookName = "addrbook.json"
+
+	defaultConfigFilePath  = filepath.Join(defaultConfigDir, defaultConfigFileName)
+	defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
+	defaultPrivValPath     = filepath.Join(defaultConfigDir, defaultPrivValName)
+	defaultNodeKeyPath     = filepath.Join(defaultConfigDir, defaultNodeKeyName)
+	defaultAddrBookPath    = filepath.Join(defaultConfigDir, defaultAddrBookName)
+)
+
 // Config defines the top level configuration for a Tendermint node
 type Config struct {
 	// Top level options use an anonymous struct
@@ -38,9 +63,9 @@ func TestConfig() *Config {
 		BaseConfig: TestBaseConfig(),
 		RPC:        TestRPCConfig(),
 		P2P:        TestP2PConfig(),
-		Mempool:    DefaultMempoolConfig(),
+		Mempool:    TestMempoolConfig(),
 		Consensus:  TestConsensusConfig(),
-		TxIndex:    DefaultTxIndexConfig(),
+		TxIndex:    TestTxIndexConfig(),
 	}
 }
 
@@ -59,22 +84,30 @@ func (cfg *Config) SetRoot(root string) *Config {
 
 // BaseConfig defines the base configuration for a Tendermint node
 type BaseConfig struct {
+
+	// chainID is unexposed and immutable but here for convenience
+	chainID string
+
 	// The root directory for all data.
 	// This should be set in viper so it can unmarshal into this struct
 	RootDir string `mapstructure:"home"`
 
-	// The ID of the chain to join (should be signed with every transaction and vote)
-	ChainID string `mapstructure:"chain_id"`
-
-	// A JSON file containing the initial validator set and other meta data
+	// Path to the JSON file containing the initial validator set and other meta data
 	Genesis string `mapstructure:"genesis_file"`
 
-	// A JSON file containing the private key to use as a validator in the consensus protocol
+	// Path to the JSON file containing the private key to use as a validator in the consensus protocol
 	PrivValidator string `mapstructure:"priv_validator_file"`
 
+	// A JSON file containing the private key to use for p2p authenticated encryption
+	NodeKey string `mapstructure:"node_key_file"`
+
 	// A custom human readable name for this node
 	Moniker string `mapstructure:"moniker"`
 
+	// TCP or UNIX socket address for Tendermint to listen on for
+	// connections from an external PrivValidator process
+	PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
+
 	// TCP or UNIX socket address of the ABCI application,
 	// or the name of an ABCI application compiled in with the Tendermint binary
 	ProxyApp string `mapstructure:"proxy_app"`
@@ -107,8 +140,9 @@ type BaseConfig struct {
 // DefaultBaseConfig returns a default base configuration for a Tendermint node
 func DefaultBaseConfig() BaseConfig {
 	return BaseConfig{
-		Genesis:           "genesis.json",
-		PrivValidator:     "priv_validator.json",
+		Genesis:           defaultGenesisJSONPath,
+		PrivValidator:     defaultPrivValPath,
+		NodeKey:           defaultNodeKeyPath,
 		Moniker:           defaultMoniker,
 		ProxyApp:          "tcp://127.0.0.1:46658",
 		ABCI:              "socket",
@@ -123,27 +157,36 @@ func DefaultBaseConfig() BaseConfig {
 
 // TestBaseConfig returns a base configuration for testing a Tendermint node
 func TestBaseConfig() BaseConfig {
-	conf := DefaultBaseConfig()
-	conf.ChainID = "tendermint_test"
-	conf.ProxyApp = "dummy"
-	conf.FastSync = false
-	conf.DBBackend = "memdb"
-	return conf
+	cfg := DefaultBaseConfig()
+	cfg.chainID = "tendermint_test"
+	cfg.ProxyApp = "kvstore"
+	cfg.FastSync = false
+	cfg.DBBackend = "memdb"
+	return cfg
+}
+
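+// ChainID returns the chainID, which is unexposed and immutable.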
+func (cfg BaseConfig) ChainID() string {
+	return cfg.chainID
 }
 
 // GenesisFile returns the full path to the genesis.json file
-func (b BaseConfig) GenesisFile() string {
-	return rootify(b.Genesis, b.RootDir)
+func (cfg BaseConfig) GenesisFile() string {
+	return rootify(cfg.Genesis, cfg.RootDir)
 }
 
 // PrivValidatorFile returns the full path to the priv_validator.json file
-func (b BaseConfig) PrivValidatorFile() string {
-	return rootify(b.PrivValidator, b.RootDir)
+func (cfg BaseConfig) PrivValidatorFile() string {
+	return rootify(cfg.PrivValidator, cfg.RootDir)
+}
+
+// NodeKeyFile returns the full path to the node_key.json file
+func (cfg BaseConfig) NodeKeyFile() string {
+	return rootify(cfg.NodeKey, cfg.RootDir)
 }
 
 // DBDir returns the full path to the database directory
-func (b BaseConfig) DBDir() string {
-	return rootify(b.DBPath, b.RootDir)
+func (cfg BaseConfig) DBDir() string {
+	return rootify(cfg.DBPath, cfg.RootDir)
 }
 
 // DefaultLogLevel returns a default log level of "error"
@@ -151,9 +194,10 @@ func DefaultLogLevel() string {
 	return "error"
 }
 
-// DefaultPackageLogLevels returns a default log level setting so all packages log at "error", while the `state` package logs at "info"
+// DefaultPackageLogLevels returns a default log level setting so all packages
+// log at "error", while the `state` and `main` packages log at "info"
 func DefaultPackageLogLevels() string {
-	return fmt.Sprintf("state:info,*:%s", DefaultLogLevel())
+	return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel())
 }
 
 //-----------------------------------------------------------------------------
@@ -170,7 +214,7 @@ type RPCConfig struct {
 	// NOTE: This server only supports /broadcast_tx_commit
 	GRPCListenAddress string `mapstructure:"grpc_laddr"`
 
-	// Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+	// Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
 	Unsafe bool `mapstructure:"unsafe"`
 }
 
@@ -185,11 +229,11 @@ func DefaultRPCConfig() *RPCConfig {
 
 // TestRPCConfig returns a configuration for testing the RPC server
 func TestRPCConfig() *RPCConfig {
-	conf := DefaultRPCConfig()
-	conf.ListenAddress = "tcp://0.0.0.0:36657"
-	conf.GRPCListenAddress = "tcp://0.0.0.0:36658"
-	conf.Unsafe = true
-	return conf
+	cfg := DefaultRPCConfig()
+	cfg.ListenAddress = "tcp://0.0.0.0:36657"
+	cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
+	cfg.Unsafe = true
+	return cfg
 }
 
 //-----------------------------------------------------------------------------
@@ -203,8 +247,13 @@ type P2PConfig struct {
 	ListenAddress string `mapstructure:"laddr"`
 
 	// Comma separated list of seed nodes to connect to
+	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`
 
+	// Comma separated list of nodes to keep persistent connections to
+	// Do not add private peers to this list if you don't want them advertised
+	PersistentPeers string `mapstructure:"persistent_peers"`
+
 	// Skip UPNP port forwarding
 	SkipUPNP bool `mapstructure:"skip_upnp"`
 
@@ -214,9 +263,6 @@ type P2PConfig struct {
 	// Set true for strict address routability rules
 	AddrBookStrict bool `mapstructure:"addr_book_strict"`
 
-	// Set true to enable the peer-exchange reactor
-	PexReactor bool `mapstructure:"pex"`
-
 	// Maximum number of peers to connect to
 	MaxNumPeers int `mapstructure:"max_num_peers"`
 
@@ -231,13 +277,28 @@ type P2PConfig struct {
 
 	// Rate at which packets can be received, in bytes/second
 	RecvRate int64 `mapstructure:"recv_rate"`
+
+	// Set true to enable the peer-exchange reactor
+	PexReactor bool `mapstructure:"pex"`
+
+	// Seed mode, in which the node constantly crawls the network and looks for
+	// peers. If another node asks it for addresses, it responds and disconnects.
+	//
+	// Does not work if the peer-exchange reactor is disabled.
+	SeedMode bool `mapstructure:"seed_mode"`
+
+	// Authenticated encryption
+	AuthEnc bool `mapstructure:"auth_enc"`
+
+	// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+	PrivatePeerIDs string `mapstructure:"private_peer_ids"`
 }
 
 // DefaultP2PConfig returns a default configuration for the peer-to-peer layer
 func DefaultP2PConfig() *P2PConfig {
 	return &P2PConfig{
 		ListenAddress:           "tcp://0.0.0.0:46656",
-		AddrBook:                "addrbook.json",
+		AddrBook:                defaultAddrBookPath,
 		AddrBookStrict:          true,
 		MaxNumPeers:             50,
 		FlushThrottleTimeout:    100,
@@ -245,20 +306,23 @@ func DefaultP2PConfig() *P2PConfig {
 		SendRate:                512000, // 500 kB/s
 		RecvRate:                512000, // 500 kB/s
 		PexReactor:              true,
+		SeedMode:                false,
+		AuthEnc:                 true,
 	}
 }
 
 // TestP2PConfig returns a configuration for testing the peer-to-peer layer
 func TestP2PConfig() *P2PConfig {
-	conf := DefaultP2PConfig()
-	conf.ListenAddress = "tcp://0.0.0.0:36656"
-	conf.SkipUPNP = true
-	return conf
+	cfg := DefaultP2PConfig()
+	cfg.ListenAddress = "tcp://0.0.0.0:36656"
+	cfg.SkipUPNP = true
+	cfg.FlushThrottleTimeout = 10
+	return cfg
 }
 
 // AddrBookFile returns the full path to the address book
-func (p *P2PConfig) AddrBookFile() string {
-	return rootify(p.AddrBook, p.RootDir)
+func (cfg *P2PConfig) AddrBookFile() string {
+	return rootify(cfg.AddrBook, cfg.RootDir)
 }
 
 //-----------------------------------------------------------------------------
@@ -271,6 +335,7 @@ type MempoolConfig struct {
 	RecheckEmpty bool   `mapstructure:"recheck_empty"`
 	Broadcast    bool   `mapstructure:"broadcast"`
 	WalPath      string `mapstructure:"wal_dir"`
+	CacheSize    int    `mapstructure:"cache_size"`
 }
 
 // DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -279,13 +344,21 @@ func DefaultMempoolConfig() *MempoolConfig {
 		Recheck:      true,
 		RecheckEmpty: true,
 		Broadcast:    true,
-		WalPath:      "data/mempool.wal",
+		WalPath:      filepath.Join(defaultDataDir, "mempool.wal"),
+		CacheSize:    100000,
 	}
 }
 
+// TestMempoolConfig returns a configuration for testing the Tendermint mempool
+func TestMempoolConfig() *MempoolConfig {
+	cfg := DefaultMempoolConfig()
+	cfg.CacheSize = 1000
+	return cfg
+}
+
 // WalDir returns the full path to the mempool's write-ahead log
-func (m *MempoolConfig) WalDir() string {
-	return rootify(m.WalPath, m.RootDir)
+func (cfg *MempoolConfig) WalDir() string {
+	return rootify(cfg.WalPath, cfg.RootDir)
 }
 
 //-----------------------------------------------------------------------------
@@ -299,7 +372,7 @@ type ConsensusConfig struct {
 	WalLight bool   `mapstructure:"wal_light"`
 	walFile  string // overrides WalPath if set
 
-	// All timeouts are in ms
+	// All timeouts are in milliseconds
 	TimeoutPropose        int `mapstructure:"timeout_propose"`
 	TimeoutProposeDelta   int `mapstructure:"timeout_propose_delta"`
 	TimeoutPrevote        int `mapstructure:"timeout_prevote"`
@@ -319,11 +392,49 @@ type ConsensusConfig struct {
 	CreateEmptyBlocks         bool `mapstructure:"create_empty_blocks"`
 	CreateEmptyBlocksInterval int  `mapstructure:"create_empty_blocks_interval"`
 
-	// Reactor sleep duration parameters are in ms
+	// Reactor sleep duration parameters are in milliseconds
 	PeerGossipSleepDuration     int `mapstructure:"peer_gossip_sleep_duration"`
 	PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
 }
 
+// DefaultConsensusConfig returns a default configuration for the consensus service
+func DefaultConsensusConfig() *ConsensusConfig {
+	return &ConsensusConfig{
+		WalPath:                     filepath.Join(defaultDataDir, "cs.wal", "wal"),
+		WalLight:                    false,
+		TimeoutPropose:              3000,
+		TimeoutProposeDelta:         500,
+		TimeoutPrevote:              1000,
+		TimeoutPrevoteDelta:         500,
+		TimeoutPrecommit:            1000,
+		TimeoutPrecommitDelta:       500,
+		TimeoutCommit:               1000,
+		SkipTimeoutCommit:           false,
+		MaxBlockSizeTxs:             10000,
+		MaxBlockSizeBytes:           1, // TODO
+		CreateEmptyBlocks:           true,
+		CreateEmptyBlocksInterval:   0,
+		PeerGossipSleepDuration:     100,
+		PeerQueryMaj23SleepDuration: 2000,
+	}
+}
+
+// TestConsensusConfig returns a configuration for testing the consensus service
+func TestConsensusConfig() *ConsensusConfig {
+	cfg := DefaultConsensusConfig()
+	cfg.TimeoutPropose = 100
+	cfg.TimeoutProposeDelta = 1
+	cfg.TimeoutPrevote = 10
+	cfg.TimeoutPrevoteDelta = 1
+	cfg.TimeoutPrecommit = 10
+	cfg.TimeoutPrecommitDelta = 1
+	cfg.TimeoutCommit = 10
+	cfg.SkipTimeoutCommit = true
+	cfg.PeerGossipSleepDuration = 5
+	cfg.PeerQueryMaj23SleepDuration = 250
+	return cfg
+}
+
 // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
 func (cfg *ConsensusConfig) WaitForTxs() bool {
 	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
@@ -364,53 +475,17 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
 	return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
 }
 
-// DefaultConsensusConfig returns a default configuration for the consensus service
-func DefaultConsensusConfig() *ConsensusConfig {
-	return &ConsensusConfig{
-		WalPath:                     "data/cs.wal/wal",
-		WalLight:                    false,
-		TimeoutPropose:              3000,
-		TimeoutProposeDelta:         500,
-		TimeoutPrevote:              1000,
-		TimeoutPrevoteDelta:         500,
-		TimeoutPrecommit:            1000,
-		TimeoutPrecommitDelta:       500,
-		TimeoutCommit:               1000,
-		SkipTimeoutCommit:           false,
-		MaxBlockSizeTxs:             10000,
-		MaxBlockSizeBytes:           1, // TODO
-		CreateEmptyBlocks:           true,
-		CreateEmptyBlocksInterval:   0,
-		PeerGossipSleepDuration:     100,
-		PeerQueryMaj23SleepDuration: 2000,
-	}
-}
-
-// TestConsensusConfig returns a configuration for testing the consensus service
-func TestConsensusConfig() *ConsensusConfig {
-	config := DefaultConsensusConfig()
-	config.TimeoutPropose = 100
-	config.TimeoutProposeDelta = 1
-	config.TimeoutPrevote = 10
-	config.TimeoutPrevoteDelta = 1
-	config.TimeoutPrecommit = 10
-	config.TimeoutPrecommitDelta = 1
-	config.TimeoutCommit = 10
-	config.SkipTimeoutCommit = true
-	return config
-}
-
 // WalFile returns the full path to the write-ahead log file
-func (c *ConsensusConfig) WalFile() string {
-	if c.walFile != "" {
-		return c.walFile
+func (cfg *ConsensusConfig) WalFile() string {
+	if cfg.walFile != "" {
+		return cfg.walFile
 	}
-	return rootify(c.WalPath, c.RootDir)
+	return rootify(cfg.WalPath, cfg.RootDir)
 }
 
 // SetWalFile sets the path to the write-ahead log file
-func (c *ConsensusConfig) SetWalFile(walFile string) {
-	c.walFile = walFile
+func (cfg *ConsensusConfig) SetWalFile(walFile string) {
+	cfg.walFile = walFile
 }
 
 //-----------------------------------------------------------------------------
@@ -448,6 +523,11 @@ func DefaultTxIndexConfig() *TxIndexConfig {
 	}
 }
 
+// TestTxIndexConfig returns a default configuration for the transaction indexer.
+func TestTxIndexConfig() *TxIndexConfig {
+	return DefaultTxIndexConfig()
+}
+
 //-----------------------------------------------------------------------------
 // Utils
 
diff --git a/vendor/github.com/tendermint/tendermint/config/toml.go b/vendor/github.com/tendermint/tendermint/config/toml.go
index 735f45c1..13f71db8 100644
--- a/vendor/github.com/tendermint/tendermint/config/toml.go
+++ b/vendor/github.com/tendermint/tendermint/config/toml.go
@@ -1,52 +1,231 @@
 package config
 
 import (
+	"bytes"
 	"os"
-	"path"
 	"path/filepath"
-	"strings"
+	"text/template"
 
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
+var configTemplate *template.Template
+
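+// init parses the config template once at startup; a malformed template is a
+// programmer error, so we panic.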
+func init() {
+	var err error
+	if configTemplate, err = template.New("configFileTemplate").Parse(defaultConfigTemplate); err != nil {
+		panic(err)
+	}
+}
+
 /****** these are for production settings ***********/
 
+// EnsureRoot creates the root, config, and data directories if they don't exist,
+// and panics if it fails.
 func EnsureRoot(rootDir string) {
 	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
 
-	configFilePath := path.Join(rootDir, "config.toml")
+	configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
 
 	// Write default config file if missing.
 	if !cmn.FileExists(configFilePath) {
-		cmn.MustWriteFile(configFilePath, []byte(defaultConfig(defaultMoniker)), 0644)
+		writeConfigFile(configFilePath)
+	}
+}
+
+// XXX: this func should probably be called by cmd/tendermint/commands/init.go
+// alongside the writing of the genesis.json and priv_validator.json
+func writeConfigFile(configFilePath string) {
+	var buffer bytes.Buffer
+
+	if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil {
+		panic(err)
 	}
+
+	cmn.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
 }
 
-var defaultConfigTmpl = `# This is a TOML config file.
+// Note: any changes to the comments/variables/mapstructure
+// must be reflected in the appropriate struct in config/config.go
+const defaultConfigTemplate = `# This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
 
-proxy_app = "tcp://127.0.0.1:46658"
-moniker = "__MONIKER__"
-fast_sync = true
-db_backend = "leveldb"
-log_level = "state:info,*:error"
+##### main base config options #####
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "{{ .BaseConfig.ProxyApp }}"
+
+# A custom human readable name for this node
+moniker = "{{ .BaseConfig.Moniker }}"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catchup quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = {{ .BaseConfig.FastSync }}
+
+# Database backend: leveldb | memdb
+db_backend = "{{ .BaseConfig.DBBackend }}"
+
+# Database directory
+db_path = "{{ .BaseConfig.DBPath }}"
+
+# Output level for logging, including package level options
+log_level = "{{ .BaseConfig.LogLevel }}"
+
+##### additional base config options #####
 
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "{{ .BaseConfig.Genesis }}"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_file = "{{ .BaseConfig.PrivValidator }}"
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "{{ .BaseConfig.NodeKey}}"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "{{ .BaseConfig.ABCI }}"
+
+# TCP or UNIX socket address for the profiling server to listen on
+prof_laddr = "{{ .BaseConfig.ProfListenAddress }}"
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = {{ .BaseConfig.FilterPeers }}
+
+##### advanced configuration options #####
+
+##### rpc server configuration options #####
 [rpc]
-laddr = "tcp://0.0.0.0:46657"
 
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "{{ .RPC.ListenAddress }}"
+
+# TCP or UNIX socket address for the gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
+
+# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+unsafe = {{ .RPC.Unsafe }}
+
+##### peer to peer configuration options #####
 [p2p]
-laddr = "tcp://0.0.0.0:46656"
+
+# Address to listen for incoming connections
+laddr = "{{ .P2P.ListenAddress }}"
+
+# Comma separated list of seed nodes to connect to
 seeds = ""
-`
 
-func defaultConfig(moniker string) string {
-	return strings.Replace(defaultConfigTmpl, "__MONIKER__", moniker, -1)
-}
+# Comma separated list of nodes to keep persistent connections to
+# Do not add private peers to this list if you don't want them advertised
+persistent_peers = ""
+
+# Path to address book
+addr_book_file = "{{ .P2P.AddrBook }}"
+
+# Set true for strict address routability rules
+addr_book_strict = {{ .P2P.AddrBookStrict }}
+
+# Time to wait before flushing messages out on the connection, in ms
+flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
+
+# Maximum number of peers to connect to
+max_num_peers = {{ .P2P.MaxNumPeers }}
+
+# Maximum size of a message packet payload, in bytes
+max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }}
+
+# Rate at which packets can be sent, in bytes/second
+send_rate = {{ .P2P.SendRate }}
+
+# Rate at which packets can be received, in bytes/second
+recv_rate = {{ .P2P.RecvRate }}
+
+# Set true to enable the peer-exchange reactor
+pex = {{ .P2P.PexReactor }}
+
+# Seed mode, in which the node constantly crawls the network and looks for
+# peers. If another node asks it for addresses, it responds and disconnects.
+#
+# Does not work if the peer-exchange reactor is disabled.
+seed_mode = {{ .P2P.SeedMode }}
+
+# Authenticated encryption
+auth_enc = {{ .P2P.AuthEnc }}
+
+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
+
+##### mempool configuration options #####
+[mempool]
+
+recheck = {{ .Mempool.Recheck }}
+recheck_empty = {{ .Mempool.RecheckEmpty }}
+broadcast = {{ .Mempool.Broadcast }}
+wal_dir = "{{ .Mempool.WalPath }}"
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "{{ .Consensus.WalPath }}"
+wal_light = {{ .Consensus.WalLight }}
+
+# All timeouts are in milliseconds
+timeout_propose = {{ .Consensus.TimeoutPropose }}
+timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
+timeout_prevote = {{ .Consensus.TimeoutPrevote }}
+timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
+timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
+timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
+timeout_commit = {{ .Consensus.TimeoutCommit }}
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
+
+# BlockSize
+max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
+max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
+
+# EmptyBlocks mode and possible interval between empty blocks in seconds
+create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
+create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
+
+# Reactor sleep duration parameters are in milliseconds
+peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
+peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+#   1) "null" (default)
+#   2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "{{ .TxIndex.Indexer }}"
+
+# Comma-separated list of tags to index (by default the only tag is tx hash)
+#
+# It's recommended to index only a subset of tags due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = "{{ .TxIndex.IndexTags }}"
+
+# When set to true, tells the indexer to index all tags. Note this may not be
+# desirable (see the comment above). IndexTags takes precedence over
+# IndexAllTags (i.e. when both are given, IndexTags will be indexed).
+index_all_tags = {{ .TxIndex.IndexAllTags }}
+`
 
 /****** these are for test settings ***********/
 
@@ -69,17 +248,21 @@ func ResetTestRoot(testName string) *Config {
 	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
 
-	configFilePath := path.Join(rootDir, "config.toml")
-	genesisFilePath := path.Join(rootDir, "genesis.json")
-	privFilePath := path.Join(rootDir, "priv_validator.json")
+	baseConfig := DefaultBaseConfig()
+	configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
+	genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
+	privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)
 
 	// Write default config file if missing.
 	if !cmn.FileExists(configFilePath) {
-		cmn.MustWriteFile(configFilePath, []byte(testConfig(defaultMoniker)), 0644)
+		writeConfigFile(configFilePath)
 	}
 	if !cmn.FileExists(genesisFilePath) {
 		cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
@@ -91,28 +274,6 @@ func ResetTestRoot(testName string) *Config {
 	return config
 }
 
-var testConfigTmpl = `# This is a TOML config file.
-# For more information, see https://github.com/toml-lang/toml
-
-proxy_app = "dummy"
-moniker = "__MONIKER__"
-fast_sync = false
-db_backend = "memdb"
-log_level = "info"
-
-[rpc]
-laddr = "tcp://0.0.0.0:36657"
-
-[p2p]
-laddr = "tcp://0.0.0.0:36656"
-seeds = ""
-`
-
-func testConfig(moniker string) (testConfig string) {
-	testConfig = strings.Replace(testConfigTmpl, "__MONIKER__", moniker, -1)
-	return
-}
-
 var testGenesis = `{
   "genesis_time": "0001-01-01T00:00:00.000Z",
   "chain_id": "tendermint_test",
diff --git a/vendor/github.com/tendermint/tendermint/consensus/reactor.go b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
index 9b3393e9..70a79d86 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/reactor.go
@@ -27,6 +27,8 @@ const (
 	VoteSetBitsChannel = byte(0x23)
 
 	maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+
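+	// Number of blocks a peer must contribute (via votes or block parts)
+	// before we mark it as a good peer (see RecordVote / RecordBlockPart).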
+	blocksToContributeToBecomeGoodPeer = 10000
 )
 
 //-----------------------------------------------------------------------------
@@ -179,7 +181,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 	_, msg, err := DecodeMessage(msgBytes)
 	if err != nil {
 		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
-		// TODO punish peer?
+		conR.Switch.StopPeerForError(src, err)
 		return
 	}
 	conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
@@ -205,7 +207,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 				return
 			}
 			// Peer claims to have a maj23 for some BlockID at H,R,S,
-			votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key(), msg.BlockID)
+			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.ID(), msg.BlockID)
+			if err != nil {
+				conR.Switch.StopPeerForError(src, err)
+				return
+			}
 			// Respond with a VoteSetBitsMessage showing which votes we have.
 			// (and consequently shows which we don't have)
 			var ourVotes *cmn.BitArray
@@ -242,12 +248,15 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		switch msg := msg.(type) {
 		case *ProposalMessage:
 			ps.SetHasProposal(msg.Proposal)
-			conR.conS.peerMsgQueue <- msgInfo{msg, src.Key()}
+			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
 		case *ProposalPOLMessage:
 			ps.ApplyProposalPOLMessage(msg)
 		case *BlockPartMessage:
 			ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
-			conR.conS.peerMsgQueue <- msgInfo{msg, src.Key()}
+			if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
+				conR.Switch.MarkPeerAsGood(src)
+			}
+			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
 		default:
 			conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
 		}
@@ -266,8 +275,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			ps.EnsureVoteBitArrays(height, valSize)
 			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
 			ps.SetHasVote(msg.Vote)
+			if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
+				conR.Switch.MarkPeerAsGood(src)
+			}
 
-			cs.peerMsgQueue <- msgInfo{msg, src.Key()}
+			cs.peerMsgQueue <- msgInfo{msg, src.ID()}
 
 		default:
 			// don't punish (leave room for soft upgrades)
@@ -359,24 +371,30 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
 	}
 
 	go func() {
+		var data interface{}
+		var ok bool
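+		// data and ok are shared across the select cases so the closed-channel
+		// check after the select can unsubscribe and exit the routine.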
 		for {
 			select {
-			case data, ok := <-stepsCh:
+			case data, ok = <-stepsCh:
 				if ok { // a receive from a closed channel returns the zero value immediately
 					edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState)
 					conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
 				}
-			case data, ok := <-votesCh:
+			case data, ok = <-votesCh:
 				if ok {
 					edv := data.(types.TMEventData).Unwrap().(types.EventDataVote)
 					conR.broadcastHasVoteMessage(edv.Vote)
 				}
-			case data, ok := <-heartbeatsCh:
+			case data, ok = <-heartbeatsCh:
 				if ok {
 					edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat)
 					conR.broadcastProposalHeartbeatMessage(edph)
 				}
-			case <-conR.Quit:
+			case <-conR.Quit():
+				conR.eventBus.UnsubscribeAll(ctx, subscriber)
+				return
+			}
+			if !ok {
 				conR.eventBus.UnsubscribeAll(ctx, subscriber)
 				return
 			}
@@ -590,11 +608,9 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 			logger.Debug("Sending block part for catchup failed")
 		}
 		return
-	} else {
-		//logger.Info("No parts to send in catch-up, sleeping")
-		time.Sleep(conR.conS.config.PeerGossipSleep())
-		return
 	}
+	//logger.Info("No parts to send in catch-up, sleeping")
+	time.Sleep(conR.conS.config.PeerGossipSleep())
 }
 
 func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -827,6 +843,21 @@ type PeerState struct {
 
 	mtx sync.Mutex
 	cstypes.PeerRoundState
+
+	stats *peerStateStats
+}
+
+// peerStateStats holds internal statistics for a peer.
+type peerStateStats struct {
+	lastVoteHeight int64
+	votes          int
+
+	lastBlockPartHeight int64
+	blockParts          int
+}
+
+func (pss peerStateStats) String() string {
+	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}", pss.votes, pss.blockParts)
 }
 
 // NewPeerState returns a new PeerState for the given Peer
@@ -840,6 +871,7 @@ func NewPeerState(peer p2p.Peer) *PeerState {
 			LastCommitRound:    -1,
 			CatchupCommitRound: -1,
 		},
+		stats: &peerStateStats{},
 	}
 }
 
@@ -1051,6 +1083,56 @@ func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
 	}
 }
 
+// RecordVote updates internal statistics for this peer by recording the vote.
+// It returns the total number of votes (1 per block). This essentially means
+// the number of blocks for which the peer has been sending us votes.
+func (ps *PeerState) RecordVote(vote *types.Vote) int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	if ps.stats.lastVoteHeight >= vote.Height {
+		return ps.stats.votes
+	}
+	ps.stats.lastVoteHeight = vote.Height
+	ps.stats.votes++
+	return ps.stats.votes
+}
+
+// VotesSent returns the number of blocks for which the peer has been sending us
+// votes.
+func (ps *PeerState) VotesSent() int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	return ps.stats.votes
+}
+
+// RecordBlockPart updates internal statistics for this peer by recording the
+// block part. It returns the total number of block parts (1 per block). This
+// essentially means the number of blocks for which the peer has been sending us
+// block parts.
+func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	if ps.stats.lastBlockPartHeight >= bp.Height {
+		return ps.stats.blockParts
+	}
+
+	ps.stats.lastBlockPartHeight = bp.Height
+	ps.stats.blockParts++
+	return ps.stats.blockParts
+}
+
+// BlockPartsSent returns the number of blocks for which the peer has been sending
+// us block parts.
+func (ps *PeerState) BlockPartsSent() int {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	return ps.stats.blockParts
+}
+
 // SetHasVote sets the given vote as known by the peer
 func (ps *PeerState) SetHasVote(vote *types.Vote) {
 	ps.mtx.Lock()
@@ -1197,11 +1279,13 @@ func (ps *PeerState) StringIndented(indent string) string {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 	return fmt.Sprintf(`PeerState{
-%s  Key %v
-%s  PRS %v
+%s  Key   %v
+%s  PRS   %v
+%s  Stats %v
 %s}`,
-		indent, ps.Peer.Key(),
+		indent, ps.Peer.ID(),
 		indent, ps.PeerRoundState.StringIndented(indent+"  "),
+		indent, ps.stats,
 		indent)
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay.go b/vendor/github.com/tendermint/tendermint/consensus/replay.go
index 784e8bd6..5b5a4842 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay.go
@@ -2,6 +2,7 @@ package consensus
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"hash/crc32"
 	"io"
@@ -61,21 +62,21 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
 			}
 		}
 	case msgInfo:
-		peerKey := m.PeerKey
-		if peerKey == "" {
-			peerKey = "local"
+		peerID := m.PeerID
+		if peerID == "" {
+			peerID = "local"
 		}
 		switch msg := m.Msg.(type) {
 		case *ProposalMessage:
 			p := msg.Proposal
 			cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
-				p.BlockPartsHeader, "pol", p.POLRound, "peer", peerKey)
+				p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID)
 		case *BlockPartMessage:
-			cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerKey)
+			cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID)
 		case *VoteMessage:
 			v := msg.Vote
 			cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
-				"blockID", v.BlockID, "peer", peerKey)
+				"blockID", v.BlockID, "peer", peerID)
 		}
 
 		cs.handleMsg(m)
@@ -111,7 +112,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
 		}
 	}
 	if found {
-		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
+		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
 	}
 
 	// Search for last height marker
@@ -124,7 +125,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
 		return err
 	}
 	if !found {
-		return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)
+		return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
 	}
 	defer gr.Close() // nolint: errcheck
 
@@ -190,13 +191,23 @@ type Handshaker struct {
 	stateDB      dbm.DB
 	initialState sm.State
 	store        types.BlockStore
+	appState     json.RawMessage
 	logger       log.Logger
 
 	nBlocks int // number of blocks applied to the state
 }
 
-func NewHandshaker(stateDB dbm.DB, state sm.State, store types.BlockStore) *Handshaker {
-	return &Handshaker{stateDB, state, store, log.NewNopLogger(), 0}
+func NewHandshaker(stateDB dbm.DB, state sm.State,
+	store types.BlockStore, appState json.RawMessage) *Handshaker {
+
+	return &Handshaker{
+		stateDB:      stateDB,
+		initialState: state,
+		store:        store,
+		appState:     appState,
+		logger:       log.NewNopLogger(),
+		nBlocks:      0,
+	}
 }
 
 func (h *Handshaker) SetLogger(l log.Logger) {
@@ -249,7 +260,12 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
 	if appBlockHeight == 0 {
 		validators := types.TM2PB.Validators(state.Validators)
-		if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
+		req := abci.RequestInitChain{
+			Validators:    validators,
+			AppStateBytes: h.appState,
+		}
+		_, err := proxyApp.Consensus().InitChainSync(req)
+		if err != nil {
 			return nil, err
 		}
 	}
@@ -336,7 +352,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 	var err error
 	finalBlock := storeBlockHeight
 	if mutateState {
-		finalBlock -= 1
+		finalBlock--
 	}
 	for i := appBlockHeight + 1; i <= finalBlock; i++ {
 		h.logger.Info("Applying block", "height", i)
@@ -346,7 +362,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 			return nil, err
 		}
 
-		h.nBlocks += 1
+		h.nBlocks++
 	}
 
 	if mutateState {
@@ -374,7 +390,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
 		return sm.State{}, err
 	}
 
-	h.nBlocks += 1
+	h.nBlocks++
 
 	return state, nil
 }
@@ -413,7 +429,7 @@ type mockProxyApp struct {
 
 func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
 	r := mock.abciResponses.DeliverTx[mock.txCount]
-	mock.txCount += 1
+	mock.txCount++
 	return *r
 }
 
@@ -423,5 +439,5 @@ func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlo
 }
 
 func (mock *mockProxyApp) Commit() abci.ResponseCommit {
-	return abci.ResponseCommit{Code: abci.CodeTypeOK, Data: mock.appHash}
+	return abci.ResponseCommit{Data: mock.appHash}
 }
diff --git a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
index 26b8baeb..1fd4f415 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/replay_file.go
@@ -87,9 +87,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
 		}
 
 		if nextN > 0 {
-			nextN -= 1
+			nextN--
 		}
-		pb.count += 1
+		pb.count++
 	}
 	return nil
 }
@@ -153,7 +153,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
 		if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
 			return err
 		}
-		pb.count += 1
+		pb.count++
 	}
 	return nil
 }
@@ -197,13 +197,12 @@ func (pb *playback) replayConsoleLoop() int {
 
 			if len(tokens) == 1 {
 				return 0
+			}
+			i, err := strconv.Atoi(tokens[1])
+			if err != nil {
+				fmt.Println("next takes an integer argument")
 			} else {
-				i, err := strconv.Atoi(tokens[1])
-				if err != nil {
-					fmt.Println("next takes an integer argument")
-				} else {
-					return i
-				}
+				return i
 			}
 
 		case "back":
@@ -280,20 +279,26 @@ func (pb *playback) replayConsoleLoop() int {
 
 // convenience for replay mode
 func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState {
+	dbType := dbm.DBBackendType(config.DBBackend)
 	// Get BlockStore
-	blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir())
+	blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir())
 	blockStore := bc.NewBlockStore(blockStoreDB)
 
 	// Get State
-	stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
-	state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
+	stateDB := dbm.NewDB("state", dbType, config.DBDir())
+	gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
+	if err != nil {
+		cmn.Exit(err.Error())
+	}
+	state, err := sm.MakeGenesisState(gdoc)
 	if err != nil {
 		cmn.Exit(err.Error())
 	}
 
 	// Create proxyAppConn connection (consensus, mempool, query)
 	clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
-	proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(stateDB, state, blockStore))
+	proxyApp := proxy.NewAppConns(clientCreator,
+		NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
 	err = proxyApp.Start()
 	if err != nil {
 		cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
diff --git a/vendor/github.com/tendermint/tendermint/consensus/state.go b/vendor/github.com/tendermint/tendermint/consensus/state.go
index 518d81c5..57c7b32f 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/state.go
@@ -17,6 +17,7 @@ import (
 
 	cfg "github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )
@@ -46,8 +47,8 @@ var (
 
 // msgs from the reactor which may update the state
 type msgInfo struct {
-	Msg     ConsensusMessage `json:"msg"`
-	PeerKey string           `json:"peer_key"`
+	Msg    ConsensusMessage `json:"msg"`
+	PeerID p2p.ID           `json:"peer_key"`
 }
 
 // internally generated messages which may update the state
@@ -85,7 +86,7 @@ type ConsensusState struct {
 	cstypes.RoundState
 	state sm.State // State until height-1.
 
-	// state changes may be triggered by msgs from peers,
+	// state changes may be triggered by: msgs from peers,
 	// msgs from ourself, or by timeouts
 	peerMsgQueue     chan msgInfo
 	internalMsgQueue chan msgInfo
@@ -303,17 +304,17 @@ func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
 
 //------------------------------------------------------------
 // Public interface for passing messages into the consensus state, possibly causing a state transition.
-// If peerKey == "", the msg is considered internal.
+// If peerID == "", the msg is considered internal.
 // Messages are added to the appropriate queue (peer or internal).
 // If the queue is full, the function may block.
 // TODO: should these return anything or let callers just use events?
 
 // AddVote inputs a vote.
-func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
-	if peerKey == "" {
+func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+	if peerID == "" {
 		cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
 	} else {
-		cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerKey}
+		cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID}
 	}
 
 	// TODO: wait for event?!
@@ -321,12 +322,12 @@ func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool,
 }
 
 // SetProposal inputs a proposal.
-func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string) error {
+func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
 
-	if peerKey == "" {
+	if peerID == "" {
 		cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
 	} else {
-		cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerKey}
+		cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID}
 	}
 
 	// TODO: wait for event?!
@@ -334,12 +335,12 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string)
 }
 
 // AddProposalBlockPart inputs a part of the proposal block.
-func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerKey string) error {
+func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
 
-	if peerKey == "" {
+	if peerID == "" {
 		cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
 	} else {
-		cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerKey}
+		cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID}
 	}
 
 	// TODO: wait for event?!
@@ -347,13 +348,13 @@ func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *ty
 }
 
 // SetProposalAndBlock inputs the proposal and all block parts.
-func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerKey string) error {
-	if err := cs.SetProposal(proposal, peerKey); err != nil {
+func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2p.ID) error {
+	if err := cs.SetProposal(proposal, peerID); err != nil {
 		return err
 	}
 	for i := 0; i < parts.Total(); i++ {
 		part := parts.GetPart(i)
-		if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey); err != nil {
+		if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil {
 			return err
 		}
 	}
@@ -476,6 +477,9 @@ func (cs *ConsensusState) updateToState(state sm.State) {
 	cs.LockedRound = 0
 	cs.LockedBlock = nil
 	cs.LockedBlockParts = nil
+	cs.ValidRound = 0
+	cs.ValidBlock = nil
+	cs.ValidBlockParts = nil
 	cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
 	cs.CommitRound = -1
 	cs.LastCommit = lastPrecommits
@@ -490,7 +494,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
 func (cs *ConsensusState) newStep() {
 	rs := cs.RoundStateEvent()
 	cs.wal.Save(rs)
-	cs.nSteps += 1
+	cs.nSteps++
 	// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
 	if cs.eventBus != nil {
 		cs.eventBus.PublishEventNewRoundStep(rs)
@@ -540,7 +544,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
 			// if the timeout is relevant to the rs
 			// go to the next step
 			cs.handleTimeout(ti, rs)
-		case <-cs.Quit:
+		case <-cs.Quit():
 
 			// NOTE: the internalMsgQueue may have signed messages from our
 			// priv_val that haven't hit the WAL, but its ok because
@@ -561,7 +565,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 	defer cs.mtx.Unlock()
 
 	var err error
-	msg, peerKey := mi.Msg, mi.PeerKey
+	msg, peerID := mi.Msg, mi.PeerID
 	switch msg := msg.(type) {
 	case *ProposalMessage:
 		// will not cause transition.
@@ -569,16 +573,20 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		err = cs.setProposal(msg.Proposal)
 	case *BlockPartMessage:
 		// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
-		_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerKey != "")
+		_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "")
 		if err != nil && msg.Round != cs.Round {
 			err = nil
 		}
 	case *VoteMessage:
 		// attempt to add the vote and dupeout the validator if its a duplicate signature
 		// if the vote gives us a 2/3-any or 2/3-one, we transition
-		err := cs.tryAddVote(msg.Vote, peerKey)
+		err := cs.tryAddVote(msg.Vote, peerID)
 		if err == ErrAddingVote {
 			// TODO: punish peer
+			// We probably don't want to stop the peer here. The vote does not
+			// necessarily come from a malicious peer but may simply have been
+			// broadcast by a typical peer.
+			// https://github.com/tendermint/tendermint/issues/1281
 		}
 
 		// NOTE: the vote is broadcast to peers by the reactor listening
@@ -591,7 +599,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
 	}
 	if err != nil {
-		cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerKey, "err", err, "msg", msg)
+		cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
 	}
 }
 
@@ -712,11 +720,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
 func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
 	counter := 0
 	addr := cs.privValidator.GetAddress()
-	valIndex, v := cs.Validators.GetByAddress(addr)
-	if v == nil {
-		// not a validator
-		valIndex = -1
-	}
+	valIndex, _ := cs.Validators.GetByAddress(addr)
 	chainID := cs.state.ChainID
 	for {
 		rs := cs.GetRoundState()
@@ -733,7 +737,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
 		}
 		cs.privValidator.SignHeartbeat(chainID, heartbeat)
 		cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
-		counter += 1
+		counter++
 		time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
 	}
 }
@@ -770,17 +774,18 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
 		return
 	}
 
-	if !cs.isProposer() {
-		cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
-		if cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
-			cs.Logger.Debug("This node is a validator")
-		} else {
-			cs.Logger.Debug("This node is not a validator")
-		}
-	} else {
+	// if not a validator, we're done
+	if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
+		cs.Logger.Debug("This node is not a validator")
+		return
+	}
+	cs.Logger.Debug("This node is a validator")
+
+	if cs.isProposer() {
 		cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
-		cs.Logger.Debug("This node is a validator")
 		cs.decideProposal(height, round)
+	} else {
+		cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
 	}
 }
 
@@ -796,6 +801,9 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
 	if cs.LockedBlock != nil {
 		// If we're locked onto a block, just choose that.
 		block, blockParts = cs.LockedBlock, cs.LockedBlockParts
+	} else if cs.ValidBlock != nil {
+		// If there is a valid block, choose that.
+		block, blockParts = cs.ValidBlock, cs.ValidBlockParts
 	} else {
 		// Create a new proposal block from state/txs from the mempool.
 		block, blockParts = cs.createProposalBlock()
@@ -840,10 +848,10 @@ func (cs *ConsensusState) isProposalComplete() bool {
 	// make sure we have the prevotes from it too
 	if cs.Proposal.POLRound < 0 {
 		return true
-	} else {
-		// if this is false the proposer is lying or we haven't received the POL yet
-		return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
 	}
+	// if this is false the proposer is lying or we haven't received the POL yet
+	return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
+
 }
 
 // Create the next block to propose and return it.
@@ -1261,7 +1269,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
 	}
 
 	// Verify signature
-	if !cs.Validators.GetProposer().PubKey.VerifyBytes(types.SignBytes(cs.state.ChainID, proposal), proposal.Signature) {
+	if !cs.Validators.GetProposer().PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) {
 		return ErrInvalidProposalSignature
 	}
 
@@ -1308,8 +1316,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
 }
 
 // Attempt to add the vote. if its a duplicate signature, dupeout the validator
-func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
-	_, err := cs.addVote(vote, peerKey)
+func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) error {
+	_, err := cs.addVote(vote, peerID)
 	if err != nil {
 		// If the vote height is off, we'll just ignore it,
 		// But if it's a conflicting sig, add it to the cs.evpool.
@@ -1335,7 +1343,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
 
 //-----------------------------------------------------------------------------
 
-func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, err error) {
+func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
 	cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height)
 
 	// A precommit for the previous height?
@@ -1347,99 +1355,115 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
 			return added, ErrVoteHeightMismatch
 		}
 		added, err = cs.LastCommit.AddVote(vote)
-		if added {
-			cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
-			cs.eventBus.PublishEventVote(types.EventDataVote{vote})
-
-			// if we can skip timeoutCommit and have all the votes now,
-			if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
-				// go straight to new round (skip timeout commit)
-				// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
-				cs.enterNewRound(cs.Height, 0)
-			}
+		if !added {
+			return added, err
+		}
+
+		cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
+		cs.eventBus.PublishEventVote(types.EventDataVote{vote})
+
+		// if we can skip timeoutCommit and have all the votes now,
+		if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
+			// go straight to new round (skip timeout commit)
+			// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
+			cs.enterNewRound(cs.Height, 0)
 		}
 
 		return
 	}
 
-	// A prevote/precommit for this height?
-	if vote.Height == cs.Height {
-		height := cs.Height
-		added, err = cs.Votes.AddVote(vote, peerKey)
-		if added {
-			cs.eventBus.PublishEventVote(types.EventDataVote{vote})
-
-			switch vote.Type {
-			case types.VoteTypePrevote:
-				prevotes := cs.Votes.Prevotes(vote.Round)
-				cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
-				// First, unlock if prevotes is a valid POL.
-				// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
-				// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
-				// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
-				// there.
-				if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
-					blockID, ok := prevotes.TwoThirdsMajority()
-					if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
-						cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
-						cs.LockedRound = 0
-						cs.LockedBlock = nil
-						cs.LockedBlockParts = nil
-						cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
-					}
-				}
-				if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
-					// Round-skip over to PrevoteWait or goto Precommit.
-					cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
-					if prevotes.HasTwoThirdsMajority() {
-						cs.enterPrecommit(height, vote.Round)
-					} else {
-						cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
-						cs.enterPrevoteWait(height, vote.Round)
-					}
-				} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
-					// If the proposal is now complete, enter prevote of cs.Round.
-					if cs.isProposalComplete() {
-						cs.enterPrevote(height, cs.Round)
-					}
-				}
-			case types.VoteTypePrecommit:
-				precommits := cs.Votes.Precommits(vote.Round)
-				cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
-				blockID, ok := precommits.TwoThirdsMajority()
-				if ok {
-					if len(blockID.Hash) == 0 {
-						cs.enterNewRound(height, vote.Round+1)
-					} else {
-						cs.enterNewRound(height, vote.Round)
-						cs.enterPrecommit(height, vote.Round)
-						cs.enterCommit(height, vote.Round)
-
-						if cs.config.SkipTimeoutCommit && precommits.HasAll() {
-							// if we have all the votes now,
-							// go straight to new round (skip timeout commit)
-							// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
-							cs.enterNewRound(cs.Height, 0)
-						}
-
-					}
-				} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
-					cs.enterNewRound(height, vote.Round)
-					cs.enterPrecommit(height, vote.Round)
-					cs.enterPrecommitWait(height, vote.Round)
+	// Height mismatch is ignored.
+	// Not necessarily a bad peer, but not favourable behaviour.
+	if vote.Height != cs.Height {
+		err = ErrVoteHeightMismatch
+		cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
+		return
+	}
+
+	height := cs.Height
+	added, err = cs.Votes.AddVote(vote, peerID)
+	if !added {
+		// Either duplicate, or error upon cs.Votes.AddByIndex()
+		return
+	}
+
+	cs.eventBus.PublishEventVote(types.EventDataVote{vote})
+
+	switch vote.Type {
+	case types.VoteTypePrevote:
+		prevotes := cs.Votes.Prevotes(vote.Round)
+		cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
+		blockID, ok := prevotes.TwoThirdsMajority()
+		// First, unlock if prevotes is a valid POL.
+		// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
+		// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
+		// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
+		// there.
+		if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
+			if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
+				cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
+				cs.LockedRound = 0
+				cs.LockedBlock = nil
+				cs.LockedBlockParts = nil
+				cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+			}
+		}
+		// Update ValidBlock
+		if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
+			// update valid value
+			if cs.ProposalBlock.HashesTo(blockID.Hash) {
+				cs.ValidRound = vote.Round
+				cs.ValidBlock = cs.ProposalBlock
+				cs.ValidBlockParts = cs.ProposalBlockParts
+			}
+			// TODO: We might want to update ValidBlock even if we don't have that block yet,
+			// and obtain the required block via gossip.
+		}
+
+		if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
+			// Round-skip over to PrevoteWait or goto Precommit.
+			cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
+			if prevotes.HasTwoThirdsMajority() {
+				cs.enterPrecommit(height, vote.Round)
+			} else {
+				cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
+				cs.enterPrevoteWait(height, vote.Round)
+			}
+		} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
+			// If the proposal is now complete, enter prevote of cs.Round.
+			if cs.isProposalComplete() {
+				cs.enterPrevote(height, cs.Round)
+			}
+		}
+	case types.VoteTypePrecommit:
+		precommits := cs.Votes.Precommits(vote.Round)
+		cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
+		blockID, ok := precommits.TwoThirdsMajority()
+		if ok {
+			if len(blockID.Hash) == 0 {
+				cs.enterNewRound(height, vote.Round+1)
+			} else {
+				cs.enterNewRound(height, vote.Round)
+				cs.enterPrecommit(height, vote.Round)
+				cs.enterCommit(height, vote.Round)
+
+				if cs.config.SkipTimeoutCommit && precommits.HasAll() {
+					// if we have all the votes now,
+					// go straight to new round (skip timeout commit)
+					// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
+					cs.enterNewRound(cs.Height, 0)
 				}
-			default:
-				cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
+
 			}
+		} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
+			cs.enterNewRound(height, vote.Round)
+			cs.enterPrecommit(height, vote.Round)
+			cs.enterPrecommitWait(height, vote.Round)
 		}
-		// Either duplicate, or error upon cs.Votes.AddByIndex()
-		return
-	} else {
-		err = ErrVoteHeightMismatch
+	default:
+		panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
 	}
 
-	// Height mismatch, bad peer?
-	cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
 	return
 }
 
@@ -1470,12 +1494,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
 		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
 		cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
 		return vote
-	} else {
-		//if !cs.replayMode {
-		cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
-		//}
-		return nil
 	}
+	//if !cs.replayMode {
+	cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
+	//}
+	return nil
 }
 
 //---------------------------------------------------------
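
The peerKey string parameters become p2p.ID throughout; an empty ID still marks an internal message. A small usage sketch, not part of the patch (assumes cs is a *ConsensusState, vote and ownVote are *types.Vote, and peerID is the sender's p2p.ID):

	// Vote relayed by a peer: pass the peer's ID so errors can be attributed.
	if _, err := cs.AddVote(vote, peerID); err != nil {
		// e.g. ErrVoteHeightMismatch for a vote from another height
	}

	// Vote signed by this node: the empty ID routes it to the internal queue.
	if _, err := cs.AddVote(ownVote, ""); err != nil {
		// handle error
	}
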
diff --git a/vendor/github.com/tendermint/tendermint/consensus/ticker.go b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
index f66856f9..b37b7c49 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/ticker.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/ticker.go
@@ -127,7 +127,7 @@ func (t *timeoutTicker) timeoutRoutine() {
 			// We can eliminate it by merging the timeoutRoutine into receiveRoutine
 			//  and managing the timeouts ourselves with a millisecond ticker
 			go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
-		case <-t.Quit:
+		case <-t.Quit():
 			return
 		}
 	}
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
index 0a0a25fe..a155bce0 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/height_vote_set.go
@@ -1,9 +1,12 @@
 package types
 
 import (
+	"errors"
+	"fmt"
 	"strings"
 	"sync"
 
+	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tmlibs/common"
 )
@@ -13,6 +16,10 @@ type RoundVoteSet struct {
 	Precommits *types.VoteSet
 }
 
+var (
+	GotVoteFromUnwantedRoundError = errors.New("Peer has sent a vote that does not match our round for more than one round")
+)
+
 /*
 Keeps track of all VoteSets from round 0 to round 'round'.
 
@@ -35,7 +42,7 @@ type HeightVoteSet struct {
 	mtx               sync.Mutex
 	round             int                  // max tracked round
 	roundVoteSets     map[int]RoundVoteSet // keys: [0...round]
-	peerCatchupRounds map[string][]int     // keys: peer.Key; values: at most 2 rounds
+	peerCatchupRounds map[p2p.ID][]int     // keys: peer.ID; values: at most 2 rounds
 }
 
 func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -53,7 +60,7 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
 	hvs.height = height
 	hvs.valSet = valSet
 	hvs.roundVoteSets = make(map[int]RoundVoteSet)
-	hvs.peerCatchupRounds = make(map[string][]int)
+	hvs.peerCatchupRounds = make(map[p2p.ID][]int)
 
 	hvs.addRound(0)
 	hvs.round = 0
@@ -101,8 +108,8 @@ func (hvs *HeightVoteSet) addRound(round int) {
 }
 
 // Duplicate votes return added=false, err=nil.
-// By convention, peerKey is "" if origin is self.
-func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
+// By convention, peerID is "" if origin is self.
+func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
 	hvs.mtx.Lock()
 	defer hvs.mtx.Unlock()
 	if !types.IsVoteTypeValid(vote.Type) {
@@ -110,15 +117,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool,
 	}
 	voteSet := hvs.getVoteSet(vote.Round, vote.Type)
 	if voteSet == nil {
-		if rndz := hvs.peerCatchupRounds[peerKey]; len(rndz) < 2 {
+		if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 {
 			hvs.addRound(vote.Round)
 			voteSet = hvs.getVoteSet(vote.Round, vote.Type)
-			hvs.peerCatchupRounds[peerKey] = append(rndz, vote.Round)
+			hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round)
 		} else {
-			// Peer has sent a vote that does not match our round,
-			// for more than one round.  Bad peer!
-			// TODO punish peer.
-			// log.Warn("Deal with peer giving votes from unwanted rounds")
+			// punish peer
+			err = GotVoteFromUnwantedRoundError
 			return
 		}
 	}
@@ -206,15 +211,15 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
 // NOTE: if there are too many peers, or too much peer churn,
 // this can cause memory issues.
 // TODO: implement ability to remove peers too
-func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID string, blockID types.BlockID) {
+func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
 	hvs.mtx.Lock()
 	defer hvs.mtx.Unlock()
 	if !types.IsVoteTypeValid(type_) {
-		return
+		return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_)
 	}
 	voteSet := hvs.getVoteSet(round, type_)
 	if voteSet == nil {
-		return
+		return nil // something we don't know about yet
 	}
-	voteSet.SetPeerMaj23(peerID, blockID)
+	return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID)
 }
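
AddVote now surfaces GotVoteFromUnwantedRoundError instead of silently dropping votes from untracked rounds, and SetPeerMaj23 reports invalid vote types. A hedged caller-side sketch (assumes hvs is a *HeightVoteSet, with vote, peerID and blockID in scope; cstypes is the alias used for this package elsewhere in the patch):

	added, err := hvs.AddVote(vote, peerID)
	if err == cstypes.GotVoteFromUnwantedRoundError {
		// the peer kept voting outside our tracked rounds; a caller may
		// decide to disconnect it here
	}
	_ = added

	if err := hvs.SetPeerMaj23(vote.Round, vote.Type, peerID, blockID); err != nil {
		// invalid vote type
	}
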
diff --git a/vendor/github.com/tendermint/tendermint/consensus/types/state.go b/vendor/github.com/tendermint/tendermint/consensus/types/state.go
index b95131f4..8e79f10d 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/types/state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/types/state.go
@@ -70,6 +70,9 @@ type RoundState struct {
 	LockedRound        int
 	LockedBlock        *types.Block
 	LockedBlockParts   *types.PartSet
+	ValidRound         int
+	ValidBlock         *types.Block
+	ValidBlockParts    *types.PartSet
 	Votes              *HeightVoteSet
 	CommitRound        int            //
 	LastCommit         *types.VoteSet // Last precommits at Height-1
@@ -106,6 +109,8 @@ func (rs *RoundState) StringIndented(indent string) string {
 %s  ProposalBlock: %v %v
 %s  LockedRound:   %v
 %s  LockedBlock:   %v %v
+%s  ValidRound:   %v
+%s  ValidBlock:   %v %v
 %s  Votes:         %v
 %s  LastCommit:    %v
 %s  LastValidators:%v
@@ -118,6 +123,8 @@ func (rs *RoundState) StringIndented(indent string) string {
 		indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(),
 		indent, rs.LockedRound,
 		indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(),
+		indent, rs.ValidRound,
+		indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(),
 		indent, rs.Votes.StringIndented(indent+"    "),
 		indent, rs.LastCommit.StringShort(),
 		indent, rs.LastValidators.StringIndented(indent+"    "),
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal.go b/vendor/github.com/tendermint/tendermint/consensus/wal.go
index dfbef879..88218940 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal.go
@@ -121,7 +121,7 @@ func (wal *baseWAL) Save(msg WALMessage) {
 	if wal.light {
 		// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
 		if mi, ok := msg.(msgInfo); ok {
-			if mi.PeerKey != "" {
+			if mi.PeerID != "" {
 				return
 			}
 		}
diff --git a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
index 45609e56..de41d401 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/wal_generator.go
@@ -11,7 +11,7 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	"github.com/tendermint/abci/example/dummy"
+	"github.com/tendermint/abci/example/kvstore"
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/proxy"
@@ -25,13 +25,13 @@ import (
 
 // WALWithNBlocks generates a consensus WAL. It does this by spinning up a
 // stripped down version of node (proxy app, event bus, consensus state) with a
-// persistent dummy application and special consensus wal instance
+// persistent kvstore application and a special consensus WAL instance
 // (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL
 // content.
 func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	config := getConfig()
 
-	app := dummy.NewPersistentDummyApplication(filepath.Join(config.DBDir(), "wal_generator"))
+	app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
 
 	logger := log.TestingLogger().With("wal_generator", "wal_generator")
 	logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks)
@@ -52,7 +52,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 		return nil, errors.Wrap(err, "failed to make genesis state")
 	}
 	blockStore := bc.NewBlockStore(blockStoreDB)
-	handshaker := NewHandshaker(stateDB, state, blockStore)
+	handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
 	proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
 	proxyApp.SetLogger(logger.With("module", "proxy"))
 	if err := proxyApp.Start(); err != nil {
diff --git a/vendor/github.com/tendermint/tendermint/evidence/reactor.go b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
index cb9706a3..6647db96 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/reactor.go
@@ -84,7 +84,8 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	_, msg, err := DecodeMessage(msgBytes)
 	if err != nil {
-		evR.Logger.Error("Error decoding message", "err", err)
+		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		evR.Switch.StopPeerForError(src, err)
 		return
 	}
 	evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
@@ -95,7 +96,8 @@ func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 			err := evR.evpool.AddEvidence(ev)
 			if err != nil {
 				evR.Logger.Info("Evidence is not valid", "evidence", msg.Evidence, "err", err)
-				// TODO: punish peer
+				// punish peer
+				evR.Switch.StopPeerForError(src, err)
 			}
 		}
 	default:
@@ -126,7 +128,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
 			// broadcast all pending evidence
 			msg := &EvidenceListMessage{evR.evpool.PendingEvidence()}
 			evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg})
-		case <-evR.Quit:
+		case <-evR.Quit():
 			return
 		}
 	}
diff --git a/vendor/github.com/tendermint/tendermint/evidence/store.go b/vendor/github.com/tendermint/tendermint/evidence/store.go
index fd40b533..7c8becd0 100644
--- a/vendor/github.com/tendermint/tendermint/evidence/store.go
+++ b/vendor/github.com/tendermint/tendermint/evidence/store.go
@@ -99,8 +99,8 @@ func (store *EvidenceStore) PendingEvidence() (evidence []types.Evidence) {
 // ListEvidence lists the evidence for the given prefix key.
 // It is wrapped by PriorityEvidence and PendingEvidence for convenience.
 func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evidence) {
-	iter := store.db.IteratorPrefix([]byte(prefixKey))
-	for iter.Next() {
+	iter := dbm.IteratePrefix(store.db, []byte(prefixKey))
+	for ; iter.Valid(); iter.Next() {
 		val := iter.Value()
 
 		var ei EvidenceInfo
diff --git a/vendor/github.com/tendermint/tendermint/mempool/mempool.go b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
index ccd615ac..ec4f9847 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
@@ -3,7 +3,6 @@ package mempool
 import (
 	"bytes"
 	"container/list"
-	"fmt"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -49,7 +48,7 @@ TODO: Better handle abci client errors. (make it automatically handle connection
 
 */
 
-const cacheSize = 100000
+var ErrTxInCache = errors.New("Tx already exists in cache")
 
 // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
 // round. Transaction validity is checked using the CheckTx abci message before the transaction is
@@ -92,9 +91,8 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
 		recheckCursor: nil,
 		recheckEnd:    nil,
 		logger:        log.NewNopLogger(),
-		cache:         newTxCache(cacheSize),
+		cache:         newTxCache(config.CacheSize),
 	}
-	mempool.initWAL()
 	proxyAppConn.SetResponseCallback(mempool.resCb)
 	return mempool
 }
@@ -131,7 +129,7 @@ func (mem *Mempool) CloseWAL() bool {
 	return true
 }
 
-func (mem *Mempool) initWAL() {
+func (mem *Mempool) InitWAL() {
 	walDir := mem.config.WalDir()
 	if walDir != "" {
 		err := cmn.EnsureDir(walDir, 0700)
@@ -161,6 +159,12 @@ func (mem *Mempool) Size() int {
 	return mem.txs.Len()
 }
 
+// FlushAppConn flushes the mempool connection to ensure async resCb calls are
+// done, e.g. from CheckTx.
+func (mem *Mempool) FlushAppConn() error {
+	return mem.proxyAppConn.FlushSync()
+}
+
 // Flush removes all transactions from the mempool and cache
 func (mem *Mempool) Flush() {
 	mem.proxyMtx.Lock()
@@ -174,10 +178,17 @@ func (mem *Mempool) Flush() {
 	}
 }
 
-// TxsFrontWait returns the first transaction in the ordered list for peer goroutines to call .NextWait() on.
-// It blocks until the mempool is not empty (ie. until the internal `mem.txs` has at least one element)
-func (mem *Mempool) TxsFrontWait() *clist.CElement {
-	return mem.txs.FrontWait()
+// TxsFront returns the first transaction in the ordered list for peer
+// goroutines to call .NextWait() on.
+func (mem *Mempool) TxsFront() *clist.CElement {
+	return mem.txs.Front()
+}
+
+// TxsWaitChan returns a channel to wait on transactions. It will be closed
+// once the mempool is not empty (i.e. the internal `mem.txs` has at least one
+// element).
+func (mem *Mempool) TxsWaitChan() <-chan struct{} {
+	return mem.txs.WaitChan()
 }
 
 // CheckTx executes a new transaction against the application to determine its validity
@@ -192,7 +203,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
 
 	// CACHE
 	if mem.cache.Exists(tx) {
-		return fmt.Errorf("Tx already exists in cache")
+		return ErrTxInCache
 	}
 	mem.cache.Push(tx)
 	// END CACHE
@@ -349,9 +360,6 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
 // NOTE: this should be called *after* block is committed by consensus.
 // NOTE: unsafe; Lock/Unlock must be managed by caller
 func (mem *Mempool) Update(height int64, txs types.Txs) error {
-	if err := mem.proxyAppConn.FlushSync(); err != nil { // To flush async resCb calls e.g. from CheckTx
-		return err
-	}
 	// First, create a lookup map of txns in new txs.
 	txsMap := make(map[string]struct{})
 	for _, tx := range txs {
@@ -449,7 +457,7 @@ func newTxCache(cacheSize int) *txCache {
 // Reset resets the txCache to empty.
 func (cache *txCache) Reset() {
 	cache.mtx.Lock()
-	cache.map_ = make(map[string]struct{}, cacheSize)
+	cache.map_ = make(map[string]struct{}, cache.size)
 	cache.list.Init()
 	cache.mtx.Unlock()
 }
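
CheckTx now returns the exported sentinel ErrTxInCache for duplicates, the cache size comes from config, and the WAL is opened with an explicit InitWAL call instead of inside NewMempool. A caller-side sketch, illustrative only (assumes mem is a *mempool.Mempool built from config and tx is a types.Tx):

	mem.InitWAL() // optional; no longer done implicitly by NewMempool

	switch err := mem.CheckTx(tx, nil); err {
	case nil:
		// tx accepted for checking
	case mempool.ErrTxInCache:
		// duplicate submission; usually safe to ignore
	default:
		// some other CheckTx failure
	}
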
diff --git a/vendor/github.com/tendermint/tendermint/mempool/reactor.go b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
index 4523f824..514347e9 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/reactor.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/reactor.go
@@ -73,7 +73,8 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	_, msg, err := DecodeMessage(msgBytes)
 	if err != nil {
-		memR.Logger.Error("Error decoding message", "err", err)
+		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		memR.Switch.StopPeerForError(src, err)
 		return
 	}
 	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
@@ -101,8 +102,6 @@ type PeerState interface {
 }
 
 // Send new mempool txs to peer.
-// TODO: Handle mempool or reactor shutdown?
-// As is this routine may block forever if no new txs come in.
 func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 	if !memR.config.Broadcast {
 		return
@@ -110,15 +109,22 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 
 	var next *clist.CElement
 	for {
-		if !memR.IsRunning() || !peer.IsRunning() {
-			return // Quit!
-		}
+		// This happens because the CElement we were looking at got garbage
+		// collected (removed). That is, .Next() returned nil. Go ahead and
+		// start from the beginning.
 		if next == nil {
-			// This happens because the CElement we were looking at got
-			// garbage collected (removed).  That is, .NextWait() returned nil.
-			// Go ahead and start from the beginning.
-			next = memR.Mempool.TxsFrontWait() // Wait until a tx is available
+			select {
+			case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available
+				if next = memR.Mempool.TxsFront(); next == nil {
+					continue
+				}
+			case <-peer.Quit():
+				return
+			case <-memR.Quit():
+				return
+			}
 		}
+
 		memTx := next.Value.(*mempoolTx)
 		// make sure the peer is up to date
 		height := memTx.Height()
@@ -137,8 +143,15 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 			continue
 		}
 
-		next = next.NextWait()
-		continue
+		select {
+		case <-next.NextWaitChan():
+			// see the start of the for loop for nil check
+			next = next.Next()
+		case <-peer.Quit():
+			return
+		case <-memR.Quit():
+			return
+		}
 	}
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/node/node.go b/vendor/github.com/tendermint/tendermint/node/node.go
index f922d832..acde2e48 100644
--- a/vendor/github.com/tendermint/tendermint/node/node.go
+++ b/vendor/github.com/tendermint/tendermint/node/node.go
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	"strings"
 
 	abci "github.com/tendermint/abci/types"
 	crypto "github.com/tendermint/go-crypto"
@@ -18,10 +17,11 @@ import (
 
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/consensus"
+	cs "github.com/tendermint/tendermint/consensus"
 	"github.com/tendermint/tendermint/evidence"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/pex"
 	"github.com/tendermint/tendermint/p2p/trust"
 	"github.com/tendermint/tendermint/proxy"
 	rpccore "github.com/tendermint/tendermint/rpc/core"
@@ -33,6 +33,7 @@ import (
 	"github.com/tendermint/tendermint/state/txindex/kv"
 	"github.com/tendermint/tendermint/state/txindex/null"
 	"github.com/tendermint/tendermint/types"
+	priv_val "github.com/tendermint/tendermint/types/priv_validator"
 	"github.com/tendermint/tendermint/version"
 
 	_ "net/http/pprof"
@@ -52,7 +53,8 @@ type DBProvider func(*DBContext) (dbm.DB, error)
 // DefaultDBProvider returns a database using the DBBackend and DBDir
 // specified in the ctx.Config.
 func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
-	return dbm.NewDB(ctx.ID, ctx.Config.DBBackend, ctx.Config.DBDir()), nil
+	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
+	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
 }
 
 // GenesisDocProvider returns a GenesisDoc.
@@ -80,7 +82,8 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
 		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
 		DefaultGenesisDocProviderFunc(config),
 		DefaultDBProvider,
-		logger)
+		logger,
+	)
 }
 
 //------------------------------------------------------------------------------
@@ -96,22 +99,21 @@ type Node struct {
 	privValidator types.PrivValidator // local node's validator key
 
 	// network
-	privKey          crypto.PrivKeyEd25519   // local node's p2p key
 	sw               *p2p.Switch             // p2p connections
-	addrBook         *p2p.AddrBook           // known peers
+	addrBook         pex.AddrBook            // known peers
 	trustMetricStore *trust.TrustMetricStore // trust metrics for all peers
 
 	// services
 	eventBus         *types.EventBus // pub/sub for services
 	stateDB          dbm.DB
-	blockStore       *bc.BlockStore              // store the blockchain to disk
-	bcReactor        *bc.BlockchainReactor       // for fast-syncing
-	mempoolReactor   *mempl.MempoolReactor       // for gossipping transactions
-	consensusState   *consensus.ConsensusState   // latest consensus state
-	consensusReactor *consensus.ConsensusReactor // for participating in the consensus
-	evidencePool     *evidence.EvidencePool      // tracking evidence
-	proxyApp         proxy.AppConns              // connection to the application
-	rpcListeners     []net.Listener              // rpc servers
+	blockStore       *bc.BlockStore         // store the blockchain to disk
+	bcReactor        *bc.BlockchainReactor  // for fast-syncing
+	mempoolReactor   *mempl.MempoolReactor  // for gossipping transactions
+	consensusState   *cs.ConsensusState     // latest consensus state
+	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
+	evidencePool     *evidence.EvidencePool // tracking evidence
+	proxyApp         proxy.AppConns         // connection to the application
+	rpcListeners     []net.Listener         // rpc servers
 	txIndexer        txindex.TxIndexer
 	indexerService   *txindex.IndexerService
 }
@@ -159,7 +161,7 @@ func NewNode(config *cfg.Config,
 	// and sync tendermint and the app by performing a handshake
 	// and replaying any necessary blocks
 	consensusLogger := logger.With("module", "consensus")
-	handshaker := consensus.NewHandshaker(stateDB, state, blockStore)
+	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
 	handshaker.SetLogger(consensusLogger)
 	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
 	proxyApp.SetLogger(logger.With("module", "proxy"))
@@ -170,8 +172,26 @@ func NewNode(config *cfg.Config,
 	// reload the state (it may have been updated by the handshake)
 	state = sm.LoadState(stateDB)
 
-	// Generate node PrivKey
-	privKey := crypto.GenPrivKeyEd25519()
+	// If an address is provided, listen on the socket for a
+	// connection from an external signing process.
+	if config.PrivValidatorListenAddr != "" {
+		var (
+			// TODO: persist this key so external signer
+			// can actually authenticate us
+			privKey = crypto.GenPrivKeyEd25519()
+			pvsc    = priv_val.NewSocketClient(
+				logger.With("module", "priv_val"),
+				config.PrivValidatorListenAddr,
+				privKey,
+			)
+		)
+
+		if err := pvsc.Start(); err != nil {
+			return nil, fmt.Errorf("Error starting private validator client: %v", err)
+		}
+
+		privValidator = pvsc
+	}
 
 	// Decide whether to fast-sync or not
 	// We don't fast-sync when the only validator is us.
@@ -185,14 +205,15 @@ func NewNode(config *cfg.Config,
 
 	// Log whether this node is a validator or an observer
 	if state.Validators.HasAddress(privValidator.GetAddress()) {
-		consensusLogger.Info("This node is a validator")
+		consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
 	} else {
-		consensusLogger.Info("This node is not a validator")
+		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
 	}
 
 	// Make MempoolReactor
 	mempoolLogger := logger.With("module", "mempool")
 	mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight)
+	mempool.InitWAL() // no need to have the mempool wal during tests
 	mempool.SetLogger(mempoolLogger)
 	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
 	mempoolReactor.SetLogger(mempoolLogger)
@@ -222,13 +243,13 @@ func NewNode(config *cfg.Config,
 	bcReactor.SetLogger(logger.With("module", "blockchain"))
 
 	// Make ConsensusReactor
-	consensusState := consensus.NewConsensusState(config.Consensus, state.Copy(),
+	consensusState := cs.NewConsensusState(config.Consensus, state.Copy(),
 		blockExec, blockStore, mempool, evidencePool)
 	consensusState.SetLogger(consensusLogger)
 	if privValidator != nil {
 		consensusState.SetPrivValidator(privValidator)
 	}
-	consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)
+	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync)
 	consensusReactor.SetLogger(consensusLogger)
 
 	p2pLogger := logger.With("module", "p2p")
@@ -241,10 +262,10 @@ func NewNode(config *cfg.Config,
 	sw.AddReactor("EVIDENCE", evidenceReactor)
 
 	// Optionally, start the pex reactor
-	var addrBook *p2p.AddrBook
+	var addrBook pex.AddrBook
 	var trustMetricStore *trust.TrustMetricStore
 	if config.P2P.PexReactor {
-		addrBook = p2p.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
+		addrBook = pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
 		addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
 
 		// Get the trust metric history data
@@ -255,11 +276,17 @@ func NewNode(config *cfg.Config,
 		trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
 		trustMetricStore.SetLogger(p2pLogger)
 
-		pexReactor := p2p.NewPEXReactor(addrBook)
+		pexReactor := pex.NewPEXReactor(addrBook,
+			&pex.PEXReactorConfig{
+				Seeds:          cmn.SplitAndTrim(config.P2P.Seeds, ",", " "),
+				SeedMode:       config.P2P.SeedMode,
+				PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")})
 		pexReactor.SetLogger(p2pLogger)
 		sw.AddReactor("PEX", pexReactor)
 	}
 
+	sw.SetAddrBook(addrBook)
+
 	// Filter peers by addr or pubkey with an ABCI query.
 	// If the query return code is OK, add peer.
 	// XXX: Query format subject to change
@@ -271,17 +298,17 @@ func NewNode(config *cfg.Config,
 				return err
 			}
 			if resQuery.IsErr() {
-				return resQuery
+				return fmt.Errorf("Error querying abci app: %v", resQuery)
 			}
 			return nil
 		})
-		sw.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error {
-			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%X", pubkey.Bytes())})
+		sw.SetIDFilter(func(id p2p.ID) error {
+			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%s", id)})
 			if err != nil {
 				return err
 			}
 			if resQuery.IsErr() {
-				return resQuery
+				return fmt.Errorf("Error querying abci app: %v", resQuery)
 			}
 			return nil
 		})
@@ -303,7 +330,7 @@ func NewNode(config *cfg.Config,
 			return nil, err
 		}
 		if config.TxIndex.IndexTags != "" {
-			txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ",")))
+			txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " ")))
 		} else if config.TxIndex.IndexAllTags {
 			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
 		} else {
@@ -328,7 +355,6 @@ func NewNode(config *cfg.Config,
 		genesisDoc:    genDoc,
 		privValidator: privValidator,
 
-		privKey:          privKey,
 		sw:               sw,
 		addrBook:         addrBook,
 		trustMetricStore: trustMetricStore,
@@ -371,19 +397,31 @@ func (n *Node) OnStart() error {
 	l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
 	n.sw.AddListener(l)
 
+	// Generate node PrivKey
+	// TODO: pass in like privValidator
+	nodeKey, err := p2p.LoadOrGenNodeKey(n.config.NodeKeyFile())
+	if err != nil {
+		return err
+	}
+	n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())
+
+	nodeInfo := n.makeNodeInfo(nodeKey.PubKey())
+	n.sw.SetNodeInfo(nodeInfo)
+	n.sw.SetNodeKey(nodeKey)
+
+	// Add ourselves to addrbook to prevent dialing ourselves
+	n.addrBook.AddOurAddress(nodeInfo.NetAddress())
+
 	// Start the switch
-	n.sw.SetNodeInfo(n.makeNodeInfo())
-	n.sw.SetNodePrivKey(n.privKey)
 	err = n.sw.Start()
 	if err != nil {
 		return err
 	}
 
-	// If seeds exist, add them to the address book and dial out
-	if n.config.P2P.Seeds != "" {
-		// dial out
-		seeds := strings.Split(n.config.P2P.Seeds, ",")
-		if err := n.DialSeeds(seeds); err != nil {
+	// Always connect to persistent peers
+	if n.config.P2P.PersistentPeers != "" {
+		err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true)
+		if err != nil {
 			return err
 		}
 	}
@@ -408,8 +446,13 @@ func (n *Node) OnStop() {
 	}
 
 	n.eventBus.Stop()
-
 	n.indexerService.Stop()
+
+	if pvsc, ok := n.privValidator.(*priv_val.SocketClient); ok {
+		if err := pvsc.Stop(); err != nil {
+			n.Logger.Error("Error stopping priv validator socket client", "err", err)
+		}
+	}
 }
 
 // RunForever waits for an interrupt signal and stops the node.
@@ -448,7 +491,7 @@ func (n *Node) ConfigureRPC() {
 
 func (n *Node) startRPC() ([]net.Listener, error) {
 	n.ConfigureRPC()
-	listenAddrs := strings.Split(n.config.RPC.ListenAddress, ",")
+	listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
 
 	if n.config.RPC.Unsafe {
 		rpccore.AddUnsafeRoutes()
@@ -494,12 +537,12 @@ func (n *Node) BlockStore() *bc.BlockStore {
 }
 
 // ConsensusState returns the Node's ConsensusState.
-func (n *Node) ConsensusState() *consensus.ConsensusState {
+func (n *Node) ConsensusState() *cs.ConsensusState {
 	return n.consensusState
 }
 
 // ConsensusReactor returns the Node's ConsensusReactor.
-func (n *Node) ConsensusReactor() *consensus.ConsensusReactor {
+func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
 	return n.consensusReactor
 }
 
@@ -534,25 +577,35 @@ func (n *Node) ProxyApp() proxy.AppConns {
 	return n.proxyApp
 }
 
-func (n *Node) makeNodeInfo() *p2p.NodeInfo {
+func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo {
 	txIndexerStatus := "on"
 	if _, ok := n.txIndexer.(*null.TxIndex); ok {
 		txIndexerStatus = "off"
 	}
-	nodeInfo := &p2p.NodeInfo{
-		PubKey:  n.privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
-		Moniker: n.config.Moniker,
+	nodeInfo := p2p.NodeInfo{
+		PubKey:  pubKey,
 		Network: n.genesisDoc.ChainID,
 		Version: version.Version,
+		Channels: []byte{
+			bc.BlockchainChannel,
+			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
+			mempl.MempoolChannel,
+			evidence.EvidenceChannel,
+		},
+		Moniker: n.config.Moniker,
 		Other: []string{
 			cmn.Fmt("wire_version=%v", wire.Version),
 			cmn.Fmt("p2p_version=%v", p2p.Version),
-			cmn.Fmt("consensus_version=%v", consensus.Version),
+			cmn.Fmt("consensus_version=%v", cs.Version),
 			cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
 			cmn.Fmt("tx_index=%v", txIndexerStatus),
 		},
 	}
 
+	if n.config.P2P.PexReactor {
+		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
+	}
+
 	rpcListenAddr := n.config.RPC.ListenAddress
 	nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
 
@@ -571,15 +624,10 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
 //------------------------------------------------------------------------------
 
 // NodeInfo returns the Node's Info from the Switch.
-func (n *Node) NodeInfo() *p2p.NodeInfo {
+func (n *Node) NodeInfo() p2p.NodeInfo {
 	return n.sw.NodeInfo()
 }
 
-// DialSeeds dials the given seeds on the Switch.
-func (n *Node) DialSeeds(seeds []string) error {
-	return n.sw.DialSeeds(n.addrBook, seeds)
-}
-
 //------------------------------------------------------------------------------
 
 var (
@@ -591,14 +639,13 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
 	bytes := db.Get(genesisDocKey)
 	if len(bytes) == 0 {
 		return nil, errors.New("Genesis doc not found")
-	} else {
-		var genDoc *types.GenesisDoc
-		err := json.Unmarshal(bytes, &genDoc)
-		if err != nil {
-			cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
-		}
-		return genDoc, nil
 	}
+	var genDoc *types.GenesisDoc
+	err := json.Unmarshal(bytes, &genDoc)
+	if err != nil {
+		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
+	}
+	return genDoc, nil
 }
 
 // panics if failed to marshal the given genesis document
diff --git a/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
new file mode 100644
index 00000000..83c8efa4
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/base_reactor.go
@@ -0,0 +1,53 @@
+package p2p
+
+import (
+	"github.com/tendermint/tendermint/p2p/conn"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+type Reactor interface {
+	cmn.Service // Start, Stop
+
+	// SetSwitch allows setting a switch.
+	SetSwitch(*Switch)
+
+	// GetChannels returns the list of channel descriptors.
+	GetChannels() []*conn.ChannelDescriptor
+
+	// AddPeer is called by the switch when a new peer is added.
+	AddPeer(peer Peer)
+
+	// RemovePeer is called by the switch when the peer is stopped (due to error
+	// or other reason).
+	RemovePeer(peer Peer, reason interface{})
+
+	// Receive is called when msgBytes is received from peer.
+	//
+	// NOTE reactor can not keep msgBytes around after Receive completes without
+	// copying.
+	//
+	// CONTRACT: msgBytes are not nil.
+	Receive(chID byte, peer Peer, msgBytes []byte)
+}
+
+//--------------------------------------
+
+type BaseReactor struct {
+	cmn.BaseService // Provides Start, Stop, .Quit
+	Switch          *Switch
+}
+
+func NewBaseReactor(name string, impl Reactor) *BaseReactor {
+	return &BaseReactor{
+		BaseService: *cmn.NewBaseService(nil, name, impl),
+		Switch:      nil,
+	}
+}
+
+func (br *BaseReactor) SetSwitch(sw *Switch) {
+	br.Switch = sw
+}
+func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor        { return nil }
+func (*BaseReactor) AddPeer(peer Peer)                             {}
+func (*BaseReactor) RemovePeer(peer Peer, reason interface{})      {}
+func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
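
BaseReactor supplies no-op defaults for the Reactor interface, so a concrete reactor only overrides what it needs (typically GetChannels and Receive). A hypothetical minimal reactor built on it (EchoReactor is an invented name, for illustration only; it assumes the Logger field inherited from cmn.BaseService):

	type EchoReactor struct {
		p2p.BaseReactor
	}

	func NewEchoReactor() *EchoReactor {
		r := &EchoReactor{}
		r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
		return r
	}

	// Receive overrides the no-op default; msgBytes must be copied if retained.
	func (r *EchoReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
		r.Logger.Debug("received", "chId", chID, "peer", peer, "len", len(msgBytes))
	}
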
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn_go110.go b/vendor/github.com/tendermint/tendermint/p2p/conn/conn_go110.go
similarity index 86%
rename from vendor/github.com/tendermint/tendermint/p2p/conn_go110.go
rename to vendor/github.com/tendermint/tendermint/p2p/conn/conn_go110.go
index 2fca7c3d..68218810 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn_go110.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/conn_go110.go
@@ -1,6 +1,6 @@
 // +build go1.10
 
-package p2p
+package conn
 
 // Go1.10 has a proper net.Conn implementation that
 // has the SetDeadline method implemented as per
@@ -10,6 +10,6 @@ package p2p
 
 import "net"
 
-func netPipe() (net.Conn, net.Conn) {
+func NetPipe() (net.Conn, net.Conn) {
 	return net.Pipe()
 }
diff --git a/vendor/github.com/tendermint/tendermint/p2p/conn_notgo110.go b/vendor/github.com/tendermint/tendermint/p2p/conn/conn_notgo110.go
similarity index 94%
rename from vendor/github.com/tendermint/tendermint/p2p/conn_notgo110.go
rename to vendor/github.com/tendermint/tendermint/p2p/conn/conn_notgo110.go
index a5c2f741..ed642eb5 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/conn_notgo110.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/conn_notgo110.go
@@ -1,6 +1,6 @@
 // +build !go1.10
 
-package p2p
+package conn
 
 import (
 	"net"
@@ -24,7 +24,7 @@ func (p *pipe) SetDeadline(t time.Time) error {
 	return nil
 }
 
-func netPipe() (net.Conn, net.Conn) {
+func NetPipe() (net.Conn, net.Conn) {
 	p1, p2 := net.Pipe()
 	return &pipe{p1}, &pipe{p2}
 }
diff --git a/vendor/github.com/tendermint/tendermint/p2p/connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
similarity index 86%
rename from vendor/github.com/tendermint/tendermint/p2p/connection.go
rename to vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
index 626aeb10..9a3360f2 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/connection.go
@@ -1,30 +1,26 @@
-package p2p
+package conn
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"math"
 	"net"
-	"runtime/debug"
 	"sync/atomic"
 	"time"
 
 	wire "github.com/tendermint/go-wire"
-	tmlegacy "github.com/tendermint/go-wire/nowriter/tmlegacy"
 	cmn "github.com/tendermint/tmlibs/common"
 	flow "github.com/tendermint/tmlibs/flowrate"
 	"github.com/tendermint/tmlibs/log"
 )
 
-var legacy = tmlegacy.TMEncoderLegacy{}
-
 const (
 	numBatchMsgPackets = 10
 	minReadBufferSize  = 1024
 	minWriteBufferSize = 65536
 	updateStats        = 2 * time.Second
-	pingTimeout        = 40 * time.Second
 
 	// some of these defaults are written in the user config
 	// flushThrottle, sendRate, recvRate
@@ -37,6 +33,8 @@ const (
 	defaultSendRate            = int64(512000) // 500KB/s
 	defaultRecvRate            = int64(512000) // 500KB/s
 	defaultSendTimeout         = 10 * time.Second
+	defaultPingInterval        = 60 * time.Second
+	defaultPongTimeout         = 45 * time.Second
 )
 
 type receiveCbFunc func(chID byte, msgBytes []byte)
@@ -84,13 +82,17 @@ type MConnection struct {
 	errored     uint32
 	config      *MConnConfig
 
-	quit         chan struct{}
-	flushTimer   *cmn.ThrottleTimer // flush writes as necessary but throttled.
-	pingTimer    *cmn.RepeatTimer   // send pings periodically
-	chStatsTimer *cmn.RepeatTimer   // update channel stats periodically
+	quit       chan struct{}
+	flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
+	pingTimer  *cmn.RepeatTimer   // send pings periodically
+
+	// close conn if pong is not received in pongTimeout
+	pongTimer     *time.Timer
+	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
 
-	LocalAddress  *NetAddress
-	RemoteAddress *NetAddress
+	chStatsTimer *cmn.RepeatTimer // update channel stats periodically
+
+	created time.Time // time of creation
 }
 
 // MConnConfig is a MConnection configuration.
@@ -98,13 +100,21 @@ type MConnConfig struct {
 	SendRate int64 `mapstructure:"send_rate"`
 	RecvRate int64 `mapstructure:"recv_rate"`
 
-	maxMsgPacketPayloadSize int
+	// Maximum payload size
+	MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"`
 
-	flushThrottle time.Duration
+	// Interval to flush writes (throttled)
+	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
+
+	// Interval to send pings
+	PingInterval time.Duration `mapstructure:"ping_interval"`
+
+	// Maximum wait time for pongs
+	PongTimeout time.Duration `mapstructure:"pong_timeout"`
 }
 
 func (cfg *MConnConfig) maxMsgPacketTotalSize() int {
-	return cfg.maxMsgPacketPayloadSize + maxMsgPacketOverheadSize
+	return cfg.MaxMsgPacketPayloadSize + maxMsgPacketOverheadSize
 }
 
 // DefaultMConnConfig returns the default config.
@@ -112,8 +122,10 @@ func DefaultMConnConfig() *MConnConfig {
 	return &MConnConfig{
 		SendRate:                defaultSendRate,
 		RecvRate:                defaultRecvRate,
-		maxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize,
-		flushThrottle:           defaultFlushThrottle,
+		MaxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize,
+		FlushThrottle:           defaultFlushThrottle,
+		PingInterval:            defaultPingInterval,
+		PongTimeout:             defaultPongTimeout,
 	}
 }
 
@@ -129,6 +141,10 @@ func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive recei
 
 // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
 func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config *MConnConfig) *MConnection {
+	if config.PongTimeout >= config.PingInterval {
+		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
+	}
+
 	mconn := &MConnection{
 		conn:        conn,
 		bufReader:   bufio.NewReaderSize(conn, minReadBufferSize),
@@ -136,13 +152,10 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
 		sendMonitor: flow.New(0, 0),
 		recvMonitor: flow.New(0, 0),
 		send:        make(chan struct{}, 1),
-		pong:        make(chan struct{}),
+		pong:        make(chan struct{}, 1),
 		onReceive:   onReceive,
 		onError:     onError,
 		config:      config,
-
-		LocalAddress:  NewNetAddress(conn.LocalAddr()),
-		RemoteAddress: NewNetAddress(conn.RemoteAddr()),
 	}
 
 	// Create channels
@@ -175,8 +188,9 @@ func (c *MConnection) OnStart() error {
 		return err
 	}
 	c.quit = make(chan struct{})
-	c.flushTimer = cmn.NewThrottleTimer("flush", c.config.flushThrottle)
-	c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout)
+	c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle)
+	c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval)
+	c.pongTimeoutCh = make(chan bool, 1)
 	c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats)
 	go c.sendRoutine()
 	go c.recvRoutine()
@@ -193,11 +207,11 @@ func (c *MConnection) OnStop() {
 		close(c.quit)
 	}
 	c.conn.Close() // nolint: errcheck
+
 	// We can't close pong safely here because
 	// recvRoutine may write to it after we've stopped.
 	// Though it doesn't need to get closed at all,
 	// we close it @ recvRoutine.
-	// close(c.pong)
 }
 
 func (c *MConnection) String() string {
@@ -215,8 +229,7 @@ func (c *MConnection) flush() {
 // Catch panics, usually caused by remote disconnects.
 func (c *MConnection) _recover() {
 	if r := recover(); r != nil {
-		stack := debug.Stack()
-		err := cmn.StackError{r, stack}
+		err := cmn.ErrorWrap(r, "recovered from panic")
 		c.stopForError(err)
 	}
 }
@@ -320,12 +333,26 @@ FOR_LOOP:
 			}
 		case <-c.pingTimer.Chan():
 			c.Logger.Debug("Send Ping")
-			legacy.WriteOctet(packetTypePing, c.bufWriter, &n, &err)
+			wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
 			c.sendMonitor.Update(int(n))
+			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
+			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
+				select {
+				case c.pongTimeoutCh <- true:
+				default:
+				}
+			})
 			c.flush()
+		case timeout := <-c.pongTimeoutCh:
+			if timeout {
+				c.Logger.Debug("Pong timeout")
+				err = errors.New("pong timeout")
+			} else {
+				c.stopPongTimer()
+			}
 		case <-c.pong:
 			c.Logger.Debug("Send Pong")
-			legacy.WriteOctet(packetTypePong, c.bufWriter, &n, &err)
+			wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
 			c.sendMonitor.Update(int(n))
 			c.flush()
 		case <-c.quit:
@@ -353,6 +380,7 @@ FOR_LOOP:
 	}
 
 	// Cleanup
+	c.stopPongTimer()
 }
 
 // Returns true if messages from channels were exhausted.
@@ -394,9 +422,8 @@ func (c *MConnection) sendMsgPacket() bool {
 	// Nothing to send?
 	if leastChannel == nil {
 		return true
-	} else {
-		// c.Logger.Info("Found a msgPacket to send")
 	}
+	// c.Logger.Info("Found a msgPacket to send")
 
 	// Make & send a msgPacket from this channel
 	n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
@@ -413,6 +440,7 @@ func (c *MConnection) sendMsgPacket() bool {
 // recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer.
 // After a whole message has been assembled, it's pushed to onReceive().
 // Blocks depending on how the connection is throttled.
+// Otherwise, it never blocks.
 func (c *MConnection) recvRoutine() {
 	defer c._recover()
 
@@ -453,11 +481,20 @@ FOR_LOOP:
 		switch pktType {
 		case packetTypePing:
 			// TODO: prevent abuse, as they cause flush()'s.
+			// https://github.com/tendermint/tendermint/issues/1190
 			c.Logger.Debug("Receive Ping")
-			c.pong <- struct{}{}
+			select {
+			case c.pong <- struct{}{}:
+			default:
+				// never block
+			}
 		case packetTypePong:
-			// do nothing
 			c.Logger.Debug("Receive Pong")
+			select {
+			case c.pongTimeoutCh <- false:
+			default:
+				// never block
+			}
 		case packetTypeMsg:
 			pkt, n, err := msgPacket{}, int(0), error(nil)
 			wire.ReadBinaryPtr(&pkt, c.bufReader, c.config.maxMsgPacketTotalSize(), &n, &err)
@@ -474,6 +511,7 @@ FOR_LOOP:
 				err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
 				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
 				c.stopForError(err)
+				break FOR_LOOP
 			}
 
 			msgBytes, err := channel.recvMsgPacket(pkt)
@@ -493,11 +531,8 @@ FOR_LOOP:
 			err := fmt.Errorf("Unknown message type %X", pktType)
 			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
 			c.stopForError(err)
+			break FOR_LOOP
 		}
-
-		// TODO: shouldn't this go in the sendRoutine?
-		// Better to send a ping packet when *we* haven't sent anything for a while.
-		c.pingTimer.Reset()
 	}
 
 	// Cleanup
@@ -507,7 +542,18 @@ FOR_LOOP:
 	}
 }
 
+// not goroutine-safe
+func (c *MConnection) stopPongTimer() {
+	if c.pongTimer != nil {
+		if !c.pongTimer.Stop() {
+			<-c.pongTimer.C
+		}
+		c.pongTimer = nil
+	}
+}
+
 type ConnectionStatus struct {
+	Duration    time.Duration
 	SendMonitor flow.Status
 	RecvMonitor flow.Status
 	Channels    []ChannelStatus
@@ -523,6 +569,7 @@ type ChannelStatus struct {
 
 func (c *MConnection) Status() ConnectionStatus {
 	var status ConnectionStatus
+	status.Duration = time.Since(c.created)
 	status.SendMonitor = c.sendMonitor.Status()
 	status.RecvMonitor = c.recvMonitor.Status()
 	status.Channels = make([]ChannelStatus, len(c.channels))
@@ -588,7 +635,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
 		desc:                    desc,
 		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
 		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
-		maxMsgPacketPayloadSize: conn.config.maxMsgPacketPayloadSize,
+		maxMsgPacketPayloadSize: conn.config.MaxMsgPacketPayloadSize,
 	}
 }
 
@@ -677,11 +724,12 @@ func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) {
 }
 
 func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) {
-	legacy.WriteOctet(packetTypeMsg, w, n, err)
+	wire.WriteByte(packetTypeMsg, w, n, err)
 	wire.WriteBinary(packet, w, n, err)
 }
 
-// Handles incoming msgPackets. Returns a msg bytes if msg is complete.
+// Handles incoming msgPackets. It returns the message bytes if the message is
+// complete. NOTE: message bytes may change on the next call to recvMsgPacket.
 // Not goroutine-safe
 func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
 	ch.Logger.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet)
@@ -691,6 +739,7 @@ func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
 	ch.recving = append(ch.recving, packet.Bytes...)
 	if packet.EOF == byte(0x01) {
 		msgBytes := ch.recving
+
 		// clear the slice without re-allocating.
 		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
 		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
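The changes above replace the fixed pingTimeout with a PingInterval/PongTimeout pair (the constructor enforces PongTimeout < PingInterval): each ping arms a one-shot timer, a received pong disarms it via pongTimeoutCh, and both writers use non-blocking sends so neither the send nor the receive routine can stall. A standalone sketch of that pattern with made-up durations (illustrative, not Tendermint code):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Buffered like MConnection.pongTimeoutCh: true means timeout, false means pong.
	pongTimeoutCh := make(chan bool, 1)

	// Armed when a ping is written (50ms stands in for config.PongTimeout).
	pongTimer := time.AfterFunc(50*time.Millisecond, func() {
		select {
		case pongTimeoutCh <- true:
		default: // never block the timer goroutine
		}
	})

	// Simulate the recv side seeing a pong before the deadline.
	go func() {
		time.Sleep(10 * time.Millisecond)
		select {
		case pongTimeoutCh <- false:
		default: // never block the recv routine
		}
	}()

	if timedOut := <-pongTimeoutCh; timedOut {
		fmt.Println("pong timeout: stop the connection")
	} else {
		pongTimer.Stop() // disarm, mirroring stopPongTimer
		fmt.Println("pong received in time")
	}
}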
diff --git a/vendor/github.com/tendermint/tendermint/p2p/secret_connection.go b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
similarity index 79%
rename from vendor/github.com/tendermint/tendermint/p2p/secret_connection.go
rename to vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
index aec0a751..bc67abf3 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/secret_connection.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/conn/secret_connection.go
@@ -4,7 +4,7 @@
 // is known ahead of time, and thus we are technically
 // still vulnerable to MITM. (TODO!)
 // See docs/sts-final.pdf for more info
-package p2p
+package conn
 
 import (
 	"bytes"
@@ -20,8 +20,8 @@ import (
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/ripemd160"
 
-	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire"
+	crypto "github.com/tendermint/go-crypto"
+	wire "github.com/tendermint/go-wire"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -38,7 +38,7 @@ type SecretConnection struct {
 	recvBuffer []byte
 	recvNonce  *[24]byte
 	sendNonce  *[24]byte
-	remPubKey  crypto.PubKeyEd25519
+	remPubKey  crypto.PubKey
 	shrSecret  *[32]byte // shared secret
 }
 
@@ -46,9 +46,9 @@ type SecretConnection struct {
 // Returns nil if error in handshake.
 // Caller should call conn.Close()
 // See docs/sts-final.pdf for more information.
-func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKeyEd25519) (*SecretConnection, error) {
+func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
 
-	locPubKey := locPrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
+	locPubKey := locPrivKey.PubKey()
 
 	// Generate ephemeral keys for perfect forward secrecy.
 	locEphPub, locEphPriv := genEphKeys()
@@ -100,12 +100,12 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKeyEd25
 	}
 
 	// We've authorized.
-	sc.remPubKey = remPubKey.Unwrap().(crypto.PubKeyEd25519)
+	sc.remPubKey = remPubKey
 	return sc, nil
 }
 
 // Returns authenticated remote pubkey
-func (sc *SecretConnection) RemotePubKey() crypto.PubKeyEd25519 {
+func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
 	return sc.remPubKey
 }
 
@@ -113,7 +113,7 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKeyEd25519 {
 // CONTRACT: data smaller than dataMaxSize is read atomically.
 func (sc *SecretConnection) Write(data []byte) (n int, err error) {
 	for 0 < len(data) {
-		var frame []byte = make([]byte, totalFrameSize)
+		var frame = make([]byte, totalFrameSize)
 		var chunk []byte
 		if dataMaxSize < len(data) {
 			chunk = data[:dataMaxSize]
@@ -136,9 +136,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
 		_, err := sc.conn.Write(sealedFrame)
 		if err != nil {
 			return n, err
-		} else {
-			n += len(chunk)
 		}
+		n += len(chunk)
 	}
 	return
 }
@@ -200,26 +199,36 @@ func genEphKeys() (ephPub, ephPriv *[32]byte) {
 }
 
 func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
-	var err1, err2 error
-
-	cmn.Parallel(
-		func() {
-			_, err1 = conn.Write(locEphPub[:])
+	// Send our pubkey and receive theirs in tandem.
+	var trs, _ = cmn.Parallel(
+		func(_ int) (val interface{}, err error, abort bool) {
+			var _, err1 = conn.Write(locEphPub[:])
+			if err1 != nil {
+				return nil, err1, true // abort
+			} else {
+				return nil, nil, false
+			}
 		},
-		func() {
-			remEphPub = new([32]byte)
-			_, err2 = io.ReadFull(conn, remEphPub[:])
+		func(_ int) (val interface{}, err error, abort bool) {
+			var _remEphPub [32]byte
+			var _, err2 = io.ReadFull(conn, _remEphPub[:])
+			if err2 != nil {
+				return nil, err2, true // abort
+			} else {
+				return _remEphPub, nil, false
+			}
 		},
 	)
 
-	if err1 != nil {
-		return nil, err1
-	}
-	if err2 != nil {
-		return nil, err2
+	// If error:
+	if trs.FirstError() != nil {
+		err = trs.FirstError()
+		return
 	}
 
-	return remEphPub, nil
+	// Otherwise:
+	var _remEphPub = trs.FirstValue().([32]byte)
+	return &_remEphPub, nil
 }
 
 func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
@@ -258,8 +267,8 @@ func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
 	return hash32(append(loPubKey[:], hiPubKey[:]...))
 }
 
-func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKeyEd25519) (signature crypto.SignatureEd25519) {
-	signature = locPrivKey.Sign(challenge[:]).Unwrap().(crypto.SignatureEd25519)
+func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
+	signature = locPrivKey.Sign(challenge[:])
 	return
 }
 
@@ -268,33 +277,42 @@ type authSigMessage struct {
 	Sig crypto.Signature
 }
 
-func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signature crypto.SignatureEd25519) (*authSigMessage, error) {
-	var recvMsg authSigMessage
-	var err1, err2 error
-
-	cmn.Parallel(
-		func() {
+func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg *authSigMessage, err error) {
+	// Send our info and receive theirs in tandem.
+	var trs, _ = cmn.Parallel(
+		func(_ int) (val interface{}, err error, abort bool) {
 			msgBytes := wire.BinaryBytes(authSigMessage{pubKey.Wrap(), signature.Wrap()})
-			_, err1 = sc.Write(msgBytes)
+			var _, err1 = sc.Write(msgBytes)
+			if err1 != nil {
+				return nil, err1, true // abort
+			} else {
+				return nil, nil, false
+			}
 		},
-		func() {
+		func(_ int) (val interface{}, err error, abort bool) {
 			readBuffer := make([]byte, authSigMsgSize)
-			_, err2 = io.ReadFull(sc, readBuffer)
+			var _, err2 = io.ReadFull(sc, readBuffer)
 			if err2 != nil {
-				return
+				return nil, err2, true // abort
 			}
 			n := int(0) // not used.
-			recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
-		})
+			var _recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
+			if err2 != nil {
+				return nil, err2, true // abort
+			} else {
+				return _recvMsg, nil, false
+			}
+		},
+	)
 
-	if err1 != nil {
-		return nil, err1
-	}
-	if err2 != nil {
-		return nil, err2
+	// If error:
+	if trs.FirstError() != nil {
+		err = trs.FirstError()
+		return
 	}
 
-	return &recvMsg, nil
+	var _recvMsg = trs.FirstValue().(authSigMessage)
+	return &_recvMsg, nil
 }
 
 //--------------------------------------------------------------------------------
@@ -328,7 +346,7 @@ func incr2Nonce(nonce *[24]byte) {
 // increment nonce big-endian by 1 with wraparound.
 func incrNonce(nonce *[24]byte) {
 	for i := 23; 0 <= i; i-- {
-		nonce[i] += 1
+		nonce[i]++
 		if nonce[i] != 0 {
 			return
 		}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/errors.go b/vendor/github.com/tendermint/tendermint/p2p/errors.go
new file mode 100644
index 00000000..cb6a7051
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/errors.go
@@ -0,0 +1,20 @@
+package p2p
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	ErrSwitchDuplicatePeer = errors.New("Duplicate peer")
+	ErrSwitchConnectToSelf = errors.New("Connect to self")
+)
+
+type ErrSwitchAuthenticationFailure struct {
+	Dialed *NetAddress
+	Got    ID
+}
+
+func (e ErrSwitchAuthenticationFailure) Error() string {
+	return fmt.Sprintf("Failed to authenticate peer. Dialed %v, but got peer with ID %s", e.Dialed, e.Got)
+}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/key.go b/vendor/github.com/tendermint/tendermint/p2p/key.go
new file mode 100644
index 00000000..6d0f2858
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/key.go
@@ -0,0 +1,112 @@
+package p2p
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+
+	crypto "github.com/tendermint/go-crypto"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+// ID is a hex-encoded crypto.Address
+type ID string
+
+// IDByteLength is the length of a crypto.Address. Currently only 20.
+// TODO: support addresses of other lengths?
+const IDByteLength = 20
+
+//------------------------------------------------------------------------------
+// Persistent peer ID
+// TODO: encrypt on disk
+
+// NodeKey is the persistent peer key.
+// It contains the node's private key for authentication.
+type NodeKey struct {
+	PrivKey crypto.PrivKey `json:"priv_key"` // our priv key
+}
+
+// ID returns the peer's canonical ID - the hash of its public key.
+func (nodeKey *NodeKey) ID() ID {
+	return PubKeyToID(nodeKey.PubKey())
+}
+
+// PubKey returns the peer's PubKey
+func (nodeKey *NodeKey) PubKey() crypto.PubKey {
+	return nodeKey.PrivKey.PubKey()
+}
+
+// PubKeyToID returns the ID corresponding to the given PubKey.
+// It's the hex-encoding of the pubKey.Address().
+func PubKeyToID(pubKey crypto.PubKey) ID {
+	return ID(hex.EncodeToString(pubKey.Address()))
+}
+
+// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath.
+// If the file does not exist, it generates and saves a new NodeKey.
+func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
+	if cmn.FileExists(filePath) {
+		nodeKey, err := loadNodeKey(filePath)
+		if err != nil {
+			return nil, err
+		}
+		return nodeKey, nil
+	}
+	return genNodeKey(filePath)
+}
+
+func loadNodeKey(filePath string) (*NodeKey, error) {
+	jsonBytes, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+	nodeKey := new(NodeKey)
+	err = json.Unmarshal(jsonBytes, nodeKey)
+	if err != nil {
+		return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
+	}
+	return nodeKey, nil
+}
+
+func genNodeKey(filePath string) (*NodeKey, error) {
+	privKey := crypto.GenPrivKeyEd25519().Wrap()
+	nodeKey := &NodeKey{
+		PrivKey: privKey,
+	}
+
+	jsonBytes, err := json.Marshal(nodeKey)
+	if err != nil {
+		return nil, err
+	}
+	err = ioutil.WriteFile(filePath, jsonBytes, 0600)
+	if err != nil {
+		return nil, err
+	}
+	return nodeKey, nil
+}
+
+//------------------------------------------------------------------------------
+
+// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1.
+// It can be used as a Proof of Work target.
+// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits.
+func MakePoWTarget(difficulty, targetBits uint) []byte {
+	if targetBits%8 != 0 {
+		panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits))
+	}
+	if difficulty >= targetBits {
+		panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits))
+	}
+	targetBytes := targetBits / 8
+	zeroPrefixLen := (int(difficulty) / 8)
+	prefix := bytes.Repeat([]byte{0}, zeroPrefixLen)
+	mod := (difficulty % 8)
+	if mod > 0 {
+		nonZeroPrefix := byte(1<<(8-mod) - 1)
+		prefix = append(prefix, nonZeroPrefix)
+	}
+	tailLen := int(targetBytes) - len(prefix)
+	return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...)
+}
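A worked example of the target layout MakePoWTarget builds (illustrative only, not part of the patch): for difficulty 9 and targetBits 24 the target is 2^(24-9)-1 = 0x7FFF, i.e. one full zero byte, one partial byte of 0x7F, and 0xFF padding for the rest.

package main

import "fmt"

func main() {
	difficulty, targetBits := uint(9), uint(24)

	// One zero byte for each 8 bits of difficulty...
	target := make([]byte, difficulty/8)
	// ...a partial byte for the leftover zero bits...
	if mod := difficulty % 8; mod > 0 {
		target = append(target, byte(1<<(8-mod)-1)) // here: 0x7F
	}
	// ...and 0xFF for the remaining bytes.
	for uint(len(target)) < targetBits/8 {
		target = append(target, 0xFF)
	}

	fmt.Printf("% X\n", target) // prints: 00 7F FF (big-endian 2^15 - 1)
}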
diff --git a/vendor/github.com/tendermint/tendermint/p2p/listener.go b/vendor/github.com/tendermint/tendermint/p2p/listener.go
index 884c45ee..e698765c 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/listener.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/listener.go
@@ -72,7 +72,7 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 
 	// Determine internal address...
 	var intAddr *NetAddress
-	intAddr, err = NewNetAddressString(lAddr)
+	intAddr, err = NewNetAddressStringWithOptionalID(lAddr)
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
index 41c2cc97..a77090a7 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/netaddress.go
@@ -5,6 +5,7 @@
 package p2p
 
 import (
+	"encoding/hex"
 	"flag"
 	"fmt"
 	"net"
@@ -12,41 +13,81 @@ import (
 	"strings"
 	"time"
 
+	"github.com/pkg/errors"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // NetAddress defines information about a peer on the network
-// including its IP address, and port.
+// including its ID, IP address, and port.
 type NetAddress struct {
+	ID   ID
 	IP   net.IP
 	Port uint16
 	str  string
 }
 
+// IDAddressString returns id@hostPort.
+func IDAddressString(id ID, hostPort string) string {
+	return fmt.Sprintf("%s@%s", id, hostPort)
+}
+
 // NewNetAddress returns a new NetAddress using the provided TCP
 // address. When testing, other net.Addr (except TCP) will result in
 // using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will
 // panic.
 // TODO: socks proxies?
-func NewNetAddress(addr net.Addr) *NetAddress {
+func NewNetAddress(id ID, addr net.Addr) *NetAddress {
 	tcpAddr, ok := addr.(*net.TCPAddr)
 	if !ok {
 		if flag.Lookup("test.v") == nil { // normal run
 			cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. Got: %v", addr))
 		} else { // in testing
-			return NewNetAddressIPPort(net.IP("0.0.0.0"), 0)
+			netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0)
+			netAddr.ID = id
+			return netAddr
 		}
 	}
 	ip := tcpAddr.IP
 	port := uint16(tcpAddr.Port)
-	return NewNetAddressIPPort(ip, port)
+	na := NewNetAddressIPPort(ip, port)
+	na.ID = id
+	return na
 }
 
-// NewNetAddressString returns a new NetAddress using the provided
-// address in the form of "IP:Port". Also resolves the host if host
-// is not an IP.
+// NewNetAddressString returns a new NetAddress using the provided address in
+// the form of "ID@IP:Port".
+// Also resolves the host if host is not an IP.
 func NewNetAddressString(addr string) (*NetAddress, error) {
-	host, portStr, err := net.SplitHostPort(removeProtocolIfDefined(addr))
+	spl := strings.Split(addr, "@")
+	if len(spl) < 2 {
+		return nil, fmt.Errorf("Address (%s) does not contain ID", addr)
+	}
+	return NewNetAddressStringWithOptionalID(addr)
+}
+
+// NewNetAddressStringWithOptionalID returns a new NetAddress using the
+// provided address in the form of "ID@IP:Port", where the ID is optional.
+// Also resolves the host if host is not an IP.
+func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
+	addrWithoutProtocol := removeProtocolIfDefined(addr)
+
+	var id ID
+	spl := strings.Split(addrWithoutProtocol, "@")
+	if len(spl) == 2 {
+		idStr := spl[0]
+		idBytes, err := hex.DecodeString(idStr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Address (%s) contains invalid ID", addrWithoutProtocol)
+		}
+		if len(idBytes) != IDByteLength {
+			return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes",
+				addrWithoutProtocol, len(idBytes), IDByteLength)
+		}
+
+		id, addrWithoutProtocol = ID(idStr), spl[1]
+	}
+
+	host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
 	if err != nil {
 		return nil, err
 	}
@@ -68,6 +109,7 @@ func NewNetAddressString(addr string) (*NetAddress, error) {
 	}
 
 	na := NewNetAddressIPPort(ip, uint16(port))
+	na.ID = id
 	return na, nil
 }
 
@@ -90,49 +132,56 @@ func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
 // NewNetAddressIPPort returns a new NetAddress using the provided IP
 // and port number.
 func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
-	na := &NetAddress{
+	return &NetAddress{
 		IP:   ip,
 		Port: port,
-		str: net.JoinHostPort(
-			ip.String(),
-			strconv.FormatUint(uint64(port), 10),
-		),
 	}
-	return na
 }
 
-// Equals reports whether na and other are the same addresses.
+// Equals reports whether na and other are the same addresses,
+// including their ID, IP, and Port.
 func (na *NetAddress) Equals(other interface{}) bool {
 	if o, ok := other.(*NetAddress); ok {
 		return na.String() == o.String()
 	}
-
 	return false
 }
 
-func (na *NetAddress) Less(other interface{}) bool {
+// Same returns true if na has the same non-empty ID or DialString as other.
+func (na *NetAddress) Same(other interface{}) bool {
 	if o, ok := other.(*NetAddress); ok {
-		return na.String() < o.String()
+		if na.DialString() == o.DialString() {
+			return true
+		}
+		if na.ID != "" && na.ID == o.ID {
+			return true
+		}
 	}
-
-	cmn.PanicSanity("Cannot compare unequal types")
 	return false
 }
 
-// String representation.
+// String representation: <ID>@<IP>:<PORT>
 func (na *NetAddress) String() string {
 	if na.str == "" {
-		na.str = net.JoinHostPort(
-			na.IP.String(),
-			strconv.FormatUint(uint64(na.Port), 10),
-		)
+		addrStr := na.DialString()
+		if na.ID != "" {
+			addrStr = IDAddressString(na.ID, addrStr)
+		}
+		na.str = addrStr
 	}
 	return na.str
 }
 
+func (na *NetAddress) DialString() string {
+	return net.JoinHostPort(
+		na.IP.String(),
+		strconv.FormatUint(uint64(na.Port), 10),
+	)
+}
+
 // Dial calls net.Dial on the address.
 func (na *NetAddress) Dial() (net.Conn, error) {
-	conn, err := net.Dial("tcp", na.String())
+	conn, err := net.Dial("tcp", na.DialString())
 	if err != nil {
 		return nil, err
 	}
@@ -141,7 +190,7 @@ func (na *NetAddress) Dial() (net.Conn, error) {
 
 // DialTimeout calls net.DialTimeout on the address.
 func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
-	conn, err := net.DialTimeout("tcp", na.String(), timeout)
+	conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
 	if err != nil {
 		return nil, err
 	}
@@ -256,7 +305,7 @@ func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
 func removeProtocolIfDefined(addr string) string {
 	if strings.Contains(addr, "://") {
 		return strings.Split(addr, "://")[1]
-	} else {
-		return addr
 	}
+	return addr
+
 }
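The two constructors above differ only in whether the ID prefix is mandatory. A short sketch of their behavior (not part of the patch; the 40-hex-character ID and the port are made-up values):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	const id = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef" // 20 bytes, hex-encoded

	// ID is required by the strict constructor...
	na, err := p2p.NewNetAddressString(id + "@127.0.0.1:26656")
	fmt.Println(na, err) // <id>@127.0.0.1:26656 <nil>

	// ...and optional here, e.g. for our own listen address.
	_, err = p2p.NewNetAddressStringWithOptionalID("127.0.0.1:26656")
	fmt.Println(err) // <nil>

	// A bare address is rejected by the strict constructor.
	_, err = p2p.NewNetAddressString("127.0.0.1:26656")
	fmt.Println(err) // Address (127.0.0.1:26656) does not contain ID
}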
diff --git a/vendor/github.com/tendermint/tendermint/p2p/node_info.go b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
new file mode 100644
index 00000000..346de37d
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/node_info.go
@@ -0,0 +1,139 @@
+package p2p
+
+import (
+	"fmt"
+	"strings"
+
+	crypto "github.com/tendermint/go-crypto"
+)
+
+const (
+	maxNodeInfoSize = 10240 // 10Kb
+	maxNumChannels  = 16    // plenty of room for upgrades, for now
+)
+
+func MaxNodeInfoSize() int {
+	return maxNodeInfoSize
+}
+
+// NodeInfo is the basic node information exchanged
+// between two peers during the Tendermint P2P handshake.
+type NodeInfo struct {
+	// Authenticate
+	PubKey     crypto.PubKey `json:"pub_key"`     // authenticated pubkey
+	ListenAddr string        `json:"listen_addr"` // accepting incoming
+
+	// Check compatibility
+	Network  string `json:"network"`  // network/chain ID
+	Version  string `json:"version"`  // major.minor.revision
+	Channels []byte `json:"channels"` // channels this node knows about
+
+	// Sanitize
+	Moniker string   `json:"moniker"` // arbitrary moniker
+	Other   []string `json:"other"`   // other application specific data
+}
+
+// Validate checks the self-reported NodeInfo is safe.
+// It returns an error if there
+// are too many Channels or any duplicate Channels.
+// TODO: constraints for Moniker/Other? Or is that for the UI ?
+func (info NodeInfo) Validate() error {
+	if len(info.Channels) > maxNumChannels {
+		return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels)
+	}
+
+	channels := make(map[byte]struct{})
+	for _, ch := range info.Channels {
+		_, ok := channels[ch]
+		if ok {
+			return fmt.Errorf("info.Channels contains duplicate channel id %v", ch)
+		}
+		channels[ch] = struct{}{}
+	}
+	return nil
+}
+
+// CompatibleWith checks if two NodeInfo are compatible with each other.
+// CONTRACT: two nodes are compatible if their major/minor versions match, their networks match,
+// and they have at least one channel in common.
+func (info NodeInfo) CompatibleWith(other NodeInfo) error {
+	iMajor, iMinor, _, iErr := splitVersion(info.Version)
+	oMajor, oMinor, _, oErr := splitVersion(other.Version)
+
+	// if our own version number is not formatted right, we messed up
+	if iErr != nil {
+		return iErr
+	}
+
+	// version number must be formatted correctly ("x.x.x")
+	if oErr != nil {
+		return oErr
+	}
+
+	// major version must match
+	if iMajor != oMajor {
+		return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor)
+	}
+
+	// minor version must match
+	if iMinor != oMinor {
+		return fmt.Errorf("Peer is on a different minor version. Got %v, expected %v", oMinor, iMinor)
+	}
+
+	// nodes must be on the same network
+	if info.Network != other.Network {
+		return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network)
+	}
+
+	// if we have no channels, we're just testing
+	if len(info.Channels) == 0 {
+		return nil
+	}
+
+	// for each of our channels, check if they have it
+	found := false
+OUTER_LOOP:
+	for _, ch1 := range info.Channels {
+		for _, ch2 := range other.Channels {
+			if ch1 == ch2 {
+				found = true
+				break OUTER_LOOP // only need one
+			}
+		}
+	}
+	if !found {
+		return fmt.Errorf("Peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels)
+	}
+	return nil
+}
+
+// ID returns node's ID.
+func (info NodeInfo) ID() ID {
+	return PubKeyToID(info.PubKey)
+}
+
+// NetAddress returns a NetAddress derived from the NodeInfo -
+// it includes the authenticated peer ID and the self-reported
+// ListenAddr. Note that the ListenAddr is not authenticated and
+// may not match the address actually dialed if it's an outbound peer.
+func (info NodeInfo) NetAddress() *NetAddress {
+	id := PubKeyToID(info.PubKey)
+	addr := info.ListenAddr
+	netAddr, err := NewNetAddressString(IDAddressString(id, addr))
+	if err != nil {
+		panic(err) // everything should be well formed by now
+	}
+	return netAddr
+}
+
+func (info NodeInfo) String() string {
+	return fmt.Sprintf("NodeInfo{pk: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", info.PubKey, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other)
+}
+
+func splitVersion(version string) (string, string, string, error) {
+	spl := strings.Split(version, ".")
+	if len(spl) != 3 {
+		return "", "", "", fmt.Errorf("Invalid version format %v", version)
+	}
+	return spl[0], spl[1], spl[2], nil
+}
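A small sketch of the compatibility contract above (illustrative; the network name, version strings, and channel IDs are arbitrary): major and minor versions must agree, the patch version may differ, the networks must match, and the peers need at least one channel in common.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	us := p2p.NodeInfo{
		Network:  "test-chain",
		Version:  "0.19.0",
		Channels: []byte{0x40, 0x41},
	}
	peerOK := p2p.NodeInfo{Network: "test-chain", Version: "0.19.2", Channels: []byte{0x41}}
	peerBad := p2p.NodeInfo{Network: "test-chain", Version: "0.18.0", Channels: []byte{0x41}}

	fmt.Println(us.CompatibleWith(peerOK))  // <nil>: only the patch version differs
	fmt.Println(us.CompatibleWith(peerBad)) // error: different minor version
}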
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer.go b/vendor/github.com/tendermint/tendermint/p2p/peer.go
index cc7f4927..0fa7ca03 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer.go
@@ -11,17 +11,19 @@ import (
 	wire "github.com/tendermint/go-wire"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"
+
+	tmconn "github.com/tendermint/tendermint/p2p/conn"
 )
 
 // Peer is an interface representing a peer connected on a reactor.
 type Peer interface {
 	cmn.Service
 
-	Key() string
-	IsOutbound() bool
-	IsPersistent() bool
-	NodeInfo() *NodeInfo
-	Status() ConnectionStatus
+	ID() ID             // peer's cryptographic ID
+	IsOutbound() bool   // did we dial the peer
+	IsPersistent() bool // do we redial this peer when we disconnect
+	NodeInfo() NodeInfo // peer's info
+	Status() tmconn.ConnectionStatus
 
 	Send(byte, interface{}) bool
 	TrySend(byte, interface{}) bool
@@ -30,24 +32,55 @@ type Peer interface {
 	Get(string) interface{}
 }
 
-// Peer could be marked as persistent, in which case you can use
-// Redial function to reconnect. Note that inbound peers can't be
-// made persistent. They should be made persistent on the other end.
+//----------------------------------------------------------
+
+// peerConn contains the raw connection and its config.
+type peerConn struct {
+	outbound   bool
+	persistent bool
+	config     *PeerConfig
+	conn       net.Conn // source connection
+}
+
+// ID only exists for SecretConnection.
+// NOTE: Will panic if conn is not *SecretConnection.
+func (pc peerConn) ID() ID {
+	return PubKeyToID(pc.conn.(*tmconn.SecretConnection).RemotePubKey())
+}
+
+// peer implements Peer.
 //
 // Before using a peer, you will need to perform a handshake on connection.
 type peer struct {
 	cmn.BaseService
 
-	outbound bool
+	// raw peerConn and the multiplex connection
+	peerConn
+	mconn *tmconn.MConnection
 
-	conn  net.Conn     // source connection
-	mconn *MConnection // multiplex connection
+	// peer's node info and the channels it knows about
+	// channels = nodeInfo.Channels
+	// cached to avoid copying nodeInfo in hasChannel
+	nodeInfo NodeInfo
+	channels []byte
 
-	persistent bool
-	config     *PeerConfig
+	// User data
+	Data *cmn.CMap
+}
 
-	nodeInfo *NodeInfo
-	Data     *cmn.CMap // User data.
+func newPeer(pc peerConn, nodeInfo NodeInfo,
+	reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor,
+	onPeerError func(Peer, interface{})) *peer {
+
+	p := &peer{
+		peerConn: pc,
+		nodeInfo: nodeInfo,
+		channels: nodeInfo.Channels,
+		Data:     cmn.NewCMap(),
+	}
+	p.mconn = createMConnection(pc.conn, p, reactorsByCh, chDescs, onPeerError, pc.config.MConfig)
+	p.BaseService = *cmn.NewBaseService(nil, "Peer", p)
+	return p
 }
 
 // PeerConfig is a Peer configuration.
@@ -58,7 +91,7 @@ type PeerConfig struct {
 	HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
 	DialTimeout      time.Duration `mapstructure:"dial_timeout"`
 
-	MConfig *MConnConfig `mapstructure:"connection"`
+	MConfig *tmconn.MConnConfig `mapstructure:"connection"`
 
 	Fuzz       bool            `mapstructure:"fuzz"` // fuzz connection (for testing)
 	FuzzConfig *FuzzConnConfig `mapstructure:"fuzz_config"`
@@ -70,38 +103,48 @@ func DefaultPeerConfig() *PeerConfig {
 		AuthEnc:          true,
 		HandshakeTimeout: 20, // * time.Second,
 		DialTimeout:      3,  // * time.Second,
-		MConfig:          DefaultMConnConfig(),
+		MConfig:          tmconn.DefaultMConnConfig(),
 		Fuzz:             false,
 		FuzzConfig:       DefaultFuzzConnConfig(),
 	}
 }
 
-func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
-	onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
+func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool, ourNodePrivKey crypto.PrivKey) (peerConn, error) {
+	var pc peerConn
 
 	conn, err := dial(addr, config)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error creating peer")
+		return pc, errors.Wrap(err, "Error creating peer")
 	}
 
-	peer, err := newPeerFromConnAndConfig(conn, true, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
+	pc, err = newPeerConn(conn, config, true, persistent, ourNodePrivKey)
 	if err != nil {
-		if err := conn.Close(); err != nil {
-			return nil, err
+		if err2 := conn.Close(); err2 != nil {
+			return pc, errors.Wrap(err, err2.Error())
 		}
-		return nil, err
+		return pc, err
 	}
-	return peer, nil
+
+	// ensure dialed ID matches connection ID
+	if config.AuthEnc && addr.ID != pc.ID() {
+		if err2 := conn.Close(); err2 != nil {
+			return pc, errors.Wrap(err, err2.Error())
+		}
+		return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()}
+	}
+	return pc, nil
 }
 
-func newInboundPeer(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
-	onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
+func newInboundPeerConn(conn net.Conn, config *PeerConfig, ourNodePrivKey crypto.PrivKey) (peerConn, error) {
 
-	return newPeerFromConnAndConfig(conn, false, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
+	// TODO: issue PoW challenge
+
+	return newPeerConn(conn, config, false, false, ourNodePrivKey)
 }
 
-func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
-	onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
+func newPeerConn(rawConn net.Conn,
+	config *PeerConfig, outbound, persistent bool,
+	ourNodePrivKey crypto.PrivKey) (pc peerConn, err error) {
 
 	conn := rawConn
 
@@ -111,121 +154,37 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[
 		conn = FuzzConnAfterFromConfig(conn, 10*time.Second, config.FuzzConfig)
 	}
 
-	// Encrypt connection
 	if config.AuthEnc {
+		// Set deadline for secret handshake
 		if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
-			return nil, errors.Wrap(err, "Error setting deadline while encrypting connection")
+			return pc, errors.Wrap(err, "Error setting deadline while encrypting connection")
 		}
 
-		var err error
-		conn, err = MakeSecretConnection(conn, ourNodePrivKey)
+		// Encrypt connection
+		conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
 		if err != nil {
-			return nil, errors.Wrap(err, "Error creating peer")
+			return pc, errors.Wrap(err, "Error creating peer")
 		}
 	}
 
-	// Key and NodeInfo are set after Handshake
-	p := &peer{
-		outbound: outbound,
-		conn:     conn,
-		config:   config,
-		Data:     cmn.NewCMap(),
-	}
-
-	p.mconn = createMConnection(conn, p, reactorsByCh, chDescs, onPeerError, config.MConfig)
-
-	p.BaseService = *cmn.NewBaseService(nil, "Peer", p)
-
-	return p, nil
+	// Only the information we already have
+	return peerConn{
+		config:     config,
+		outbound:   outbound,
+		persistent: persistent,
+		conn:       conn,
+	}, nil
 }
 
+//---------------------------------------------------
+// Implements cmn.Service
+
+// SetLogger implements BaseService.
 func (p *peer) SetLogger(l log.Logger) {
 	p.Logger = l
 	p.mconn.SetLogger(l)
 }
 
-// CloseConn should be used when the peer was created, but never started.
-func (p *peer) CloseConn() {
-	p.conn.Close() // nolint: errcheck
-}
-
-// makePersistent marks the peer as persistent.
-func (p *peer) makePersistent() {
-	if !p.outbound {
-		panic("inbound peers can't be made persistent")
-	}
-
-	p.persistent = true
-}
-
-// IsPersistent returns true if the peer is persitent, false otherwise.
-func (p *peer) IsPersistent() bool {
-	return p.persistent
-}
-
-// HandshakeTimeout performs a handshake between a given node and the peer.
-// NOTE: blocking
-func (p *peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
-	// Set deadline for handshake so we don't block forever on conn.ReadFull
-	if err := p.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
-		return errors.Wrap(err, "Error setting deadline")
-	}
-
-	var peerNodeInfo = new(NodeInfo)
-	var err1 error
-	var err2 error
-	cmn.Parallel(
-		func() {
-			var n int
-			wire.WriteBinary(ourNodeInfo, p.conn, &n, &err1)
-		},
-		func() {
-			var n int
-			wire.ReadBinary(peerNodeInfo, p.conn, maxNodeInfoSize, &n, &err2)
-			p.Logger.Info("Peer handshake", "peerNodeInfo", peerNodeInfo)
-		})
-	if err1 != nil {
-		return errors.Wrap(err1, "Error during handshake/write")
-	}
-	if err2 != nil {
-		return errors.Wrap(err2, "Error during handshake/read")
-	}
-
-	if p.config.AuthEnc {
-		// Check that the professed PubKey matches the sconn's.
-		if !peerNodeInfo.PubKey.Equals(p.PubKey().Wrap()) {
-			return fmt.Errorf("Ignoring connection with unmatching pubkey: %v vs %v",
-				peerNodeInfo.PubKey, p.PubKey())
-		}
-	}
-
-	// Remove deadline
-	if err := p.conn.SetDeadline(time.Time{}); err != nil {
-		return errors.Wrap(err, "Error removing deadline")
-	}
-
-	peerNodeInfo.RemoteAddr = p.Addr().String()
-
-	p.nodeInfo = peerNodeInfo
-	return nil
-}
-
-// Addr returns peer's remote network address.
-func (p *peer) Addr() net.Addr {
-	return p.conn.RemoteAddr()
-}
-
-// PubKey returns peer's public key.
-func (p *peer) PubKey() crypto.PubKeyEd25519 {
-	if p.config.AuthEnc {
-		return p.conn.(*SecretConnection).RemotePubKey()
-	}
-	if p.NodeInfo() == nil {
-		panic("Attempt to get peer's PubKey before calling Handshake")
-	}
-	return p.PubKey()
-}
-
 // OnStart implements BaseService.
 func (p *peer) OnStart() error {
 	if err := p.BaseService.OnStart(); err != nil {
@@ -238,17 +197,35 @@ func (p *peer) OnStart() error {
 // OnStop implements BaseService.
 func (p *peer) OnStop() {
 	p.BaseService.OnStop()
-	p.mconn.Stop()
+	p.mconn.Stop() // stop everything and close the conn
 }
 
-// Connection returns underlying MConnection.
-func (p *peer) Connection() *MConnection {
-	return p.mconn
+//---------------------------------------------------
+// Implements Peer
+
+// ID returns the peer's ID - the hex encoded hash of its pubkey.
+func (p *peer) ID() ID {
+	return p.nodeInfo.ID()
 }
 
 // IsOutbound returns true if the connection is outbound, false otherwise.
 func (p *peer) IsOutbound() bool {
-	return p.outbound
+	return p.peerConn.outbound
+}
+
+// IsPersistent returns true if the peer is persistent, false otherwise.
+func (p *peer) IsPersistent() bool {
+	return p.peerConn.persistent
+}
+
+// NodeInfo returns a copy of the peer's NodeInfo.
+func (p *peer) NodeInfo() NodeInfo {
+	return p.nodeInfo
+}
+
+// Status returns the peer's ConnectionStatus.
+func (p *peer) Status() tmconn.ConnectionStatus {
+	return p.mconn.Status()
 }
 
 // Send msg to the channel identified by chID byte. Returns false if the send
@@ -258,6 +235,8 @@ func (p *peer) Send(chID byte, msg interface{}) bool {
 		// see Switch#Broadcast, where we fetch the list of peers and loop over
 		// them - while we're looping, one peer may be removed and stopped.
 		return false
+	} else if !p.hasChannel(chID) {
+		return false
 	}
 	return p.mconn.Send(chID, msg)
 }
@@ -267,30 +246,10 @@ func (p *peer) Send(chID byte, msg interface{}) bool {
 func (p *peer) TrySend(chID byte, msg interface{}) bool {
 	if !p.IsRunning() {
 		return false
-	}
-	return p.mconn.TrySend(chID, msg)
-}
-
-// CanSend returns true if the send queue is not full, false otherwise.
-func (p *peer) CanSend(chID byte) bool {
-	if !p.IsRunning() {
+	} else if !p.hasChannel(chID) {
 		return false
 	}
-	return p.mconn.CanSend(chID)
-}
-
-// String representation.
-func (p *peer) String() string {
-	if p.outbound {
-		return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.Key())
-	}
-
-	return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.Key())
-}
-
-// Equals reports whenever 2 peers are actually represent the same node.
-func (p *peer) Equals(other Peer) bool {
-	return p.Key() == other.Key()
+	return p.mconn.TrySend(chID, msg)
 }
 
 // Get the data for a given key.
@@ -303,25 +262,86 @@ func (p *peer) Set(key string, data interface{}) {
 	p.Data.Set(key, data)
 }
 
-// Key returns the peer's id key.
-func (p *peer) Key() string {
-	return p.nodeInfo.ListenAddr // XXX: should probably be PubKey.KeyString()
+// hasChannel returns true if the peer reported
+// knowing about the given chID.
+func (p *peer) hasChannel(chID byte) bool {
+	for _, ch := range p.channels {
+		if ch == chID {
+			return true
+		}
+	}
+	// NOTE: probably will want to remove this
+	// but could be helpful while the feature is new
+	p.Logger.Debug("Unknown channel for peer", "channel", chID, "channels", p.channels)
+	return false
 }
 
-// NodeInfo returns a copy of the peer's NodeInfo.
-func (p *peer) NodeInfo() *NodeInfo {
-	if p.nodeInfo == nil {
-		return nil
+//---------------------------------------------------
+// methods used by the Switch
+
+// CloseConn should be called by the Switch if the peer was created but never started.
+func (pc *peerConn) CloseConn() {
+	pc.conn.Close() // nolint: errcheck
+}
+
+// HandshakeTimeout performs the Tendermint P2P handshake between a given node and the peer
+// by exchanging their NodeInfo. It sets the received nodeInfo on the peer.
+// NOTE: blocking
+func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration) (peerNodeInfo NodeInfo, err error) {
+	// Set deadline for handshake so we don't block forever on conn.ReadFull
+	if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
+		return peerNodeInfo, errors.Wrap(err, "Error setting deadline")
+	}
+
+	var trs, _ = cmn.Parallel(
+		func(_ int) (val interface{}, err error, abort bool) {
+			var n int
+			wire.WriteBinary(&ourNodeInfo, pc.conn, &n, &err)
+			return
+		},
+		func(_ int) (val interface{}, err error, abort bool) {
+			var n int
+			wire.ReadBinary(&peerNodeInfo, pc.conn, MaxNodeInfoSize(), &n, &err)
+			return
+		},
+	)
+	if err := trs.FirstError(); err != nil {
+		return peerNodeInfo, errors.Wrap(err, "Error during handshake")
 	}
-	n := *p.nodeInfo // copy
-	return &n
+
+	// Remove deadline
+	if err := pc.conn.SetDeadline(time.Time{}); err != nil {
+		return peerNodeInfo, errors.Wrap(err, "Error removing deadline")
+	}
+
+	return peerNodeInfo, nil
 }
 
-// Status returns the peer's ConnectionStatus.
-func (p *peer) Status() ConnectionStatus {
-	return p.mconn.Status()
+// Addr returns peer's remote network address.
+func (p *peer) Addr() net.Addr {
+	return p.peerConn.conn.RemoteAddr()
 }
 
+// CanSend returns true if the send queue is not full, false otherwise.
+func (p *peer) CanSend(chID byte) bool {
+	if !p.IsRunning() {
+		return false
+	}
+	return p.mconn.CanSend(chID)
+}
+
+// String representation.
+func (p *peer) String() string {
+	if p.outbound {
+		return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID())
+	}
+
+	return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID())
+}
+
+//------------------------------------------------------------------
+// helper funcs
+
 func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
 	conn, err := addr.DialTimeout(config.DialTimeout * time.Second)
 	if err != nil {
@@ -330,13 +350,15 @@ func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
 	return conn, nil
 }
 
-func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
-	onPeerError func(Peer, interface{}), config *MConnConfig) *MConnection {
+func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor,
+	onPeerError func(Peer, interface{}), config *tmconn.MConnConfig) *tmconn.MConnection {
 
 	onReceive := func(chID byte, msgBytes []byte) {
 		reactor := reactorsByCh[chID]
 		if reactor == nil {
-			cmn.PanicSanity(cmn.Fmt("Unknown channel %X", chID))
+			// Note that it's ok to panic here as it's caught in the conn._recover,
+			// which does onPeerError.
+			panic(cmn.Fmt("Unknown channel %X", chID))
 		}
 		reactor.Receive(chID, p, msgBytes)
 	}
@@ -345,5 +367,5 @@ func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, ch
 		onPeerError(p, r)
 	}
 
-	return NewMConnectionWithConfig(conn, chDescs, onReceive, onError, config)
+	return tmconn.NewMConnectionWithConfig(conn, chDescs, onReceive, onError, config)
 }
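Peer construction is now split into three steps: an authenticated peerConn (dial plus optional SecretConnection), the NodeInfo handshake, and finally newPeer, which wires up the multiplex connection. A rough in-package sketch of the call order the Switch would follow (the helper name, its parameters, and the error handling are assumptions, not part of this patch):

package p2p

import (
	"time"

	tmconn "github.com/tendermint/tendermint/p2p/conn"
)

// dialAndHandshake is a hypothetical helper showing the call order only.
func dialAndHandshake(addr *NetAddress, cfg *PeerConfig, nodeKey *NodeKey,
	ourInfo NodeInfo, reactorsByCh map[byte]Reactor,
	chDescs []*tmconn.ChannelDescriptor,
	onPeerError func(Peer, interface{})) (*peer, error) {

	// 1. Dial, optionally encrypt, and verify the dialed ID.
	pc, err := newOutboundPeerConn(addr, cfg, false, nodeKey.PrivKey)
	if err != nil {
		return nil, err
	}

	// 2. Exchange NodeInfo (the handshake no longer lives on the peer itself).
	peerInfo, err := pc.HandshakeTimeout(ourInfo, cfg.HandshakeTimeout*time.Second)
	if err != nil {
		pc.CloseConn()
		return nil, err
	}

	// 3. Build the peer and its MConnection from the conn and the NodeInfo.
	return newPeer(pc, peerInfo, reactorsByCh, chDescs, onPeerError), nil
}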
diff --git a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
index c21748cf..a4565ea1 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/peer_set.go
@@ -6,8 +6,8 @@ import (
 
 // IPeerSet has a (immutable) subset of the methods of PeerSet.
 type IPeerSet interface {
-	Has(key string) bool
-	Get(key string) Peer
+	Has(key ID) bool
+	Get(key ID) Peer
 	List() []Peer
 	Size() int
 }
@@ -18,7 +18,7 @@ type IPeerSet interface {
 // Iteration over the peers is super fast and thread-safe.
 type PeerSet struct {
 	mtx    sync.Mutex
-	lookup map[string]*peerSetItem
+	lookup map[ID]*peerSetItem
 	list   []Peer
 }
 
@@ -30,7 +30,7 @@ type peerSetItem struct {
 // NewPeerSet creates a new peerSet with a list of initial capacity of 256 items.
 func NewPeerSet() *PeerSet {
 	return &PeerSet{
-		lookup: make(map[string]*peerSetItem),
+		lookup: make(map[ID]*peerSetItem),
 		list:   make([]Peer, 0, 256),
 	}
 }
@@ -40,7 +40,7 @@ func NewPeerSet() *PeerSet {
 func (ps *PeerSet) Add(peer Peer) error {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
-	if ps.lookup[peer.Key()] != nil {
+	if ps.lookup[peer.ID()] != nil {
 		return ErrSwitchDuplicatePeer
 	}
 
@@ -48,13 +48,13 @@ func (ps *PeerSet) Add(peer Peer) error {
 	// Appending is safe even with other goroutines
 	// iterating over the ps.list slice.
 	ps.list = append(ps.list, peer)
-	ps.lookup[peer.Key()] = &peerSetItem{peer, index}
+	ps.lookup[peer.ID()] = &peerSetItem{peer, index}
 	return nil
 }
 
 // Has returns true iff the PeerSet contains
 // the peer referred to by this peerKey.
-func (ps *PeerSet) Has(peerKey string) bool {
+func (ps *PeerSet) Has(peerKey ID) bool {
 	ps.mtx.Lock()
 	_, ok := ps.lookup[peerKey]
 	ps.mtx.Unlock()
@@ -62,22 +62,21 @@ func (ps *PeerSet) Has(peerKey string) bool {
 }
 
 // Get looks up a peer by the provided peerKey.
-func (ps *PeerSet) Get(peerKey string) Peer {
+func (ps *PeerSet) Get(peerKey ID) Peer {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 	item, ok := ps.lookup[peerKey]
 	if ok {
 		return item.peer
-	} else {
-		return nil
 	}
+	return nil
 }
 
 // Remove discards peer by its Key, if the peer was previously memoized.
 func (ps *PeerSet) Remove(peer Peer) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
-	item := ps.lookup[peer.Key()]
+	item := ps.lookup[peer.ID()]
 	if item == nil {
 		return
 	}
@@ -90,18 +89,18 @@ func (ps *PeerSet) Remove(peer Peer) {
 	// If it's the last peer, that's an easy special case.
 	if index == len(ps.list)-1 {
 		ps.list = newList
-		delete(ps.lookup, peer.Key())
+		delete(ps.lookup, peer.ID())
 		return
 	}
 
 	// Replace the popped item with the last item in the old list.
 	lastPeer := ps.list[len(ps.list)-1]
-	lastPeerKey := lastPeer.Key()
+	lastPeerKey := lastPeer.ID()
 	lastPeerItem := ps.lookup[lastPeerKey]
 	newList[index] = lastPeer
 	lastPeerItem.index = index
 	ps.list = newList
-	delete(ps.lookup, peer.Key())
+	delete(ps.lookup, peer.ID())
 }
 
 // Size returns the number of unique items in the peerSet.
diff --git a/vendor/github.com/tendermint/tendermint/p2p/addrbook.go b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
similarity index 56%
rename from vendor/github.com/tendermint/tendermint/p2p/addrbook.go
rename to vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
index 8f924d12..a8462f37 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/addrbook.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/addrbook.go
@@ -2,94 +2,89 @@
 // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
 // https://github.com/conformal/btcd/blob/master/LICENSE
 
-package p2p
+package pex
 
 import (
+	"crypto/sha256"
 	"encoding/binary"
-	"encoding/json"
 	"fmt"
 	"math"
 	"math/rand"
 	"net"
-	"os"
 	"sync"
 	"time"
 
 	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/p2p"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
 const (
-	// addresses under which the address manager will claim to need more addresses.
-	needAddressThreshold = 1000
-
-	// interval used to dump the address cache to disk for future use.
-	dumpAddressInterval = time.Minute * 2
-
-	// max addresses in each old address bucket.
-	oldBucketSize = 64
-
-	// buckets we split old addresses over.
-	oldBucketCount = 64
-
-	// max addresses in each new address bucket.
-	newBucketSize = 64
+	bucketTypeNew = 0x01
+	bucketTypeOld = 0x02
+)
 
-	// buckets that we spread new addresses over.
-	newBucketCount = 256
+// AddrBook is an address book used for tracking peers
+// so we can gossip about them to others and select
+// peers to dial.
+// TODO: break this up?
+type AddrBook interface {
+	cmn.Service
 
-	// old buckets over which an address group will be spread.
-	oldBucketsPerGroup = 4
+	// Add our own addresses so we don't later add ourselves
+	AddOurAddress(*p2p.NetAddress)
+	// Check if it is our address
+	OurAddress(*p2p.NetAddress) bool
 
-	// new buckets over which a source address group will be spread.
-	newBucketsPerGroup = 32
+	// Add and remove an address
+	AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error
+	RemoveAddress(*p2p.NetAddress)
 
-	// buckets a frequently seen new address may end up in.
-	maxNewBucketsPerAddress = 4
+	// Check if the address is in the book
+	HasAddress(*p2p.NetAddress) bool
 
-	// days before which we assume an address has vanished
-	// if we have not seen it announced in that long.
-	numMissingDays = 30
+	// Do we need more peers?
+	NeedMoreAddrs() bool
 
-	// tries without a single success before we assume an address is bad.
-	numRetries = 3
+	// Pick an address to dial
+	PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress
 
-	// max failures we will accept without a success before considering an address bad.
-	maxFailures = 10
+	// Mark address
+	MarkGood(*p2p.NetAddress)
+	MarkAttempt(*p2p.NetAddress)
+	MarkBad(*p2p.NetAddress)
 
-	// days since the last success before we will consider evicting an address.
-	minBadDays = 7
+	IsGood(*p2p.NetAddress) bool
 
-	// % of total addresses known returned by GetSelection.
-	getSelectionPercent = 23
+	// Send a selection of addresses to peers
+	GetSelection() []*p2p.NetAddress
+	// Send a selection of addresses with bias
+	GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress
 
-	// min addresses that must be returned by GetSelection. Useful for bootstrapping.
-	minGetSelection = 32
+	// TODO: remove
+	ListOfKnownAddresses() []*knownAddress
 
-	// max addresses returned by GetSelection
-	// NOTE: this must match "maxPexMessageSize"
-	maxGetSelection = 250
-)
+	// Persist to disk
+	Save()
+}
 
-const (
-	bucketTypeNew = 0x01
-	bucketTypeOld = 0x02
-)
+var _ AddrBook = (*addrBook)(nil)
 
-// AddrBook - concurrency safe peer address manager.
-type AddrBook struct {
+// addrBook - concurrency safe peer address manager.
+// Implements AddrBook.
+type addrBook struct {
 	cmn.BaseService
 
 	// immutable after creation
 	filePath          string
 	routabilityStrict bool
-	key               string
+	key               string // random prefix for bucket placement
 
 	// accessed concurrently
 	mtx        sync.Mutex
 	rand       *rand.Rand
-	ourAddrs   map[string]*NetAddress
-	addrLookup map[string]*knownAddress // new & old
+	ourAddrs   map[string]struct{}
+	addrLookup map[p2p.ID]*knownAddress // new & old
 	bucketsOld []map[string]*knownAddress
 	bucketsNew []map[string]*knownAddress
 	nOld       int
@@ -100,11 +95,11 @@ type AddrBook struct {
 
 // NewAddrBook creates a new address book.
 // Use Start to begin processing asynchronous address updates.
-func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
-	am := &AddrBook{
-		rand:              rand.New(rand.NewSource(time.Now().UnixNano())),
-		ourAddrs:          make(map[string]*NetAddress),
-		addrLookup:        make(map[string]*knownAddress),
+func NewAddrBook(filePath string, routabilityStrict bool) *addrBook {
+	am := &addrBook{
+		rand:              rand.New(rand.NewSource(time.Now().UnixNano())), // TODO: seed from outside
+		ourAddrs:          make(map[string]struct{}),
+		addrLookup:        make(map[p2p.ID]*knownAddress),
 		filePath:          filePath,
 		routabilityStrict: routabilityStrict,
 	}
@@ -113,8 +108,9 @@ func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
 	return am
 }
 
+// Initialize the buckets.
 // When modifying this, don't forget to update loadFromFile()
-func (a *AddrBook) init() {
+func (a *addrBook) init() {
 	a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
 	// New addr buckets
 	a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
@@ -129,7 +125,7 @@ func (a *AddrBook) init() {
 }
 
 // OnStart implements Service.
-func (a *AddrBook) OnStart() error {
+func (a *addrBook) OnStart() error {
 	if err := a.BaseService.OnStart(); err != nil {
 		return err
 	}
@@ -144,78 +140,100 @@ func (a *AddrBook) OnStart() error {
 }
 
 // OnStop implements Service.
-func (a *AddrBook) OnStop() {
+func (a *addrBook) OnStop() {
 	a.BaseService.OnStop()
 }
 
-func (a *AddrBook) Wait() {
+func (a *addrBook) Wait() {
 	a.wg.Wait()
 }
 
-// AddOurAddress adds another one of our addresses.
-func (a *AddrBook) AddOurAddress(addr *NetAddress) {
+func (a *addrBook) FilePath() string {
+	return a.filePath
+}
+
+//-------------------------------------------------------
+
+// AddOurAddress adds one of our addresses.
+func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 	a.Logger.Info("Add our address to book", "addr", addr)
-	a.ourAddrs[addr.String()] = addr
+	a.ourAddrs[addr.String()] = struct{}{}
 }
 
-// OurAddresses returns a list of our addresses.
-func (a *AddrBook) OurAddresses() []*NetAddress {
-	addrs := []*NetAddress{}
-	for _, addr := range a.ourAddrs {
-		addrs = append(addrs, addr)
-	}
-	return addrs
+// OurAddress returns true if it is our address.
+func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	_, ok := a.ourAddrs[addr.String()]
+	a.mtx.Unlock()
+	return ok
 }
 
-// AddAddress adds the given address as received from the given source.
+// AddAddress implements AddrBook - adds the given address as received from the given source.
 // NOTE: addr must not be nil
-func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) error {
+func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 	return a.addAddress(addr, src)
 }
 
-// NeedMoreAddrs returns true if there are not have enough addresses in the book.
-func (a *AddrBook) NeedMoreAddrs() bool {
-	return a.Size() < needAddressThreshold
+// RemoveAddress implements AddrBook - removes the address from the book.
+func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	ka := a.addrLookup[addr.ID]
+	if ka == nil {
+		return
+	}
+	a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID)
+	a.removeFromAllBuckets(ka)
 }
 
-// Size returns the number of addresses in the book.
-func (a *AddrBook) Size() int {
+// IsGood returns true if the peer was ever marked as good and hasn't
+// done anything wrong since then.
+func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
-	return a.size()
+	return a.addrLookup[addr.ID].isOld()
 }
 
-func (a *AddrBook) size() int {
-	return a.nNew + a.nOld
+// HasAddress returns true if the address is in the book.
+func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	ka := a.addrLookup[addr.ID]
+	return ka != nil
 }
 
-// PickAddress picks an address to connect to.
+// NeedMoreAddrs implements AddrBook - returns true if there are not enough addresses in the book.
+func (a *addrBook) NeedMoreAddrs() bool {
+	return a.Size() < needAddressThreshold
+}
+
+// PickAddress implements AddrBook. It picks an address to connect to.
 // The address is picked randomly from an old or new bucket according
-// to the newBias argument, which must be between [0, 100] (or else is truncated to that range)
+// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
 // and determines how biased we are to pick an address from a new bucket.
 // PickAddress returns nil if the AddrBook is empty or if we try to pick
 // from an empty bucket.
-func (a *AddrBook) PickAddress(newBias int) *NetAddress {
+func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 
 	if a.size() == 0 {
 		return nil
 	}
-	if newBias > 100 {
-		newBias = 100
+	if biasTowardsNewAddrs > 100 {
+		biasTowardsNewAddrs = 100
 	}
-	if newBias < 0 {
-		newBias = 0
+	if biasTowardsNewAddrs < 0 {
+		biasTowardsNewAddrs = 0
 	}
 
 	// Bias between new and old addresses.
-	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
-	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
+	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
+	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)
 
 	// pick a random peer from a random bucket
 	var bucket map[string]*knownAddress
@@ -243,12 +261,12 @@ func (a *AddrBook) PickAddress(newBias int) *NetAddress {
 	return nil
 }
 
-// MarkGood marks the peer as good and moves it into an "old" bucket.
-// XXX: we never call this!
-func (a *AddrBook) MarkGood(addr *NetAddress) {
+// MarkGood implements AddrBook - it marks the peer as good and
+// moves it into an "old" bucket.
+func (a *addrBook) MarkGood(addr *p2p.NetAddress) {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
-	ka := a.addrLookup[addr.String()]
+	ka := a.addrLookup[addr.ID]
 	if ka == nil {
 		return
 	}
@@ -258,39 +276,26 @@ func (a *AddrBook) MarkGood(addr *NetAddress) {
 	}
 }
 
-// MarkAttempt marks that an attempt was made to connect to the address.
-func (a *AddrBook) MarkAttempt(addr *NetAddress) {
+// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address.
+func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
-	ka := a.addrLookup[addr.String()]
+	ka := a.addrLookup[addr.ID]
 	if ka == nil {
 		return
 	}
 	ka.markAttempt()
 }
 
-// MarkBad currently just ejects the address. In the future, consider
-// blacklisting.
-func (a *AddrBook) MarkBad(addr *NetAddress) {
+// MarkBad implements AddrBook. Currently it just ejects the address.
+// TODO: black list for some amount of time
+func (a *addrBook) MarkBad(addr *p2p.NetAddress) {
 	a.RemoveAddress(addr)
 }
 
-// RemoveAddress removes the address from the book.
-func (a *AddrBook) RemoveAddress(addr *NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-	ka := a.addrLookup[addr.String()]
-	if ka == nil {
-		return
-	}
-	a.Logger.Info("Remove address from book", "addr", addr)
-	a.removeFromAllBuckets(ka)
-}
-
-/* Peer exchange */
-
-// GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
-func (a *AddrBook) GetSelection() []*NetAddress {
+// GetSelection implements AddrBook.
+// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
+func (a *addrBook) GetSelection() []*p2p.NetAddress {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 
@@ -298,10 +303,10 @@ func (a *AddrBook) GetSelection() []*NetAddress {
 		return nil
 	}
 
-	allAddr := make([]*NetAddress, a.size())
+	allAddr := make([]*p2p.NetAddress, a.size())
 	i := 0
-	for _, v := range a.addrLookup {
-		allAddr[i] = v.Addr
+	for _, ka := range a.addrLookup {
+		allAddr[i] = ka.Addr
 		i++
 	}
 
@@ -323,90 +328,133 @@ func (a *AddrBook) GetSelection() []*NetAddress {
 	return allAddr[:numAddresses]
 }
 
-/* Loading & Saving */
-
-type addrBookJSON struct {
-	Key   string
-	Addrs []*knownAddress
-}
-
-func (a *AddrBook) saveToFile(filePath string) {
-	a.Logger.Info("Saving AddrBook to file", "size", a.Size())
-
+// GetSelectionWithBias implements AddrBook.
+// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
+//
+// Each address is picked randomly from an old or new bucket according to the
+// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
+// that range) and determines how biased we are to pick an address from a new
+// bucket.
+func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
-	// Compile Addrs
-	addrs := []*knownAddress{}
-	for _, ka := range a.addrLookup {
-		addrs = append(addrs, ka)
-	}
 
-	aJSON := &addrBookJSON{
-		Key:   a.key,
-		Addrs: addrs,
+	if a.size() == 0 {
+		return nil
 	}
 
-	jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
-	if err != nil {
-		a.Logger.Error("Failed to save AddrBook to file", "err", err)
-		return
+	if biasTowardsNewAddrs > 100 {
+		biasTowardsNewAddrs = 100
 	}
-	err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
-	if err != nil {
-		a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
+	if biasTowardsNewAddrs < 0 {
+		biasTowardsNewAddrs = 0
 	}
-}
 
-// Returns false if file does not exist.
-// cmn.Panics if file is corrupt.
-func (a *AddrBook) loadFromFile(filePath string) bool {
-	// If doesn't exist, do nothing.
-	_, err := os.Stat(filePath)
-	if os.IsNotExist(err) {
-		return false
-	}
+	numAddresses := cmn.MaxInt(
+		cmn.MinInt(minGetSelection, a.size()),
+		a.size()*getSelectionPercent/100)
+	numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
 
-	// Load addrBookJSON{}
-	r, err := os.Open(filePath)
-	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
-	}
-	defer r.Close() // nolint: errcheck
-	aJSON := &addrBookJSON{}
-	dec := json.NewDecoder(r)
-	err = dec.Decode(aJSON)
-	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
-	}
+	selection := make([]*p2p.NetAddress, numAddresses)
+
+	oldBucketToAddrsMap := make(map[int]map[string]struct{})
+	var oldIndex int
+	newBucketToAddrsMap := make(map[int]map[string]struct{})
+	var newIndex int
+
+	selectionIndex := 0
+ADDRS_LOOP:
+	for selectionIndex < numAddresses {
+		pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs
+		pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0
+		bucket := make(map[string]*knownAddress)
+
+		// loop until we pick a random non-empty bucket
+		for len(bucket) == 0 {
+			if pickFromOldBucket {
+				oldIndex = a.rand.Intn(len(a.bucketsOld))
+				bucket = a.bucketsOld[oldIndex]
+			} else {
+				newIndex = a.rand.Intn(len(a.bucketsNew))
+				bucket = a.bucketsNew[newIndex]
+			}
+		}
+
+		// pick a random index
+		randIndex := a.rand.Intn(len(bucket))
 
-	// Restore all the fields...
-	// Restore the key
-	a.key = aJSON.Key
-	// Restore .bucketsNew & .bucketsOld
-	for _, ka := range aJSON.Addrs {
-		for _, bucketIndex := range ka.Buckets {
-			bucket := a.getBucket(ka.BucketType, bucketIndex)
-			bucket[ka.Addr.String()] = ka
+		// loop over the map to return that index
+		var selectedAddr *p2p.NetAddress
+		for _, ka := range bucket {
+			if randIndex == 0 {
+				selectedAddr = ka.Addr
+				break
+			}
+			randIndex--
 		}
-		a.addrLookup[ka.Addr.String()] = ka
-		if ka.BucketType == bucketTypeNew {
-			a.nNew++
+
+		// if we have selected the address before, restart the loop
+		// otherwise, record it and continue
+		if pickFromOldBucket {
+			if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok {
+				if _, ok = addrsMap[selectedAddr.String()]; ok {
+					continue ADDRS_LOOP
+				}
+			} else {
+				oldBucketToAddrsMap[oldIndex] = make(map[string]struct{})
+			}
+			oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{}
 		} else {
-			a.nOld++
+			if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok {
+				if _, ok = addrsMap[selectedAddr.String()]; ok {
+					continue ADDRS_LOOP
+				}
+			} else {
+				newBucketToAddrsMap[newIndex] = make(map[string]struct{})
+			}
+			newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{}
 		}
+
+		selection[selectionIndex] = selectedAddr
+		selectionIndex++
 	}
-	return true
+
+	return selection
 }
 
-// Save saves the book.
-func (a *AddrBook) Save() {
-	a.Logger.Info("Saving AddrBook to file", "size", a.Size())
-	a.saveToFile(a.filePath)
+// ListOfKnownAddresses returns the new and old addresses.
+func (a *addrBook) ListOfKnownAddresses() []*knownAddress {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+
+	addrs := []*knownAddress{}
+	for _, addr := range a.addrLookup {
+		addrs = append(addrs, addr.copy())
+	}
+	return addrs
 }
 
-/* Private methods */
+//------------------------------------------------
+
+// Size returns the number of addresses in the book.
+func (a *addrBook) Size() int {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	return a.size()
+}
+
+func (a *addrBook) size() int {
+	return a.nNew + a.nOld
+}
 
-func (a *AddrBook) saveRoutine() {
+//----------------------------------------------------------
+
+// Save persists the address book to disk.
+func (a *addrBook) Save() {
+	a.saveToFile(a.filePath) // thread safe
+}
+
+func (a *addrBook) saveRoutine() {
 	defer a.wg.Done()
 
 	saveFileTicker := time.NewTicker(dumpAddressInterval)
@@ -415,7 +463,7 @@ out:
 		select {
 		case <-saveFileTicker.C:
 			a.saveToFile(a.filePath)
-		case <-a.Quit:
+		case <-a.Quit():
 			break out
 		}
 	}
@@ -424,7 +472,9 @@ out:
 	a.Logger.Info("Address handler done")
 }
 
-func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
+//----------------------------------------------------------
+
+func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
 	switch bucketType {
 	case bucketTypeNew:
 		return a.bucketsNew[bucketIdx]
@@ -438,7 +488,7 @@ func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAd
 
 // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
 // NOTE: currently it always returns true.
-func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
+func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
 	// Sanity check
 	if ka.isOld() {
 		a.Logger.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
@@ -455,24 +505,25 @@ func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
 
 	// Enforce max addresses.
 	if len(bucket) > newBucketSize {
-		a.Logger.Info("new bucket is full, expiring old ")
+		a.Logger.Info("new bucket is full, expiring new")
 		a.expireNew(bucketIdx)
 	}
 
 	// Add to bucket.
 	bucket[addrStr] = ka
+	// increment nNew if the peer doesn't already exist in a bucket
 	if ka.addBucketRef(bucketIdx) == 1 {
 		a.nNew++
 	}
 
-	// Ensure in addrLookup
-	a.addrLookup[addrStr] = ka
+	// Add it to addrLookup
+	a.addrLookup[ka.ID()] = ka
 
 	return true
 }
 
 // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
-func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
+func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
 	// Sanity check
 	if ka.isNew() {
 		a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
@@ -503,12 +554,12 @@ func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
 	}
 
 	// Ensure in addrLookup
-	a.addrLookup[addrStr] = ka
+	a.addrLookup[ka.ID()] = ka
 
 	return true
 }
 
-func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
+func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
 	if ka.BucketType != bucketType {
 		a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
 		return
@@ -521,11 +572,11 @@ func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx
 		} else {
 			a.nOld--
 		}
-		delete(a.addrLookup, ka.Addr.String())
+		delete(a.addrLookup, ka.ID())
 	}
 }
 
-func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
+func (a *addrBook) removeFromAllBuckets(ka *knownAddress) {
 	for _, bucketIdx := range ka.Buckets {
 		bucket := a.getBucket(ka.BucketType, bucketIdx)
 		delete(bucket, ka.Addr.String())
@@ -536,10 +587,12 @@ func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
 	} else {
 		a.nOld--
 	}
-	delete(a.addrLookup, ka.Addr.String())
+	delete(a.addrLookup, ka.ID())
 }
 
-func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
+//----------------------------------------------------------
+
+func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
 	bucket := a.getBucket(bucketType, bucketIdx)
 	var oldest *knownAddress
 	for _, ka := range bucket {
@@ -550,7 +603,9 @@ func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
 	return oldest
 }
 
-func (a *AddrBook) addAddress(addr, src *NetAddress) error {
+// adds the address to a "new" bucket. if it's already in one,
+// it only adds it probabilistically
+func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
 	if a.routabilityStrict && !addr.Routable() {
 		return fmt.Errorf("Cannot add non-routable address %v", addr)
 	}
@@ -559,7 +614,7 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) error {
 		return fmt.Errorf("Cannot add ourselves with address %v", addr)
 	}
 
-	ka := a.addrLookup[addr.String()]
+	ka := a.addrLookup[addr.ID]
 
 	if ka != nil {
 		// Already old.
@@ -580,7 +635,10 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) error {
 	}
 
 	bucket := a.calcNewBucket(addr, src)
-	a.addToNewBucket(ka, bucket)
+	added := a.addToNewBucket(ka, bucket)
+	if !added {
+		a.Logger.Info("Can't add new address, addr book is full", "address", addr, "total", a.size())
+	}
 
 	a.Logger.Info("Added new address", "address", addr, "total", a.size())
 	return nil
@@ -588,7 +646,7 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) error {
 
 // Make space in the new buckets by expiring the really bad entries.
 // If no bad entries are available we remove the oldest.
-func (a *AddrBook) expireNew(bucketIdx int) {
+func (a *addrBook) expireNew(bucketIdx int) {
 	for addrStr, ka := range a.bucketsNew[bucketIdx] {
 		// If an entry is bad, throw it away
 		if ka.isBad() {
@@ -603,10 +661,10 @@ func (a *AddrBook) expireNew(bucketIdx int) {
 	a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
 }
 
-// Promotes an address from new to old.
-// TODO: Move to old probabilistically.
-// The better a node is, the less likely it should be evicted from an old bucket.
-func (a *AddrBook) moveToOld(ka *knownAddress) {
+// Promotes an address from new to old. If the destination bucket is full,
+// demote the oldest one to a "new" bucket.
+// TODO: Demote more probabilistically?
+func (a *addrBook) moveToOld(ka *knownAddress) {
 	// Sanity check
 	if ka.isOld() {
 		a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
@@ -649,9 +707,12 @@ func (a *AddrBook) moveToOld(ka *knownAddress) {
 	}
 }
 
+//---------------------------------------------------------------------
+// calculate bucket placements
+
 // doublesha256(  key + sourcegroup +
 //                int64(doublesha256(key + group + sourcegroup))%bucket_per_group  ) % num_new_buckets
-func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
+func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) int {
 	data1 := []byte{}
 	data1 = append(data1, []byte(a.key)...)
 	data1 = append(data1, []byte(a.groupKey(addr))...)
@@ -672,7 +733,7 @@ func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
 
 // doublesha256(  key + group +
 //                int64(doublesha256(key + addr))%buckets_per_group  ) % num_old_buckets
-func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
+func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int {
 	data1 := []byte{}
 	data1 = append(data1, []byte(a.key)...)
 	data1 = append(data1, []byte(addr.String())...)
@@ -694,7 +755,7 @@ func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
 // This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
 // "local" for a local address and the string "unroutable" for an unroutable
 // address.
-func (a *AddrBook) groupKey(na *NetAddress) string {
+func (a *addrBook) groupKey(na *p2p.NetAddress) string {
 	if a.routabilityStrict && na.Local() {
 		return "local"
 	}
@@ -739,127 +800,12 @@ func (a *AddrBook) groupKey(na *NetAddress) string {
 	return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
 }
 
-//-----------------------------------------------------------------------------
-
-/*
-   knownAddress
-
-   tracks information about a known network address that is used
-   to determine how viable an address is.
-*/
-type knownAddress struct {
-	Addr        *NetAddress
-	Src         *NetAddress
-	Attempts    int32
-	LastAttempt time.Time
-	LastSuccess time.Time
-	BucketType  byte
-	Buckets     []int
-}
-
-func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
-	return &knownAddress{
-		Addr:        addr,
-		Src:         src,
-		Attempts:    0,
-		LastAttempt: time.Now(),
-		BucketType:  bucketTypeNew,
-		Buckets:     nil,
-	}
-}
-
-func (ka *knownAddress) isOld() bool {
-	return ka.BucketType == bucketTypeOld
-}
-
-func (ka *knownAddress) isNew() bool {
-	return ka.BucketType == bucketTypeNew
-}
-
-func (ka *knownAddress) markAttempt() {
-	now := time.Now()
-	ka.LastAttempt = now
-	ka.Attempts += 1
-}
-
-func (ka *knownAddress) markGood() {
-	now := time.Now()
-	ka.LastAttempt = now
-	ka.Attempts = 0
-	ka.LastSuccess = now
-}
-
-func (ka *knownAddress) addBucketRef(bucketIdx int) int {
-	for _, bucket := range ka.Buckets {
-		if bucket == bucketIdx {
-			// TODO refactor to return error?
-			// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
-			return -1
-		}
-	}
-	ka.Buckets = append(ka.Buckets, bucketIdx)
-	return len(ka.Buckets)
-}
-
-func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
-	buckets := []int{}
-	for _, bucket := range ka.Buckets {
-		if bucket != bucketIdx {
-			buckets = append(buckets, bucket)
-		}
-	}
-	if len(buckets) != len(ka.Buckets)-1 {
-		// TODO refactor to return error?
-		// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
-		return -1
-	}
-	ka.Buckets = buckets
-	return len(ka.Buckets)
-}
-
-/*
-   An address is bad if the address in question is a New address, has not been tried in the last
-   minute, and meets one of the following criteria:
-
-   1) It claims to be from the future
-   2) It hasn't been seen in over a month
-   3) It has failed at least three times and never succeeded
-   4) It has failed ten times in the last week
-
-   All addresses that meet these criteria are assumed to be worthless and not
-   worth keeping hold of.
-
-   XXX: so a good peer needs us to call MarkGood before the conditions above are reached!
-*/
-func (ka *knownAddress) isBad() bool {
-	// Is Old --> good
-	if ka.BucketType == bucketTypeOld {
-		return false
-	}
-
-	// Has been attempted in the last minute --> good
-	if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
-		return false
-	}
-
-	// Too old?
-	// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
-	// and shouldn't it be .Before ?
-	if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
-		return true
-	}
-
-	// Never succeeded?
-	if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
-		return true
-	}
-
-	// Hasn't succeeded in too long?
-	// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
-	if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
-		ka.Attempts >= maxFailures {
-		return true
-	}
-
-	return false
+// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
+func doubleSha256(b []byte) []byte {
+	hasher := sha256.New()
+	hasher.Write(b) // nolint: errcheck, gas
+	sum := hasher.Sum(nil)
+	hasher.Reset()
+	hasher.Write(sum) // nolint: errcheck, gas
+	return hasher.Sum(nil)
 }
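
To make the bucket-placement formula above concrete: calcNewBucket derives an index via a two-level double-SHA256 over the book's random key and the address/source group strings. The following standalone Go sketch is not part of the patch; the constants, group strings, and function names are illustrative assumptions, and only the documented formula and the doubleSha256 helper mirror the code above.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Mirror the constants used for new buckets.
const (
	exampleNewBucketsPerGroup = 32
	exampleNewBucketCount     = 256
)

// doubleSha256 computes sha256(sha256(b)), as in the helper above.
func doubleSha256(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// calcNewBucketIndex follows the documented formula:
// doublesha256(key + sourcegroup +
//              int64(doublesha256(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
func calcNewBucketIndex(key, addrGroup, srcGroup string) int {
	inner := doubleSha256([]byte(key + addrGroup + srcGroup))
	slot := binary.BigEndian.Uint64(inner) % exampleNewBucketsPerGroup

	var slotBytes [8]byte
	binary.BigEndian.PutUint64(slotBytes[:], slot)

	outer := doubleSha256(append([]byte(key+srcGroup), slotBytes[:]...))
	return int(binary.BigEndian.Uint64(outer) % exampleNewBucketCount)
}

func main() {
	// Same source group gossiping two different address groups.
	fmt.Println(calcNewBucketIndex("96-bit-random-key", "10.1.0.0", "192.168.0.0"))
	fmt.Println(calcNewBucketIndex("96-bit-random-key", "10.2.0.0", "192.168.0.0"))
}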
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/file.go b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
new file mode 100644
index 00000000..38142dd9
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/file.go
@@ -0,0 +1,83 @@
+package pex
+
+import (
+	"encoding/json"
+	"os"
+
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+/* Loading & Saving */
+
+type addrBookJSON struct {
+	Key   string          `json:"key"`
+	Addrs []*knownAddress `json:"addrs"`
+}
+
+func (a *addrBook) saveToFile(filePath string) {
+	a.Logger.Info("Saving AddrBook to file", "size", a.Size())
+
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	// Compile Addrs
+	addrs := []*knownAddress{}
+	for _, ka := range a.addrLookup {
+		addrs = append(addrs, ka)
+	}
+
+	aJSON := &addrBookJSON{
+		Key:   a.key,
+		Addrs: addrs,
+	}
+
+	jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
+	if err != nil {
+		a.Logger.Error("Failed to save AddrBook to file", "err", err)
+		return
+	}
+	err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
+	if err != nil {
+		a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
+	}
+}
+
+// Returns false if file does not exist.
+// cmn.Panics if file is corrupt.
+func (a *addrBook) loadFromFile(filePath string) bool {
+	// If doesn't exist, do nothing.
+	_, err := os.Stat(filePath)
+	if os.IsNotExist(err) {
+		return false
+	}
+
+	// Load addrBookJSON{}
+	r, err := os.Open(filePath)
+	if err != nil {
+		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
+	}
+	defer r.Close() // nolint: errcheck
+	aJSON := &addrBookJSON{}
+	dec := json.NewDecoder(r)
+	err = dec.Decode(aJSON)
+	if err != nil {
+		cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
+	}
+
+	// Restore all the fields...
+	// Restore the key
+	a.key = aJSON.Key
+	// Restore .bucketsNew & .bucketsOld
+	for _, ka := range aJSON.Addrs {
+		for _, bucketIndex := range ka.Buckets {
+			bucket := a.getBucket(ka.BucketType, bucketIndex)
+			bucket[ka.Addr.String()] = ka
+		}
+		a.addrLookup[ka.ID()] = ka
+		if ka.BucketType == bucketTypeNew {
+			a.nNew++
+		} else {
+			a.nOld++
+		}
+	}
+	return true
+}
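
For reference, the on-disk shape produced by saveToFile: a standalone sketch, not part of the patch, that marshals structs mirroring addrBookJSON and the knownAddress JSON tags introduced in known_address.go below. The netAddr type and its field names stand in for *p2p.NetAddress and are assumptions, as are all the values.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// netAddr stands in for *p2p.NetAddress; its JSON field names are assumptions.
type netAddr struct {
	ID   string `json:"id"`
	IP   string `json:"ip"`
	Port uint16 `json:"port"`
}

// knownAddr mirrors the json tags on knownAddress.
type knownAddr struct {
	Addr        netAddr   `json:"addr"`
	Src         netAddr   `json:"src"`
	Attempts    int32     `json:"attempts"`
	LastAttempt time.Time `json:"last_attempt"`
	LastSuccess time.Time `json:"last_success"`
	BucketType  byte      `json:"bucket_type"`
	Buckets     []int     `json:"buckets"`
}

// bookJSON mirrors addrBookJSON.
type bookJSON struct {
	Key   string      `json:"key"`
	Addrs []knownAddr `json:"addrs"`
}

func main() {
	book := bookJSON{
		Key: "0123456789abcdef01234567", // 24 hex chars, as produced by CRandHex(24)
		Addrs: []knownAddr{{
			Addr:        netAddr{ID: "exampleid1", IP: "10.0.0.1", Port: 26656},
			Src:         netAddr{ID: "exampleid2", IP: "10.0.0.2", Port: 26656},
			LastAttempt: time.Now(),
			BucketType:  0x01, // bucketTypeNew
			Buckets:     []int{42},
		}},
	}
	out, _ := json.MarshalIndent(book, "", "\t")
	fmt.Println(string(out))
}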
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/known_address.go b/vendor/github.com/tendermint/tendermint/p2p/pex/known_address.go
new file mode 100644
index 00000000..0261e490
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/known_address.go
@@ -0,0 +1,142 @@
+package pex
+
+import (
+	"time"
+
+	"github.com/tendermint/tendermint/p2p"
+)
+
+// knownAddress tracks information about a known network address
+// that is used to determine how viable an address is.
+type knownAddress struct {
+	Addr        *p2p.NetAddress `json:"addr"`
+	Src         *p2p.NetAddress `json:"src"`
+	Attempts    int32           `json:"attempts"`
+	LastAttempt time.Time       `json:"last_attempt"`
+	LastSuccess time.Time       `json:"last_success"`
+	BucketType  byte            `json:"bucket_type"`
+	Buckets     []int           `json:"buckets"`
+}
+
+func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress {
+	return &knownAddress{
+		Addr:        addr,
+		Src:         src,
+		Attempts:    0,
+		LastAttempt: time.Now(),
+		BucketType:  bucketTypeNew,
+		Buckets:     nil,
+	}
+}
+
+func (ka *knownAddress) ID() p2p.ID {
+	return ka.Addr.ID
+}
+
+func (ka *knownAddress) copy() *knownAddress {
+	return &knownAddress{
+		Addr:        ka.Addr,
+		Src:         ka.Src,
+		Attempts:    ka.Attempts,
+		LastAttempt: ka.LastAttempt,
+		LastSuccess: ka.LastSuccess,
+		BucketType:  ka.BucketType,
+		Buckets:     ka.Buckets,
+	}
+}
+
+func (ka *knownAddress) isOld() bool {
+	return ka.BucketType == bucketTypeOld
+}
+
+func (ka *knownAddress) isNew() bool {
+	return ka.BucketType == bucketTypeNew
+}
+
+func (ka *knownAddress) markAttempt() {
+	now := time.Now()
+	ka.LastAttempt = now
+	ka.Attempts++
+}
+
+func (ka *knownAddress) markGood() {
+	now := time.Now()
+	ka.LastAttempt = now
+	ka.Attempts = 0
+	ka.LastSuccess = now
+}
+
+func (ka *knownAddress) addBucketRef(bucketIdx int) int {
+	for _, bucket := range ka.Buckets {
+		if bucket == bucketIdx {
+			// TODO refactor to return error?
+			// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
+			return -1
+		}
+	}
+	ka.Buckets = append(ka.Buckets, bucketIdx)
+	return len(ka.Buckets)
+}
+
+func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
+	buckets := []int{}
+	for _, bucket := range ka.Buckets {
+		if bucket != bucketIdx {
+			buckets = append(buckets, bucket)
+		}
+	}
+	if len(buckets) != len(ka.Buckets)-1 {
+		// TODO refactor to return error?
+		// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
+		return -1
+	}
+	ka.Buckets = buckets
+	return len(ka.Buckets)
+}
+
+/*
+   An address is bad if the address in question is a New address, has not been tried in the last
+   minute, and meets one of the following criteria:
+
+   1) It claims to be from the future
+   2) It hasn't been seen in over a week
+   3) It has failed at least three times and never succeeded
+   4) It has failed ten times in the last week
+
+   All addresses that meet these criteria are assumed to be worthless and not
+   worth keeping hold of.
+
+   XXX: so a good peer needs us to call MarkGood before the conditions above are reached!
+*/
+func (ka *knownAddress) isBad() bool {
+	// Is Old --> good
+	if ka.BucketType == bucketTypeOld {
+		return false
+	}
+
+	// Has been attempted in the last minute --> good
+	if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
+		return false
+	}
+
+	// Too old?
+	// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
+	// and shouldn't it be .Before ?
+	if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
+		return true
+	}
+
+	// Never succeeded?
+	if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
+		return true
+	}
+
+	// Hasn't succeeded in too long?
+	// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
+	if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
+		ka.Attempts >= maxFailures {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/params.go b/vendor/github.com/tendermint/tendermint/p2p/pex/params.go
new file mode 100644
index 00000000..f94e1021
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/params.go
@@ -0,0 +1,55 @@
+package pex
+
+import "time"
+
+const (
+	// addresses under which the address manager will claim to need more addresses.
+	needAddressThreshold = 1000
+
+	// interval used to dump the address cache to disk for future use.
+	dumpAddressInterval = time.Minute * 2
+
+	// max addresses in each old address bucket.
+	oldBucketSize = 64
+
+	// buckets we split old addresses over.
+	oldBucketCount = 64
+
+	// max addresses in each new address bucket.
+	newBucketSize = 64
+
+	// buckets that we spread new addresses over.
+	newBucketCount = 256
+
+	// old buckets over which an address group will be spread.
+	oldBucketsPerGroup = 4
+
+	// new buckets over which a source address group will be spread.
+	newBucketsPerGroup = 32
+
+	// buckets a frequently seen new address may end up in.
+	maxNewBucketsPerAddress = 4
+
+	// days before which we assume an address has vanished
+	// if we have not seen it announced in that long.
+	numMissingDays = 7
+
+	// tries without a single success before we assume an address is bad.
+	numRetries = 3
+
+	// max failures we will accept without a success before considering an address bad.
+	maxFailures = 10 // ?
+
+	// days since the last success before we will consider evicting an address.
+	minBadDays = 7
+
+	// % of total addresses known returned by GetSelection.
+	getSelectionPercent = 23
+
+	// min addresses that must be returned by GetSelection. Useful for bootstrapping.
+	minGetSelection = 32
+
+	// max addresses returned by GetSelection
+	// NOTE: this must match "maxPexMessageSize"
+	maxGetSelection = 250
+)
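
These constants drive the GetSelection sizing in addrbook.go above: take at least min(minGetSelection, book size) addresses, aim for getSelectionPercent of the book, and cap at maxGetSelection. A standalone sketch of that arithmetic, not part of the patch; helper names and sample sizes are illustrative.

package main

import "fmt"

// Constants copied from params.go above.
const (
	getSelectionPercent = 23
	minGetSelection     = 32
	maxGetSelection     = 250
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// selectionSize mirrors the numAddresses computation in GetSelection/GetSelectionWithBias.
func selectionSize(bookSize int) int {
	n := maxInt(minInt(minGetSelection, bookSize), bookSize*getSelectionPercent/100)
	return minInt(maxGetSelection, n)
}

func main() {
	for _, size := range []int{5, 50, 500, 5000} {
		fmt.Printf("book size %4d -> selection size %d\n", size, selectionSize(size))
	}
}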
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
new file mode 100644
index 00000000..1bcc493d
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/pex/pex_reactor.go
@@ -0,0 +1,652 @@
+package pex
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"reflect"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+	wire "github.com/tendermint/go-wire"
+	cmn "github.com/tendermint/tmlibs/common"
+
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/conn"
+)
+
+type Peer = p2p.Peer
+
+const (
+	// PexChannel is a channel for PEX messages
+	PexChannel = byte(0x00)
+
+	maxPexMessageSize = 1048576 // 1MB
+
+	// ensure we have enough peers
+	defaultEnsurePeersPeriod   = 30 * time.Second
+	defaultMinNumOutboundPeers = 10
+
+	// Seed/Crawler constants
+
+	// We want seeds to only advertise good peers. Therefore they should wait at
+	// least as long as we expect it to take for a peer to become good before
+	// disconnecting.
+	// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
+	// 10000 blocks assuming 1s blocks ~ 2.7 hours.
+	defaultSeedDisconnectWaitPeriod = 3 * time.Hour
+
+	defaultCrawlPeerInterval = 2 * time.Minute // don't redial for this. TODO: back-off. what for?
+
+	defaultCrawlPeersPeriod = 30 * time.Second // check some peers every this
+
+	maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
+
+	// if node connects to seed, it does not have any trusted peers.
+	// Especially in the beginning, node should have more trusted peers than
+	// untrusted.
+	biasToSelectNewPeers = 30 // 70 to select good peers
+)
+
+// PEXReactor handles PEX (peer exchange) and ensures that an
+// adequate number of peers are connected to the switch.
+//
+// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
+//
+// ## Preventing abuse
+//
+// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
+// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
+type PEXReactor struct {
+	p2p.BaseReactor
+
+	book              AddrBook
+	config            *PEXReactorConfig
+	ensurePeersPeriod time.Duration
+
+	// maps to prevent abuse
+	requestsSent         *cmn.CMap // ID->struct{}: unanswered send requests
+	lastReceivedRequests *cmn.CMap // ID->time.Time: last time peer requested from us
+
+	attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
+}
+
+// PEXReactorConfig holds reactor specific configuration data.
+type PEXReactorConfig struct {
+	// Seed/Crawler mode
+	SeedMode bool
+
+	// Seeds is a list of addresses reactor may use
+	// if it can't connect to peers in the addrbook.
+	Seeds []string
+
+	// PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other
+	// peers.
+	PrivatePeerIDs []string
+}
+
+type _attemptsToDial struct {
+	number     int
+	lastDialed time.Time
+}
+
+// NewPEXReactor creates new PEX reactor.
+func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
+	r := &PEXReactor{
+		book:                 b,
+		config:               config,
+		ensurePeersPeriod:    defaultEnsurePeersPeriod,
+		requestsSent:         cmn.NewCMap(),
+		lastReceivedRequests: cmn.NewCMap(),
+	}
+	r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
+	return r
+}
+
+// OnStart implements BaseService
+func (r *PEXReactor) OnStart() error {
+	if err := r.BaseReactor.OnStart(); err != nil {
+		return err
+	}
+	err := r.book.Start()
+	if err != nil && err != cmn.ErrAlreadyStarted {
+		return err
+	}
+
+	// return err if user provided a bad seed address
+	if err := r.checkSeeds(); err != nil {
+		return err
+	}
+
+	// Check if this node should run
+	// in seed/crawler mode
+	if r.config.SeedMode {
+		go r.crawlPeersRoutine()
+	} else {
+		go r.ensurePeersRoutine()
+	}
+	return nil
+}
+
+// OnStop implements BaseService
+func (r *PEXReactor) OnStop() {
+	r.BaseReactor.OnStop()
+	r.book.Stop()
+}
+
+// GetChannels implements Reactor
+func (r *PEXReactor) GetChannels() []*conn.ChannelDescriptor {
+	return []*conn.ChannelDescriptor{
+		{
+			ID:                PexChannel,
+			Priority:          1,
+			SendQueueCapacity: 10,
+		},
+	}
+}
+
+// AddPeer implements Reactor by adding peer to the address book (if inbound)
+// or by requesting more addresses (if outbound).
+func (r *PEXReactor) AddPeer(p Peer) {
+	if p.IsOutbound() {
+		// For outbound peers, the address is already in the books -
+		// either via DialPeersAsync or r.Receive.
+		// Ask it for more peers if we need.
+		if r.book.NeedMoreAddrs() {
+			r.RequestAddrs(p)
+		}
+	} else {
+		// For inbound peers, the peer is its own source,
+		// and its NodeInfo has already been validated.
+		// Let the ensurePeersRoutine handle asking for more
+		// peers when we need - we don't trust inbound peers as much.
+		addr := p.NodeInfo().NetAddress()
+		if !isAddrPrivate(addr, r.config.PrivatePeerIDs) {
+			err := r.book.AddAddress(addr, addr)
+			if err != nil {
+				r.Logger.Error("Failed to add new address", "err", err)
+			}
+		}
+	}
+}
+
+// RemovePeer implements Reactor.
+func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
+	id := string(p.ID())
+	r.requestsSent.Delete(id)
+	r.lastReceivedRequests.Delete(id)
+}
+
+// Receive implements Reactor by handling incoming PEX messages.
+func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
+	_, msg, err := DecodeMessage(msgBytes)
+	if err != nil {
+		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		r.Switch.StopPeerForError(src, err)
+		return
+	}
+	r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)
+
+	switch msg := msg.(type) {
+	case *pexRequestMessage:
+		// Check we're not receiving too many requests
+		if err := r.receiveRequest(src); err != nil {
+			r.Switch.StopPeerForError(src, err)
+			return
+		}
+
+		// Seeds disconnect after sending a batch of addrs
+		if r.config.SeedMode {
+			r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
+			r.Switch.StopPeerGracefully(src)
+		} else {
+			r.SendAddrs(src, r.book.GetSelection())
+		}
+
+	case *pexAddrsMessage:
+		// If we asked for addresses, add them to the book
+		if err := r.ReceiveAddrs(msg.Addrs, src); err != nil {
+			r.Switch.StopPeerForError(src, err)
+			return
+		}
+	default:
+		r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
+
+func (r *PEXReactor) receiveRequest(src Peer) error {
+	id := string(src.ID())
+	v := r.lastReceivedRequests.Get(id)
+	if v == nil {
+		// initialize with empty time
+		lastReceived := time.Time{}
+		r.lastReceivedRequests.Set(id, lastReceived)
+		return nil
+	}
+
+	lastReceived := v.(time.Time)
+	if lastReceived.Equal(time.Time{}) {
+		// first time gets a free pass. then we start tracking the time
+		lastReceived = time.Now()
+		r.lastReceivedRequests.Set(id, lastReceived)
+		return nil
+	}
+
+	now := time.Now()
+	if now.Sub(lastReceived) < r.ensurePeersPeriod/3 {
+		return fmt.Errorf("Peer (%v) is sending too many PEX requests. Disconnecting", src.ID())
+	}
+	r.lastReceivedRequests.Set(id, now)
+	return nil
+}
+
+// RequestAddrs asks peer for more addresses if we do not already
+// have a request out for this peer.
+func (r *PEXReactor) RequestAddrs(p Peer) {
+	id := string(p.ID())
+	if r.requestsSent.Has(id) {
+		return
+	}
+	r.requestsSent.Set(id, struct{}{})
+	p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
+}
+
+// ReceiveAddrs adds the given addrs to the addrbook if there's an open
+// request for this peer and deletes the open request.
+// If there's no open request for the src peer, it returns an error.
+func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
+	id := string(src.ID())
+
+	if !r.requestsSent.Has(id) {
+		return errors.New("Received unsolicited pexAddrsMessage")
+	}
+
+	r.requestsSent.Delete(id)
+
+	srcAddr := src.NodeInfo().NetAddress()
+	for _, netAddr := range addrs {
+		if netAddr != nil && !isAddrPrivate(netAddr, r.config.PrivatePeerIDs) {
+			err := r.book.AddAddress(netAddr, srcAddr)
+			if err != nil {
+				r.Logger.Error("Failed to add new address", "err", err)
+			}
+		}
+	}
+	return nil
+}
+
+// SendAddrs sends addrs to the peer.
+func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
+	p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: netAddrs}})
+}
+
+// SetEnsurePeersPeriod sets period to ensure peers connected.
+func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
+	r.ensurePeersPeriod = d
+}
+
+// Ensures that sufficient peers are connected. (continuous)
+func (r *PEXReactor) ensurePeersRoutine() {
+	var (
+		seed   = rand.New(rand.NewSource(time.Now().UnixNano()))
+		jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
+	)
+
+	// Randomize first round of communication to avoid thundering herd.
+	// If no potential peers are present directly start connecting so we guarantee
+	// swift setup with the help of configured seeds.
+	if r.hasPotentialPeers() {
+		time.Sleep(time.Duration(jitter))
+	}
+
+	// fire once immediately.
+	// ensures we dial the seeds right away if the book is empty
+	r.ensurePeers()
+
+	// fire periodically
+	ticker := time.NewTicker(r.ensurePeersPeriod)
+	for {
+		select {
+		case <-ticker.C:
+			r.ensurePeers()
+		case <-r.Quit():
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+// ensurePeers ensures that sufficient peers are connected. (once)
+//
+// Old bucket / New bucket are arbitrary categories to denote whether an
+// address is vetted or not, and this needs to be determined over time via a
+// heuristic that we haven't perfected yet, or, perhaps is manually edited by
+// the node operator. It should not be used to compute what addresses are
+// already connected or not.
+func (r *PEXReactor) ensurePeers() {
+	var (
+		out, in, dial = r.Switch.NumPeers()
+		numToDial     = defaultMinNumOutboundPeers - (out + dial)
+	)
+	r.Logger.Info(
+		"Ensure peers",
+		"numOutPeers", out,
+		"numInPeers", in,
+		"numDialing", dial,
+		"numToDial", numToDial,
+	)
+
+	if numToDial <= 0 {
+		return
+	}
+
+	// bias to prefer more vetted peers when we have fewer connections.
+	// not perfect, but somewhat ensures that we prioritize connecting to more-vetted peers.
+	// NOTE: range here is [10, 90]. Too high ?
+	newBias := cmn.MinInt(out, 8)*10 + 10
+
+	toDial := make(map[p2p.ID]*p2p.NetAddress)
+	// Try maxAttempts times to pick numToDial addresses to dial
+	maxAttempts := numToDial * 3
+
+	for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
+		try := r.book.PickAddress(newBias)
+		if try == nil {
+			continue
+		}
+		if _, selected := toDial[try.ID]; selected {
+			continue
+		}
+		if dialling := r.Switch.IsDialing(try.ID); dialling {
+			continue
+		}
+		if connected := r.Switch.Peers().Has(try.ID); connected {
+			continue
+		}
+		r.Logger.Info("Will dial address", "addr", try)
+		toDial[try.ID] = try
+	}
+
+	// Dial picked addresses
+	for _, addr := range toDial {
+		go r.dialPeer(addr)
+	}
+
+	// If we need more addresses, pick a random peer and ask for more.
+	if r.book.NeedMoreAddrs() {
+		peers := r.Switch.Peers().List()
+		peersCount := len(peers)
+		if peersCount > 0 {
+			peer := peers[rand.Int()%peersCount] // nolint: gas
+			r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
+			r.RequestAddrs(peer)
+		}
+	}
+
+	// If we are not connected to nor dialing anybody, fall back to dialing a seed.
+	if out+in+dial+len(toDial) == 0 {
+		r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds")
+		r.dialSeeds()
+	}
+}
+
+func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
+	var attempts int
+	var lastDialed time.Time
+	if lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()); attempted {
+		attempts = lAttempts.(_attemptsToDial).number
+		lastDialed = lAttempts.(_attemptsToDial).lastDialed
+	}
+
+	if attempts > maxAttemptsToDial {
+		r.Logger.Error("Reached max attempts to dial", "addr", addr, "attempts", attempts)
+		r.book.MarkBad(addr)
+		return
+	}
+
+	// exponential backoff if it's not our first attempt to dial given address
+	if attempts > 0 {
+		jitterSeconds := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns)
+		backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second)
+		sinceLastDialed := time.Since(lastDialed)
+		if sinceLastDialed < backoffDuration {
+			r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed)
+			return
+		}
+	}
+
+	err := r.Switch.DialPeerWithAddress(addr, false)
+	if err != nil {
+		r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts)
+		// TODO: detect more "bad peer" scenarios
+		if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok {
+			r.book.MarkBad(addr)
+			r.attemptsToDial.Delete(addr.DialString())
+		} else {
+			r.book.MarkAttempt(addr)
+			// FIXME: if the addr is going to be removed from the addrbook (hard to
+			// tell at this point), we need to Delete it from attemptsToDial, not
+			// record another attempt.
+			// record attempt
+			r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
+		}
+	} else {
+		// cleanup any history
+		r.attemptsToDial.Delete(addr.DialString())
+	}
+}
+
+// check seed addresses are well formed
+func (r *PEXReactor) checkSeeds() error {
+	lSeeds := len(r.config.Seeds)
+	if lSeeds == 0 {
+		return nil
+	}
+	_, errs := p2p.NewNetAddressStrings(r.config.Seeds)
+	for _, err := range errs {
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// randomly dial seeds until we connect to one or exhaust them
+func (r *PEXReactor) dialSeeds() {
+	lSeeds := len(r.config.Seeds)
+	if lSeeds == 0 {
+		return
+	}
+	seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds)
+
+	perm := rand.Perm(lSeeds)
+	// perm := r.Switch.rng.Perm(lSeeds)
+	for _, i := range perm {
+		// dial a random seed
+		seedAddr := seedAddrs[i]
+		err := r.Switch.DialPeerWithAddress(seedAddr, false)
+		if err == nil {
+			return
+		}
+		r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr)
+	}
+	r.Switch.Logger.Error("Couldn't connect to any seeds")
+}
+
+// AttemptsToDial returns the number of attempts to dial specific address. It
+// returns 0 if never attempted or successfully connected.
+func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
+	lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
+	if attempted {
+		return lAttempts.(_attemptsToDial).number
+	}
+	return 0
+}
+
+//----------------------------------------------------------
+
+// Explores the network searching for more peers. (continuous)
+// Seed/Crawler Mode causes this node to quickly disconnect
+// from peers, except other seed nodes.
+func (r *PEXReactor) crawlPeersRoutine() {
+	// Do an initial crawl
+	r.crawlPeers()
+
+	// Fire periodically
+	ticker := time.NewTicker(defaultCrawlPeersPeriod)
+
+	for {
+		select {
+		case <-ticker.C:
+			r.attemptDisconnects()
+			r.crawlPeers()
+		case <-r.Quit():
+			return
+		}
+	}
+}
+
+// hasPotentialPeers indicates if there is a potential peer to connect to, by
+// consulting the Switch as well as the AddrBook.
+func (r *PEXReactor) hasPotentialPeers() bool {
+	out, in, dial := r.Switch.NumPeers()
+
+	return out+in+dial > 0 && len(r.book.ListOfKnownAddresses()) > 0
+}
+
+// crawlPeerInfo handles temporary data needed for the
+// network crawling performed during seed/crawler mode.
+type crawlPeerInfo struct {
+	// The listening address of a potential peer we learned about
+	Addr *p2p.NetAddress
+
+	// The last time we attempt to reach this address
+	LastAttempt time.Time
+
+	// The last time we successfully reached this address
+	LastSuccess time.Time
+}
+
+// oldestFirst implements sort.Interface for []crawlPeerInfo
+// based on the LastAttempt field.
+type oldestFirst []crawlPeerInfo
+
+func (of oldestFirst) Len() int           { return len(of) }
+func (of oldestFirst) Swap(i, j int)      { of[i], of[j] = of[j], of[i] }
+func (of oldestFirst) Less(i, j int) bool { return of[i].LastAttempt.Before(of[j].LastAttempt) }
+
+// getPeersToCrawl returns addresses of potential peers that we wish to validate.
+// NOTE: The status information is ordered as described above.
+func (r *PEXReactor) getPeersToCrawl() []crawlPeerInfo {
+	var of oldestFirst
+
+	// TODO: be more selective
+	addrs := r.book.ListOfKnownAddresses()
+	for _, addr := range addrs {
+		if len(addr.ID()) == 0 {
+			continue // don't use peers without id
+		}
+
+		of = append(of, crawlPeerInfo{
+			Addr:        addr.Addr,
+			LastAttempt: addr.LastAttempt,
+			LastSuccess: addr.LastSuccess,
+		})
+	}
+	sort.Sort(of)
+	return of
+}
+
+// crawlPeers will crawl the network looking for new peer addresses. (once)
+func (r *PEXReactor) crawlPeers() {
+	peerInfos := r.getPeersToCrawl()
+
+	now := time.Now()
+	// Use addresses we know of to reach additional peers
+	for _, pi := range peerInfos {
+		// Do not attempt to connect with peers we recently dialed
+		if now.Sub(pi.LastAttempt) < defaultCrawlPeerInterval {
+			continue
+		}
+		// Otherwise, attempt to connect with the known address
+		err := r.Switch.DialPeerWithAddress(pi.Addr, false)
+		if err != nil {
+			r.book.MarkAttempt(pi.Addr)
+			continue
+		}
+		// Ask for more addresses
+		peer := r.Switch.Peers().Get(pi.Addr.ID)
+		r.RequestAddrs(peer)
+	}
+}
+
+// attemptDisconnects checks if we've been with each peer long enough to disconnect
+func (r *PEXReactor) attemptDisconnects() {
+	for _, peer := range r.Switch.Peers().List() {
+		if peer.Status().Duration < defaultSeedDisconnectWaitPeriod {
+			continue
+		}
+		if peer.IsPersistent() {
+			continue
+		}
+		r.Switch.StopPeerGracefully(peer)
+	}
+}
+
+// isAddrPrivate returns true if addr is private.
+func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
+	for _, id := range privatePeerIDs {
+		if string(addr.ID) == id {
+			return true
+		}
+	}
+	return false
+}
+
+//-----------------------------------------------------------------------------
+// Messages
+
+const (
+	msgTypeRequest = byte(0x01)
+	msgTypeAddrs   = byte(0x02)
+)
+
+// PexMessage is a primary type for PEX messages. Underneath, it could contain
+// either pexRequestMessage, or pexAddrsMessage messages.
+type PexMessage interface{}
+
+var _ = wire.RegisterInterface(
+	struct{ PexMessage }{},
+	wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest},
+	wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs},
+)
+
+// DecodeMessage implements interface registered above.
+func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) {
+	msgType = bz[0]
+	n := new(int)
+	r := bytes.NewReader(bz)
+	msg = wire.ReadBinary(struct{ PexMessage }{}, r, maxPexMessageSize, n, &err).(struct{ PexMessage }).PexMessage
+	return
+}
+
+/*
+A pexRequestMessage requests additional peer addresses.
+*/
+type pexRequestMessage struct {
+}
+
+func (m *pexRequestMessage) String() string {
+	return "[pexRequest]"
+}
+
+/*
+A message with announced peer addresses.
+*/
+type pexAddrsMessage struct {
+	Addrs []*p2p.NetAddress
+}
+
+func (m *pexAddrsMessage) String() string {
+	return fmt.Sprintf("[pexAddrs %v]", m.Addrs)
+}
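
The dialPeer backoff above waits a sub-second random jitter plus 2^attempts seconds before redialing, and gives up after maxAttemptsToDial. A standalone sketch of that schedule, not part of the patch; only the formula and the constant are taken from the code above.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const maxAttemptsToDial = 16 // mirrors the constant above (~35h in total)

// backoff mirrors the dialPeer logic above: random jitter below one second
// plus an exponential 2^attempts seconds.
func backoff(attempts int) time.Duration {
	jitter := time.Duration(rand.Float64() * float64(time.Second))
	return jitter + (1<<uint(attempts))*time.Second
}

func main() {
	for attempts := 1; attempts <= maxAttemptsToDial; attempts++ {
		fmt.Printf("attempt %2d: wait at least %v before redialing\n", attempts, backoff(attempts))
	}
}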
diff --git a/vendor/github.com/tendermint/tendermint/p2p/pex_reactor.go b/vendor/github.com/tendermint/tendermint/p2p/pex_reactor.go
deleted file mode 100644
index 2bfe7dca..00000000
--- a/vendor/github.com/tendermint/tendermint/p2p/pex_reactor.go
+++ /dev/null
@@ -1,356 +0,0 @@
-package p2p
-
-import (
-	"bytes"
-	"fmt"
-	"math/rand"
-	"reflect"
-	"time"
-
-	wire "github.com/tendermint/go-wire"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-const (
-	// PexChannel is a channel for PEX messages
-	PexChannel = byte(0x00)
-
-	// period to ensure peers connected
-	defaultEnsurePeersPeriod = 30 * time.Second
-	minNumOutboundPeers      = 10
-	maxPexMessageSize        = 1048576 // 1MB
-
-	// maximum pex messages one peer can send to us during `msgCountByPeerFlushInterval`
-	defaultMaxMsgCountByPeer    = 1000
-	msgCountByPeerFlushInterval = 1 * time.Hour
-)
-
-// PEXReactor handles PEX (peer exchange) and ensures that an
-// adequate number of peers are connected to the switch.
-//
-// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
-//
-// ## Preventing abuse
-//
-// For now, it just limits the number of messages from one peer to
-// `defaultMaxMsgCountByPeer` messages per `msgCountByPeerFlushInterval` (1000
-// msg/hour).
-//
-// NOTE [2017-01-17]:
-//   Limiting is fine for now. Maybe down the road we want to keep track of the
-//   quality of peer messages so if peerA keeps telling us about peers we can't
-//   connect to then maybe we should care less about peerA. But I don't think
-//   that kind of complexity is priority right now.
-type PEXReactor struct {
-	BaseReactor
-
-	book              *AddrBook
-	ensurePeersPeriod time.Duration
-
-	// tracks message count by peer, so we can prevent abuse
-	msgCountByPeer    *cmn.CMap
-	maxMsgCountByPeer uint16
-}
-
-// NewPEXReactor creates new PEX reactor.
-func NewPEXReactor(b *AddrBook) *PEXReactor {
-	r := &PEXReactor{
-		book:              b,
-		ensurePeersPeriod: defaultEnsurePeersPeriod,
-		msgCountByPeer:    cmn.NewCMap(),
-		maxMsgCountByPeer: defaultMaxMsgCountByPeer,
-	}
-	r.BaseReactor = *NewBaseReactor("PEXReactor", r)
-	return r
-}
-
-// OnStart implements BaseService
-func (r *PEXReactor) OnStart() error {
-	if err := r.BaseReactor.OnStart(); err != nil {
-		return err
-	}
-	err := r.book.Start()
-	if err != nil && err != cmn.ErrAlreadyStarted {
-		return err
-	}
-	go r.ensurePeersRoutine()
-	go r.flushMsgCountByPeer()
-	return nil
-}
-
-// OnStop implements BaseService
-func (r *PEXReactor) OnStop() {
-	r.BaseReactor.OnStop()
-	r.book.Stop()
-}
-
-// GetChannels implements Reactor
-func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
-	return []*ChannelDescriptor{
-		{
-			ID:                PexChannel,
-			Priority:          1,
-			SendQueueCapacity: 10,
-		},
-	}
-}
-
-// AddPeer implements Reactor by adding peer to the address book (if inbound)
-// or by requesting more addresses (if outbound).
-func (r *PEXReactor) AddPeer(p Peer) {
-	if p.IsOutbound() {
-		// For outbound peers, the address is already in the books.
-		// Either it was added in DialSeeds or when we
-		// received the peer's address in r.Receive
-		if r.book.NeedMoreAddrs() {
-			r.RequestPEX(p)
-		}
-	} else { // For inbound connections, the peer is its own source
-		addr, err := NewNetAddressString(p.NodeInfo().ListenAddr)
-		if err != nil {
-			// peer gave us a bad ListenAddr. TODO: punish
-			r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.NodeInfo().ListenAddr, "err", err)
-			return
-		}
-		r.book.AddAddress(addr, addr)
-	}
-}
-
-// RemovePeer implements Reactor.
-func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
-	// If we aren't keeping track of local temp data for each peer here, then we
-	// don't have to do anything.
-}
-
-// Receive implements Reactor by handling incoming PEX messages.
-func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
-	srcAddrStr := src.NodeInfo().RemoteAddr
-	srcAddr, err := NewNetAddressString(srcAddrStr)
-	if err != nil {
-		// this should never happen. TODO: cancel conn
-		r.Logger.Error("Error in Receive: invalid peer address", "addr", srcAddrStr, "err", err)
-		return
-	}
-
-	r.IncrementMsgCountForPeer(srcAddrStr)
-	if r.ReachedMaxMsgCountForPeer(srcAddrStr) {
-		r.Logger.Error("Maximum number of messages reached for peer", "peer", srcAddrStr)
-		// TODO remove src from peers?
-		return
-	}
-
-	_, msg, err := DecodeMessage(msgBytes)
-	if err != nil {
-		r.Logger.Error("Error decoding message", "err", err)
-		return
-	}
-	r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)
-
-	switch msg := msg.(type) {
-	case *pexRequestMessage:
-		// src requested some peers.
-		// NOTE: we might send an empty selection
-		r.SendAddrs(src, r.book.GetSelection())
-	case *pexAddrsMessage:
-		// We received some peer addresses from src.
-		// TODO: (We don't want to get spammed with bad peers)
-		for _, addr := range msg.Addrs {
-			if addr != nil {
-				r.book.AddAddress(addr, srcAddr)
-			}
-		}
-	default:
-		r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
-	}
-}
-
-// RequestPEX asks peer for more addresses.
-func (r *PEXReactor) RequestPEX(p Peer) {
-	p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
-}
-
-// SendAddrs sends addrs to the peer.
-func (r *PEXReactor) SendAddrs(p Peer, addrs []*NetAddress) {
-	p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
-}
-
-// SetEnsurePeersPeriod sets period to ensure peers connected.
-func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
-	r.ensurePeersPeriod = d
-}
-
-// SetMaxMsgCountByPeer sets maximum messages one peer can send to us during 'msgCountByPeerFlushInterval'.
-func (r *PEXReactor) SetMaxMsgCountByPeer(v uint16) {
-	r.maxMsgCountByPeer = v
-}
-
-// ReachedMaxMsgCountForPeer returns true if we received too many
-// messages from peer with address `addr`.
-// NOTE: assumes the value in the CMap is non-nil
-func (r *PEXReactor) ReachedMaxMsgCountForPeer(addr string) bool {
-	return r.msgCountByPeer.Get(addr).(uint16) >= r.maxMsgCountByPeer
-}
-
-// Increment or initialize the msg count for the peer in the CMap
-func (r *PEXReactor) IncrementMsgCountForPeer(addr string) {
-	var count uint16
-	countI := r.msgCountByPeer.Get(addr)
-	if countI != nil {
-		count = countI.(uint16)
-	}
-	count++
-	r.msgCountByPeer.Set(addr, count)
-}
-
-// Ensures that sufficient peers are connected. (continuous)
-func (r *PEXReactor) ensurePeersRoutine() {
-	// Randomize when routine starts
-	ensurePeersPeriodMs := r.ensurePeersPeriod.Nanoseconds() / 1e6
-	time.Sleep(time.Duration(rand.Int63n(ensurePeersPeriodMs)) * time.Millisecond)
-
-	// fire once immediately.
-	r.ensurePeers()
-
-	// fire periodically
-	ticker := time.NewTicker(r.ensurePeersPeriod)
-
-	for {
-		select {
-		case <-ticker.C:
-			r.ensurePeers()
-		case <-r.Quit:
-			ticker.Stop()
-			return
-		}
-	}
-}
-
-// ensurePeers ensures that sufficient peers are connected. (once)
-//
-// Old bucket / New bucket are arbitrary categories to denote whether an
-// address is vetted or not, and this needs to be determined over time via a
-// heuristic that we haven't perfected yet, or, perhaps is manually edited by
-// the node operator. It should not be used to compute what addresses are
-// already connected or not.
-//
-// TODO Basically, we need to work harder on our good-peer/bad-peer marking.
-// What we're currently doing in terms of marking good/bad peers is just a
-// placeholder. It should not be the case that an address becomes old/vetted
-// upon a single successful connection.
-func (r *PEXReactor) ensurePeers() {
-	numOutPeers, _, numDialing := r.Switch.NumPeers()
-	numToDial := minNumOutboundPeers - (numOutPeers + numDialing)
-	r.Logger.Info("Ensure peers", "numOutPeers", numOutPeers, "numDialing", numDialing, "numToDial", numToDial)
-	if numToDial <= 0 {
-		return
-	}
-
-	// bias to prefer more vetted peers when we have fewer connections.
-	// not perfect, but somewhat ensures that we prioritize connecting to more-vetted peers
-	// NOTE: range here is [10, 90]. Too high ?
-	newBias := cmn.MinInt(numOutPeers, 8)*10 + 10
-
-	toDial := make(map[string]*NetAddress)
-	// Try maxAttempts times to pick numToDial addresses to dial
-	maxAttempts := numToDial * 3
-	for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
-		try := r.book.PickAddress(newBias)
-		if try == nil {
-			continue
-		}
-		if _, selected := toDial[try.IP.String()]; selected {
-			continue
-		}
-		if dialling := r.Switch.IsDialing(try); dialling {
-			continue
-		}
-		// XXX: Should probably use pubkey as peer key ...
-		if connected := r.Switch.Peers().Has(try.String()); connected {
-			continue
-		}
-		r.Logger.Info("Will dial address", "addr", try)
-		toDial[try.IP.String()] = try
-	}
-
-	// Dial picked addresses
-	for _, item := range toDial {
-		go func(picked *NetAddress) {
-			_, err := r.Switch.DialPeerWithAddress(picked, false)
-			if err != nil {
-				r.book.MarkAttempt(picked)
-			}
-		}(item)
-	}
-
-	// If we need more addresses, pick a random peer and ask for more.
-	if r.book.NeedMoreAddrs() {
-		if peers := r.Switch.Peers().List(); len(peers) > 0 {
-			i := rand.Int() % len(peers) // nolint: gas
-			peer := peers[i]
-			r.Logger.Info("No addresses to dial. Sending pexRequest to random peer", "peer", peer)
-			r.RequestPEX(peer)
-		}
-	}
-}
-
-func (r *PEXReactor) flushMsgCountByPeer() {
-	ticker := time.NewTicker(msgCountByPeerFlushInterval)
-
-	for {
-		select {
-		case <-ticker.C:
-			r.msgCountByPeer.Clear()
-		case <-r.Quit:
-			ticker.Stop()
-			return
-		}
-	}
-}
-
-//-----------------------------------------------------------------------------
-// Messages
-
-const (
-	msgTypeRequest = byte(0x01)
-	msgTypeAddrs   = byte(0x02)
-)
-
-// PexMessage is a primary type for PEX messages. Underneath, it could contain
-// either pexRequestMessage, or pexAddrsMessage messages.
-type PexMessage interface{}
-
-var _ = wire.RegisterInterface(
-	struct{ PexMessage }{},
-	wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest},
-	wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs},
-)
-
-// DecodeMessage implements interface registered above.
-func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) {
-	msgType = bz[0]
-	n := new(int)
-	r := bytes.NewReader(bz)
-	msg = wire.ReadBinary(struct{ PexMessage }{}, r, maxPexMessageSize, n, &err).(struct{ PexMessage }).PexMessage
-	return
-}
-
-/*
-A pexRequestMessage requests additional peer addresses.
-*/
-type pexRequestMessage struct {
-}
-
-func (m *pexRequestMessage) String() string {
-	return "[pexRequest]"
-}
-
-/*
-A message with announced peer addresses.
-*/
-type pexAddrsMessage struct {
-	Addrs []*NetAddress
-}
-
-func (m *pexAddrsMessage) String() string {
-	return fmt.Sprintf("[pexAddrs %v]", m.Addrs)
-}
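For context on the abuse-prevention logic in the file deleted above: it boils down to a per-peer message counter that is capped per interval and cleared on every flush. The following is a minimal, hedged sketch of that counting pattern in standalone Go (a plain map and mutex stand in for the vendored `cmn.CMap`; it is not the removed code itself):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// msgCounter tracks how many messages each peer address has sent in the
// current interval and reports when a peer exceeds the allowed maximum.
type msgCounter struct {
	mtx    sync.Mutex
	counts map[string]uint16
	max    uint16
}

func newMsgCounter(max uint16) *msgCounter {
	return &msgCounter{counts: make(map[string]uint16), max: max}
}

// Increment bumps the count for addr and reports whether the peer has
// reached the per-interval limit.
func (c *msgCounter) Increment(addr string) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.counts[addr]++
	return c.counts[addr] >= c.max
}

// flushEvery clears all counts on each tick, mirroring flushMsgCountByPeer.
func (c *msgCounter) flushEvery(interval time.Duration, quit <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.mtx.Lock()
			c.counts = make(map[string]uint16)
			c.mtx.Unlock()
		case <-quit:
			return
		}
	}
}

func main() {
	c := newMsgCounter(3)
	quit := make(chan struct{})
	go c.flushEvery(time.Hour, quit)
	for i := 0; i < 4; i++ {
		fmt.Println("limited:", c.Increment("1.2.3.4:46656"))
	}
	close(quit)
}
```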
diff --git a/vendor/github.com/tendermint/tendermint/p2p/switch.go b/vendor/github.com/tendermint/tendermint/p2p/switch.go
index 76b01980..e412d3cc 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/switch.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/switch.go
@@ -5,18 +5,19 @@ import (
 	"math"
 	"math/rand"
 	"net"
+	"sync"
 	"time"
 
 	"github.com/pkg/errors"
 
-	crypto "github.com/tendermint/go-crypto"
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/p2p/conn"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
 const (
 	// wait a random amount of time from this interval
-	// before dialing seeds or reconnecting to help prevent DoS
+	// before dialing peers or reconnecting to help prevent DoS
 	dialRandomizerIntervalMilliseconds = 3000
 
 	// repeatedly try to reconnect for a few minutes
@@ -30,46 +31,26 @@ const (
 	reconnectBackOffBaseSeconds = 3
 )
 
-type Reactor interface {
-	cmn.Service // Start, Stop
-
-	SetSwitch(*Switch)
-	GetChannels() []*ChannelDescriptor
-	AddPeer(peer Peer)
-	RemovePeer(peer Peer, reason interface{})
-	Receive(chID byte, peer Peer, msgBytes []byte) // CONTRACT: msgBytes are not nil
-}
-
-//--------------------------------------
-
-type BaseReactor struct {
-	cmn.BaseService // Provides Start, Stop, .Quit
-	Switch          *Switch
-}
-
-func NewBaseReactor(name string, impl Reactor) *BaseReactor {
-	return &BaseReactor{
-		BaseService: *cmn.NewBaseService(nil, name, impl),
-		Switch:      nil,
-	}
-}
+//-----------------------------------------------------------------------------
 
-func (br *BaseReactor) SetSwitch(sw *Switch) {
-	br.Switch = sw
+// An AddrBook represents an address book from the pex package, which is used
+// to store peer addresses.
+type AddrBook interface {
+	AddAddress(addr *NetAddress, src *NetAddress) error
+	AddOurAddress(*NetAddress)
+	OurAddress(*NetAddress) bool
+	MarkGood(*NetAddress)
+	RemoveAddress(*NetAddress)
+	HasAddress(*NetAddress) bool
+	Save()
 }
-func (_ *BaseReactor) GetChannels() []*ChannelDescriptor             { return nil }
-func (_ *BaseReactor) AddPeer(peer Peer)                             {}
-func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{})      {}
-func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
 
 //-----------------------------------------------------------------------------
 
-/*
-The `Switch` handles peer connections and exposes an API to receive incoming messages
-on `Reactors`.  Each `Reactor` is responsible for handling incoming messages of one
-or more `Channels`.  So while sending outgoing messages is typically performed on the peer,
-incoming messages are received on the reactor.
-*/
+// Switch handles peer connections and exposes an API to receive incoming messages
+// on `Reactors`.  Each `Reactor` is responsible for handling incoming messages of one
+// or more `Channels`.  So while sending outgoing messages is typically performed on the peer,
+// incoming messages are received on the reactor.
 type Switch struct {
 	cmn.BaseService
 
@@ -77,33 +58,30 @@ type Switch struct {
 	peerConfig   *PeerConfig
 	listeners    []Listener
 	reactors     map[string]Reactor
-	chDescs      []*ChannelDescriptor
+	chDescs      []*conn.ChannelDescriptor
 	reactorsByCh map[byte]Reactor
 	peers        *PeerSet
 	dialing      *cmn.CMap
-	nodeInfo     *NodeInfo             // our node info
-	nodePrivKey  crypto.PrivKeyEd25519 // our node privkey
+	nodeInfo     NodeInfo // our node info
+	nodeKey      *NodeKey // our node privkey
+	addrBook     AddrBook
 
-	filterConnByAddr   func(net.Addr) error
-	filterConnByPubKey func(crypto.PubKeyEd25519) error
+	filterConnByAddr func(net.Addr) error
+	filterConnByID   func(ID) error
 
 	rng *rand.Rand // seed for randomizing dial times and orders
 }
 
-var (
-	ErrSwitchDuplicatePeer = errors.New("Duplicate peer")
-)
-
+// NewSwitch creates a new Switch with the given config.
 func NewSwitch(config *cfg.P2PConfig) *Switch {
 	sw := &Switch{
 		config:       config,
 		peerConfig:   DefaultPeerConfig(),
 		reactors:     make(map[string]Reactor),
-		chDescs:      make([]*ChannelDescriptor, 0),
+		chDescs:      make([]*conn.ChannelDescriptor, 0),
 		reactorsByCh: make(map[byte]Reactor),
 		peers:        NewPeerSet(),
 		dialing:      cmn.NewCMap(),
-		nodeInfo:     nil,
 	}
 
 	// Ensure we have a completely nondeterministic PRNG. cmn.RandInt64() draws
@@ -111,15 +89,19 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
 	sw.rng = rand.New(rand.NewSource(cmn.RandInt64()))
 
 	// TODO: collapse the peerConfig into the config ?
-	sw.peerConfig.MConfig.flushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
+	sw.peerConfig.MConfig.FlushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
 	sw.peerConfig.MConfig.SendRate = config.SendRate
 	sw.peerConfig.MConfig.RecvRate = config.RecvRate
-	sw.peerConfig.MConfig.maxMsgPacketPayloadSize = config.MaxMsgPacketPayloadSize
+	sw.peerConfig.MConfig.MaxMsgPacketPayloadSize = config.MaxMsgPacketPayloadSize
+	sw.peerConfig.AuthEnc = config.AuthEnc
 
 	sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
 	return sw
 }
 
+//---------------------------------------------------------------------
+// Switch setup
+
 // AddReactor adds the given reactor to the switch.
 // NOTE: Not goroutine safe.
 func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
@@ -171,26 +153,25 @@ func (sw *Switch) IsListening() bool {
 
 // SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes.
 // NOTE: Not goroutine safe.
-func (sw *Switch) SetNodeInfo(nodeInfo *NodeInfo) {
+func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) {
 	sw.nodeInfo = nodeInfo
 }
 
 // NodeInfo returns the switch's NodeInfo.
 // NOTE: Not goroutine safe.
-func (sw *Switch) NodeInfo() *NodeInfo {
+func (sw *Switch) NodeInfo() NodeInfo {
 	return sw.nodeInfo
 }
 
-// SetNodePrivKey sets the switch's private key for authenticated encryption.
-// NOTE: Overwrites sw.nodeInfo.PubKey.
+// SetNodeKey sets the switch's private key for authenticated encryption.
 // NOTE: Not goroutine safe.
-func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) {
-	sw.nodePrivKey = nodePrivKey
-	if sw.nodeInfo != nil {
-		sw.nodeInfo.PubKey = nodePrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
-	}
+func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
+	sw.nodeKey = nodeKey
 }
 
+//---------------------------------------------------------------------
+// Service start/stop
+
 // OnStart implements BaseService. It starts all the reactors, peers, and listeners.
 func (sw *Switch) OnStart() error {
 	// Start reactors
@@ -226,188 +207,31 @@ func (sw *Switch) OnStop() {
 	}
 }
 
-// addPeer checks the given peer's validity, performs a handshake, and adds the
-// peer to the switch and to all registered reactors.
-// NOTE: This performs a blocking handshake before the peer is added.
-// NOTE: If error is returned, caller is responsible for calling peer.CloseConn()
-func (sw *Switch) addPeer(peer *peer) error {
-
-	if err := sw.FilterConnByAddr(peer.Addr()); err != nil {
-		return err
-	}
-
-	if err := sw.FilterConnByPubKey(peer.PubKey()); err != nil {
-		return err
-	}
-
-	if err := peer.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.peerConfig.HandshakeTimeout*time.Second)); err != nil {
-		return err
-	}
-
-	// Avoid self
-	if sw.nodeInfo.PubKey.Equals(peer.PubKey().Wrap()) {
-		return errors.New("Ignoring connection from self")
-	}
-
-	// Check version, chain id
-	if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo()); err != nil {
-		return err
-	}
-
-	// Check for duplicate peer
-	if sw.peers.Has(peer.Key()) {
-		return ErrSwitchDuplicatePeer
-
-	}
-
-	// Start peer
-	if sw.IsRunning() {
-		sw.startInitPeer(peer)
-	}
-
-	// Add the peer to .peers.
-	// We start it first so that a peer in the list is safe to Stop.
-	// It should not err since we already checked peers.Has().
-	if err := sw.peers.Add(peer); err != nil {
-		return err
-	}
-
-	sw.Logger.Info("Added peer", "peer", peer)
-	return nil
-}
-
-// FilterConnByAddr returns an error if connecting to the given address is forbidden.
-func (sw *Switch) FilterConnByAddr(addr net.Addr) error {
-	if sw.filterConnByAddr != nil {
-		return sw.filterConnByAddr(addr)
-	}
-	return nil
-}
-
-// FilterConnByPubKey returns an error if connecting to the given public key is forbidden.
-func (sw *Switch) FilterConnByPubKey(pubkey crypto.PubKeyEd25519) error {
-	if sw.filterConnByPubKey != nil {
-		return sw.filterConnByPubKey(pubkey)
-	}
-	return nil
-
-}
-
-// SetAddrFilter sets the function for filtering connections by address.
-func (sw *Switch) SetAddrFilter(f func(net.Addr) error) {
-	sw.filterConnByAddr = f
-}
-
-// SetPubKeyFilter sets the function for filtering connections by public key.
-func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) {
-	sw.filterConnByPubKey = f
-}
-
-func (sw *Switch) startInitPeer(peer *peer) {
-	err := peer.Start() // spawn send/recv routines
-	if err != nil {
-		// Should never happen
-		sw.Logger.Error("Error starting peer", "peer", peer, "err", err)
-	}
-
-	for _, reactor := range sw.reactors {
-		reactor.AddPeer(peer)
-	}
-}
-
-// DialSeeds dials a list of seeds asynchronously in random order.
-func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
-	netAddrs, errs := NewNetAddressStrings(seeds)
-	for _, err := range errs {
-		sw.Logger.Error("Error in seed's address", "err", err)
-	}
-
-	if addrBook != nil {
-		// add seeds to `addrBook`
-		ourAddrS := sw.nodeInfo.ListenAddr
-		ourAddr, _ := NewNetAddressString(ourAddrS)
-		for _, netAddr := range netAddrs {
-			// do not add ourselves
-			if netAddr.Equals(ourAddr) {
-				continue
-			}
-			addrBook.AddAddress(netAddr, ourAddr)
-		}
-		addrBook.Save()
-	}
-
-	// permute the list, dial them in random order.
-	perm := sw.rng.Perm(len(netAddrs))
-	for i := 0; i < len(perm); i++ {
-		go func(i int) {
-			sw.randomSleep(0)
-			j := perm[i]
-			sw.dialSeed(netAddrs[j])
-		}(i)
-	}
-	return nil
-}
-
-// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]
-func (sw *Switch) randomSleep(interval time.Duration) {
-	r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond
-	time.Sleep(r + interval)
-}
+//---------------------------------------------------------------------
+// Peers
 
-func (sw *Switch) dialSeed(addr *NetAddress) {
-	peer, err := sw.DialPeerWithAddress(addr, true)
-	if err != nil {
-		sw.Logger.Error("Error dialing seed", "err", err)
-	} else {
-		sw.Logger.Info("Connected to seed", "peer", peer)
-	}
-}
-
-// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects successfully.
-// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails.
-func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (Peer, error) {
-	sw.dialing.Set(addr.IP.String(), addr)
-	defer sw.dialing.Delete(addr.IP.String())
-
-	sw.Logger.Info("Dialing peer", "address", addr)
-	peer, err := newOutboundPeer(addr, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
-	if err != nil {
-		sw.Logger.Error("Failed to dial peer", "address", addr, "err", err)
-		return nil, err
-	}
-	peer.SetLogger(sw.Logger.With("peer", addr))
-	if persistent {
-		peer.makePersistent()
-	}
-	err = sw.addPeer(peer)
-	if err != nil {
-		sw.Logger.Error("Failed to add peer", "address", addr, "err", err)
-		peer.CloseConn()
-		return nil, err
-	}
-	sw.Logger.Info("Dialed and added peer", "address", addr, "peer", peer)
-	return peer, nil
-}
-
-// IsDialing returns true if the switch is currently dialing the given address.
-func (sw *Switch) IsDialing(addr *NetAddress) bool {
-	return sw.dialing.Has(addr.IP.String())
-}
-
-// Broadcast runs a go routine for each attempted send, which will block
-// trying to send for defaultSendTimeoutSeconds. Returns a channel
-// which receives success values for each attempted send (false if times out).
+// Broadcast runs a goroutine for each attempted send, which will block trying
+// to send for defaultSendTimeoutSeconds. Returns a channel which receives
+// success values for each attempted send (false if times out). The channel is
+// closed once the message has been sent to all peers.
+//
 // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
-// TODO: Something more intelligent.
 func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
 	successChan := make(chan bool, len(sw.peers.List()))
 	sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg)
+	var wg sync.WaitGroup
 	for _, peer := range sw.peers.List() {
+		wg.Add(1)
 		go func(peer Peer) {
+			defer wg.Done()
 			success := peer.Send(chID, msg)
 			successChan <- success
 		}(peer)
 	}
+	go func() {
+		wg.Wait()
+		close(successChan)
+	}()
 	return successChan
 }
 
@@ -442,12 +266,29 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
 	}
 }
 
+// StopPeerGracefully disconnects from a peer gracefully.
+// TODO: handle graceful disconnects.
+func (sw *Switch) StopPeerGracefully(peer Peer) {
+	sw.Logger.Info("Stopping peer gracefully")
+	sw.stopAndRemovePeer(peer, nil)
+}
+
+func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
+	sw.peers.Remove(peer)
+	peer.Stop()
+	for _, reactor := range sw.reactors {
+		reactor.RemovePeer(peer, reason)
+	}
+}
+
 // reconnectToPeer tries to reconnect to the peer, first repeatedly
 // with a fixed interval, then with exponential backoff.
 // If no success after all that, it stops trying, and leaves it
 // to the PEX/Addrbook to find the peer again
 func (sw *Switch) reconnectToPeer(peer Peer) {
-	addr, _ := NewNetAddressString(peer.NodeInfo().RemoteAddr)
+	// NOTE: this will connect to the self-reported address,
+	// not necessarily the address we originally dialed
+	netAddr := peer.NodeInfo().NetAddress()
 	start := time.Now()
 	sw.Logger.Info("Reconnecting to peer", "peer", peer)
 	for i := 0; i < reconnectAttempts; i++ {
@@ -455,14 +296,13 @@ func (sw *Switch) reconnectToPeer(peer Peer) {
 			return
 		}
 
-		peer, err := sw.DialPeerWithAddress(addr, true)
+		err := sw.DialPeerWithAddress(netAddr, true)
 		if err != nil {
 			sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "peer", peer)
 			// sleep a set amount
 			sw.randomSleep(reconnectInterval)
 			continue
 		} else {
-			sw.Logger.Info("Reconnected to peer", "peer", peer)
 			return
 		}
 	}
@@ -477,33 +317,127 @@ func (sw *Switch) reconnectToPeer(peer Peer) {
 		// sleep an exponentially increasing amount
 		sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i))
 		sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second)
-		peer, err := sw.DialPeerWithAddress(addr, true)
-		if err != nil {
-			sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "peer", peer)
-			continue
-		} else {
-			sw.Logger.Info("Reconnected to peer", "peer", peer)
-			return
+		err := sw.DialPeerWithAddress(netAddr, true)
+		if err == nil {
+			return // success
 		}
+		sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "peer", peer)
 	}
 	sw.Logger.Error("Failed to reconnect to peer. Giving up", "peer", peer, "elapsed", time.Since(start))
 }
 
-// StopPeerGracefully disconnects from a peer gracefully.
-// TODO: handle graceful disconnects.
-func (sw *Switch) StopPeerGracefully(peer Peer) {
-	sw.Logger.Info("Stopping peer gracefully")
-	sw.stopAndRemovePeer(peer, nil)
+// SetAddrBook sets the address book on the Switch.
+func (sw *Switch) SetAddrBook(addrBook AddrBook) {
+	sw.addrBook = addrBook
 }
 
-func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
-	sw.peers.Remove(peer)
-	peer.Stop()
-	for _, reactor := range sw.reactors {
-		reactor.RemovePeer(peer, reason)
+// MarkPeerAsGood marks the given peer as good when it did something useful
+// like contributed to consensus.
+func (sw *Switch) MarkPeerAsGood(peer Peer) {
+	if sw.addrBook != nil {
+		sw.addrBook.MarkGood(peer.NodeInfo().NetAddress())
+	}
+}
+
+//---------------------------------------------------------------------
+// Dialing
+
+// IsDialing returns true if the switch is currently dialing the given ID.
+func (sw *Switch) IsDialing(id ID) bool {
+	return sw.dialing.Has(string(id))
+}
+
+// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
+func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error {
+	netAddrs, errs := NewNetAddressStrings(peers)
+	// only log errors, dial correct addresses
+	for _, err := range errs {
+		sw.Logger.Error("Error in peer's address", "err", err)
+	}
+
+	ourAddr := sw.nodeInfo.NetAddress()
+
+	// TODO: move this out of here ?
+	if addrBook != nil {
+		// add peers to `addrBook`
+		for _, netAddr := range netAddrs {
+			// do not add our address or ID
+			if !netAddr.Same(ourAddr) {
+				addrBook.AddAddress(netAddr, ourAddr)
+			}
+		}
+		// Persist some peers to disk right away.
+		// NOTE: integration tests depend on this
+		addrBook.Save()
 	}
+
+	// permute the list, dial them in random order.
+	perm := sw.rng.Perm(len(netAddrs))
+	for i := 0; i < len(perm); i++ {
+		go func(i int) {
+			j := perm[i]
+
+			// do not dial ourselves
+			if netAddrs[j].Same(ourAddr) {
+				return
+			}
+
+			sw.randomSleep(0)
+			err := sw.DialPeerWithAddress(netAddrs[j], persistent)
+			if err != nil {
+				sw.Logger.Error("Error dialing peer", "err", err)
+			}
+		}(i)
+	}
+	return nil
 }
 
+// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects and authenticates successfully.
+// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails.
+func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error {
+	sw.dialing.Set(string(addr.ID), addr)
+	defer sw.dialing.Delete(string(addr.ID))
+	return sw.addOutboundPeerWithConfig(addr, sw.peerConfig, persistent)
+}
+
+// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]
+func (sw *Switch) randomSleep(interval time.Duration) {
+	r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond
+	time.Sleep(r + interval)
+}
+
+//------------------------------------------------------------------------------------
+// Connection filtering
+
+// FilterConnByAddr returns an error if connecting to the given address is forbidden.
+func (sw *Switch) FilterConnByAddr(addr net.Addr) error {
+	if sw.filterConnByAddr != nil {
+		return sw.filterConnByAddr(addr)
+	}
+	return nil
+}
+
+// FilterConnByID returns an error if connecting to the given peer ID is forbidden.
+func (sw *Switch) FilterConnByID(id ID) error {
+	if sw.filterConnByID != nil {
+		return sw.filterConnByID(id)
+	}
+	return nil
+
+}
+
+// SetAddrFilter sets the function for filtering connections by address.
+func (sw *Switch) SetAddrFilter(f func(net.Addr) error) {
+	sw.filterConnByAddr = f
+}
+
+// SetIDFilter sets the function for filtering connections by peer ID.
+func (sw *Switch) SetIDFilter(f func(ID) error) {
+	sw.filterConnByID = f
+}
+
+//------------------------------------------------------------------------------------
+
 func (sw *Switch) listenerRoutine(l Listener) {
 	for {
 		inConn, ok := <-l.Connections()
@@ -519,130 +453,149 @@ func (sw *Switch) listenerRoutine(l Listener) {
 		}
 
 		// New inbound connection!
-		err := sw.addPeerWithConnectionAndConfig(inConn, sw.peerConfig)
+		err := sw.addInboundPeerWithConfig(inConn, sw.peerConfig)
 		if err != nil {
 			sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err)
 			continue
 		}
-
-		// NOTE: We don't yet have the listening port of the
-		// remote (if they have a listener at all).
-		// The peerHandshake will handle that.
 	}
 
 	// cleanup
 }
 
-//------------------------------------------------------------------
-// Connects switches via arbitrary net.Conn. Used for testing.
-
-// MakeConnectedSwitches returns n switches, connected according to the connect func.
-// If connect==Connect2Switches, the switches will be fully connected.
-// initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
-// NOTE: panics if any switch fails to start.
-func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
-	switches := make([]*Switch, n)
-	for i := 0; i < n; i++ {
-		switches[i] = makeSwitch(cfg, i, "testing", "123.123.123", initSwitch)
-	}
-
-	if err := StartSwitches(switches); err != nil {
-		panic(err)
+func (sw *Switch) addInboundPeerWithConfig(conn net.Conn, config *PeerConfig) error {
+	peerConn, err := newInboundPeerConn(conn, config, sw.nodeKey.PrivKey)
+	if err != nil {
+		conn.Close() // peer is nil
+		return err
 	}
-
-	for i := 0; i < n; i++ {
-		for j := i + 1; j < n; j++ {
-			connect(switches, i, j)
-		}
+	if err = sw.addPeer(peerConn); err != nil {
+		peerConn.CloseConn()
+		return err
 	}
 
-	return switches
+	return nil
 }
 
-// Connect2Switches will connect switches i and j via net.Pipe().
-// Blocks until a connection is established.
-// NOTE: caller ensures i and j are within bounds.
-func Connect2Switches(switches []*Switch, i, j int) {
-	switchI := switches[i]
-	switchJ := switches[j]
-	c1, c2 := netPipe()
-	doneCh := make(chan struct{})
-	go func() {
-		err := switchI.addPeerWithConnection(c1)
-		if err != nil {
-			panic(err)
-		}
-		doneCh <- struct{}{}
-	}()
-	go func() {
-		err := switchJ.addPeerWithConnection(c2)
-		if err != nil {
-			panic(err)
-		}
-		doneCh <- struct{}{}
-	}()
-	<-doneCh
-	<-doneCh
-}
+// dial the peer; make secret connection; authenticate against the dialed ID;
+// add the peer.
+func (sw *Switch) addOutboundPeerWithConfig(addr *NetAddress, config *PeerConfig, persistent bool) error {
+	sw.Logger.Info("Dialing peer", "address", addr)
+	peerConn, err := newOutboundPeerConn(addr, config, persistent, sw.nodeKey.PrivKey)
+	if err != nil {
+		sw.Logger.Error("Failed to dial peer", "address", addr, "err", err)
+		return err
+	}
 
-// StartSwitches calls sw.Start() for each given switch.
-// It returns the first encountered error.
-func StartSwitches(switches []*Switch) error {
-	for _, s := range switches {
-		err := s.Start() // start switch and reactors
-		if err != nil {
-			return err
-		}
+	if err := sw.addPeer(peerConn); err != nil {
+		sw.Logger.Error("Failed to add peer", "address", addr, "err", err)
+		peerConn.CloseConn()
+		return err
 	}
 	return nil
 }
 
-func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
-	privKey := crypto.GenPrivKeyEd25519()
-	// new switch, add reactors
-	// TODO: let the config be passed in?
-	s := initSwitch(i, NewSwitch(cfg))
-	s.SetNodeInfo(&NodeInfo{
-		PubKey:     privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
-		Moniker:    cmn.Fmt("switch%d", i),
-		Network:    network,
-		Version:    version,
-		RemoteAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
-		ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
-	})
-	s.SetNodePrivKey(privKey)
-	return s
-}
-
-func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
-	peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
+// addPeer performs the Tendermint P2P handshake with a peer
+// that already has a SecretConnection. If all goes well,
+// it starts the peer and adds it to the switch.
+// NOTE: This performs a blocking handshake before the peer is added.
+// NOTE: If error is returned, caller is responsible for calling peer.CloseConn()
+func (sw *Switch) addPeer(pc peerConn) error {
+
+	addr := pc.conn.RemoteAddr()
+	if err := sw.FilterConnByAddr(addr); err != nil {
+		return err
+	}
+
+	// NOTE: if AuthEnc==false, we don't have a peer ID until after the handshake.
+	// If AuthEnc==true we already know the ID and could run these checks before the handshake,
+	// but it is simpler to handle both cases the same way after the handshake.
+
+	// Exchange NodeInfo on the conn
+	peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.peerConfig.HandshakeTimeout*time.Second))
 	if err != nil {
-		if err := conn.Close(); err != nil {
-			sw.Logger.Error("Error closing connection", "err", err)
+		return err
+	}
+
+	peerID := peerNodeInfo.ID()
+
+	// ensure connection key matches self reported key
+	if pc.config.AuthEnc {
+		connID := pc.ID()
+
+		if peerID != connID {
+			return fmt.Errorf("nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
+				peerID, connID)
 		}
+	}
+
+	// Validate the peer's NodeInfo
+	if err := peerNodeInfo.Validate(); err != nil {
 		return err
 	}
-	peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
-	if err = sw.addPeer(peer); err != nil {
-		peer.CloseConn()
+
+	// Avoid self
+	if sw.nodeKey.ID() == peerID {
+		addr := peerNodeInfo.NetAddress()
+
+	// remove the given address from the address book if we added it earlier
+		sw.addrBook.RemoveAddress(addr)
+
+	// add the given address to the address book to avoid dialing ourselves
+	// again; this is our public address
+		sw.addrBook.AddOurAddress(addr)
+
+		return ErrSwitchConnectToSelf
+	}
+
+	// Avoid duplicate
+	if sw.peers.Has(peerID) {
+		return ErrSwitchDuplicatePeer
+	}
+
+	// Filter peer against ID white list
+	if err := sw.FilterConnByID(peerID); err != nil {
 		return err
 	}
 
+	// Check version, chain id
+	if err := sw.nodeInfo.CompatibleWith(peerNodeInfo); err != nil {
+		return err
+	}
+
+	peer := newPeer(pc, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
+	peer.SetLogger(sw.Logger.With("peer", addr))
+
+	peer.Logger.Info("Successful handshake with peer", "peerNodeInfo", peerNodeInfo)
+
+	// All good. Start peer
+	if sw.IsRunning() {
+		if err = sw.startInitPeer(peer); err != nil {
+			return err
+		}
+	}
+
+	// Add the peer to .peers.
+	// We start it first so that a peer in the list is safe to Stop.
+	// It should not err since we already checked peers.Has().
+	if err := sw.peers.Add(peer); err != nil {
+		return err
+	}
+
+	sw.Logger.Info("Added peer", "peer", peer)
 	return nil
 }
 
-func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConfig) error {
-	peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config)
+func (sw *Switch) startInitPeer(peer *peer) error {
+	err := peer.Start() // spawn send/recv routines
 	if err != nil {
-		if err := conn.Close(); err != nil {
-			sw.Logger.Error("Error closing connection", "err", err)
-		}
+		// Should never happen
+		sw.Logger.Error("Error starting peer", "peer", peer, "err", err)
 		return err
 	}
-	peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
-	if err = sw.addPeer(peer); err != nil {
-		peer.CloseConn()
-		return err
+
+	for _, reactor := range sw.reactors {
+		reactor.AddPeer(peer)
 	}
 
 	return nil
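The rewritten `Broadcast` above fans out one goroutine per peer, pushes each result onto a buffered channel, and closes the channel after a `sync.WaitGroup` drains, so callers can range over the results without knowing the peer count. A hedged, self-contained sketch of that fan-out pattern (the `send` stub and names are illustrative, not the vendored code):

```go
package main

import (
	"fmt"
	"sync"
)

// broadcast sends to every peer concurrently and closes the result channel
// once all sends have completed, so callers can simply range over it.
func broadcast(peers []string, send func(peer string) bool) chan bool {
	results := make(chan bool, len(peers))
	var wg sync.WaitGroup
	for _, p := range peers {
		wg.Add(1)
		go func(peer string) {
			defer wg.Done()
			results <- send(peer)
		}(p)
	}
	go func() {
		wg.Wait()
		close(results) // signals that every attempted send has reported
	}()
	return results
}

func main() {
	peers := []string{"a", "b", "c"}
	ok := 0
	for success := range broadcast(peers, func(p string) bool { return p != "b" }) {
		if success {
			ok++
		}
	}
	fmt.Printf("%d/%d sends succeeded\n", ok, len(peers))
}
```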
diff --git a/vendor/github.com/tendermint/tendermint/p2p/test_util.go b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
new file mode 100644
index 00000000..535b0bd0
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/p2p/test_util.go
@@ -0,0 +1,152 @@
+package p2p
+
+import (
+	"math/rand"
+	"net"
+
+	crypto "github.com/tendermint/go-crypto"
+	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tmlibs/log"
+
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/p2p/conn"
+)
+
+func AddPeerToSwitch(sw *Switch, peer Peer) {
+	sw.peers.Add(peer)
+}
+
+func CreateRandomPeer(outbound bool) *peer {
+	addr, netAddr := CreateRoutableAddr()
+	p := &peer{
+		peerConn: peerConn{
+			outbound: outbound,
+		},
+		nodeInfo: NodeInfo{
+			ListenAddr: netAddr.DialString(),
+			PubKey:     crypto.GenPrivKeyEd25519().Wrap().PubKey(),
+		},
+		mconn: &conn.MConnection{},
+	}
+	p.SetLogger(log.TestingLogger().With("peer", addr))
+	return p
+}
+
+func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
+	for {
+		var err error
+		addr = cmn.Fmt("%X@%v.%v.%v.%v:46656", cmn.RandBytes(20), rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
+		netAddr, err = NewNetAddressString(addr)
+		if err != nil {
+			panic(err)
+		}
+		if netAddr.Routable() {
+			break
+		}
+	}
+	return
+}
+
+//------------------------------------------------------------------
+// Connects switches via arbitrary net.Conn. Used for testing.
+
+// MakeConnectedSwitches returns n switches, connected according to the connect func.
+// If connect==Connect2Switches, the switches will be fully connected.
+// initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
+// NOTE: panics if any switch fails to start.
+func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
+	switches := make([]*Switch, n)
+	for i := 0; i < n; i++ {
+		switches[i] = MakeSwitch(cfg, i, "testing", "123.123.123", initSwitch)
+	}
+
+	if err := StartSwitches(switches); err != nil {
+		panic(err)
+	}
+
+	for i := 0; i < n; i++ {
+		for j := i + 1; j < n; j++ {
+			connect(switches, i, j)
+		}
+	}
+
+	return switches
+}
+
+// Connect2Switches will connect switches i and j via net.Pipe().
+// Blocks until a connection is established.
+// NOTE: caller ensures i and j are within bounds.
+func Connect2Switches(switches []*Switch, i, j int) {
+	switchI := switches[i]
+	switchJ := switches[j]
+	c1, c2 := conn.NetPipe()
+	doneCh := make(chan struct{})
+	go func() {
+		err := switchI.addPeerWithConnection(c1)
+		if err != nil {
+			panic(err)
+		}
+		doneCh <- struct{}{}
+	}()
+	go func() {
+		err := switchJ.addPeerWithConnection(c2)
+		if err != nil {
+			panic(err)
+		}
+		doneCh <- struct{}{}
+	}()
+	<-doneCh
+	<-doneCh
+}
+
+func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
+	pc, err := newInboundPeerConn(conn, sw.peerConfig, sw.nodeKey.PrivKey)
+	if err != nil {
+		if err := conn.Close(); err != nil {
+			sw.Logger.Error("Error closing connection", "err", err)
+		}
+		return err
+	}
+	if err = sw.addPeer(pc); err != nil {
+		pc.CloseConn()
+		return err
+	}
+
+	return nil
+}
+
+// StartSwitches calls sw.Start() for each given switch.
+// It returns the first encountered error.
+func StartSwitches(switches []*Switch) error {
+	for _, s := range switches {
+		err := s.Start() // start switch and reactors
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
+	// new switch, add reactors
+	// TODO: let the config be passed in?
+	nodeKey := &NodeKey{
+		PrivKey: crypto.GenPrivKeyEd25519().Wrap(),
+	}
+	sw := NewSwitch(cfg)
+	sw.SetLogger(log.TestingLogger())
+	sw = initSwitch(i, sw)
+	ni := NodeInfo{
+		PubKey:     nodeKey.PubKey(),
+		Moniker:    cmn.Fmt("switch%d", i),
+		Network:    network,
+		Version:    version,
+		ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
+	}
+	for ch := range sw.reactorsByCh {
+		ni.Channels = append(ni.Channels, ch)
+	}
+	sw.SetNodeInfo(ni)
+	sw.SetNodeKey(nodeKey)
+	return sw
+}
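A hedged usage sketch of the helpers this new file exposes. `cfg.DefaultP2PConfig()` is assumed to exist (it is not shown in this patch), and the no-op init function is illustrative only:

```go
package p2p_test

import (
	"testing"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p"
)

// TestTwoConnectedSwitches wires two in-process switches over net.Pipe using
// MakeConnectedSwitches and Connect2Switches from test_util.go.
func TestTwoConnectedSwitches(t *testing.T) {
	switches := p2p.MakeConnectedSwitches(cfg.DefaultP2PConfig(), 2,
		func(i int, sw *p2p.Switch) *p2p.Switch { return sw }, // no extra reactors
		p2p.Connect2Switches)
	defer func() {
		for _, sw := range switches {
			sw.Stop()
		}
	}()
	if out, in, _ := switches[0].NumPeers(); out+in == 0 {
		t.Fatal("expected switch 0 to have at least one peer")
	}
}
```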
diff --git a/vendor/github.com/tendermint/tendermint/p2p/trust/metric.go b/vendor/github.com/tendermint/tendermint/p2p/trust/metric.go
index bf6ddb5e..5770b420 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/trust/metric.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/trust/metric.go
@@ -118,7 +118,7 @@ func (tm *TrustMetric) OnStart() error {
 }
 
 // OnStop implements Service
-// Nothing to do since the goroutine shuts down by itself via BaseService.Quit
+// Nothing to do since the goroutine shuts down by itself via BaseService.Quit()
 func (tm *TrustMetric) OnStop() {}
 
 // Returns a snapshot of the trust metric history data
@@ -298,7 +298,7 @@ loop:
 		select {
 		case <-tick:
 			tm.NextTimeInterval()
-		case <-tm.Quit:
+		case <-tm.Quit():
 			// Stop all further tracking for this metric
 			break loop
 		}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/trust/store.go b/vendor/github.com/tendermint/tendermint/p2p/trust/store.go
index 0e61b065..bbb4592a 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/trust/store.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/trust/store.go
@@ -200,7 +200,7 @@ loop:
 		select {
 		case <-t.C:
 			tms.SaveToDB()
-		case <-tms.Quit:
+		case <-tms.Quit():
 			break loop
 		}
 	}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/trust/ticker.go b/vendor/github.com/tendermint/tendermint/p2p/trust/ticker.go
index bce9fcc2..3f0f3091 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/trust/ticker.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/trust/ticker.go
@@ -24,7 +24,7 @@ type TestTicker struct {
 
 // NewTestTicker returns our ticker used within test routines
 func NewTestTicker() *TestTicker {
-	c := make(chan time.Time, 1)
+	c := make(chan time.Time)
 	return &TestTicker{
 		C: c,
 	}
diff --git a/vendor/github.com/tendermint/tendermint/p2p/types.go b/vendor/github.com/tendermint/tendermint/p2p/types.go
index 4e0994b7..b11765bb 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/types.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/types.go
@@ -1,81 +1,8 @@
 package p2p
 
 import (
-	"fmt"
-	"net"
-	"strconv"
-	"strings"
-
-	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/p2p/conn"
 )
 
-const maxNodeInfoSize = 10240 // 10Kb
-
-type NodeInfo struct {
-	PubKey     crypto.PubKeyEd25519 `json:"pub_key"`
-	Moniker    string               `json:"moniker"`
-	Network    string               `json:"network"`
-	RemoteAddr string               `json:"remote_addr"`
-	ListenAddr string               `json:"listen_addr"`
-	Version    string               `json:"version"` // major.minor.revision
-	Other      []string             `json:"other"`   // other application specific data
-}
-
-// CONTRACT: two nodes are compatible if the major/minor versions match and network match
-func (info *NodeInfo) CompatibleWith(other *NodeInfo) error {
-	iMajor, iMinor, _, iErr := splitVersion(info.Version)
-	oMajor, oMinor, _, oErr := splitVersion(other.Version)
-
-	// if our own version number is not formatted right, we messed up
-	if iErr != nil {
-		return iErr
-	}
-
-	// version number must be formatted correctly ("x.x.x")
-	if oErr != nil {
-		return oErr
-	}
-
-	// major version must match
-	if iMajor != oMajor {
-		return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor)
-	}
-
-	// minor version must match
-	if iMinor != oMinor {
-		return fmt.Errorf("Peer is on a different minor version. Got %v, expected %v", oMinor, iMinor)
-	}
-
-	// nodes must be on the same network
-	if info.Network != other.Network {
-		return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network)
-	}
-
-	return nil
-}
-
-func (info *NodeInfo) ListenHost() string {
-	host, _, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas
-	return host
-}
-
-func (info *NodeInfo) ListenPort() int {
-	_, port, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas
-	port_i, err := strconv.Atoi(port)
-	if err != nil {
-		return -1
-	}
-	return port_i
-}
-
-func (info NodeInfo) String() string {
-	return fmt.Sprintf("NodeInfo{pk: %v, moniker: %v, network: %v [remote %v, listen %v], version: %v (%v)}", info.PubKey, info.Moniker, info.Network, info.RemoteAddr, info.ListenAddr, info.Version, info.Other)
-}
-
-func splitVersion(version string) (string, string, string, error) {
-	spl := strings.Split(version, ".")
-	if len(spl) != 3 {
-		return "", "", "", fmt.Errorf("Invalid version format %v", version)
-	}
-	return spl[0], spl[1], spl[2], nil
-}
+type ChannelDescriptor = conn.ChannelDescriptor
+type ConnectionStatus = conn.ConnectionStatus
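The two lines above are type aliases (`type T = U`, Go 1.9+), not newly defined types, so `p2p.ChannelDescriptor` and `conn.ChannelDescriptor` remain the identical type and existing callers compile without conversions. A small standalone illustration of the difference (all names made up):

```go
package main

import "fmt"

type inner struct{ ID byte }

// Alias is the same type as inner (type alias): assignable both ways.
type Alias = inner

// Defined is a distinct type with inner's structure: conversion is required.
type Defined inner

func main() {
	v := inner{ID: 0x01}
	var a Alias = v // no conversion needed
	d := Defined(v) // explicit conversion needed
	fmt.Println(a.ID, d.ID)
}
```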
diff --git a/vendor/github.com/tendermint/tendermint/p2p/upnp/upnp.go b/vendor/github.com/tendermint/tendermint/p2p/upnp/upnp.go
index cac67a73..e98538aa 100644
--- a/vendor/github.com/tendermint/tendermint/p2p/upnp/upnp.go
+++ b/vendor/github.com/tendermint/tendermint/p2p/upnp/upnp.go
@@ -103,7 +103,7 @@ func Discover() (nat NAT, err error) {
 			return
 		}
 	}
-	err = errors.New("UPnP port discovery failed.")
+	err = errors.New("UPnP port discovery failed")
 	return
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/p2p/util.go b/vendor/github.com/tendermint/tendermint/p2p/util.go
deleted file mode 100644
index a4c3ad58..00000000
--- a/vendor/github.com/tendermint/tendermint/p2p/util.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package p2p
-
-import (
-	"crypto/sha256"
-)
-
-// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
-func doubleSha256(b []byte) []byte {
-	hasher := sha256.New()
-	hasher.Write(b) // nolint: errcheck, gas
-	sum := hasher.Sum(nil)
-	hasher.Reset()
-	hasher.Write(sum) // nolint: errcheck, gas
-	return hasher.Sum(nil)
-}
diff --git a/vendor/github.com/tendermint/tendermint/proxy/client.go b/vendor/github.com/tendermint/tendermint/proxy/client.go
index a70da1ca..6c987368 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/client.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/client.go
@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 
 	abcicli "github.com/tendermint/abci/client"
-	"github.com/tendermint/abci/example/dummy"
+	"github.com/tendermint/abci/example/kvstore"
 	"github.com/tendermint/abci/types"
 )
 
@@ -64,10 +64,14 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
 
 func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
 	switch addr {
+	case "kvstore":
+		fallthrough
 	case "dummy":
-		return NewLocalClientCreator(dummy.NewDummyApplication())
+		return NewLocalClientCreator(kvstore.NewKVStoreApplication())
+	case "persistent_kvstore":
+		fallthrough
 	case "persistent_dummy":
-		return NewLocalClientCreator(dummy.NewPersistentDummyApplication(dbDir))
+		return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
 	case "nilapp":
 		return NewLocalClientCreator(types.NewBaseApplication())
 	default:
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
index a49b52b6..874becae 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/abci.go
@@ -2,9 +2,9 @@ package core
 
 import (
 	abci "github.com/tendermint/abci/types"
-	data "github.com/tendermint/go-wire/data"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/version"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // Query the application for some information.
@@ -47,7 +47,7 @@ import (
 // | data      | []byte | false   | true     | Data                                           |
 // | height    | int64 | 0       | false    | Height (0 means latest)                        |
 // | trusted   | bool   | false   | false    | Does not include a proof of the data inclusion |
-func ABCIQuery(path string, data data.Bytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) {
+func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) {
 	resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{
 		Path:   path,
 		Data:   data,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
index 65c9fc36..25b67925 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/consensus.go
@@ -3,6 +3,7 @@ package core
 import (
 	cm "github.com/tendermint/tendermint/consensus"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	p2p "github.com/tendermint/tendermint/p2p"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -82,11 +83,11 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 // }
 // ```
 func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
-	peerRoundStates := make(map[string]*cstypes.PeerRoundState)
+	peerRoundStates := make(map[p2p.ID]*cstypes.PeerRoundState)
 	for _, peer := range p2pSwitch.Peers().List() {
 		peerState := peer.Get(types.PeerStateKey).(*cm.PeerState)
 		peerRoundState := peerState.GetRoundState()
-		peerRoundStates[peer.Key()] = peerRoundState
+		peerRoundStates[peer.ID()] = peerRoundState
 	}
 	return &ctypes.ResultDumpConsensusState{consensusState.GetRoundState(), peerRoundStates}, nil
 }
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/doc.go b/vendor/github.com/tendermint/tendermint/rpc/core/doc.go
index a72cec02..b479482c 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/doc.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/doc.go
@@ -11,7 +11,7 @@ Tendermint RPC is built using [our own RPC library](https://github.com/tendermin
 
 ## Configuration
 
-Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting.  Default: `tcp://0.0.0.0:46657`.
+Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting.  Default: `tcp://0.0.0.0:46657`.
 
 ## Arguments
 
@@ -81,6 +81,7 @@ Available endpoints:
 /net_info
 /num_unconfirmed_txs
 /status
+/health
 /unconfirmed_txs
 /unsafe_flush_mempool
 /unsafe_stop_cpu_profiler
@@ -95,6 +96,7 @@ Endpoints that require arguments:
 /broadcast_tx_sync?tx=_
 /commit?height=_
 /dial_seeds?seeds=_
+/dial_persistent_peers?persistent_peers=_
 /subscribe?event=_
 /tx?hash=_&prove=_
 /unsafe_start_cpu_profiler?filename=_
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/events.go b/vendor/github.com/tendermint/tendermint/rpc/core/events.go
index 538134b0..9353ace6 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/events.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/events.go
@@ -13,11 +13,57 @@ import (
 
 // Subscribe for events via WebSocket.
 //
+// To tell Tendermint which events you want, you need to provide a query. A
+// query is a string of the form "condition AND condition ..." (no OR at the
+// moment). A condition has the form "key operation operand". key is a string
+// with a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed).
+// operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a
+// string (escaped with single quotes), a number, a date, or a time.
+//
+// Examples:
+//		tm.event = 'NewBlock'								# new blocks
+//		tm.event = 'CompleteProposal'				# node got a complete proposal
+//		tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction
+//		tm.event = 'Tx' AND tx.height = 5		# all txs of the fifth block
+//		tx.height = 5												# all txs of the fifth block
+//
+// Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height.
+// Note that for transactions, you can define additional keys by providing tags
+// with the DeliverTx response.
+//
+//		DeliverTx{
+//			Tags: []*KVPair{
+//				"agent.name": "K",
+//			}
+//	  }
+//
+//		tm.event = 'Tx' AND agent.name = 'K'
+//		tm.event = 'Tx' AND account.created_at >= TIME 2013-05-03T14:45:00Z
+//		tm.event = 'Tx' AND contract.sign_date = DATE 2017-01-01
+//		tm.event = 'Tx' AND account.owner CONTAINS 'Igor'
+//
+// See list of all possible events here
+// https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants
+//
+// For complete query syntax, check out
+// https://godoc.org/github.com/tendermint/tmlibs/pubsub/query.
+//
 // ```go
+// import "github.com/tendermint/tmlibs/pubsub/query"
 // import "github.com/tendermint/tendermint/types"
 //
 // client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
-// result, err := client.AddListenerForEvent(types.EventStringNewBlock())
+// ctx, cancel := context.WithTimeout(context.Background(), timeout)
+// defer cancel()
+// query := query.MustParse("tm.event = 'Tx' AND tx.height = 3")
+// txs := make(chan interface{})
+// err := client.Subscribe(ctx, "test-client", query, txs)
+//
+// go func() {
+//   for e := range txs {
+//     fmt.Println("got ", e.(types.TMEventData).Unwrap().(types.EventDataTx))
+//	 }
+// }()
 // ```
 //
 // > The above command returns JSON structured like this:
@@ -35,7 +81,7 @@ import (
 //
 // | Parameter | Type   | Default | Required | Description |
 // |-----------+--------+---------+----------+-------------|
-// | event     | string | ""      | true     | Event name  |
+// | query     | string | ""      | true     | Query       |
 //
 // <aside class="notice">WebSocket only</aside>
 func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscribe, error) {
@@ -68,10 +114,8 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri
 // Unsubscribe from events via WebSocket.
 //
 // ```go
-// import 'github.com/tendermint/tendermint/types'
-//
 // client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
-// result, err := client.RemoveListenerForEvent(types.EventStringNewBlock())
+// err := client.Unsubscribe("test-client", query)
 // ```
 //
 // > The above command returns JSON structured like this:
@@ -89,7 +133,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri
 //
 // | Parameter | Type   | Default | Required | Description |
 // |-----------+--------+---------+----------+-------------|
-// | event     | string | ""      | true     | Event name  |
+// | query     | string | ""      | true     | Query       |
 //
 // <aside class="notice">WebSocket only</aside>
 func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsubscribe, error) {
@@ -106,6 +150,25 @@ func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsub
 	return &ctypes.ResultUnsubscribe{}, nil
 }
 
+// Unsubscribe from all events via WebSocket.
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
+// err := client.UnsubscribeAll("test-client")
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+// 	"error": "",
+// 	"result": {},
+// 	"id": "",
+// 	"jsonrpc": "2.0"
+// }
+// ```
+//
+// <aside class="notice">WebSocket only</aside>
 func UnsubscribeAll(wsCtx rpctypes.WSRPCContext) (*ctypes.ResultUnsubscribe, error) {
 	addr := wsCtx.GetRemoteAddr()
 	logger.Info("Unsubscribe from all", "remote", addr)
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/health.go b/vendor/github.com/tendermint/tendermint/rpc/core/health.go
new file mode 100644
index 00000000..ab2ceb16
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/health.go
@@ -0,0 +1,31 @@
+package core
+
+import (
+	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+// Get node health. Returns an empty result (200 OK) on success and no
+// response in case of an error.
+//
+// ```shell
+// curl 'localhost:46657/health'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
+// result, err := client.Health()
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// {
+// 	"error": "",
+// 	"result": {},
+// 	"id": "",
+// 	"jsonrpc": "2.0"
+// }
+// ```
+func Health() (*ctypes.ResultHealth, error) {
+	return &ctypes.ResultHealth{}, nil
+}
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
index 3f663c37..1dbdd801 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
@@ -8,9 +8,9 @@ import (
 	"github.com/pkg/errors"
 
 	abci "github.com/tendermint/abci/types"
-	data "github.com/tendermint/go-wire/data"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 //-----------------------------------------------------------------------------
@@ -192,7 +192,7 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
 		deliverTxRes := deliverTxResMsg.(types.TMEventData).Unwrap().(types.EventDataTx)
 		// The tx was included in a block.
 		deliverTxR := deliverTxRes.Result
-		logger.Info("DeliverTx passed ", "tx", data.Bytes(tx), "response", deliverTxR)
+		logger.Info("DeliverTx passed ", "tx", cmn.HexBytes(tx), "response", deliverTxR)
 		return &ctypes.ResultBroadcastTxCommit{
 			CheckTx:   *checkTxR,
 			DeliverTx: deliverTxR,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/net.go b/vendor/github.com/tendermint/tendermint/rpc/core/net.go
index b3f1c7ce..1918abf1 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/net.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/net.go
@@ -1,7 +1,7 @@
 package core
 
 import (
-	"fmt"
+	"github.com/pkg/errors"
 
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )
@@ -42,7 +42,8 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
 	peers := []ctypes.Peer{}
 	for _, peer := range p2pSwitch.Peers().List() {
 		peers = append(peers, ctypes.Peer{
-			NodeInfo:         *peer.NodeInfo(),
+			NodeInfo:         peer.NodeInfo(),
+			ID:               peer.ID(),
 			IsOutbound:       peer.IsOutbound(),
 			ConnectionStatus: peer.Status(),
 		})
@@ -55,19 +56,31 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
 }
 
 func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
-
 	if len(seeds) == 0 {
-		return &ctypes.ResultDialSeeds{}, fmt.Errorf("No seeds provided")
+		return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided")
 	}
-	// starts go routines to dial each seed after random delays
+	// starts goroutines to dial each peer after random delays
 	logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
-	err := p2pSwitch.DialSeeds(addrBook, seeds)
+	err := p2pSwitch.DialPeersAsync(addrBook, seeds, false)
 	if err != nil {
 		return &ctypes.ResultDialSeeds{}, err
 	}
 	return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
 }
 
+func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
+	if len(peers) == 0 {
+		return &ctypes.ResultDialPeers{}, errors.New("No peers provided")
+	}
+	// starts go routines to dial each peer after random delays
+	logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent)
+	err := p2pSwitch.DialPeersAsync(addrBook, peers, persistent)
+	if err != nil {
+		return &ctypes.ResultDialPeers{}, err
+	}
+	return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
+}
+
 // Get genesis file.
 //
 // ```shell
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
index 927d7cca..1eb00cee 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/pipe.go
@@ -3,10 +3,10 @@ package core
 import (
 	"time"
 
-	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/go-crypto"
 	"github.com/tendermint/tendermint/consensus"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
-	p2p "github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/state/txindex"
@@ -30,9 +30,9 @@ type P2P interface {
 	Listeners() []p2p.Listener
 	Peers() p2p.IPeerSet
 	NumPeers() (outbound, inbound, dialig int)
-	NodeInfo() *p2p.NodeInfo
+	NodeInfo() p2p.NodeInfo
 	IsListening() bool
-	DialSeeds(*p2p.AddrBook, []string) error
+	DialPeersAsync(p2p.AddrBook, []string, bool) error
 }
 
 //----------------------------------------------
@@ -54,7 +54,7 @@ var (
 	// objects
 	pubKey           crypto.PubKey
 	genDoc           *types.GenesisDoc // cache the genesis structure
-	addrBook         *p2p.AddrBook
+	addrBook         p2p.AddrBook
 	txIndexer        txindex.TxIndexer
 	consensusReactor *consensus.ConsensusReactor
 	eventBus         *types.EventBus // thread safe
@@ -94,7 +94,7 @@ func SetGenesisDoc(doc *types.GenesisDoc) {
 	genDoc = doc
 }
 
-func SetAddrBook(book *p2p.AddrBook) {
+func SetAddrBook(book p2p.AddrBook) {
 	addrBook = book
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/routes.go b/vendor/github.com/tendermint/tendermint/rpc/core/routes.go
index fb5a1fd3..0e10cefe 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/routes.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/routes.go
@@ -12,6 +12,7 @@ var Routes = map[string]*rpc.RPCFunc{
 	"unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""),
 
 	// info API
+	"health":               rpc.NewRPCFunc(Health, ""),
 	"status":               rpc.NewRPCFunc(Status, ""),
 	"net_info":             rpc.NewRPCFunc(NetInfo, ""),
 	"blockchain":           rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),
@@ -39,6 +40,7 @@ var Routes = map[string]*rpc.RPCFunc{
 func AddUnsafeRoutes() {
 	// control API
 	Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds")
+	Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent")
 	Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "")
 
 	// profiler API
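The new unsafe `dial_peers` route registered above takes `peers` and `persistent` arguments and answers with a `ResultDialPeers` log message. Below is a minimal sketch of invoking it as a JSON-RPC call over HTTP; the node address and peer string are placeholders, and the route is only available when `AddUnsafeRoutes` has been called on the node.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// JSON-RPC request for the unsafe "dial_peers" route.
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      "dial-peers-example",
		"method":  "dial_peers",
		"params": map[string]interface{}{
			"peers":      []string{"1.2.3.4:46656"}, // placeholder peer address
			"persistent": true,
		},
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:46657", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Expected result payload: {"log": "Dialing peers in progress. See /net_info for details"}
	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result)
}
```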
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/status.go b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
index 653c37f5..b4543a61 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/status.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/status.go
@@ -1,11 +1,13 @@
 package core
 
 import (
+	"bytes"
 	"time"
 
-	data "github.com/tendermint/go-wire/data"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // Get Tendermint status including node info, pubkey, latest block
@@ -48,7 +50,10 @@ import (
 // 			"remote_addr": "",
 // 			"network": "test-chain-qhVCa2",
 // 			"moniker": "vagrant-ubuntu-trusty-64",
-// 			"pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6"
+// 			"pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6",
+//			"validator_status": {
+//				"voting_power": 10
+//			}
 // 		}
 // 	},
 // 	"id": "",
@@ -59,8 +64,8 @@ func Status() (*ctypes.ResultStatus, error) {
 	latestHeight := blockStore.Height()
 	var (
 		latestBlockMeta     *types.BlockMeta
-		latestBlockHash     data.Bytes
-		latestAppHash       data.Bytes
+		latestBlockHash     cmn.HexBytes
+		latestAppHash       cmn.HexBytes
 		latestBlockTimeNano int64
 	)
 	if latestHeight != 0 {
@@ -72,12 +77,50 @@ func Status() (*ctypes.ResultStatus, error) {
 
 	latestBlockTime := time.Unix(0, latestBlockTimeNano)
 
-	return &ctypes.ResultStatus{
+	result := &ctypes.ResultStatus{
 		NodeInfo:          p2pSwitch.NodeInfo(),
 		PubKey:            pubKey,
 		LatestBlockHash:   latestBlockHash,
 		LatestAppHash:     latestAppHash,
 		LatestBlockHeight: latestHeight,
 		LatestBlockTime:   latestBlockTime,
-		Syncing:           consensusReactor.FastSync()}, nil
+		Syncing:           consensusReactor.FastSync(),
+	}
+
+	// add ValidatorStatus if node is a validator
+	if val := validatorAtHeight(latestHeight); val != nil {
+		result.ValidatorStatus = ctypes.ValidatorStatus{
+			VotingPower: val.VotingPower,
+		}
+	}
+
+	return result, nil
+}
+
+func validatorAtHeight(h int64) *types.Validator {
+	lastBlockHeight, vals := consensusState.GetValidators()
+
+	privValAddress := pubKey.Address()
+
+	// if we're still at height h, search in the current validator set
+	if lastBlockHeight == h {
+		for _, val := range vals {
+			if bytes.Equal(val.Address, privValAddress) {
+				return val
+			}
+		}
+	}
+
+	// if we've moved to the next height, retrieve the validator set from DB
+	if lastBlockHeight > h {
+		vals, err := sm.LoadValidators(stateDB, h)
+		if err != nil {
+			// should not happen
+			return nil
+		}
+		_, val := vals.GetByAddress(privValAddress)
+		return val
+	}
+
+	return nil
 }
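`Status` now reports a `validator_status` block whenever the node's public key is found in the validator set at the latest height. A small sketch of consuming that field from the JSON result follows; the struct mirrors only the fields named in the doc comment above, and the values are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// statusResult mirrors only the /status fields this example needs.
type statusResult struct {
	LatestBlockHeight int64 `json:"latest_block_height"`
	ValidatorStatus   struct {
		VotingPower int64 `json:"voting_power"`
	} `json:"validator_status"`
}

func main() {
	// Trimmed-down /status result, shaped like the doc comment above.
	raw := []byte(`{"latest_block_height": 231, "validator_status": {"voting_power": 10}}`)

	var res statusResult
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}

	if res.ValidatorStatus.VotingPower > 0 {
		fmt.Printf("node is a validator at height %d with power %d\n",
			res.LatestBlockHeight, res.ValidatorStatus.VotingPower)
	} else {
		fmt.Println("node is not in the validator set")
	}
}
```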
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
index f592326b..7ddc7080 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/tx.go
@@ -44,7 +44,8 @@ import (
 // 			"code": 0
 // 		},
 // 		"index": 0,
-// 		"height": 52
+// 		"height": 52,
+// 		"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
 // 	},
 // 	"id": "",
 // 	"jsonrpc": "2.0"
@@ -67,11 +68,12 @@ import (
 // - `tx_result`: the `abci.Result` object
 // - `index`: `int` - index of the transaction
 // - `height`: `int` - height of the block where this transaction was in
+// - `hash`: `[]byte` - hash of the transaction
 func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 
 	// if index is disabled, return error
 	if _, ok := txIndexer.(*null.TxIndex); ok {
-		return nil, fmt.Errorf("Transaction indexing is disabled.")
+		return nil, fmt.Errorf("Transaction indexing is disabled")
 	}
 
 	r, err := txIndexer.Get(hash)
@@ -93,6 +95,7 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 	}
 
 	return &ctypes.ResultTx{
+		Hash:     hash,
 		Height:   height,
 		Index:    uint32(index),
 		TxResult: r.Result,
@@ -137,7 +140,8 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 //       "tx": "mvZHHa7HhZ4aRT0xMDA=",
 //       "tx_result": {},
 //       "index": 31,
-//       "height": 12
+//       "height": 12,
+//       "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
 //     }
 //   ],
 //   "id": "",
@@ -161,10 +165,11 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 // - `tx_result`: the `abci.Result` object
 // - `index`: `int` - index of the transaction
 // - `height`: `int` - height of the block where this transaction was in
+// - `hash`: `[]byte` - hash of the transaction
 func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
 	// if index is disabled, return error
 	if _, ok := txIndexer.(*null.TxIndex); ok {
-		return nil, fmt.Errorf("Transaction indexing is disabled.")
+		return nil, fmt.Errorf("Transaction indexing is disabled")
 	}
 
 	q, err := tmquery.New(query)
@@ -191,6 +196,7 @@ func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
 		}
 
 		apiResults[i] = &ctypes.ResultTx{
+			Hash:     r.Tx.Hash(),
 			Height:   height,
 			Index:    index,
 			TxResult: r.Result,
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
index dae7c004..8a6fff63 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/types/responses.go
@@ -6,7 +6,8 @@ import (
 
 	abci "github.com/tendermint/abci/types"
 	crypto "github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire/data"
+	cmn "github.com/tendermint/tmlibs/common"
+
 	cstypes "github.com/tendermint/tendermint/consensus/types"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/state"
@@ -53,18 +54,23 @@ func NewResultCommit(header *types.Header, commit *types.Commit,
 	}
 }
 
+type ValidatorStatus struct {
+	VotingPower int64 `json:"voting_power"`
+}
+
 type ResultStatus struct {
-	NodeInfo          *p2p.NodeInfo `json:"node_info"`
-	PubKey            crypto.PubKey `json:"pub_key"`
-	LatestBlockHash   data.Bytes    `json:"latest_block_hash"`
-	LatestAppHash     data.Bytes    `json:"latest_app_hash"`
-	LatestBlockHeight int64         `json:"latest_block_height"`
-	LatestBlockTime   time.Time     `json:"latest_block_time"`
-	Syncing           bool          `json:"syncing"`
+	NodeInfo          p2p.NodeInfo    `json:"node_info"`
+	PubKey            crypto.PubKey   `json:"pub_key"`
+	LatestBlockHash   cmn.HexBytes    `json:"latest_block_hash"`
+	LatestAppHash     cmn.HexBytes    `json:"latest_app_hash"`
+	LatestBlockHeight int64           `json:"latest_block_height"`
+	LatestBlockTime   time.Time       `json:"latest_block_time"`
+	Syncing           bool            `json:"syncing"`
+	ValidatorStatus   ValidatorStatus `json:"validator_status,omitempty"`
 }
 
 func (s *ResultStatus) TxIndexEnabled() bool {
-	if s == nil || s.NodeInfo == nil {
+	if s == nil {
 		return false
 	}
 	for _, s := range s.NodeInfo.Other {
@@ -86,8 +92,13 @@ type ResultDialSeeds struct {
 	Log string `json:"log"`
 }
 
+type ResultDialPeers struct {
+	Log string `json:"log"`
+}
+
 type Peer struct {
 	p2p.NodeInfo     `json:"node_info"`
+	p2p.ID           `json:"node_id"`
 	IsOutbound       bool                 `json:"is_outbound"`
 	ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
 }
@@ -99,25 +110,26 @@ type ResultValidators struct {
 
 type ResultDumpConsensusState struct {
 	RoundState      *cstypes.RoundState                `json:"round_state"`
-	PeerRoundStates map[string]*cstypes.PeerRoundState `json:"peer_round_states"`
+	PeerRoundStates map[p2p.ID]*cstypes.PeerRoundState `json:"peer_round_states"`
 }
 
 type ResultBroadcastTx struct {
-	Code uint32     `json:"code"`
-	Data data.Bytes `json:"data"`
-	Log  string     `json:"log"`
+	Code uint32       `json:"code"`
+	Data cmn.HexBytes `json:"data"`
+	Log  string       `json:"log"`
 
-	Hash data.Bytes `json:"hash"`
+	Hash cmn.HexBytes `json:"hash"`
 }
 
 type ResultBroadcastTxCommit struct {
 	CheckTx   abci.ResponseCheckTx   `json:"check_tx"`
 	DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"`
-	Hash      data.Bytes             `json:"hash"`
+	Hash      cmn.HexBytes           `json:"hash"`
 	Height    int64                  `json:"height"`
 }
 
 type ResultTx struct {
+	Hash     cmn.HexBytes           `json:"hash"`
 	Height   int64                  `json:"height"`
 	Index    uint32                 `json:"index"`
 	TxResult abci.ResponseDeliverTx `json:"tx_result"`
@@ -150,3 +162,5 @@ type ResultEvent struct {
 	Query string            `json:"query"`
 	Data  types.TMEventData `json:"data"`
 }
+
+type ResultHealth struct{}
diff --git a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
index f36b5800..c0a92004 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/grpc/api.go
@@ -11,7 +11,7 @@ type broadcastAPI struct {
 }
 
 func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) {
-	// dummy so we can check if the server is up
+	// this is a no-op, just so we can check if the server is up
 	return &ResponsePing{}, nil
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/client/http_client.go b/vendor/github.com/tendermint/tendermint/rpc/lib/client/http_client.go
index a1b23a25..902b7eeb 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/client/http_client.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/client/http_client.go
@@ -42,7 +42,7 @@ func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn,
 		protocol = "tcp"
 	}
 
-	// replace / with . for http requests (dummy domain)
+	// replace / with . for http requests (placeholder domain)
 	trimmedAddress := strings.Replace(address, "/", ".", -1)
 	return trimmedAddress, func(proto, addr string) (net.Conn, error) {
 		return net.Dial(protocol, address)
@@ -187,7 +187,6 @@ func argsToJson(args map[string]interface{}) error {
 			continue
 		}
 
-		// Pass everything else to go-wire
 		data, err := json.Marshal(v)
 		if err != nil {
 			return err
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/client/ws_client.go b/vendor/github.com/tendermint/tendermint/rpc/lib/client/ws_client.go
index 79e3f63f..ab2e94d0 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/client/ws_client.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/client/ws_client.go
@@ -254,10 +254,8 @@ func (c *WSClient) reconnect() error {
 		c.mtx.Unlock()
 	}()
 
-	// 1s == (1e9 ns) == (1 Billion ns)
-	billionNs := float64(time.Second.Nanoseconds())
 	for {
-		jitterSeconds := time.Duration(rand.Float64() * billionNs)
+		jitterSeconds := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns)
 		backoffDuration := jitterSeconds + ((1 << uint(attempt)) * time.Second)
 
 		c.Logger.Info("reconnecting", "attempt", attempt+1, "backoff_duration", backoffDuration)
@@ -320,22 +318,22 @@ func (c *WSClient) reconnectRoutine() {
 				c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
 				c.Stop()
 				return
-			} else {
-				// drain reconnectAfter
-			LOOP:
-				for {
-					select {
-					case <-c.reconnectAfter:
-					default:
-						break LOOP
-					}
-				}
-				err = c.processBacklog()
-				if err == nil {
-					c.startReadWriteRoutines()
+			}
+			// drain reconnectAfter
+		LOOP:
+			for {
+				select {
+				case <-c.reconnectAfter:
+				default:
+					break LOOP
 				}
 			}
-		case <-c.Quit:
+			err := c.processBacklog()
+			if err == nil {
+				c.startReadWriteRoutines()
+			}
+
+		case <-c.Quit():
 			return
 		}
 	}
@@ -394,7 +392,7 @@ func (c *WSClient) writeRoutine() {
 			c.Logger.Debug("sent ping")
 		case <-c.readRoutineQuit:
 			return
-		case <-c.Quit:
+		case <-c.Quit():
 			if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil {
 				c.Logger.Error("failed to write message", "err", err)
 			}
@@ -455,7 +453,7 @@ func (c *WSClient) readRoutine() {
 		// c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop
 		// both readRoutine and writeRoutine
 		select {
-		case <-c.Quit:
+		case <-c.Quit():
 		case c.ResponsesCh <- response:
 		}
 	}
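The reconnect loop above now derives its backoff directly from `time.Second`: a random jitter of up to one second plus an exponentially growing `(1 << attempt) * time.Second`. A standalone sketch of the same calculation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff reproduces the ws_client reconnect delay: random jitter up to 1s
// plus an exponential component that doubles with each attempt.
func backoff(attempt uint) time.Duration {
	jitter := time.Duration(rand.Float64() * float64(time.Second)) // 0s..1s
	return jitter + (1<<attempt)*time.Second
}

func main() {
	for attempt := uint(0); attempt < 5; attempt++ {
		fmt.Printf("attempt %d: backoff ~%v\n", attempt+1, backoff(attempt))
	}
}
```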
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
index 1e14ea9a..19fc0f6e 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go
@@ -484,7 +484,7 @@ func (wsc *wsConnection) GetEventSubscriber() types.EventSubscriber {
 // It implements WSRPCConnection. It is Goroutine-safe.
 func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) {
 	select {
-	case <-wsc.Quit:
+	case <-wsc.Quit():
 		return
 	case wsc.writeChan <- resp:
 	}
@@ -494,7 +494,7 @@ func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) {
 // It implements WSRPCConnection. It is Goroutine-safe
 func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool {
 	select {
-	case <-wsc.Quit:
+	case <-wsc.Quit():
 		return false
 	case wsc.writeChan <- resp:
 		return true
@@ -525,7 +525,7 @@ func (wsc *wsConnection) readRoutine() {
 
 	for {
 		select {
-		case <-wsc.Quit:
+		case <-wsc.Quit():
 			return
 		default:
 			// reset deadline for every type of message (control or data)
@@ -643,7 +643,7 @@ func (wsc *wsConnection) writeRoutine() {
 					return
 				}
 			}
-		case <-wsc.Quit:
+		case <-wsc.Quit():
 			return
 		}
 	}
@@ -746,13 +746,13 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st
 	buf.WriteString("<br>Available endpoints:<br>")
 
 	for _, name := range noArgNames {
-		link := fmt.Sprintf("http://%s/%s", r.Host, name)
+		link := fmt.Sprintf("//%s/%s", r.Host, name)
 		buf.WriteString(fmt.Sprintf("<a href=\"%s\">%s</a></br>", link, link))
 	}
 
 	buf.WriteString("<br>Endpoints that require arguments:<br>")
 	for _, name := range argNames {
-		link := fmt.Sprintf("http://%s/%s?", r.Host, name)
+		link := fmt.Sprintf("//%s/%s?", r.Host, name)
 		funcData := funcMap[name]
 		for i, argName := range funcData.argNames {
 			link += argName + "=_"
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
index 515baf5d..3f54c61e 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go
@@ -18,32 +18,51 @@ import (
 )
 
 func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) {
-	// listenAddr should be fully formed including tcp:// or unix:// prefix
 	var proto, addr string
 	parts := strings.SplitN(listenAddr, "://", 2)
 	if len(parts) != 2 {
-		logger.Error("WARNING (tendermint/rpc/lib): Please use fully formed listening addresses, including the tcp:// or unix:// prefix")
-		// we used to allow addrs without tcp/unix prefix by checking for a colon
-		// TODO: Deprecate
-		proto = types.SocketType(listenAddr)
-		addr = listenAddr
-		// return nil, errors.Errorf("Invalid listener address %s", lisenAddr)
-	} else {
-		proto, addr = parts[0], parts[1]
+		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
 	}
+	proto, addr = parts[0], parts[1]
 
-	logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s socket %v", proto, addr))
+	logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listenAddr))
 	listener, err = net.Listen(proto, addr)
 	if err != nil {
-		return nil, errors.Errorf("Failed to listen to %v: %v", listenAddr, err)
+		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
 	}
 
 	go func() {
-		res := http.Serve(
+		err := http.Serve(
 			listener,
 			RecoverAndLogHandler(handler, logger),
 		)
-		logger.Error("RPC HTTP server stopped", "result", res)
+		logger.Error("RPC HTTP server stopped", "err", err)
+	}()
+	return listener, nil
+}
+
+func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) {
+	var proto, addr string
+	parts := strings.SplitN(listenAddr, "://", 2)
+	if len(parts) != 2 {
+		return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
+	}
+	proto, addr = parts[0], parts[1]
+
+	logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile))
+	listener, err = net.Listen(proto, addr)
+	if err != nil {
+		return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
+	}
+
+	go func() {
+		err := http.ServeTLS(
+			listener,
+			RecoverAndLogHandler(handler, logger),
+			certFile,
+			keyFile,
+		)
+		logger.Error("RPC HTTPS server stopped", "err", err)
 	}()
 	return listener, nil
 }
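`StartHTTPAndTLSServer` mirrors `StartHTTPServer` but serves TLS from a certificate and key pair. A minimal sketch of wiring it up, where the listen address, file paths, and handler are placeholders, and the logger comes from `tmlibs/log` as elsewhere in this patch:

```go
package main

import (
	"net/http"
	"os"

	rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	})

	// A fully formed address is now required (tcp:// or unix:// prefix).
	listener, err := rpcserver.StartHTTPAndTLSServer(
		"tcp://0.0.0.0:46658", mux, "server.crt", "server.key", logger)
	if err != nil {
		logger.Error("failed to start HTTPS server", "err", err)
		os.Exit(1)
	}
	defer listener.Close()

	select {} // block forever; the server itself runs in a goroutine
}
```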
diff --git a/vendor/github.com/tendermint/tendermint/rpc/lib/types/types.go b/vendor/github.com/tendermint/tendermint/rpc/lib/types/types.go
index 37d45145..e4b02c58 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/lib/types/types.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/lib/types/types.go
@@ -101,9 +101,8 @@ func NewRPCErrorResponse(id string, code int, msg string, data string) RPCRespon
 func (resp RPCResponse) String() string {
 	if resp.Error == nil {
 		return fmt.Sprintf("[%s %v]", resp.ID, resp.Result)
-	} else {
-		return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
 	}
+	return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
 }
 
 func RPCParseError(id string, err error) RPCResponse {
diff --git a/vendor/github.com/tendermint/tendermint/state/execution.go b/vendor/github.com/tendermint/tendermint/state/execution.go
index 921799b8..64db9f31 100644
--- a/vendor/github.com/tendermint/tendermint/state/execution.go
+++ b/vendor/github.com/tendermint/tendermint/state/execution.go
@@ -1,7 +1,6 @@
 package state
 
 import (
-	"errors"
 	"fmt"
 
 	fail "github.com/ebuchman/fail-test"
@@ -127,21 +126,26 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {
 	blockExec.mempool.Lock()
 	defer blockExec.mempool.Unlock()
 
+	// While the mempool is locked, flush it to ensure all outstanding async
+	// requests have completed in the ABCI app before Commit.
+	err := blockExec.mempool.FlushAppConn()
+	if err != nil {
+		blockExec.logger.Error("Client error during mempool.FlushAppConn", "err", err)
+		return nil, err
+	}
+
 	// Commit block, get hash back
 	res, err := blockExec.proxyApp.CommitSync()
 	if err != nil {
 		blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err)
 		return nil, err
 	}
-	if res.IsErr() {
-		blockExec.logger.Error("Error in proxyAppConn.CommitSync", "err", res)
-		return nil, res
-	}
-	if res.Log != "" {
-		blockExec.logger.Debug("Commit.Log: " + res.Log)
-	}
+	// ResponseCommit has no error code - just data
 
-	blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "appHash", res.Data)
+	blockExec.logger.Info("Committed state",
+		"height", block.Height,
+		"txs", block.NumTxs,
+		"appHash", fmt.Sprintf("%X", res.Data))
 
 	// Update mempool.
 	if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil {
@@ -191,9 +195,9 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
 	}
 
 	// TODO: determine which validators were byzantine
-	byzantineVals := make([]*abci.Evidence, len(block.Evidence.Evidence))
+	byzantineVals := make([]abci.Evidence, len(block.Evidence.Evidence))
 	for i, ev := range block.Evidence.Evidence {
-		byzantineVals[i] = &abci.Evidence{
+		byzantineVals[i] = abci.Evidence{
 			PubKey: ev.Address(), // XXX
 			Height: ev.Height(),
 		}
@@ -236,18 +240,10 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
 	return abciResponses, nil
 }
 
-func updateValidators(currentSet *types.ValidatorSet, updates []*abci.Validator) error {
-	// If more or equal than 1/3 of total voting power changed in one block, then
-	// a light client could never prove the transition externally. See
-	// ./lite/doc.go for details on how a light client tracks validators.
-	vp23, err := changeInVotingPowerMoreOrEqualToOneThird(currentSet, updates)
-	if err != nil {
-		return err
-	}
-	if vp23 {
-		return errors.New("the change in voting power must be strictly less than 1/3")
-	}
-
+// If more than or equal to 1/3 of the total voting power changed in one block, then
+// a light client could never prove the transition externally. See
+// ./lite/doc.go for details on how a light client tracks validators.
+func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator) error {
 	for _, v := range updates {
 		pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
 		if err != nil {
@@ -286,42 +282,6 @@ func updateValidators(currentSet *types.ValidatorSet, updates []*abci.Validator)
 	return nil
 }
 
-func changeInVotingPowerMoreOrEqualToOneThird(currentSet *types.ValidatorSet, updates []*abci.Validator) (bool, error) {
-	threshold := currentSet.TotalVotingPower() * 1 / 3
-	acc := int64(0)
-
-	for _, v := range updates {
-		pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
-		if err != nil {
-			return false, err
-		}
-
-		address := pubkey.Address()
-		power := int64(v.Power)
-		// mind the overflow from int64
-		if power < 0 {
-			return false, fmt.Errorf("Power (%d) overflows int64", v.Power)
-		}
-
-		_, val := currentSet.GetByAddress(address)
-		if val == nil {
-			acc += power
-		} else {
-			np := val.VotingPower - power
-			if np < 0 {
-				np = -np
-			}
-			acc += np
-		}
-
-		if acc >= threshold {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
 // updateState returns a new State updated according to the header and responses.
 func updateState(s State, blockID types.BlockID, header *types.Header,
 	abciResponses *ABCIResponses) (State, error) {
@@ -417,12 +377,6 @@ func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block
 		logger.Error("Client error during proxyAppConn.CommitSync", "err", res)
 		return nil, err
 	}
-	if res.IsErr() {
-		logger.Error("Error in proxyAppConn.CommitSync", "err", res)
-		return nil, res
-	}
-	if res.Log != "" {
-		logger.Info("Commit.Log: " + res.Log)
-	}
+	// ResponseCommit has no error or log, just data
 	return res.Data, nil
 }
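The Commit change above hinges on ordering: the mempool stays locked while outstanding async ABCI requests are flushed, only then is CommitSync issued, and only after that is the mempool updated for the new height. The sketch below illustrates that sequence with stand-in types; `fakeMempool` and `fakeApp` are simplified placeholders, not the real interfaces.

```go
package main

import "fmt"

// fakeMempool and fakeApp stand in for the real mempool and ABCI connection;
// only the ordering of the calls matters here.
type fakeMempool struct{}

func (m *fakeMempool) Lock()   { fmt.Println("mempool locked") }
func (m *fakeMempool) Unlock() { fmt.Println("mempool unlocked") }
func (m *fakeMempool) FlushAppConn() error {
	fmt.Println("flushed outstanding async ABCI requests")
	return nil
}
func (m *fakeMempool) Update(height int64) error {
	fmt.Println("mempool updated for height", height)
	return nil
}

type fakeApp struct{}

func (fakeApp) CommitSync() ([]byte, error) { return []byte{0xAB}, nil }

// commit mirrors the ordering introduced above: flush while the mempool is
// still locked, then commit, then update the mempool.
func commit(mem *fakeMempool, app fakeApp, height int64) ([]byte, error) {
	mem.Lock()
	defer mem.Unlock()

	if err := mem.FlushAppConn(); err != nil {
		return nil, err
	}
	appHash, err := app.CommitSync()
	if err != nil {
		return nil, err
	}
	if err := mem.Update(height); err != nil {
		return nil, err
	}
	return appHash, nil
}

func main() {
	hash, err := commit(&fakeMempool{}, fakeApp{}, 43)
	fmt.Printf("appHash=%X err=%v\n", hash, err)
}
```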
diff --git a/vendor/github.com/tendermint/tendermint/state/state.go b/vendor/github.com/tendermint/tendermint/state/state.go
index 575a1630..fb5d78c4 100644
--- a/vendor/github.com/tendermint/tendermint/state/state.go
+++ b/vendor/github.com/tendermint/tendermint/state/state.go
@@ -86,7 +86,11 @@ func (s State) Equals(s2 State) bool {
 
 // Bytes serializes the State using go-wire.
 func (s State) Bytes() []byte {
-	return wire.BinaryBytes(s)
+	bz, err := wire.MarshalBinary(s)
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 // IsEmpty returns true if the State is equal to the empty State.
diff --git a/vendor/github.com/tendermint/tendermint/state/store.go b/vendor/github.com/tendermint/tendermint/state/store.go
index de2d4d67..df07ec54 100644
--- a/vendor/github.com/tendermint/tendermint/state/store.go
+++ b/vendor/github.com/tendermint/tendermint/state/store.go
@@ -1,7 +1,6 @@
 package state
 
 import (
-	"bytes"
 	"fmt"
 
 	abci "github.com/tendermint/abci/types"
@@ -70,12 +69,11 @@ func loadState(db dbm.DB, key []byte) (state State) {
 		return state
 	}
 
-	r, n, err := bytes.NewReader(buf), new(int), new(error)
-	wire.ReadBinaryPtr(&state, r, 0, n, err)
-	if *err != nil {
+	err := wire.UnmarshalBinary(buf, &state)
+	if err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
 		cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed:
-                %v\n`, *err))
+                %v\n`, err))
 	}
 	// TODO: ensure that buf is completely read.
 
@@ -113,7 +111,11 @@ func NewABCIResponses(block *types.Block) *ABCIResponses {
 
 // Bytes serializes the ABCIResponse using go-wire
 func (a *ABCIResponses) Bytes() []byte {
-	return wire.BinaryBytes(*a)
+	bz, err := wire.MarshalBinary(*a)
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 func (a *ABCIResponses) ResultsHash() []byte {
@@ -131,12 +133,11 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) {
 	}
 
 	abciResponses := new(ABCIResponses)
-	r, n, err := bytes.NewReader(buf), new(int), new(error)
-	wire.ReadBinaryPtr(abciResponses, r, 0, n, err)
-	if *err != nil {
+	err := wire.UnmarshalBinary(buf, abciResponses)
+	if err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
 		cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has
-                changed: %v\n`, *err))
+                changed: %v\n`, err))
 	}
 	// TODO: ensure that buf is completely read.
 
@@ -160,7 +161,11 @@ type ValidatorsInfo struct {
 
 // Bytes serializes the ValidatorsInfo using go-wire
 func (valInfo *ValidatorsInfo) Bytes() []byte {
-	return wire.BinaryBytes(*valInfo)
+	bz, err := wire.MarshalBinary(*valInfo)
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 // LoadValidators loads the ValidatorSet for a given height.
@@ -189,12 +194,11 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo {
 	}
 
 	v := new(ValidatorsInfo)
-	r, n, err := bytes.NewReader(buf), new(int), new(error)
-	wire.ReadBinaryPtr(v, r, 0, n, err)
-	if *err != nil {
+	err := wire.UnmarshalBinary(buf, v)
+	if err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
 		cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed:
-                %v\n`, *err))
+                %v\n`, err))
 	}
 	// TODO: ensure that buf is completely read.
 
@@ -225,7 +229,11 @@ type ConsensusParamsInfo struct {
 
 // Bytes serializes the ConsensusParamsInfo using go-wire
 func (params ConsensusParamsInfo) Bytes() []byte {
-	return wire.BinaryBytes(params)
+	bz, err := wire.MarshalBinary(params)
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 // LoadConsensusParams loads the ConsensusParams for a given height.
@@ -255,12 +263,11 @@ func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo {
 	}
 
 	paramsInfo := new(ConsensusParamsInfo)
-	r, n, err := bytes.NewReader(buf), new(int), new(error)
-	wire.ReadBinaryPtr(paramsInfo, r, 0, n, err)
-	if *err != nil {
+	err := wire.UnmarshalBinary(buf, paramsInfo)
+	if err != nil {
 		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
 		cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed:
-                %v\n`, *err))
+                %v\n`, err))
 	}
 	// TODO: ensure that buf is completely read.
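The serialization helpers in this file now follow one pattern: `wire.MarshalBinary` on write (panicking on error, since these structs are always encodable) and `wire.UnmarshalBinary` on read. A standalone round-trip sketch using the same two calls, assuming the go-wire helpers used throughout this patch; the struct itself is illustrative.

```go
package main

import (
	"fmt"

	wire "github.com/tendermint/go-wire"
)

// info is a stand-in for the *Info structs persisted in this file.
type info struct {
	Height  int64
	AppHash []byte
}

func main() {
	orig := info{Height: 42, AppHash: []byte{0xDE, 0xAD}}

	// Write side: marshal and panic on failure, as Bytes() does above.
	bz, err := wire.MarshalBinary(orig)
	if err != nil {
		panic(err)
	}

	// Read side: unmarshal into a zero value, as loadState does above.
	var loaded info
	if err := wire.UnmarshalBinary(bz, &loaded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", loaded)
}
```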
 
diff --git a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
index b70f3699..74bf4843 100644
--- a/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
+++ b/vendor/github.com/tendermint/tendermint/state/txindex/kv/kv.go
@@ -4,19 +4,20 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
 
 	"github.com/pkg/errors"
 
-	abci "github.com/tendermint/abci/types"
 	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/tendermint/state/txindex"
-	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tmlibs/common"
-	db "github.com/tendermint/tmlibs/db"
+	dbm "github.com/tendermint/tmlibs/db"
 	"github.com/tendermint/tmlibs/pubsub/query"
+
+	"github.com/tendermint/tendermint/state/txindex"
+	"github.com/tendermint/tendermint/types"
 )
 
 const (
@@ -27,13 +28,13 @@ var _ txindex.TxIndexer = (*TxIndex)(nil)
 
 // TxIndex is the simplest possible indexer, backed by key-value storage (levelDB).
 type TxIndex struct {
-	store        db.DB
+	store        dbm.DB
 	tagsToIndex  []string
 	indexAllTags bool
 }
 
 // NewTxIndex creates new KV indexer.
-func NewTxIndex(store db.DB, options ...func(*TxIndex)) *TxIndex {
+func NewTxIndex(store dbm.DB, options ...func(*TxIndex)) *TxIndex {
 	txi := &TxIndex{store: store, tagsToIndex: make([]string, 0), indexAllTags: false}
 	for _, o := range options {
 		o(txi)
@@ -67,10 +68,8 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
 		return nil, nil
 	}
 
-	r := bytes.NewReader(rawBytes)
-	var n int
-	var err error
-	txResult := wire.ReadBinary(&types.TxResult{}, r, 0, &n, &err).(*types.TxResult)
+	txResult := new(types.TxResult)
+	err := wire.UnmarshalBinary(rawBytes, &txResult)
 	if err != nil {
 		return nil, fmt.Errorf("Error reading TxResult: %v", err)
 	}
@@ -87,13 +86,16 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error {
 
 		// index tx by tags
 		for _, tag := range result.Result.Tags {
-			if txi.indexAllTags || cmn.StringInSlice(tag.Key, txi.tagsToIndex) {
+			if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
 				storeBatch.Set(keyForTag(tag, result), hash)
 			}
 		}
 
 		// index tx by hash
-		rawBytes := wire.BinaryBytes(result)
+		rawBytes, err := wire.MarshalBinary(result)
+		if err != nil {
+			return err
+		}
 		storeBatch.Set(hash, rawBytes)
 	}
 
@@ -109,13 +111,16 @@ func (txi *TxIndex) Index(result *types.TxResult) error {
 
 	// index tx by tags
 	for _, tag := range result.Result.Tags {
-		if txi.indexAllTags || cmn.StringInSlice(tag.Key, txi.tagsToIndex) {
+		if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
 			b.Set(keyForTag(tag, result), hash)
 		}
 	}
 
 	// index tx by hash
-	rawBytes := wire.BinaryBytes(result)
+	rawBytes, err := wire.MarshalBinary(result)
+	if err != nil {
+		return err
+	}
 	b.Set(hash, rawBytes)
 
 	b.Write()
@@ -143,9 +148,8 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
 		res, err := txi.Get(hash)
 		if res == nil {
 			return []*types.TxResult{}, nil
-		} else {
-			return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
 		}
+		return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
 	}
 
 	// conditions to skip because they're handled before "everything else"
@@ -166,10 +170,10 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
 
 		for _, r := range ranges {
 			if !hashesInitialized {
-				hashes = txi.matchRange(r, startKeyForRange(r, height))
+				hashes = txi.matchRange(r, []byte(r.key))
 				hashesInitialized = true
 			} else {
-				hashes = intersect(hashes, txi.matchRange(r, startKeyForRange(r, height)))
+				hashes = intersect(hashes, txi.matchRange(r, []byte(r.key)))
 			}
 		}
 	}
@@ -198,6 +202,11 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
 		i++
 	}
 
+	// sort by height by default
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].Height < results[j].Height
+	})
+
 	return results, nil
 }
 
@@ -232,6 +241,52 @@ type queryRange struct {
 	includeUpperBound bool
 }
 
+func (r queryRange) lowerBoundValue() interface{} {
+	if r.lowerBound == nil {
+		return nil
+	}
+
+	if r.includeLowerBound {
+		return r.lowerBound
+	} else {
+		switch t := r.lowerBound.(type) {
+		case int64:
+			return t + 1
+		case time.Time:
+			return t.Unix() + 1
+		default:
+			panic("not implemented")
+		}
+	}
+}
+
+func (r queryRange) AnyBound() interface{} {
+	if r.lowerBound != nil {
+		return r.lowerBound
+	} else {
+		return r.upperBound
+	}
+}
+
+func (r queryRange) upperBoundValue() interface{} {
+	if r.upperBound == nil {
+		return nil
+	}
+
+	if r.includeUpperBound {
+		return r.upperBound
+	} else {
+		switch t := r.upperBound.(type) {
+		case int64:
+			return t - 1
+		case time.Time:
+			return t.Unix() - 1
+		default:
+			panic("not implemented")
+		}
+	}
+}
+
 func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) {
 	ranges = make(queryRanges)
 	for i, c := range conditions {
@@ -270,18 +325,18 @@ func isRangeOperation(op query.Operator) bool {
 
 func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte) {
 	if c.Op == query.OpEqual {
-		it := txi.store.IteratorPrefix(startKey)
-		defer it.Release()
-		for it.Next() {
+		it := dbm.IteratePrefix(txi.store, startKey)
+		defer it.Close()
+		for ; it.Valid(); it.Next() {
 			hashes = append(hashes, it.Value())
 		}
 	} else if c.Op == query.OpContains {
 		// XXX: doing full scan because startKey does not apply here
 		// For example, if startKey = "account.owner=an" and search query = "accoutn.owner CONSISTS an"
 		// we can't iterate with prefix "account.owner=an" because we might miss keys like "account.owner=Ulan"
-		it := txi.store.Iterator()
-		defer it.Release()
-		for it.Next() {
+		it := txi.store.Iterator(nil, nil)
+		defer it.Close()
+		for ; it.Valid(); it.Next() {
 			if !isTagKey(it.Key()) {
 				continue
 			}
@@ -295,34 +350,49 @@ func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte)
 	return
 }
 
-func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) {
-	it := txi.store.IteratorPrefix(startKey)
-	defer it.Release()
+func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) {
+	// create a map to prevent duplicates
+	hashesMap := make(map[string][]byte)
+
+	lowerBound := r.lowerBoundValue()
+	upperBound := r.upperBoundValue()
+
+	it := dbm.IteratePrefix(txi.store, prefix)
+	defer it.Close()
 LOOP:
-	for it.Next() {
+	for ; it.Valid(); it.Next() {
 		if !isTagKey(it.Key()) {
 			continue
 		}
-		if r.upperBound != nil {
-			// no other way to stop iterator other than checking for upperBound
-			switch (r.upperBound).(type) {
-			case int64:
-				v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
-				if err == nil && v == r.upperBound {
-					if r.includeUpperBound {
-						hashes = append(hashes, it.Value())
-					}
-					break LOOP
-				}
-				// XXX: passing time in a ABCI Tags is not yet implemented
-				// case time.Time:
-				// 	v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
-				// 	if v == r.upperBound {
-				// 		break
-				// 	}
+		switch r.AnyBound().(type) {
+		case int64:
+			v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
+			if err != nil {
+				continue LOOP
+			}
+			include := true
+			if lowerBound != nil && v < lowerBound.(int64) {
+				include = false
 			}
+			if upperBound != nil && v > upperBound.(int64) {
+				include = false
+			}
+			if include {
+				hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value()
+			}
+			// XXX: passing time in ABCI Tags is not yet implemented
+			// case time.Time:
+			// 	v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
+			// 	if v == r.upperBound {
+			// 		break
+			// 	}
 		}
-		hashes = append(hashes, it.Value())
+	}
+	hashes = make([][]byte, len(hashesMap))
+	i := 0
+	for _, h := range hashesMap {
+		hashes[i] = h
+		i++
 	}
 	return
 }
@@ -340,33 +410,6 @@ func startKey(c query.Condition, height int64) []byte {
 	return []byte(key)
 }
 
-func startKeyForRange(r queryRange, height int64) []byte {
-	if r.lowerBound == nil {
-		return []byte(r.key)
-	}
-
-	var lowerBound interface{}
-	if r.includeLowerBound {
-		lowerBound = r.lowerBound
-	} else {
-		switch t := r.lowerBound.(type) {
-		case int64:
-			lowerBound = t + 1
-		case time.Time:
-			lowerBound = t.Unix() + 1
-		default:
-			panic("not implemented")
-		}
-	}
-	var key string
-	if height > 0 {
-		key = fmt.Sprintf("%s/%v/%d", r.key, lowerBound, height)
-	} else {
-		key = fmt.Sprintf("%s/%v", r.key, lowerBound)
-	}
-	return []byte(key)
-}
-
 func isTagKey(key []byte) bool {
 	return strings.Count(string(key), tagKeySeparator) == 3
 }
@@ -376,17 +419,8 @@ func extractValueFromKey(key []byte) string {
 	return parts[1]
 }
 
-func keyForTag(tag *abci.KVPair, result *types.TxResult) []byte {
-	switch tag.ValueType {
-	case abci.KVPair_STRING:
-		return []byte(fmt.Sprintf("%s/%v/%d/%d", tag.Key, tag.ValueString, result.Height, result.Index))
-	case abci.KVPair_INT:
-		return []byte(fmt.Sprintf("%s/%v/%d/%d", tag.Key, tag.ValueInt, result.Height, result.Index))
-	// case abci.KVPair_TIME:
-	// 	return []byte(fmt.Sprintf("%s/%d/%d/%d", tag.Key, tag.ValueTime.Unix(), result.Height, result.Index))
-	default:
-		panic(fmt.Sprintf("Undefined value type: %v", tag.ValueType))
-	}
+func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte {
+	return []byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index))
 }
 
 ///////////////////////////////////////////////////////////////////////////////
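`matchRange` no longer derives a start key from the lower bound; it scans the whole tag prefix and filters each value against `lowerBoundValue`/`upperBoundValue`, which fold exclusive bounds into inclusive ones (for int64 values, `> 5` becomes `>= 6`), deduplicating hits through a map. A small sketch of the same bound adjustment:

```go
package main

import "fmt"

// inclusiveBounds mirrors lowerBoundValue/upperBoundValue above for int64
// bounds: exclusive bounds are shifted by one so a plain v >= lo && v <= hi
// comparison can be used while scanning the tag prefix.
func inclusiveBounds(lo, hi int64, includeLo, includeHi bool) (int64, int64) {
	if !includeLo {
		lo++
	}
	if !includeHi {
		hi--
	}
	return lo, hi
}

func main() {
	// tx.height > 5 AND tx.height <= 10
	lo, hi := inclusiveBounds(5, 10, false, true)
	for _, v := range []int64{5, 6, 10, 11} {
		fmt.Printf("height %d included: %v\n", v, v >= lo && v <= hi)
	}
}
```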
diff --git a/vendor/github.com/tendermint/tendermint/types/block.go b/vendor/github.com/tendermint/tendermint/types/block.go
index 6aa97c2d..22d605d6 100644
--- a/vendor/github.com/tendermint/tendermint/types/block.go
+++ b/vendor/github.com/tendermint/tendermint/types/block.go
@@ -4,14 +4,13 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io"
 	"strings"
 	"time"
 
-	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	wire "github.com/tendermint/tendermint/wire"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/merkle"
+	"golang.org/x/crypto/ripemd160"
 )
 
 // Block defines the atomic unit of a Tendermint blockchain.
@@ -85,7 +84,7 @@ func (b *Block) FillHeader() {
 
 // Hash computes and returns the block hash.
 // If the block is incomplete, block hash is nil for safety.
-func (b *Block) Hash() data.Bytes {
+func (b *Block) Hash() cmn.HexBytes {
 	if b == nil || b.Header == nil || b.Data == nil || b.LastCommit == nil {
 		return nil
 	}
@@ -96,7 +95,11 @@ func (b *Block) Hash() data.Bytes {
 // MakePartSet returns a PartSet containing parts of a serialized block.
 // This is the form in which the block is gossipped to peers.
 func (b *Block) MakePartSet(partSize int) *PartSet {
-	return NewPartSetFromData(wire.BinaryBytes(b), partSize)
+	bz, err := wire.MarshalBinary(b)
+	if err != nil {
+		panic(err)
+	}
+	return NewPartSetFromData(bz, partSize)
 }
 
 // HashesTo is a convenience function that checks if a block hashes to the given argument.
@@ -138,9 +141,8 @@ func (b *Block) StringIndented(indent string) string {
 func (b *Block) StringShort() string {
 	if b == nil {
 		return "nil-Block"
-	} else {
-		return fmt.Sprintf("Block#%v", b.Hash())
 	}
+	return fmt.Sprintf("Block#%v", b.Hash())
 }
 
 //-----------------------------------------------------------------------------
@@ -160,39 +162,41 @@ type Header struct {
 	TotalTxs    int64   `json:"total_txs"`
 
 	// hashes of block data
-	LastCommitHash data.Bytes `json:"last_commit_hash"` // commit from validators from the last block
-	DataHash       data.Bytes `json:"data_hash"`        // transactions
+	LastCommitHash cmn.HexBytes `json:"last_commit_hash"` // commit from validators from the last block
+	DataHash       cmn.HexBytes `json:"data_hash"`        // transactions
 
 	// hashes from the app output from the prev block
-	ValidatorsHash  data.Bytes `json:"validators_hash"`   // validators for the current block
-	ConsensusHash   data.Bytes `json:"consensus_hash"`    // consensus params for current block
-	AppHash         data.Bytes `json:"app_hash"`          // state after txs from the previous block
-	LastResultsHash data.Bytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block
+	ValidatorsHash  cmn.HexBytes `json:"validators_hash"`   // validators for the current block
+	ConsensusHash   cmn.HexBytes `json:"consensus_hash"`    // consensus params for current block
+	AppHash         cmn.HexBytes `json:"app_hash"`          // state after txs from the previous block
+	LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block
 
 	// consensus info
-	EvidenceHash data.Bytes `json:"evidence_hash"` // evidence included in the block
+	EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block
 }
 
 // Hash returns the hash of the header.
-// Returns nil if ValidatorHash is missing.
-func (h *Header) Hash() data.Bytes {
-	if len(h.ValidatorsHash) == 0 {
+// Returns nil if ValidatorHash is missing,
+// since a Header is not valid unless there is
+// a ValidatorsHash (corresponding to the validator set).
+func (h *Header) Hash() cmn.HexBytes {
+	if h == nil || len(h.ValidatorsHash) == 0 {
 		return nil
 	}
-	return merkle.SimpleHashFromMap(map[string]interface{}{
-		"ChainID":     h.ChainID,
-		"Height":      h.Height,
-		"Time":        h.Time,
-		"NumTxs":      h.NumTxs,
-		"TotalTxs":    h.TotalTxs,
-		"LastBlockID": h.LastBlockID,
-		"LastCommit":  h.LastCommitHash,
-		"Data":        h.DataHash,
-		"Validators":  h.ValidatorsHash,
-		"App":         h.AppHash,
-		"Consensus":   h.ConsensusHash,
-		"Results":     h.LastResultsHash,
-		"Evidence":    h.EvidenceHash,
+	return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
+		"ChainID":     wireHasher(h.ChainID),
+		"Height":      wireHasher(h.Height),
+		"Time":        wireHasher(h.Time),
+		"NumTxs":      wireHasher(h.NumTxs),
+		"TotalTxs":    wireHasher(h.TotalTxs),
+		"LastBlockID": wireHasher(h.LastBlockID),
+		"LastCommit":  wireHasher(h.LastCommitHash),
+		"Data":        wireHasher(h.DataHash),
+		"Validators":  wireHasher(h.ValidatorsHash),
+		"App":         wireHasher(h.AppHash),
+		"Consensus":   wireHasher(h.ConsensusHash),
+		"Results":     wireHasher(h.LastResultsHash),
+		"Evidence":    wireHasher(h.EvidenceHash),
 	})
 }
 
@@ -212,7 +216,7 @@ func (h *Header) StringIndented(indent string) string {
 %s  Data:           %v
 %s  Validators:     %v
 %s  App:            %v
-%s  Conensus:       %v
+%s  Consensus:      %v
 %s  Results:        %v
 %s  Evidence:       %v
 %s}#%v`,
@@ -245,11 +249,12 @@ type Commit struct {
 
 	// Volatile
 	firstPrecommit *Vote
-	hash           data.Bytes
+	hash           cmn.HexBytes
 	bitArray       *cmn.BitArray
 }
 
-// FirstPrecommit returns the first non-nil precommit in the commit
+// FirstPrecommit returns the first non-nil precommit in the commit.
+// If all precommits are nil, it returns an empty precommit with height 0.
 func (commit *Commit) FirstPrecommit() *Vote {
 	if len(commit.Precommits) == 0 {
 		return nil
@@ -263,7 +268,9 @@ func (commit *Commit) FirstPrecommit() *Vote {
 			return precommit
 		}
 	}
-	return nil
+	return &Vote{
+		Type: VoteTypePrecommit,
+	}
 }
 
 // Height returns the height of the commit
@@ -354,13 +361,13 @@ func (commit *Commit) ValidateBasic() error {
 }
 
 // Hash returns the hash of the commit
-func (commit *Commit) Hash() data.Bytes {
+func (commit *Commit) Hash() cmn.HexBytes {
 	if commit.hash == nil {
-		bs := make([]interface{}, len(commit.Precommits))
+		bs := make([]merkle.Hasher, len(commit.Precommits))
 		for i, precommit := range commit.Precommits {
-			bs[i] = precommit
+			bs[i] = wireHasher(precommit)
 		}
-		commit.hash = merkle.SimpleHashFromBinaries(bs)
+		commit.hash = merkle.SimpleHashFromHashers(bs)
 	}
 	return commit.hash
 }
@@ -402,11 +409,14 @@ type Data struct {
 	Txs Txs `json:"txs"`
 
 	// Volatile
-	hash data.Bytes
+	hash cmn.HexBytes
 }
 
 // Hash returns the hash of the data
-func (data *Data) Hash() data.Bytes {
+func (data *Data) Hash() cmn.HexBytes {
+	if data == nil {
+		return (Txs{}).Hash()
+	}
 	if data.hash == nil {
 		data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs
 	}
@@ -440,11 +450,11 @@ type EvidenceData struct {
 	Evidence EvidenceList `json:"evidence"`
 
 	// Volatile
-	hash data.Bytes
+	hash cmn.HexBytes
 }
 
 // Hash returns the hash of the data.
-func (data *EvidenceData) Hash() data.Bytes {
+func (data *EvidenceData) Hash() cmn.HexBytes {
 	if data.hash == nil {
 		data.hash = data.Evidence.Hash()
 	}
@@ -476,7 +486,7 @@ func (data *EvidenceData) StringIndented(indent string) string {
 
 // BlockID defines the unique ID of a block as its Hash and its PartSetHeader
 type BlockID struct {
-	Hash        data.Bytes    `json:"hash"`
+	Hash        cmn.HexBytes  `json:"hash"`
 	PartsHeader PartSetHeader `json:"parts"`
 }
 
@@ -493,20 +503,43 @@ func (blockID BlockID) Equals(other BlockID) bool {
 
 // Key returns a machine-readable string representation of the BlockID
 func (blockID BlockID) Key() string {
-	return string(blockID.Hash) + string(wire.BinaryBytes(blockID.PartsHeader))
-}
-
-// WriteSignBytes writes the canonical bytes of the BlockID to the given writer for digital signing
-func (blockID BlockID) WriteSignBytes(w io.Writer, n *int, err *error) {
-	if blockID.IsZero() {
-		wire.WriteTo([]byte("null"), w, n, err)
-	} else {
-		wire.WriteJSON(CanonicalBlockID(blockID), w, n, err)
+	bz, err := wire.MarshalBinary(blockID.PartsHeader)
+	if err != nil {
+		panic(err)
 	}
-
+	return string(blockID.Hash) + string(bz)
 }
 
 // String returns a human readable string representation of the BlockID
 func (blockID BlockID) String() string {
 	return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader)
 }
+
+//-------------------------------------------------------
+
+type hasher struct {
+	item interface{}
+}
+
+func (h hasher) Hash() []byte {
+	hasher := ripemd160.New()
+	bz, err := wire.MarshalBinary(h.item)
+	if err != nil {
+		panic(err)
+	}
+	_, err = hasher.Write(bz)
+	if err != nil {
+		panic(err)
+	}
+	return hasher.Sum(nil)
+
+}
+
+func tmHash(item interface{}) []byte {
+	h := hasher{item}
+	return h.Hash()
+}
+
+func wireHasher(item interface{}) merkle.Hasher {
+	return hasher{item}
+}
diff --git a/vendor/github.com/tendermint/tendermint/types/canonical_json.go b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
index 41c67c24..4eeeb206 100644
--- a/vendor/github.com/tendermint/tendermint/types/canonical_json.go
+++ b/vendor/github.com/tendermint/tendermint/types/canonical_json.go
@@ -3,23 +3,23 @@ package types
 import (
 	"time"
 
-	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	wire "github.com/tendermint/tendermint/wire"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
-// canonical json is go-wire's json for structs with fields in alphabetical order
+// canonical json is wire's json for structs with fields in alphabetical order
 
-// timeFormat is used for generating the sigs
-const timeFormat = wire.RFC3339Millis
+// TimeFormat is used for generating the sigs
+const TimeFormat = wire.RFC3339Millis
 
 type CanonicalJSONBlockID struct {
-	Hash        data.Bytes                 `json:"hash,omitempty"`
+	Hash        cmn.HexBytes               `json:"hash,omitempty"`
 	PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"`
 }
 
 type CanonicalJSONPartSetHeader struct {
-	Hash  data.Bytes `json:"hash"`
-	Total int        `json:"total"`
+	Hash  cmn.HexBytes `json:"hash"`
+	Total int          `json:"total"`
 }
 
 type CanonicalJSONProposal struct {
@@ -40,11 +40,11 @@ type CanonicalJSONVote struct {
 }
 
 type CanonicalJSONHeartbeat struct {
-	Height           int64      `json:"height"`
-	Round            int        `json:"round"`
-	Sequence         int        `json:"sequence"`
-	ValidatorAddress data.Bytes `json:"validator_address"`
-	ValidatorIndex   int        `json:"validator_index"`
+	Height           int64   `json:"height"`
+	Round            int     `json:"round"`
+	Sequence         int     `json:"sequence"`
+	ValidatorAddress Address `json:"validator_address"`
+	ValidatorIndex   int     `json:"validator_index"`
 }
 
 //------------------------------------
@@ -114,8 +114,8 @@ func CanonicalHeartbeat(heartbeat *Heartbeat) CanonicalJSONHeartbeat {
 }
 
 func CanonicalTime(t time.Time) string {
-	// note that sending time over go-wire resets it to
+	// note that sending time over wire resets it to
 	// local time, we need to force UTC here, so the
 	// signatures match
-	return t.UTC().Format(timeFormat)
+	return t.UTC().Format(TimeFormat)
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/event_buffer.go b/vendor/github.com/tendermint/tendermint/types/event_buffer.go
index 6f236e8e..18b41014 100644
--- a/vendor/github.com/tendermint/tendermint/types/event_buffer.go
+++ b/vendor/github.com/tendermint/tendermint/types/event_buffer.go
@@ -41,6 +41,10 @@ func (b *TxEventBuffer) Flush() error {
 			return err
 		}
 	}
-	b.events = make([]EventDataTx, 0, b.capacity)
+
+	// Clear out the elements and set the length to 0
+	// but maintain the underlying slice's capacity.
+	// See Issue https://github.com/tendermint/tendermint/issues/1189
+	b.events = b.events[:0]
 	return nil
 }
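Re-slicing to zero length (`b.events = b.events[:0]`) clears the buffer without giving up the backing array, so later appends up to the old capacity allocate nothing. A tiny illustration of that behaviour:

```go
package main

import "fmt"

func main() {
	events := make([]string, 0, 4)
	events = append(events, "tx1", "tx2", "tx3")
	fmt.Println(len(events), cap(events)) // 3 4

	// Same trick as TxEventBuffer.Flush: drop the elements, keep the array.
	events = events[:0]
	fmt.Println(len(events), cap(events)) // 0 4
}
```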
diff --git a/vendor/github.com/tendermint/tendermint/types/event_bus.go b/vendor/github.com/tendermint/tendermint/types/event_bus.go
index 6b6069b9..37bd5619 100644
--- a/vendor/github.com/tendermint/tendermint/types/event_bus.go
+++ b/vendor/github.com/tendermint/tendermint/types/event_bus.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	abci "github.com/tendermint/abci/types"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"
 	tmpubsub "github.com/tendermint/tmlibs/pubsub"
@@ -98,17 +97,11 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
 	// validate and fill tags from tx result
 	for _, tag := range event.Result.Tags {
 		// basic validation
-		if tag.Key == "" {
+		if len(tag.Key) == 0 {
 			b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx)
 			continue
 		}
-
-		switch tag.ValueType {
-		case abci.KVPair_STRING:
-			tags[tag.Key] = tag.ValueString
-		case abci.KVPair_INT:
-			tags[tag.Key] = tag.ValueInt
-		}
+		tags[string(tag.Key)] = string(tag.Value)
 	}
 
 	// add predefined tags
diff --git a/vendor/github.com/tendermint/tendermint/types/evidence.go b/vendor/github.com/tendermint/tendermint/types/evidence.go
index 3ae3e40b..0b349604 100644
--- a/vendor/github.com/tendermint/tendermint/types/evidence.go
+++ b/vendor/github.com/tendermint/tendermint/types/evidence.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/go-crypto"
-	wire "github.com/tendermint/go-wire"
+	wire "github.com/tendermint/tendermint/wire"
 	"github.com/tendermint/tmlibs/merkle"
 )
 
@@ -120,7 +120,7 @@ func (dve *DuplicateVoteEvidence) Index() int {
 
 // Hash returns the hash of the evidence.
 func (dve *DuplicateVoteEvidence) Hash() []byte {
-	return merkle.SimpleHashFromBinary(dve)
+	return wireHasher(dve).Hash()
 }
 
 // Verify returns an error if the two votes aren't conflicting.
@@ -144,14 +144,14 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
 
 	// BlockIDs must be different
 	if dve.VoteA.BlockID.Equals(dve.VoteB.BlockID) {
-		return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote!", dve.VoteA.BlockID)
+		return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID)
 	}
 
 	// Signatures must be valid
-	if !dve.PubKey.VerifyBytes(SignBytes(chainID, dve.VoteA), dve.VoteA.Signature) {
+	if !dve.PubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
 		return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature)
 	}
-	if !dve.PubKey.VerifyBytes(SignBytes(chainID, dve.VoteB), dve.VoteB.Signature) {
+	if !dve.PubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
 		return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature)
 	}
 
@@ -165,7 +165,9 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
 	}
 
 	// just check their hashes
-	return bytes.Equal(merkle.SimpleHashFromBinary(dve), merkle.SimpleHashFromBinary(ev))
+	dveHash := wireHasher(dve).Hash()
+	evHash := wireHasher(ev).Hash()
+	return bytes.Equal(dveHash, evHash)
 }
 
 //-----------------------------------------------------------------
diff --git a/vendor/github.com/tendermint/tendermint/types/genesis.go b/vendor/github.com/tendermint/tendermint/types/genesis.go
index e33f6025..21c61806 100644
--- a/vendor/github.com/tendermint/tendermint/types/genesis.go
+++ b/vendor/github.com/tendermint/tendermint/types/genesis.go
@@ -8,7 +8,6 @@ import (
 	"github.com/pkg/errors"
 
 	crypto "github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire/data"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -28,8 +27,18 @@ type GenesisDoc struct {
 	ChainID         string             `json:"chain_id"`
 	ConsensusParams *ConsensusParams   `json:"consensus_params,omitempty"`
 	Validators      []GenesisValidator `json:"validators"`
-	AppHash         data.Bytes         `json:"app_hash"`
-	AppOptions      interface{}        `json:"app_options,omitempty"`
+	AppHash         cmn.HexBytes       `json:"app_hash"`
+	AppStateJSON    json.RawMessage    `json:"app_state,omitempty"`
+	AppOptions      json.RawMessage    `json:"app_options,omitempty"` // DEPRECATED
+}
+
+// AppState returns raw application state.
+// TODO: replace with AppState field during next breaking release (0.18)
+func (genDoc *GenesisDoc) AppState() json.RawMessage {
+	if len(genDoc.AppOptions) > 0 {
+		return genDoc.AppOptions
+	}
+	return genDoc.AppStateJSON
 }
 
 // SaveAs is a utility method for saving GenesisDoc as a JSON file.
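For context on the GenesisDoc change above, here is a minimal, illustrative sketch (not part of this patch) of how a caller might read the application state through the new AppState() accessor, which falls back to the deprecated "app_options" key; the chain ID and JSON payload below are made up for the example:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Older genesis files carry the application state under "app_options";
	// newer ones use "app_state". AppState() hides the difference.
	genJSON := []byte(`{
		"chain_id": "example-chain",
		"validators": [],
		"app_options": {"accounts": []}
	}`)

	var genDoc types.GenesisDoc
	if err := json.Unmarshal(genJSON, &genDoc); err != nil {
		panic(err)
	}

	// Returns the deprecated AppOptions when present, AppStateJSON otherwise.
	fmt.Printf("app state: %s\n", genDoc.AppState())
}
```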
diff --git a/vendor/github.com/tendermint/tendermint/types/heartbeat.go b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
index da9b342b..8b86a15b 100644
--- a/vendor/github.com/tendermint/tendermint/types/heartbeat.go
+++ b/vendor/github.com/tendermint/tendermint/types/heartbeat.go
@@ -2,11 +2,9 @@ package types
 
 import (
 	"fmt"
-	"io"
 
 	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	"github.com/tendermint/tendermint/wire"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -16,7 +14,7 @@ import (
 // json field tags because we always want the JSON
 // representation to be in its canonical form.
 type Heartbeat struct {
-	ValidatorAddress data.Bytes       `json:"validator_address"`
+	ValidatorAddress Address          `json:"validator_address"`
 	ValidatorIndex   int              `json:"validator_index"`
 	Height           int64            `json:"height"`
 	Round            int              `json:"round"`
@@ -24,13 +22,17 @@ type Heartbeat struct {
 	Signature        crypto.Signature `json:"signature"`
 }
 
-// WriteSignBytes writes the Heartbeat for signing.
+// SignBytes returns the Heartbeat bytes for signing.
 // It panics if the Heartbeat is nil.
-func (heartbeat *Heartbeat) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
-	wire.WriteJSON(CanonicalJSONOnceHeartbeat{
+func (heartbeat *Heartbeat) SignBytes(chainID string) []byte {
+	bz, err := wire.MarshalJSON(CanonicalJSONOnceHeartbeat{
 		chainID,
 		CanonicalHeartbeat(heartbeat),
-	}, w, n, err)
+	})
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 // Copy makes a copy of the Heartbeat.
diff --git a/vendor/github.com/tendermint/tendermint/types/params.go b/vendor/github.com/tendermint/tendermint/types/params.go
index 90ab5f65..0e8ac577 100644
--- a/vendor/github.com/tendermint/tendermint/types/params.go
+++ b/vendor/github.com/tendermint/tendermint/types/params.go
@@ -106,13 +106,13 @@ func (params *ConsensusParams) Validate() error {
 // Hash returns a merkle hash of the parameters to store
 // in the block header
 func (params *ConsensusParams) Hash() []byte {
-	return merkle.SimpleHashFromMap(map[string]interface{}{
-		"block_gossip_part_size_bytes": params.BlockGossip.BlockPartSizeBytes,
-		"block_size_max_bytes":         params.BlockSize.MaxBytes,
-		"block_size_max_gas":           params.BlockSize.MaxGas,
-		"block_size_max_txs":           params.BlockSize.MaxTxs,
-		"tx_size_max_bytes":            params.TxSize.MaxBytes,
-		"tx_size_max_gas":              params.TxSize.MaxGas,
+	return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
+		"block_gossip_part_size_bytes": wireHasher(params.BlockGossip.BlockPartSizeBytes),
+		"block_size_max_bytes":         wireHasher(params.BlockSize.MaxBytes),
+		"block_size_max_gas":           wireHasher(params.BlockSize.MaxGas),
+		"block_size_max_txs":           wireHasher(params.BlockSize.MaxTxs),
+		"tx_size_max_bytes":            wireHasher(params.TxSize.MaxBytes),
+		"tx_size_max_gas":              wireHasher(params.TxSize.MaxGas),
 	})
 }
 
diff --git a/vendor/github.com/tendermint/tendermint/types/part_set.go b/vendor/github.com/tendermint/tendermint/types/part_set.go
index e8a0997c..74994329 100644
--- a/vendor/github.com/tendermint/tendermint/types/part_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/part_set.go
@@ -9,8 +9,6 @@ import (
 
 	"golang.org/x/crypto/ripemd160"
 
-	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/merkle"
 )
@@ -22,7 +20,7 @@ var (
 
 type Part struct {
 	Index int                `json:"index"`
-	Bytes data.Bytes         `json:"bytes"`
+	Bytes cmn.HexBytes       `json:"bytes"`
 	Proof merkle.SimpleProof `json:"proof"`
 
 	// Cache
@@ -32,12 +30,11 @@ type Part struct {
 func (part *Part) Hash() []byte {
 	if part.hash != nil {
 		return part.hash
-	} else {
-		hasher := ripemd160.New()
-		hasher.Write(part.Bytes) // nolint: errcheck, gas
-		part.hash = hasher.Sum(nil)
-		return part.hash
 	}
+	hasher := ripemd160.New()
+	hasher.Write(part.Bytes) // nolint: errcheck, gas
+	part.hash = hasher.Sum(nil)
+	return part.hash
 }
 
 func (part *Part) String() string {
@@ -58,8 +55,8 @@ func (part *Part) StringIndented(indent string) string {
 //-------------------------------------
 
 type PartSetHeader struct {
-	Total int        `json:"total"`
-	Hash  data.Bytes `json:"hash"`
+	Total int          `json:"total"`
+	Hash  cmn.HexBytes `json:"hash"`
 }
 
 func (psh PartSetHeader) String() string {
@@ -74,10 +71,6 @@ func (psh PartSetHeader) Equals(other PartSetHeader) bool {
 	return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash)
 }
 
-func (psh PartSetHeader) WriteSignBytes(w io.Writer, n *int, err *error) {
-	wire.WriteJSON(CanonicalPartSetHeader(psh), w, n, err)
-}
-
 //-------------------------------------
 
 type PartSet struct {
@@ -96,7 +89,7 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet {
 	// divide data into 4kb parts.
 	total := (len(data) + partSize - 1) / partSize
 	parts := make([]*Part, total)
-	parts_ := make([]merkle.Hashable, total)
+	parts_ := make([]merkle.Hasher, total)
 	partsBitArray := cmn.NewBitArray(total)
 	for i := 0; i < total; i++ {
 		part := &Part{
@@ -108,7 +101,7 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet {
 		partsBitArray.SetIndex(i, true)
 	}
 	// Compute merkle proofs
-	root, proofs := merkle.SimpleProofsFromHashables(parts_)
+	root, proofs := merkle.SimpleProofsFromHashers(parts_)
 	for i := 0; i < total; i++ {
 		parts[i].Proof = *proofs[i]
 	}
@@ -135,20 +128,18 @@ func NewPartSetFromHeader(header PartSetHeader) *PartSet {
 func (ps *PartSet) Header() PartSetHeader {
 	if ps == nil {
 		return PartSetHeader{}
-	} else {
-		return PartSetHeader{
-			Total: ps.total,
-			Hash:  ps.hash,
-		}
+	}
+	return PartSetHeader{
+		Total: ps.total,
+		Hash:  ps.hash,
 	}
 }
 
 func (ps *PartSet) HasHeader(header PartSetHeader) bool {
 	if ps == nil {
 		return false
-	} else {
-		return ps.Header().Equals(header)
 	}
+	return ps.Header().Equals(header)
 }
 
 func (ps *PartSet) BitArray() *cmn.BitArray {
@@ -257,7 +248,7 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) {
 		return n1 + n2, err
 	}
 
-	psr.i += 1
+	psr.i++
 	if psr.i >= len(psr.parts) {
 		return 0, io.EOF
 	}
@@ -268,9 +259,8 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) {
 func (ps *PartSet) StringShort() string {
 	if ps == nil {
 		return "nil-PartSet"
-	} else {
-		ps.mtx.Lock()
-		defer ps.mtx.Unlock()
-		return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total())
 	}
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+	return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total())
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator.go b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
index 31c65eeb..052ace97 100644
--- a/vendor/github.com/tendermint/tendermint/types/priv_validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator.go
@@ -6,21 +6,19 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"os"
 	"sync"
 	"time"
 
 	crypto "github.com/tendermint/go-crypto"
-	data "github.com/tendermint/go-wire/data"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
 // TODO: type ?
 const (
-	stepNone      = 0 // Used to distinguish the initial state
-	stepPropose   = 1
-	stepPrevote   = 2
-	stepPrecommit = 3
+	stepNone      int8 = 0 // Used to distinguish the initial state
+	stepPropose   int8 = 1
+	stepPrevote   int8 = 2
+	stepPrecommit int8 = 3
 )
 
 func voteToStep(vote *Vote) int8 {
@@ -35,10 +33,61 @@ func voteToStep(vote *Vote) int8 {
 	}
 }
 
+//--------------------------------------------------------------
+// PrivValidator is being upgraded! See types/priv_validator/
+
+// ValidatorID contains the identity of the validator.
+type ValidatorID struct {
+	Address cmn.HexBytes  `json:"address"`
+	PubKey  crypto.PubKey `json:"pub_key"`
+}
+
+// PrivValidator defines the functionality of a local Tendermint validator
+// that signs votes, proposals, and heartbeats, and never double signs.
+type PrivValidator2 interface {
+	Address() (Address, error) // redundant since .PubKey().Address()
+	PubKey() (crypto.PubKey, error)
+
+	SignVote(chainID string, vote *Vote) error
+	SignProposal(chainID string, proposal *Proposal) error
+	SignHeartbeat(chainID string, heartbeat *Heartbeat) error
+}
+
+type TestSigner interface {
+	Address() cmn.HexBytes
+	PubKey() crypto.PubKey
+	Sign([]byte) (crypto.Signature, error)
+}
+
+func GenSigner() TestSigner {
+	return &DefaultTestSigner{
+		crypto.GenPrivKeyEd25519().Wrap(),
+	}
+}
+
+type DefaultTestSigner struct {
+	crypto.PrivKey
+}
+
+func (ds *DefaultTestSigner) Address() cmn.HexBytes {
+	return ds.PubKey().Address()
+}
+
+func (ds *DefaultTestSigner) PubKey() crypto.PubKey {
+	return ds.PrivKey.PubKey()
+}
+
+func (ds *DefaultTestSigner) Sign(msg []byte) (crypto.Signature, error) {
+	return ds.PrivKey.Sign(msg), nil
+}
+
+//--------------------------------------------------------------
+// TODO: Deprecate!
+
 // PrivValidator defines the functionality of a local Tendermint validator
 // that signs votes, proposals, and heartbeats, and never double signs.
 type PrivValidator interface {
-	GetAddress() data.Bytes // redundant since .PubKey().Address()
+	GetAddress() Address // redundant since .PubKey().Address()
 	GetPubKey() crypto.PubKey
 
 	SignVote(chainID string, vote *Vote) error
@@ -49,14 +98,15 @@ type PrivValidator interface {
 // PrivValidatorFS implements PrivValidator using data persisted to disk
 // to prevent double signing. The Signer itself can be mutated to use
 // something besides the default, for instance a hardware signer.
+// NOTE: the directory containing the privVal.filePath must already exist.
 type PrivValidatorFS struct {
-	Address       data.Bytes       `json:"address"`
+	Address       Address          `json:"address"`
 	PubKey        crypto.PubKey    `json:"pub_key"`
 	LastHeight    int64            `json:"last_height"`
 	LastRound     int              `json:"last_round"`
 	LastStep      int8             `json:"last_step"`
 	LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures
-	LastSignBytes data.Bytes       `json:"last_signbytes,omitempty"` // so we dont lose signatures
+	LastSignBytes cmn.HexBytes     `json:"last_signbytes,omitempty"` // so we dont lose signatures
 
 	// PrivKey should be empty if a Signer other than the default is being used.
 	PrivKey crypto.PrivKey `json:"priv_key"`
@@ -96,7 +146,7 @@ func (ds *DefaultSigner) Sign(msg []byte) (crypto.Signature, error) {
 
 // GetAddress returns the address of the validator.
 // Implements PrivValidator.
-func (pv *PrivValidatorFS) GetAddress() data.Bytes {
+func (pv *PrivValidatorFS) GetAddress() Address {
 	return pv.Address
 }
 
@@ -131,7 +181,7 @@ func LoadPrivValidatorFS(filePath string) *PrivValidatorFS {
 // or else generates a new one and saves it to the filePath.
 func LoadOrGenPrivValidatorFS(filePath string) *PrivValidatorFS {
 	var privVal *PrivValidatorFS
-	if _, err := os.Stat(filePath); err == nil {
+	if cmn.FileExists(filePath) {
 		privVal = LoadPrivValidatorFS(filePath)
 	} else {
 		privVal = GenPrivValidatorFS(filePath)
@@ -161,84 +211,78 @@ func LoadPrivValidatorFSWithSigner(filePath string, signerFunc func(PrivValidato
 }
 
 // Save persists the PrivValidatorFS to disk.
-func (privVal *PrivValidatorFS) Save() {
-	privVal.mtx.Lock()
-	defer privVal.mtx.Unlock()
-	privVal.save()
+func (pv *PrivValidatorFS) Save() {
+	pv.mtx.Lock()
+	defer pv.mtx.Unlock()
+	pv.save()
 }
 
-func (privVal *PrivValidatorFS) save() {
-	if privVal.filePath == "" {
-		cmn.PanicSanity("Cannot save PrivValidator: filePath not set")
+func (pv *PrivValidatorFS) save() {
+	outFile := pv.filePath
+	if outFile == "" {
+		panic("Cannot save PrivValidator: filePath not set")
 	}
-	jsonBytes, err := json.Marshal(privVal)
+	jsonBytes, err := json.Marshal(pv)
 	if err != nil {
-		// `@; BOOM!!!
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
-	err = cmn.WriteFileAtomic(privVal.filePath, jsonBytes, 0600)
+	err = cmn.WriteFileAtomic(outFile, jsonBytes, 0600)
 	if err != nil {
-		// `@; BOOM!!!
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
 }
 
 // Reset resets all fields in the PrivValidatorFS.
 // NOTE: Unsafe!
-func (privVal *PrivValidatorFS) Reset() {
-	privVal.LastHeight = 0
-	privVal.LastRound = 0
-	privVal.LastStep = 0
-	privVal.LastSignature = crypto.Signature{}
-	privVal.LastSignBytes = nil
-	privVal.Save()
+func (pv *PrivValidatorFS) Reset() {
+	var sig crypto.Signature
+	pv.LastHeight = 0
+	pv.LastRound = 0
+	pv.LastStep = 0
+	pv.LastSignature = sig
+	pv.LastSignBytes = nil
+	pv.Save()
 }
 
 // SignVote signs a canonical representation of the vote, along with the
 // chainID. Implements PrivValidator.
-func (privVal *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
-	privVal.mtx.Lock()
-	defer privVal.mtx.Unlock()
-	signature, err := privVal.signBytesHRS(vote.Height, vote.Round, voteToStep(vote),
-		SignBytes(chainID, vote), checkVotesOnlyDifferByTimestamp)
-	if err != nil {
+func (pv *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
+	pv.mtx.Lock()
+	defer pv.mtx.Unlock()
+	if err := pv.signVote(chainID, vote); err != nil {
 		return errors.New(cmn.Fmt("Error signing vote: %v", err))
 	}
-	vote.Signature = signature
 	return nil
 }
 
 // SignProposal signs a canonical representation of the proposal, along with
 // the chainID. Implements PrivValidator.
-func (privVal *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) error {
-	privVal.mtx.Lock()
-	defer privVal.mtx.Unlock()
-	signature, err := privVal.signBytesHRS(proposal.Height, proposal.Round, stepPropose,
-		SignBytes(chainID, proposal), checkProposalsOnlyDifferByTimestamp)
-	if err != nil {
+func (pv *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) error {
+	pv.mtx.Lock()
+	defer pv.mtx.Unlock()
+	if err := pv.signProposal(chainID, proposal); err != nil {
 		return fmt.Errorf("Error signing proposal: %v", err)
 	}
-	proposal.Signature = signature
 	return nil
 }
 
 // returns error if HRS regression or no LastSignBytes. returns true if HRS is unchanged
-func (privVal *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bool, error) {
-	if privVal.LastHeight > height {
+func (pv *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bool, error) {
+	if pv.LastHeight > height {
 		return false, errors.New("Height regression")
 	}
 
-	if privVal.LastHeight == height {
-		if privVal.LastRound > round {
+	if pv.LastHeight == height {
+		if pv.LastRound > round {
 			return false, errors.New("Round regression")
 		}
 
-		if privVal.LastRound == round {
-			if privVal.LastStep > step {
+		if pv.LastRound == round {
+			if pv.LastStep > step {
 				return false, errors.New("Step regression")
-			} else if privVal.LastStep == step {
-				if privVal.LastSignBytes != nil {
-					if privVal.LastSignature.Empty() {
+			} else if pv.LastStep == step {
+				if pv.LastSignBytes != nil {
+					if pv.LastSignature.Empty() {
 						panic("privVal: LastSignature is nil but LastSignBytes is not!")
 					}
 					return true, nil
@@ -250,63 +294,109 @@ func (privVal *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bo
 	return false, nil
 }
 
-// signBytesHRS signs the given signBytes if the height/round/step (HRS) are
-// greater than the latest state. If the HRS are equal and the only thing changed is the timestamp,
-// it returns the privValidator.LastSignature. Else it returns an error.
-func (privVal *PrivValidatorFS) signBytesHRS(height int64, round int, step int8,
-	signBytes []byte, checkFn checkOnlyDifferByTimestamp) (crypto.Signature, error) {
-	sig := crypto.Signature{}
+// signVote checks if the vote is good to sign and sets the vote signature.
+// It may need to set the timestamp as well if the vote is otherwise the same as
+// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
+func (pv *PrivValidatorFS) signVote(chainID string, vote *Vote) error {
+	height, round, step := vote.Height, vote.Round, voteToStep(vote)
+	signBytes := vote.SignBytes(chainID)
 
-	sameHRS, err := privVal.checkHRS(height, round, step)
+	sameHRS, err := pv.checkHRS(height, round, step)
 	if err != nil {
-		return sig, err
+		return err
 	}
 
 	// We might crash before writing to the wal,
-	// causing us to try to re-sign for the same HRS
+	// causing us to try to re-sign for the same HRS.
+	// If signbytes are the same, use the last signature.
+	// If they only differ by timestamp, use last timestamp and signature
+	// Otherwise, return error
 	if sameHRS {
-		// if they're the same or only differ by timestamp,
-		// return the LastSignature. Otherwise, error
-		if bytes.Equal(signBytes, privVal.LastSignBytes) ||
-			checkFn(privVal.LastSignBytes, signBytes) {
-			return privVal.LastSignature, nil
+		if bytes.Equal(signBytes, pv.LastSignBytes) {
+			vote.Signature = pv.LastSignature
+		} else if timestamp, ok := checkVotesOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
+			vote.Timestamp = timestamp
+			vote.Signature = pv.LastSignature
+		} else {
+			err = fmt.Errorf("Conflicting data")
 		}
-		return sig, fmt.Errorf("Conflicting data")
+		return err
 	}
 
-	sig, err = privVal.Sign(signBytes)
+	// It passed the checks. Sign the vote
+	sig, err := pv.Sign(signBytes)
 	if err != nil {
-		return sig, err
+		return err
 	}
-	privVal.saveSigned(height, round, step, signBytes, sig)
-	return sig, nil
+	pv.saveSigned(height, round, step, signBytes, sig)
+	vote.Signature = sig
+	return nil
+}
+
+// signProposal checks if the proposal is good to sign and sets the proposal signature.
+// It may need to set the timestamp as well if the proposal is otherwise the same as
+// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
+func (pv *PrivValidatorFS) signProposal(chainID string, proposal *Proposal) error {
+	height, round, step := proposal.Height, proposal.Round, stepPropose
+	signBytes := proposal.SignBytes(chainID)
+
+	sameHRS, err := pv.checkHRS(height, round, step)
+	if err != nil {
+		return err
+	}
+
+	// We might crash before writing to the wal,
+	// causing us to try to re-sign for the same HRS.
+	// If signbytes are the same, use the last signature.
+	// If they only differ by timestamp, use last timestamp and signature
+	// Otherwise, return error
+	if sameHRS {
+		if bytes.Equal(signBytes, pv.LastSignBytes) {
+			proposal.Signature = pv.LastSignature
+		} else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
+			proposal.Timestamp = timestamp
+			proposal.Signature = pv.LastSignature
+		} else {
+			err = fmt.Errorf("Conflicting data")
+		}
+		return err
+	}
+
+	// It passed the checks. Sign the proposal
+	sig, err := pv.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	pv.saveSigned(height, round, step, signBytes, sig)
+	proposal.Signature = sig
+	return nil
 }
 
 // Persist height/round/step and signature
-func (privVal *PrivValidatorFS) saveSigned(height int64, round int, step int8,
+func (pv *PrivValidatorFS) saveSigned(height int64, round int, step int8,
 	signBytes []byte, sig crypto.Signature) {
 
-	privVal.LastHeight = height
-	privVal.LastRound = round
-	privVal.LastStep = step
-	privVal.LastSignature = sig
-	privVal.LastSignBytes = signBytes
-	privVal.save()
+	pv.LastHeight = height
+	pv.LastRound = round
+	pv.LastStep = step
+	pv.LastSignature = sig
+	pv.LastSignBytes = signBytes
+	pv.save()
 }
 
 // SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID.
 // Implements PrivValidator.
-func (privVal *PrivValidatorFS) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
-	privVal.mtx.Lock()
-	defer privVal.mtx.Unlock()
+func (pv *PrivValidatorFS) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
+	pv.mtx.Lock()
+	defer pv.mtx.Unlock()
 	var err error
-	heartbeat.Signature, err = privVal.Sign(SignBytes(chainID, heartbeat))
+	heartbeat.Signature, err = pv.Sign(heartbeat.SignBytes(chainID))
 	return err
 }
 
 // String returns a string representation of the PrivValidatorFS.
-func (privVal *PrivValidatorFS) String() string {
-	return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", privVal.GetAddress(), privVal.LastHeight, privVal.LastRound, privVal.LastStep)
+func (pv *PrivValidatorFS) String() string {
+	return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep)
 }
 
 //-------------------------------------
@@ -329,10 +419,9 @@ func (pvs PrivValidatorsByAddress) Swap(i, j int) {
 
 //-------------------------------------
 
-type checkOnlyDifferByTimestamp func([]byte, []byte) bool
-
-// returns true if the only difference in the votes is their timestamp
-func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
+// returns the timestamp from the lastSignBytes.
+// returns true if the only difference in the votes is their timestamp.
+func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
 	var lastVote, newVote CanonicalJSONOnceVote
 	if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
 		panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
@@ -341,6 +430,11 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
 		panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
 	}
 
+	lastTime, err := time.Parse(TimeFormat, lastVote.Vote.Timestamp)
+	if err != nil {
+		panic(err)
+	}
+
 	// set the times to the same value and check equality
 	now := CanonicalTime(time.Now())
 	lastVote.Vote.Timestamp = now
@@ -348,11 +442,12 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
 	lastVoteBytes, _ := json.Marshal(lastVote)
 	newVoteBytes, _ := json.Marshal(newVote)
 
-	return bytes.Equal(newVoteBytes, lastVoteBytes)
+	return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
 }
 
+// returns the timestamp from the lastSignBytes.
 // returns true if the only difference in the proposals is their timestamp
-func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
+func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
 	var lastProposal, newProposal CanonicalJSONOnceProposal
 	if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
 		panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
@@ -361,6 +456,11 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) boo
 		panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
 	}
 
+	lastTime, err := time.Parse(TimeFormat, lastProposal.Proposal.Timestamp)
+	if err != nil {
+		panic(err)
+	}
+
 	// set the times to the same value and check equality
 	now := CanonicalTime(time.Now())
 	lastProposal.Proposal.Timestamp = now
@@ -368,5 +468,5 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) boo
 	lastProposalBytes, _ := json.Marshal(lastProposal)
 	newProposalBytes, _ := json.Marshal(newProposal)
 
-	return bytes.Equal(newProposalBytes, lastProposalBytes)
+	return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
 }
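To make the refactored signVote/signProposal flow above concrete, here is a rough sketch (not part of this patch) of the double-sign protection from a caller's point of view: the second vote at the same height/round/step but for a different block is rejected with "Conflicting data". The chain ID, temp file path, and Vote/BlockID field values are made up for the example; the field names are assumed from elsewhere in this package.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/tendermint/tendermint/types"
)

func main() {
	file := filepath.Join(os.TempDir(), "priv_validator_example.json")
	defer os.Remove(file)

	pv := types.GenPrivValidatorFS(file)

	vote := &types.Vote{
		Height:    1,
		Round:     0,
		Timestamp: time.Now(),
		Type:      types.VoteTypePrevote,
		BlockID:   types.BlockID{Hash: []byte("block-a")},
	}
	if err := pv.SignVote("example-chain", vote); err != nil {
		panic(err)
	}
	fmt.Printf("signed vote: %v\n", vote.Signature)

	// A second vote for a different block at the same height/round/step is
	// refused; this is the double-sign protection backed by the file state.
	conflicting := &types.Vote{
		Height:    1,
		Round:     0,
		Timestamp: time.Now(),
		Type:      types.VoteTypePrevote,
		BlockID:   types.BlockID{Hash: []byte("block-b")},
	}
	fmt.Println("conflicting vote:", pv.SignVote("example-chain", conflicting))
}
```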
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/json.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/json.go
new file mode 100644
index 00000000..5c0849eb
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/json.go
@@ -0,0 +1,197 @@
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+
+	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+// PrivValidator aliases types.PrivValidator
+type PrivValidator = types.PrivValidator2
+
+//-----------------------------------------------------
+
+// PrivKey implements Signer
+type PrivKey crypto.PrivKey
+
+// Sign - Implements Signer
+func (pk PrivKey) Sign(msg []byte) (crypto.Signature, error) {
+	return crypto.PrivKey(pk).Sign(msg), nil
+}
+
+// MarshalJSON satisfies json.Marshaler.
+func (pk PrivKey) MarshalJSON() ([]byte, error) {
+	return crypto.PrivKey(pk).MarshalJSON()
+}
+
+// UnmarshalJSON satisfies json.Unmarshaler.
+func (pk *PrivKey) UnmarshalJSON(b []byte) error {
+	cpk := new(crypto.PrivKey)
+	if err := cpk.UnmarshalJSON(b); err != nil {
+		return err
+	}
+	*pk = (PrivKey)(*cpk)
+	return nil
+}
+
+//-----------------------------------------------------
+
+var _ types.PrivValidator2 = (*PrivValidatorJSON)(nil)
+
+// PrivValidatorJSON wraps PrivValidatorUnencrypted
+// and persists it to disk after every SignVote and SignProposal.
+type PrivValidatorJSON struct {
+	*PrivValidatorUnencrypted
+
+	filePath string
+}
+
+// SignVote implements PrivValidator. It persists to disk.
+func (pvj *PrivValidatorJSON) SignVote(chainID string, vote *types.Vote) error {
+	err := pvj.PrivValidatorUnencrypted.SignVote(chainID, vote)
+	if err != nil {
+		return err
+	}
+	pvj.Save()
+	return nil
+}
+
+// SignProposal implements PrivValidator. It persists to disk.
+func (pvj *PrivValidatorJSON) SignProposal(chainID string, proposal *types.Proposal) error {
+	err := pvj.PrivValidatorUnencrypted.SignProposal(chainID, proposal)
+	if err != nil {
+		return err
+	}
+	pvj.Save()
+	return nil
+}
+
+//-------------------------------------------------------
+
+// String returns a string representation of the PrivValidatorJSON.
+func (pvj *PrivValidatorJSON) String() string {
+	addr, err := pvj.Address()
+	if err != nil {
+		panic(err)
+	}
+
+	return fmt.Sprintf("PrivValidator{%v %v}", addr, pvj.PrivValidatorUnencrypted.String())
+}
+
+// Save persists the PrivValidatorJSON to disk.
+func (pvj *PrivValidatorJSON) Save() {
+	pvj.save()
+}
+
+func (pvj *PrivValidatorJSON) save() {
+	if pvj.filePath == "" {
+		panic("Cannot save PrivValidator: filePath not set")
+	}
+	jsonBytes, err := json.Marshal(pvj)
+	if err != nil {
+		// ; BOOM!!!
+		panic(err)
+	}
+	err = cmn.WriteFileAtomic(pvj.filePath, jsonBytes, 0600)
+	if err != nil {
+		// ; BOOM!!!
+		panic(err)
+	}
+}
+
+// Reset resets the PrivValidatorUnencrypted. Panics if the Signer is the wrong type.
+// NOTE: Unsafe!
+func (pvj *PrivValidatorJSON) Reset() {
+	pvj.PrivValidatorUnencrypted.LastSignedInfo.Reset()
+	pvj.Save()
+}
+
+//----------------------------------------------------------------
+
+// GenPrivValidatorJSON generates a new validator with randomly generated private key
+// and the given filePath. It does not persist to file.
+func GenPrivValidatorJSON(filePath string) *PrivValidatorJSON {
+	privKey := crypto.GenPrivKeyEd25519().Wrap()
+	return &PrivValidatorJSON{
+		PrivValidatorUnencrypted: NewPrivValidatorUnencrypted(privKey),
+		filePath:                 filePath,
+	}
+}
+
+// LoadPrivValidatorJSON loads a PrivValidatorJSON from the filePath.
+func LoadPrivValidatorJSON(filePath string) *PrivValidatorJSON {
+	pvJSONBytes, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		cmn.Exit(err.Error())
+	}
+	pvj := PrivValidatorJSON{}
+	err = json.Unmarshal(pvJSONBytes, &pvj)
+	if err != nil {
+		cmn.Exit(cmn.Fmt("Error reading PrivValidatorJSON from %v: %v\n", filePath, err))
+	}
+
+	// enable persistence
+	pvj.filePath = filePath
+	return &pvj
+}
+
+// LoadOrGenPrivValidatorJSON loads a PrivValidatorJSON from the given filePath
+// or else generates a new one and saves it to the filePath.
+func LoadOrGenPrivValidatorJSON(filePath string) *PrivValidatorJSON {
+	var pvj *PrivValidatorJSON
+	if cmn.FileExists(filePath) {
+		pvj = LoadPrivValidatorJSON(filePath)
+	} else {
+		pvj = GenPrivValidatorJSON(filePath)
+		pvj.Save()
+	}
+	return pvj
+}
+
+//--------------------------------------------------------------
+
+// NewTestPrivValidator returns a PrivValidatorJSON with a tempfile
+// for the file path.
+func NewTestPrivValidator(signer types.TestSigner) *PrivValidatorJSON {
+	_, tempFilePath := cmn.Tempfile("priv_validator_")
+	pv := &PrivValidatorJSON{
+		PrivValidatorUnencrypted: NewPrivValidatorUnencrypted(signer.(*types.DefaultTestSigner).PrivKey),
+		filePath:                 tempFilePath,
+	}
+	return pv
+}
+
+//------------------------------------------------------
+
+// PrivValidatorsByAddress is a list of PrivValidatorJSON ordered by their
+// addresses.
+type PrivValidatorsByAddress []*PrivValidatorJSON
+
+func (pvs PrivValidatorsByAddress) Len() int {
+	return len(pvs)
+}
+
+func (pvs PrivValidatorsByAddress) Less(i, j int) bool {
+	iaddr, err := pvs[j].Address()
+	if err != nil {
+		panic(err)
+	}
+
+	jaddr, err := pvs[i].Address()
+	if err != nil {
+		panic(err)
+	}
+
+	return bytes.Compare(iaddr, jaddr) == -1
+}
+
+func (pvs PrivValidatorsByAddress) Swap(i, j int) {
+	it := pvs[i]
+	pvs[i] = pvs[j]
+	pvs[j] = it
+}
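Usage of the new PrivValidatorJSON added above might look like the following sketch (not part of this patch); the import alias and temp file path are assumptions, since the package is named types but lives under types/priv_validator:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	privval "github.com/tendermint/tendermint/types/priv_validator"
)

func main() {
	file := filepath.Join(os.TempDir(), "priv_validator.json")
	defer os.Remove(file)

	// Loads the validator key if the file exists, otherwise generates a new
	// Ed25519 key and persists it; SignVote/SignProposal save after each call.
	pv := privval.LoadOrGenPrivValidatorJSON(file)

	addr, err := pv.Address()
	if err != nil {
		panic(err)
	}
	fmt.Printf("validator address: %X\n", addr)
}
```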
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/sign_info.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/sign_info.go
new file mode 100644
index 00000000..8b135df6
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/sign_info.go
@@ -0,0 +1,238 @@
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+// TODO: type ?
+const (
+	stepNone      int8 = 0 // Used to distinguish the initial state
+	stepPropose   int8 = 1
+	stepPrevote   int8 = 2
+	stepPrecommit int8 = 3
+)
+
+func voteToStep(vote *types.Vote) int8 {
+	switch vote.Type {
+	case types.VoteTypePrevote:
+		return stepPrevote
+	case types.VoteTypePrecommit:
+		return stepPrecommit
+	default:
+		panic("Unknown vote type")
+	}
+}
+
+//-------------------------------------
+
+// LastSignedInfo contains information about the latest
+// data signed by a validator to help prevent double signing.
+type LastSignedInfo struct {
+	Height    int64            `json:"height"`
+	Round     int              `json:"round"`
+	Step      int8             `json:"step"`
+	Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
+	SignBytes cmn.HexBytes     `json:"signbytes,omitempty"` // so we dont lose signatures
+}
+
+func NewLastSignedInfo() *LastSignedInfo {
+	return &LastSignedInfo{
+		Step: stepNone,
+	}
+}
+
+func (lsi *LastSignedInfo) String() string {
+	return fmt.Sprintf("LH:%v, LR:%v, LS:%v", lsi.Height, lsi.Round, lsi.Step)
+}
+
+// Verify returns an error if there is a height/round/step regression
+// or if the HRS matches but there are no LastSignBytes.
+// It returns true if HRS matches exactly and the LastSignature exists.
+// It panics if the HRS matches, the LastSignBytes are not empty, but the LastSignature is empty.
+func (lsi LastSignedInfo) Verify(height int64, round int, step int8) (bool, error) {
+	if lsi.Height > height {
+		return false, errors.New("Height regression")
+	}
+
+	if lsi.Height == height {
+		if lsi.Round > round {
+			return false, errors.New("Round regression")
+		}
+
+		if lsi.Round == round {
+			if lsi.Step > step {
+				return false, errors.New("Step regression")
+			} else if lsi.Step == step {
+				if lsi.SignBytes != nil {
+					if lsi.Signature.Empty() {
+						panic("info: LastSignature is nil but LastSignBytes is not!")
+					}
+					return true, nil
+				}
+				return false, errors.New("No LastSignature found")
+			}
+		}
+	}
+	return false, nil
+}
+
+// Set height/round/step and signature on the info
+func (lsi *LastSignedInfo) Set(height int64, round int, step int8,
+	signBytes []byte, sig crypto.Signature) {
+
+	lsi.Height = height
+	lsi.Round = round
+	lsi.Step = step
+	lsi.Signature = sig
+	lsi.SignBytes = signBytes
+}
+
+// Reset resets all the values.
+// XXX: Unsafe.
+func (lsi *LastSignedInfo) Reset() {
+	lsi.Height = 0
+	lsi.Round = 0
+	lsi.Step = 0
+	lsi.Signature = crypto.Signature{}
+	lsi.SignBytes = nil
+}
+
+// SignVote checks the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
+// If so, it signs the vote, updates the LastSignedInfo, and sets the signature on the vote.
+// If the HRS are equal and the only thing changed is the timestamp, it sets the vote.Timestamp to the previous
+// value and the Signature to the LastSignedInfo.Signature.
+// Else it returns an error.
+func (lsi *LastSignedInfo) SignVote(signer types.Signer, chainID string, vote *types.Vote) error {
+	height, round, step := vote.Height, vote.Round, voteToStep(vote)
+	signBytes := vote.SignBytes(chainID)
+
+	sameHRS, err := lsi.Verify(height, round, step)
+	if err != nil {
+		return err
+	}
+
+	// We might crash before writing to the wal,
+	// causing us to try to re-sign for the same HRS.
+	// If signbytes are the same, use the last signature.
+	// If they only differ by timestamp, use last timestamp and signature
+	// Otherwise, return error
+	if sameHRS {
+		if bytes.Equal(signBytes, lsi.SignBytes) {
+			vote.Signature = lsi.Signature
+		} else if timestamp, ok := checkVotesOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
+			vote.Timestamp = timestamp
+			vote.Signature = lsi.Signature
+		} else {
+			err = fmt.Errorf("Conflicting data")
+		}
+		return err
+	}
+	sig, err := signer.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	lsi.Set(height, round, step, signBytes, sig)
+	vote.Signature = sig
+	return nil
+}
+
+// SignProposal checks if the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
+// If so, it signs the proposal, updates the LastSignedInfo, and sets the signature on the proposal.
+// If the HRS are equal and the only thing changed is the timestamp, it sets the timestamp to the previous
+// value and the Signature to the LastSignedInfo.Signature.
+// Else it returns an error.
+func (lsi *LastSignedInfo) SignProposal(signer types.Signer, chainID string, proposal *types.Proposal) error {
+	height, round, step := proposal.Height, proposal.Round, stepPropose
+	signBytes := proposal.SignBytes(chainID)
+
+	sameHRS, err := lsi.Verify(height, round, step)
+	if err != nil {
+		return err
+	}
+
+	// We might crash before writing to the wal,
+	// causing us to try to re-sign for the same HRS.
+	// If signbytes are the same, use the last signature.
+	// If they only differ by timestamp, use last timestamp and signature
+	// Otherwise, return error
+	if sameHRS {
+		if bytes.Equal(signBytes, lsi.SignBytes) {
+			proposal.Signature = lsi.Signature
+		} else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
+			proposal.Timestamp = timestamp
+			proposal.Signature = lsi.Signature
+		} else {
+			err = fmt.Errorf("Conflicting data")
+		}
+		return err
+	}
+	sig, err := signer.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	lsi.Set(height, round, step, signBytes, sig)
+	proposal.Signature = sig
+	return nil
+}
+
+//-------------------------------------
+
+// returns the timestamp from the lastSignBytes.
+// returns true if the only difference in the votes is their timestamp.
+func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
+	var lastVote, newVote types.CanonicalJSONOnceVote
+	if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
+		panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
+	}
+	if err := json.Unmarshal(newSignBytes, &newVote); err != nil {
+		panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
+	}
+
+	lastTime, err := time.Parse(types.TimeFormat, lastVote.Vote.Timestamp)
+	if err != nil {
+		panic(err)
+	}
+
+	// set the times to the same value and check equality
+	now := types.CanonicalTime(time.Now())
+	lastVote.Vote.Timestamp = now
+	newVote.Vote.Timestamp = now
+	lastVoteBytes, _ := json.Marshal(lastVote)
+	newVoteBytes, _ := json.Marshal(newVote)
+
+	return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
+}
+
+// returns the timestamp from the lastSignBytes.
+// returns true if the only difference in the proposals is their timestamp
+func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
+	var lastProposal, newProposal types.CanonicalJSONOnceProposal
+	if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
+		panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
+	}
+	if err := json.Unmarshal(newSignBytes, &newProposal); err != nil {
+		panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
+	}
+
+	lastTime, err := time.Parse(types.TimeFormat, lastProposal.Proposal.Timestamp)
+	if err != nil {
+		panic(err)
+	}
+
+	// set the times to the same value and check equality
+	now := types.CanonicalTime(time.Now())
+	lastProposal.Proposal.Timestamp = now
+	newProposal.Proposal.Timestamp = now
+	lastProposalBytes, _ := json.Marshal(lastProposal)
+	newProposalBytes, _ := json.Marshal(newProposal)
+
+	return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
+}
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/socket.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/socket.go
new file mode 100644
index 00000000..26cab72b
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/socket.go
@@ -0,0 +1,564 @@
+package types
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"time"
+
+	"github.com/pkg/errors"
+	crypto "github.com/tendermint/go-crypto"
+	wire "github.com/tendermint/go-wire"
+	cmn "github.com/tendermint/tmlibs/common"
+	"github.com/tendermint/tmlibs/log"
+
+	p2pconn "github.com/tendermint/tendermint/p2p/conn"
+	"github.com/tendermint/tendermint/types"
+)
+
+const (
+	defaultAcceptDeadlineSeconds = 3
+	defaultConnDeadlineSeconds   = 3
+	defaultConnHeartBeatSeconds  = 30
+	defaultConnWaitSeconds       = 60
+	defaultDialRetries           = 10
+)
+
+// Socket errors.
+var (
+	ErrDialRetryMax    = errors.New("dialed maximum retries")
+	ErrConnWaitTimeout = errors.New("waited for remote signer for too long")
+	ErrConnTimeout     = errors.New("remote signer timed out")
+)
+
+var (
+	acceptDeadline = time.Second * defaultAcceptDeadlineSeconds
+	connDeadline   = time.Second * defaultConnDeadlineSeconds
+	connHeartbeat  = time.Second * defaultConnHeartBeatSeconds
+)
+
+// SocketClientOption sets an optional parameter on the SocketClient.
+type SocketClientOption func(*SocketClient)
+
+// SocketClientAcceptDeadline sets the deadline for the SocketClient listener.
+// A zero time value disables the deadline.
+func SocketClientAcceptDeadline(deadline time.Duration) SocketClientOption {
+	return func(sc *SocketClient) { sc.acceptDeadline = deadline }
+}
+
+// SocketClientConnDeadline sets the read and write deadline for connections
+// from external signing processes.
+func SocketClientConnDeadline(deadline time.Duration) SocketClientOption {
+	return func(sc *SocketClient) { sc.connDeadline = deadline }
+}
+
+// SocketClientHeartbeat sets the period on which to check the liveness of the
+// connected Signer connections.
+func SocketClientHeartbeat(period time.Duration) SocketClientOption {
+	return func(sc *SocketClient) { sc.connHeartbeat = period }
+}
+
+// SocketClientConnWait sets the timeout duration before connection of external
+// signing processes are considered to be unsuccessful.
+func SocketClientConnWait(timeout time.Duration) SocketClientOption {
+	return func(sc *SocketClient) { sc.connWaitTimeout = timeout }
+}
+
+// SocketClient implements PrivValidator, it uses a socket to request signatures
+// from an external process.
+type SocketClient struct {
+	cmn.BaseService
+
+	addr            string
+	acceptDeadline  time.Duration
+	connDeadline    time.Duration
+	connHeartbeat   time.Duration
+	connWaitTimeout time.Duration
+	privKey         crypto.PrivKeyEd25519
+
+	conn     net.Conn
+	listener net.Listener
+}
+
+// Check that SocketClient implements PrivValidator2.
+var _ types.PrivValidator2 = (*SocketClient)(nil)
+
+// NewSocketClient returns an instance of SocketClient.
+func NewSocketClient(
+	logger log.Logger,
+	socketAddr string,
+	privKey crypto.PrivKeyEd25519,
+) *SocketClient {
+	sc := &SocketClient{
+		addr:            socketAddr,
+		acceptDeadline:  acceptDeadline,
+		connDeadline:    connDeadline,
+		connHeartbeat:   connHeartbeat,
+		connWaitTimeout: time.Second * defaultConnWaitSeconds,
+		privKey:         privKey,
+	}
+
+	sc.BaseService = *cmn.NewBaseService(logger, "SocketClient", sc)
+
+	return sc
+}
+
+// GetAddress implements PrivValidator.
+// TODO(xla): Remove when PrivValidator2 replaced PrivValidator.
+func (sc *SocketClient) GetAddress() types.Address {
+	addr, err := sc.Address()
+	if err != nil {
+		panic(err)
+	}
+
+	return addr
+}
+
+// Address is an alias for PubKey().Address().
+func (sc *SocketClient) Address() (cmn.HexBytes, error) {
+	p, err := sc.PubKey()
+	if err != nil {
+		return nil, err
+	}
+
+	return p.Address(), nil
+}
+
+// GetPubKey implements PrivValidator.
+// TODO(xla): Remove when PrivValidator2 replaced PrivValidator.
+func (sc *SocketClient) GetPubKey() crypto.PubKey {
+	pubKey, err := sc.PubKey()
+	if err != nil {
+		panic(err)
+	}
+
+	return pubKey
+}
+
+// PubKey implements PrivValidator2.
+func (sc *SocketClient) PubKey() (crypto.PubKey, error) {
+	err := writeMsg(sc.conn, &PubKeyMsg{})
+	if err != nil {
+		return crypto.PubKey{}, err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return crypto.PubKey{}, err
+	}
+
+	return res.(*PubKeyMsg).PubKey, nil
+}
+
+// SignVote implements PrivValidator2.
+func (sc *SocketClient) SignVote(chainID string, vote *types.Vote) error {
+	err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+
+	*vote = *res.(*SignVoteMsg).Vote
+
+	return nil
+}
+
+// SignProposal implements PrivValidator2.
+func (sc *SocketClient) SignProposal(
+	chainID string,
+	proposal *types.Proposal,
+) error {
+	err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+
+	*proposal = *res.(*SignProposalMsg).Proposal
+
+	return nil
+}
+
+// SignHeartbeat implements PrivValidator2.
+func (sc *SocketClient) SignHeartbeat(
+	chainID string,
+	heartbeat *types.Heartbeat,
+) error {
+	err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+
+	*heartbeat = *res.(*SignHeartbeatMsg).Heartbeat
+
+	return nil
+}
+
+// OnStart implements cmn.Service.
+func (sc *SocketClient) OnStart() error {
+	if err := sc.listen(); err != nil {
+		sc.Logger.Error(
+			"OnStart",
+			"err", errors.Wrap(err, "failed to listen"),
+		)
+
+		return err
+	}
+
+	conn, err := sc.waitConnection()
+	if err != nil {
+		sc.Logger.Error(
+			"OnStart",
+			"err", errors.Wrap(err, "failed to accept connection"),
+		)
+
+		return err
+	}
+
+	sc.conn = conn
+
+	return nil
+}
+
+// OnStop implements cmn.Service.
+func (sc *SocketClient) OnStop() {
+	if sc.conn != nil {
+		if err := sc.conn.Close(); err != nil {
+			sc.Logger.Error(
+				"OnStop",
+				"err", errors.Wrap(err, "failed to close connection"),
+			)
+		}
+	}
+
+	if sc.listener != nil {
+		if err := sc.listener.Close(); err != nil {
+			sc.Logger.Error(
+				"OnStop",
+				"err", errors.Wrap(err, "failed to close listener"),
+			)
+		}
+	}
+}
+
+func (sc *SocketClient) acceptConnection() (net.Conn, error) {
+	conn, err := sc.listener.Accept()
+	if err != nil {
+		if !sc.IsRunning() {
+			return nil, nil // Ignore error from listener closing.
+		}
+		return nil, err
+
+	}
+
+	conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey.Wrap())
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+func (sc *SocketClient) listen() error {
+	ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr))
+	if err != nil {
+		return err
+	}
+
+	sc.listener = newTCPTimeoutListener(
+		ln,
+		sc.acceptDeadline,
+		sc.connDeadline,
+		sc.connHeartbeat,
+	)
+
+	return nil
+}
+
+// waitConnection uses the configured wait timeout to error if no external
+// process connects in the time period.
+func (sc *SocketClient) waitConnection() (net.Conn, error) {
+	var (
+		connc = make(chan net.Conn, 1)
+		errc  = make(chan error, 1)
+	)
+
+	go func(connc chan<- net.Conn, errc chan<- error) {
+		conn, err := sc.acceptConnection()
+		if err != nil {
+			errc <- err
+			return
+		}
+
+		connc <- conn
+	}(connc, errc)
+
+	select {
+	case conn := <-connc:
+		return conn, nil
+	case err := <-errc:
+		if _, ok := err.(timeoutError); ok {
+			return nil, errors.Wrap(ErrConnWaitTimeout, err.Error())
+		}
+		return nil, err
+	case <-time.After(sc.connWaitTimeout):
+		return nil, ErrConnWaitTimeout
+	}
+}
+
+//---------------------------------------------------------
+
+// RemoteSignerOption sets an optional parameter on the RemoteSigner.
+type RemoteSignerOption func(*RemoteSigner)
+
+// RemoteSignerConnDeadline sets the read and write deadline for connections
+// from external signing processes.
+func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption {
+	return func(ss *RemoteSigner) { ss.connDeadline = deadline }
+}
+
+// RemoteSignerConnRetries sets the amount of attempted retries to connect.
+func RemoteSignerConnRetries(retries int) RemoteSignerOption {
+	return func(ss *RemoteSigner) { ss.connRetries = retries }
+}
+
+// RemoteSigner implements PrivValidator by dialing to a socket.
+type RemoteSigner struct {
+	cmn.BaseService
+
+	addr         string
+	chainID      string
+	connDeadline time.Duration
+	connRetries  int
+	privKey      crypto.PrivKeyEd25519
+	privVal      PrivValidator
+
+	conn net.Conn
+}
+
+// NewRemoteSigner returns an instance of RemoteSigner.
+func NewRemoteSigner(
+	logger log.Logger,
+	chainID, socketAddr string,
+	privVal PrivValidator,
+	privKey crypto.PrivKeyEd25519,
+) *RemoteSigner {
+	rs := &RemoteSigner{
+		addr:         socketAddr,
+		chainID:      chainID,
+		connDeadline: time.Second * defaultConnDeadlineSeconds,
+		connRetries:  defaultDialRetries,
+		privKey:      privKey,
+		privVal:      privVal,
+	}
+
+	rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs)
+
+	return rs
+}
+
+// OnStart implements cmn.Service.
+func (rs *RemoteSigner) OnStart() error {
+	conn, err := rs.connect()
+	if err != nil {
+		rs.Logger.Error("OnStart", "err", errors.Wrap(err, "connect"))
+
+		return err
+	}
+
+	go rs.handleConnection(conn)
+
+	return nil
+}
+
+// OnStop implements cmn.Service.
+func (rs *RemoteSigner) OnStop() {
+	if rs.conn == nil {
+		return
+	}
+
+	if err := rs.conn.Close(); err != nil {
+		rs.Logger.Error("OnStop", "err", errors.Wrap(err, "closing listener failed"))
+	}
+}
+
+func (rs *RemoteSigner) connect() (net.Conn, error) {
+	for retries := rs.connRetries; retries > 0; retries-- {
+		// Don't sleep if it is the first retry.
+		if retries != rs.connRetries {
+			time.Sleep(rs.connDeadline)
+		}
+
+		conn, err := cmn.Connect(rs.addr)
+		if err != nil {
+			rs.Logger.Error(
+				"connect",
+				"addr", rs.addr,
+				"err", errors.Wrap(err, "connection failed"),
+			)
+
+			continue
+		}
+
+		if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil {
+			rs.Logger.Error(
+				"connect",
+				"err", errors.Wrap(err, "setting connection timeout failed"),
+			)
+			continue
+		}
+
+		conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey.Wrap())
+		if err != nil {
+			rs.Logger.Error(
+				"connect",
+				"err", errors.Wrap(err, "encrypting connection failed"),
+			)
+
+			continue
+		}
+
+		return conn, nil
+	}
+
+	return nil, ErrDialRetryMax
+}
+
+func (rs *RemoteSigner) handleConnection(conn net.Conn) {
+	for {
+		if !rs.IsRunning() {
+			return // Ignore error from listener closing.
+		}
+
+		req, err := readMsg(conn)
+		if err != nil {
+			if err != io.EOF {
+				rs.Logger.Error("handleConnection", "err", err)
+			}
+			return
+		}
+
+		var res PrivValMsg
+
+		switch r := req.(type) {
+		case *PubKeyMsg:
+			var p crypto.PubKey
+
+			p, err = rs.privVal.PubKey()
+			res = &PubKeyMsg{p}
+		case *SignVoteMsg:
+			err = rs.privVal.SignVote(rs.chainID, r.Vote)
+			res = &SignVoteMsg{r.Vote}
+		case *SignProposalMsg:
+			err = rs.privVal.SignProposal(rs.chainID, r.Proposal)
+			res = &SignProposalMsg{r.Proposal}
+		case *SignHeartbeatMsg:
+			err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat)
+			res = &SignHeartbeatMsg{r.Heartbeat}
+		default:
+			err = fmt.Errorf("unknown msg: %v", r)
+		}
+
+		if err != nil {
+			rs.Logger.Error("handleConnection", "err", err)
+			return
+		}
+
+		err = writeMsg(conn, res)
+		if err != nil {
+			rs.Logger.Error("handleConnection", "err", err)
+			return
+		}
+	}
+}
+
+//---------------------------------------------------------
+
+const (
+	msgTypePubKey        = byte(0x01)
+	msgTypeSignVote      = byte(0x10)
+	msgTypeSignProposal  = byte(0x11)
+	msgTypeSignHeartbeat = byte(0x12)
+)
+
+// PrivValMsg is sent between RemoteSigner and SocketClient.
+type PrivValMsg interface{}
+
+var _ = wire.RegisterInterface(
+	struct{ PrivValMsg }{},
+	wire.ConcreteType{&PubKeyMsg{}, msgTypePubKey},
+	wire.ConcreteType{&SignVoteMsg{}, msgTypeSignVote},
+	wire.ConcreteType{&SignProposalMsg{}, msgTypeSignProposal},
+	wire.ConcreteType{&SignHeartbeatMsg{}, msgTypeSignHeartbeat},
+)
+
+// PubKeyMsg is a PrivValidatorSocket message containing the public key.
+type PubKeyMsg struct {
+	PubKey crypto.PubKey
+}
+
+// SignVoteMsg is a PrivValidatorSocket message containing a vote.
+type SignVoteMsg struct {
+	Vote *types.Vote
+}
+
+// SignProposalMsg is a PrivValidatorSocket message containing a Proposal.
+type SignProposalMsg struct {
+	Proposal *types.Proposal
+}
+
+// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat.
+type SignHeartbeatMsg struct {
+	Heartbeat *types.Heartbeat
+}
+
+func readMsg(r io.Reader) (PrivValMsg, error) {
+	var (
+		n   int
+		err error
+	)
+
+	read := wire.ReadBinary(struct{ PrivValMsg }{}, r, 0, &n, &err)
+	if err != nil {
+		if _, ok := err.(timeoutError); ok {
+			return nil, errors.Wrap(ErrConnTimeout, err.Error())
+		}
+
+		return nil, err
+	}
+
+	w, ok := read.(struct{ PrivValMsg })
+	if !ok {
+		return nil, errors.New("unknown type")
+	}
+
+	return w.PrivValMsg, nil
+}
+
+func writeMsg(w io.Writer, msg interface{}) error {
+	var (
+		err error
+		n   int
+	)
+
+	// TODO(xla): This extra wrap should be gone with the sdk-2 update.
+	wire.WriteBinary(struct{ PrivValMsg }{msg}, w, &n, &err)
+	if _, ok := err.(timeoutError); ok {
+		return errors.Wrap(ErrConnTimeout, err.Error())
+	}
+
+	return err
+}
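The SocketClient/RemoteSigner pair introduced above could be wired together roughly as in the sketch below (not part of this patch). The listen address, chain ID, and single-process setup are assumptions made purely for illustration; in practice the remote signer runs as a separate process holding the consensus key, while the node side only holds an ephemeral key for the encrypted transport.

```go
package main

import (
	"fmt"
	"os"

	crypto "github.com/tendermint/go-crypto"
	"github.com/tendermint/tmlibs/log"

	privval "github.com/tendermint/tendermint/types/priv_validator"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	addr := "tcp://127.0.0.1:26659"
	chainID := "example-chain"

	// Signer side: holds the validator key and dials the node with retries.
	signerPrivVal := privval.NewPrivValidatorUnencrypted(crypto.GenPrivKeyEd25519().Wrap())
	rs := privval.NewRemoteSigner(logger, chainID, addr, signerPrivVal, crypto.GenPrivKeyEd25519())

	// Node side: listens and waits for the signer to connect.
	sc := privval.NewSocketClient(logger, addr, crypto.GenPrivKeyEd25519())

	go func() {
		if err := rs.Start(); err != nil {
			logger.Error("remote signer", "err", err)
		}
	}()

	// Start blocks until the signer has connected or the wait times out.
	if err := sc.Start(); err != nil {
		panic(err)
	}
	defer sc.Stop()
	defer rs.Stop()

	pubKey, err := sc.PubKey()
	if err != nil {
		panic(err)
	}
	fmt.Printf("remote validator pubkey: %v\n", pubKey)
}
```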
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/socket_tcp.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/socket_tcp.go
new file mode 100644
index 00000000..2421eb9f
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/socket_tcp.go
@@ -0,0 +1,66 @@
+package types
+
+import (
+	"net"
+	"time"
+)
+
+// timeoutError can be used to check if an error returned from the net package
+// was due to a timeout.
+type timeoutError interface {
+	Timeout() bool
+}
+
+// tcpTimeoutListener implements net.Listener.
+var _ net.Listener = (*tcpTimeoutListener)(nil)
+
+// tcpTimeoutListener wraps a *net.TCPListener to standardise protocol timeouts
+// and potentially other tuning parameters.
+type tcpTimeoutListener struct {
+	*net.TCPListener
+
+	acceptDeadline time.Duration
+	connDeadline   time.Duration
+	period         time.Duration
+}
+
+// newTCPTimeoutListener returns an instance of tcpTimeoutListener.
+func newTCPTimeoutListener(
+	ln net.Listener,
+	acceptDeadline, connDeadline time.Duration,
+	period time.Duration,
+) tcpTimeoutListener {
+	return tcpTimeoutListener{
+		TCPListener:    ln.(*net.TCPListener),
+		acceptDeadline: acceptDeadline,
+		connDeadline:   connDeadline,
+		period:         period,
+	}
+}
+
+// Accept implements net.Listener.
+func (ln tcpTimeoutListener) Accept() (net.Conn, error) {
+	err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline))
+	if err != nil {
+		return nil, err
+	}
+
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := tc.SetDeadline(time.Now().Add(ln.connDeadline)); err != nil {
+		return nil, err
+	}
+
+	if err := tc.SetKeepAlive(true); err != nil {
+		return nil, err
+	}
+
+	if err := tc.SetKeepAlivePeriod(ln.period); err != nil {
+		return nil, err
+	}
+
+	return tc, nil
+}
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/unencrypted.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/unencrypted.go
new file mode 100644
index 00000000..10a304d9
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/unencrypted.go
@@ -0,0 +1,66 @@
+package types
+
+import (
+	"fmt"
+
+	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+//-----------------------------------------------------------------
+
+var _ types.PrivValidator2 = (*PrivValidatorUnencrypted)(nil)
+
+// PrivValidatorUnencrypted implements PrivValidator.
+// It uses an in-memory crypto.PrivKey that is
+// persisted to disk unencrypted.
+type PrivValidatorUnencrypted struct {
+	ID             types.ValidatorID `json:"id"`
+	PrivKey        PrivKey           `json:"priv_key"`
+	LastSignedInfo *LastSignedInfo   `json:"last_signed_info"`
+}
+
+// NewPrivValidatorUnencrypted returns an instance of PrivValidatorUnencrypted.
+func NewPrivValidatorUnencrypted(priv crypto.PrivKey) *PrivValidatorUnencrypted {
+	return &PrivValidatorUnencrypted{
+		ID: types.ValidatorID{
+			Address: priv.PubKey().Address(),
+			PubKey:  priv.PubKey(),
+		},
+		PrivKey:        PrivKey(priv),
+		LastSignedInfo: NewLastSignedInfo(),
+	}
+}
+
+// String returns a string representation of the PrivValidatorUnencrypted
+func (upv *PrivValidatorUnencrypted) String() string {
+	addr, err := upv.Address()
+	if err != nil {
+		panic(err)
+	}
+
+	return fmt.Sprintf("PrivValidator{%v %v}", addr, upv.LastSignedInfo.String())
+}
+
+func (upv *PrivValidatorUnencrypted) Address() (cmn.HexBytes, error) {
+	return upv.PrivKey.PubKey().Address(), nil
+}
+
+func (upv *PrivValidatorUnencrypted) PubKey() (crypto.PubKey, error) {
+	return upv.PrivKey.PubKey(), nil
+}
+
+func (upv *PrivValidatorUnencrypted) SignVote(chainID string, vote *types.Vote) error {
+	return upv.LastSignedInfo.SignVote(upv.PrivKey, chainID, vote)
+}
+
+func (upv *PrivValidatorUnencrypted) SignProposal(chainID string, proposal *types.Proposal) error {
+	return upv.LastSignedInfo.SignProposal(upv.PrivKey, chainID, proposal)
+}
+
+func (upv *PrivValidatorUnencrypted) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
+	var err error
+	heartbeat.Signature, err = upv.PrivKey.Sign(heartbeat.SignBytes(chainID))
+	return err
+}
diff --git a/vendor/github.com/tendermint/tendermint/types/priv_validator/upgrade.go b/vendor/github.com/tendermint/tendermint/types/priv_validator/upgrade.go
new file mode 100644
index 00000000..06365542
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/types/priv_validator/upgrade.go
@@ -0,0 +1,59 @@
+package types
+
+import (
+	"encoding/json"
+	"io/ioutil"
+
+	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/types"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+type PrivValidatorV1 struct {
+	Address       cmn.HexBytes     `json:"address"`
+	PubKey        crypto.PubKey    `json:"pub_key"`
+	LastHeight    int64            `json:"last_height"`
+	LastRound     int              `json:"last_round"`
+	LastStep      int8             `json:"last_step"`
+	LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we don't lose signatures
+	LastSignBytes cmn.HexBytes     `json:"last_signbytes,omitempty"` // so we don't lose signatures
+	PrivKey       crypto.PrivKey   `json:"priv_key"`
+}
+
+func UpgradePrivValidator(filePath string) (*PrivValidatorJSON, error) {
+	b, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	pv := new(PrivValidatorV1)
+	err = json.Unmarshal(b, pv)
+	if err != nil {
+		return nil, err
+	}
+
+	pvNew := &PrivValidatorJSON{
+		PrivValidatorUnencrypted: &PrivValidatorUnencrypted{
+			ID: types.ValidatorID{
+				Address: pv.Address,
+				PubKey:  pv.PubKey,
+			},
+			PrivKey: PrivKey(pv.PrivKey),
+			LastSignedInfo: &LastSignedInfo{
+				Height:    pv.LastHeight,
+				Round:     pv.LastRound,
+				Step:      pv.LastStep,
+				SignBytes: pv.LastSignBytes,
+				Signature: pv.LastSignature,
+			},
+		},
+	}
+
+	b, err = json.MarshalIndent(pvNew, "", "  ")
+	if err != nil {
+		return nil, err
+	}
+
+	err = ioutil.WriteFile(filePath, b, 0600)
+	return pvNew, err
+}
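
Editor's note: UpgradePrivValidator above follows a simple migrate-in-place shape: read the old JSON file, unmarshal into the legacy struct, map its fields into the new layout, and write the result back with 0600 permissions. A hedged sketch of that shape follows; configV1/configV2 are hypothetical stand-ins, not the vendored types.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

// configV1 is the legacy on-disk layout, configV2 the new one.
type configV1 struct {
	Addr string `json:"addr"`
}

type configV2 struct {
	Address string `json:"address"`
}

func upgradeConfig(path string) (*configV2, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	old := new(configV1)
	if err := json.Unmarshal(b, old); err != nil {
		return nil, err
	}
	upgraded := &configV2{Address: old.Addr}
	b, err = json.MarshalIndent(upgraded, "", "  ")
	if err != nil {
		return nil, err
	}
	// 0600 because the real file holds a private key; keep it owner-only.
	return upgraded, ioutil.WriteFile(path, b, 0600)
}

func main() {
	f, _ := ioutil.TempFile("", "cfg")
	f.Close()
	defer os.Remove(f.Name())
	ioutil.WriteFile(f.Name(), []byte(`{"addr":"tcp://127.0.0.1:26657"}`), 0600)

	cfg, err := upgradeConfig(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Address) // tcp://127.0.0.1:26657
}

Writing back with MarshalIndent keeps the migrated file human-readable, which is useful for operator-edited key files.
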
diff --git a/vendor/github.com/tendermint/tendermint/types/proposal.go b/vendor/github.com/tendermint/tendermint/types/proposal.go
index 98600681..c240756b 100644
--- a/vendor/github.com/tendermint/tendermint/types/proposal.go
+++ b/vendor/github.com/tendermint/tendermint/types/proposal.go
@@ -3,11 +3,10 @@ package types
 import (
 	"errors"
 	"fmt"
-	"io"
 	"time"
 
 	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire"
+	"github.com/tendermint/tendermint/wire"
 )
 
 var (
@@ -50,10 +49,14 @@ func (p *Proposal) String() string {
 		p.POLBlockID, p.Signature, CanonicalTime(p.Timestamp))
 }
 
-// WriteSignBytes writes the Proposal bytes for signing
-func (p *Proposal) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
-	wire.WriteJSON(CanonicalJSONOnceProposal{
+// SignBytes returns the Proposal bytes for signing
+func (p *Proposal) SignBytes(chainID string) []byte {
+	bz, err := wire.MarshalJSON(CanonicalJSONOnceProposal{
 		ChainID:  chainID,
 		Proposal: CanonicalProposal(p),
-	}, w, n, err)
+	})
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
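
Editor's note: this hunk is part of the repo-wide move from WriteSignBytes(chainID, w, n, err) to SignBytes(chainID) []byte: marshal the canonical form and panic on error instead of threading writer/count/error pointers through every caller. A minimal sketch of that shape, with a hypothetical canonicalThing standing in for the Canonical* structs and encoding/json standing in for the wire codec:

package main

import (
	"encoding/json"
	"fmt"
)

// canonicalThing is the deterministic form that gets signed.
type canonicalThing struct {
	ChainID string `json:"chain_id"`
	Payload string `json:"payload"`
}

// signBytes marshals the canonical form; a marshal failure of an in-memory
// value is a programming bug, not a runtime condition a caller could handle,
// hence the panic.
func signBytes(chainID, payload string) []byte {
	bz, err := json.Marshal(canonicalThing{ChainID: chainID, Payload: payload})
	if err != nil {
		panic(err)
	}
	return bz
}

func main() {
	fmt.Printf("%s\n", signBytes("test-chain", "hello"))
}
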
diff --git a/vendor/github.com/tendermint/tendermint/types/protobuf.go b/vendor/github.com/tendermint/tendermint/types/protobuf.go
index 43c8f450..e7ae20e3 100644
--- a/vendor/github.com/tendermint/tendermint/types/protobuf.go
+++ b/vendor/github.com/tendermint/tendermint/types/protobuf.go
@@ -10,8 +10,8 @@ var TM2PB = tm2pb{}
 
 type tm2pb struct{}
 
-func (tm2pb) Header(header *Header) *types.Header {
-	return &types.Header{
+func (tm2pb) Header(header *Header) types.Header {
+	return types.Header{
 		ChainID:        header.ChainID,
 		Height:         header.Height,
 		Time:           header.Time.Unix(),
@@ -23,29 +23,29 @@ func (tm2pb) Header(header *Header) *types.Header {
 	}
 }
 
-func (tm2pb) BlockID(blockID BlockID) *types.BlockID {
-	return &types.BlockID{
+func (tm2pb) BlockID(blockID BlockID) types.BlockID {
+	return types.BlockID{
 		Hash:  blockID.Hash,
 		Parts: TM2PB.PartSetHeader(blockID.PartsHeader),
 	}
 }
 
-func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) *types.PartSetHeader {
-	return &types.PartSetHeader{
+func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) types.PartSetHeader {
+	return types.PartSetHeader{
 		Total: int32(partSetHeader.Total), // XXX: overflow
 		Hash:  partSetHeader.Hash,
 	}
 }
 
-func (tm2pb) Validator(val *Validator) *types.Validator {
-	return &types.Validator{
+func (tm2pb) Validator(val *Validator) types.Validator {
+	return types.Validator{
 		PubKey: val.PubKey.Bytes(),
 		Power:  val.VotingPower,
 	}
 }
 
-func (tm2pb) Validators(vals *ValidatorSet) []*types.Validator {
-	validators := make([]*types.Validator, len(vals.Validators))
+func (tm2pb) Validators(vals *ValidatorSet) []types.Validator {
+	validators := make([]types.Validator, len(vals.Validators))
 	for i, val := range vals.Validators {
 		validators[i] = TM2PB.Validator(val)
 	}
diff --git a/vendor/github.com/tendermint/tendermint/types/results.go b/vendor/github.com/tendermint/tendermint/types/results.go
index 29420fbc..71834664 100644
--- a/vendor/github.com/tendermint/tendermint/types/results.go
+++ b/vendor/github.com/tendermint/tendermint/types/results.go
@@ -2,8 +2,8 @@ package types
 
 import (
 	abci "github.com/tendermint/abci/types"
-	wire "github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	wire "github.com/tendermint/tendermint/wire"
+	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/merkle"
 )
 
@@ -12,13 +12,13 @@ import (
 // ABCIResult is the deterministic component of a ResponseDeliverTx.
 // TODO: add Tags
 type ABCIResult struct {
-	Code uint32     `json:"code"`
-	Data data.Bytes `json:"data"`
+	Code uint32       `json:"code"`
+	Data cmn.HexBytes `json:"data"`
 }
 
 // Hash returns the canonical hash of the ABCIResult
 func (a ABCIResult) Hash() []byte {
-	return wire.BinaryRipemd160(a)
+	return tmHash(a)
 }
 
 // ABCIResults wraps the deliver tx results to return a proof
@@ -40,27 +40,31 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
 	}
 }
 
-// Bytes serializes the ABCIResponse using go-wire
+// Bytes serializes the ABCIResponse using wire
 func (a ABCIResults) Bytes() []byte {
-	return wire.BinaryBytes(a)
+	bz, err := wire.MarshalBinary(a)
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 // Hash returns a merkle hash of all results
 func (a ABCIResults) Hash() []byte {
-	return merkle.SimpleHashFromHashables(a.toHashables())
+	return merkle.SimpleHashFromHashers(a.toHashers())
 }
 
 // ProveResult returns a merkle proof of one result from the set
 func (a ABCIResults) ProveResult(i int) merkle.SimpleProof {
-	_, proofs := merkle.SimpleProofsFromHashables(a.toHashables())
+	_, proofs := merkle.SimpleProofsFromHashers(a.toHashers())
 	return *proofs[i]
 }
 
-func (a ABCIResults) toHashables() []merkle.Hashable {
+func (a ABCIResults) toHashers() []merkle.Hasher {
 	l := len(a)
-	hashables := make([]merkle.Hashable, l)
+	hashers := make([]merkle.Hasher, l)
 	for i := 0; i < l; i++ {
-		hashables[i] = a[i]
+		hashers[i] = a[i]
 	}
-	return hashables
+	return hashers
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/services.go b/vendor/github.com/tendermint/tendermint/types/services.go
index 6900fae7..6b2be8a5 100644
--- a/vendor/github.com/tendermint/tendermint/types/services.go
+++ b/vendor/github.com/tendermint/tendermint/types/services.go
@@ -27,6 +27,7 @@ type Mempool interface {
 	Reap(int) Txs
 	Update(height int64, txs Txs) error
 	Flush()
+	FlushAppConn() error
 
 	TxsAvailable() <-chan int64
 	EnableTxsAvailable()
@@ -44,6 +45,7 @@ func (m MockMempool) CheckTx(tx Tx, cb func(*abci.Response)) error { return nil
 func (m MockMempool) Reap(n int) Txs                               { return Txs{} }
 func (m MockMempool) Update(height int64, txs Txs) error           { return nil }
 func (m MockMempool) Flush()                                       {}
+func (m MockMempool) FlushAppConn() error                          { return nil }
 func (m MockMempool) TxsAvailable() <-chan int64                   { return make(chan int64) }
 func (m MockMempool) EnableTxsAvailable()                          {}
 
diff --git a/vendor/github.com/tendermint/tendermint/types/signable.go b/vendor/github.com/tendermint/tendermint/types/signable.go
index 2134f630..19829ede 100644
--- a/vendor/github.com/tendermint/tendermint/types/signable.go
+++ b/vendor/github.com/tendermint/tendermint/types/signable.go
@@ -1,30 +1,16 @@
 package types
 
-import (
-	"bytes"
-	"io"
-
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/merkle"
-)
-
 // Signable is an interface for all signable things.
 // It typically removes signatures before serializing.
+// SignBytes returns the bytes to be signed
+// NOTE: chainIDs are part of the SignBytes but not
+// necessarily part of the objects themselves.
+// NOTE: Expected to panic if there is an error marshalling.
 type Signable interface {
-	WriteSignBytes(chainID string, w io.Writer, n *int, err *error)
-}
-
-// SignBytes is a convenience method for getting the bytes to sign of a Signable.
-func SignBytes(chainID string, o Signable) []byte {
-	buf, n, err := new(bytes.Buffer), new(int), new(error)
-	o.WriteSignBytes(chainID, buf, n, err)
-	if *err != nil {
-		cmn.PanicCrisis(err)
-	}
-	return buf.Bytes()
+	SignBytes(chainID string) []byte
 }
 
 // HashSignBytes is a convenience method for getting the hash of the bytes of a signable
 func HashSignBytes(chainID string, o Signable) []byte {
-	return merkle.SimpleHashFromBinary(SignBytes(chainID, o))
+	return tmHash(o.SignBytes(chainID))
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/test_util.go b/vendor/github.com/tendermint/tendermint/types/test_util.go
index d13de04e..73e53eb1 100644
--- a/vendor/github.com/tendermint/tendermint/types/test_util.go
+++ b/vendor/github.com/tendermint/tendermint/types/test_util.go
@@ -29,7 +29,7 @@ func MakeCommit(blockID BlockID, height int64, round int,
 }
 
 func signAddVote(privVal *PrivValidatorFS, vote *Vote, voteSet *VoteSet) (signed bool, err error) {
-	vote.Signature, err = privVal.Signer.Sign(SignBytes(voteSet.ChainID(), vote))
+	vote.Signature, err = privVal.Signer.Sign(vote.SignBytes(voteSet.ChainID()))
 	if err != nil {
 		return false, err
 	}
diff --git a/vendor/github.com/tendermint/tendermint/types/tx.go b/vendor/github.com/tendermint/tendermint/types/tx.go
index 4cf5843a..fc1d2721 100644
--- a/vendor/github.com/tendermint/tendermint/types/tx.go
+++ b/vendor/github.com/tendermint/tendermint/types/tx.go
@@ -6,19 +6,19 @@ import (
 	"fmt"
 
 	abci "github.com/tendermint/abci/types"
-	"github.com/tendermint/go-wire/data"
+	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/merkle"
 )
 
 // Tx is an arbitrary byte array.
-// NOTE: Tx has no types at this level, so when go-wire encoded it's just length-prefixed.
+// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
 // Alternatively, it may make sense to add types here and let
 // []byte be type 0x1 so we can have versioned txs if need be in the future.
 type Tx []byte
 
-// Hash computes the RIPEMD160 hash of the go-wire encoded transaction.
+// Hash computes the RIPEMD160 hash of the wire encoded transaction.
 func (tx Tx) Hash() []byte {
-	return merkle.SimpleHashFromBinary(tx)
+	return wireHasher(tx).Hash()
 }
 
 // String returns the hex-encoded transaction as a string.
@@ -72,11 +72,11 @@ func (txs Txs) IndexByHash(hash []byte) int {
 // TODO: optimize this!
 func (txs Txs) Proof(i int) TxProof {
 	l := len(txs)
-	hashables := make([]merkle.Hashable, l)
+	hashers := make([]merkle.Hasher, l)
 	for i := 0; i < l; i++ {
-		hashables[i] = txs[i]
+		hashers[i] = txs[i]
 	}
-	root, proofs := merkle.SimpleProofsFromHashables(hashables)
+	root, proofs := merkle.SimpleProofsFromHashers(hashers)
 
 	return TxProof{
 		Index:    i,
@@ -90,7 +90,7 @@ func (txs Txs) Proof(i int) TxProof {
 // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
 type TxProof struct {
 	Index, Total int
-	RootHash     data.Bytes
+	RootHash     cmn.HexBytes
 	Data         Tx
 	Proof        merkle.SimpleProof
 }
diff --git a/vendor/github.com/tendermint/tendermint/types/validator.go b/vendor/github.com/tendermint/tendermint/types/validator.go
index c5d064e0..a9b42fe8 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator.go
@@ -3,11 +3,8 @@ package types
 import (
 	"bytes"
 	"fmt"
-	"io"
 
 	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -15,7 +12,7 @@ import (
 // NOTE: The Accum is not included in Validator.Hash();
 // make sure to update that method if changes are made here
 type Validator struct {
-	Address     data.Bytes    `json:"address"`
+	Address     Address       `json:"address"`
 	PubKey      crypto.PubKey `json:"pub_key"`
 	VotingPower int64         `json:"voting_power"`
 
@@ -48,9 +45,10 @@ func (v *Validator) CompareAccum(other *Validator) *Validator {
 	} else if v.Accum < other.Accum {
 		return other
 	} else {
-		if bytes.Compare(v.Address, other.Address) < 0 {
+		result := bytes.Compare(v.Address, other.Address)
+		if result < 0 {
 			return v
-		} else if bytes.Compare(v.Address, other.Address) > 0 {
+		} else if result > 0 {
 			return other
 		} else {
 			cmn.PanicSanity("Cannot compare identical validators")
@@ -73,8 +71,8 @@ func (v *Validator) String() string {
 // Hash computes the unique ID of a validator with a given voting power.
 // It excludes the Accum value, which changes with every round.
 func (v *Validator) Hash() []byte {
-	return wire.BinaryRipemd160(struct {
-		Address     data.Bytes
+	return tmHash(struct {
+		Address     Address
 		PubKey      crypto.PubKey
 		VotingPower int64
 	}{
@@ -84,25 +82,6 @@ func (v *Validator) Hash() []byte {
 	})
 }
 
-//-------------------------------------
-
-var ValidatorCodec = validatorCodec{}
-
-type validatorCodec struct{}
-
-func (vc validatorCodec) Encode(o interface{}, w io.Writer, n *int, err *error) {
-	wire.WriteBinary(o.(*Validator), w, n, err)
-}
-
-func (vc validatorCodec) Decode(r io.Reader, n *int, err *error) interface{} {
-	return wire.ReadBinary(&Validator{}, r, 0, n, err)
-}
-
-func (vc validatorCodec) Compare(o1 interface{}, o2 interface{}) int {
-	cmn.PanicSanity("ValidatorCodec.Compare not implemented")
-	return 0
-}
-
 //--------------------------------------------------------------------------------
 // For testing...
 
diff --git a/vendor/github.com/tendermint/tendermint/types/validator_set.go b/vendor/github.com/tendermint/tendermint/types/validator_set.go
index 134e4e06..4b84f85d 100644
--- a/vendor/github.com/tendermint/tendermint/types/validator_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/validator_set.go
@@ -3,6 +3,7 @@ package types
 import (
 	"bytes"
 	"fmt"
+	"math"
 	"sort"
 	"strings"
 
@@ -20,7 +21,6 @@ import (
 // upon calling .IncrementAccum().
 // NOTE: Not goroutine-safe.
 // NOTE: All get/set to validators should copy the value for safety.
-// TODO: consider validator Accum overflow
 type ValidatorSet struct {
 	// NOTE: persisted via reflect, must be exported.
 	Validators []*Validator `json:"validators"`
@@ -48,13 +48,13 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
 }
 
 // incrementAccum and update the proposer
-// TODO: mind the overflow when times and votingPower shares too large.
 func (valSet *ValidatorSet) IncrementAccum(times int) {
 	// Add VotingPower * times to each validator and order into heap.
 	validatorsHeap := cmn.NewHeap()
 	for _, val := range valSet.Validators {
-		val.Accum += val.VotingPower * int64(times) // TODO: mind overflow
-		validatorsHeap.Push(val, accumComparable{val})
+		// check for overflow both multiplication and sum
+		val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times)))
+		validatorsHeap.PushComparable(val, accumComparable{val})
 	}
 
 	// Decrement the validator with most accum times times
@@ -63,7 +63,9 @@ func (valSet *ValidatorSet) IncrementAccum(times int) {
 		if i == times-1 {
 			valSet.Proposer = mostest
 		}
-		mostest.Accum -= int64(valSet.TotalVotingPower())
+
+		// mind underflow
+		mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower())
 		validatorsHeap.Update(mostest, accumComparable{mostest})
 	}
 }
@@ -81,27 +83,30 @@ func (valSet *ValidatorSet) Copy() *ValidatorSet {
 	}
 }
 
+// HasAddress returns true if the given address is in the validator set,
+// false otherwise.
 func (valSet *ValidatorSet) HasAddress(address []byte) bool {
 	idx := sort.Search(len(valSet.Validators), func(i int) bool {
 		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
 	})
-	return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
+	return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
 }
 
+// GetByAddress returns the index of the validator with the given address and
+// the validator itself, if found. Otherwise, -1 and nil are returned.
 func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
 	idx := sort.Search(len(valSet.Validators), func(i int) bool {
 		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
 	})
-	if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
+	if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
 		return idx, valSet.Validators[idx].Copy()
-	} else {
-		return 0, nil
 	}
+	return -1, nil
 }
 
-// GetByIndex returns the validator by index.
-// It returns nil values if index < 0 or
-// index >= len(ValidatorSet.Validators)
+// GetByIndex returns the validator's address and validator itself by index.
+// It returns nil values if index is less than 0 or greater than or equal to
+// len(ValidatorSet.Validators).
 func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
 	if index < 0 || index >= len(valSet.Validators) {
 		return nil, nil
@@ -110,19 +115,24 @@ func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validato
 	return val.Address, val.Copy()
 }
 
+// Size returns the length of the validator set.
 func (valSet *ValidatorSet) Size() int {
 	return len(valSet.Validators)
 }
 
+// TotalVotingPower returns the sum of the voting powers of all validators.
 func (valSet *ValidatorSet) TotalVotingPower() int64 {
 	if valSet.totalVotingPower == 0 {
 		for _, val := range valSet.Validators {
-			valSet.totalVotingPower += val.VotingPower
+			// mind overflow
+			valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower)
 		}
 	}
 	return valSet.totalVotingPower
 }
 
+// GetProposer returns the current proposer. If the validator set is empty, nil
+// is returned.
 func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
 	if len(valSet.Validators) == 0 {
 		return nil
@@ -143,23 +153,27 @@ func (valSet *ValidatorSet) findProposer() *Validator {
 	return proposer
 }
 
+// Hash returns the Merkle root hash built from the validators (as leaves) in
+// the set.
 func (valSet *ValidatorSet) Hash() []byte {
 	if len(valSet.Validators) == 0 {
 		return nil
 	}
-	hashables := make([]merkle.Hashable, len(valSet.Validators))
+	hashers := make([]merkle.Hasher, len(valSet.Validators))
 	for i, val := range valSet.Validators {
-		hashables[i] = val
+		hashers[i] = val
 	}
-	return merkle.SimpleHashFromHashables(hashables)
+	return merkle.SimpleHashFromHashers(hashers)
 }
 
+// Add adds val to the validator set and returns true. It returns false if val
+// is already in the set.
 func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
 	val = val.Copy()
 	idx := sort.Search(len(valSet.Validators), func(i int) bool {
 		return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
 	})
-	if idx == len(valSet.Validators) {
+	if idx >= len(valSet.Validators) {
 		valSet.Validators = append(valSet.Validators, val)
 		// Invalidate cache
 		valSet.Proposer = nil
@@ -180,39 +194,42 @@ func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
 	}
 }
 
+// Update updates val and returns true. It returns false if val is not present
+// in the set.
 func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
 	index, sameVal := valSet.GetByAddress(val.Address)
 	if sameVal == nil {
 		return false
-	} else {
-		valSet.Validators[index] = val.Copy()
-		// Invalidate cache
-		valSet.Proposer = nil
-		valSet.totalVotingPower = 0
-		return true
 	}
+	valSet.Validators[index] = val.Copy()
+	// Invalidate cache
+	valSet.Proposer = nil
+	valSet.totalVotingPower = 0
+	return true
 }
 
+// Remove deletes the validator with the given address. It returns the removed
+// validator and true, or nil and false if the validator is not present in the set.
 func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
 	idx := sort.Search(len(valSet.Validators), func(i int) bool {
 		return bytes.Compare(address, valSet.Validators[i].Address) <= 0
 	})
-	if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
+	if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
 		return nil, false
-	} else {
-		removedVal := valSet.Validators[idx]
-		newValidators := valSet.Validators[:idx]
-		if idx+1 < len(valSet.Validators) {
-			newValidators = append(newValidators, valSet.Validators[idx+1:]...)
-		}
-		valSet.Validators = newValidators
-		// Invalidate cache
-		valSet.Proposer = nil
-		valSet.totalVotingPower = 0
-		return removedVal, true
 	}
+	removedVal := valSet.Validators[idx]
+	newValidators := valSet.Validators[:idx]
+	if idx+1 < len(valSet.Validators) {
+		newValidators = append(newValidators, valSet.Validators[idx+1:]...)
+	}
+	valSet.Validators = newValidators
+	// Invalidate cache
+	valSet.Proposer = nil
+	valSet.totalVotingPower = 0
+	return removedVal, true
 }
 
+// Iterate will run the given function over the set.
 func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
 	for i, val := range valSet.Validators {
 		stop := fn(i, val.Copy())
@@ -250,7 +267,7 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height
 		}
 		_, val := valSet.GetByIndex(idx)
 		// Validate signature
-		precommitSignBytes := SignBytes(chainID, precommit)
+		precommitSignBytes := precommit.SignBytes(chainID)
 		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
 			return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
 		}
@@ -263,10 +280,9 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height
 
 	if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
 		return nil
-	} else {
-		return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
-			talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
 	}
+	return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
+		talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
 }
 
 // VerifyCommitAny will check to see if the set would
@@ -324,7 +340,7 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string
 		seen[vi] = true
 
 		// Validate signature old school
-		precommitSignBytes := SignBytes(chainID, precommit)
+		precommitSignBytes := precommit.SignBytes(chainID)
 		if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
 			return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
 		}
@@ -425,3 +441,74 @@ func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []*P
 	sort.Sort(PrivValidatorsByAddress(privValidators))
 	return valSet, privValidators
 }
+
+///////////////////////////////////////////////////////////////////////////////
+// Safe multiplication and addition/subtraction
+
+func safeMul(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 {
+		return 0, false
+	}
+	if a == 1 {
+		return b, false
+	}
+	if b == 1 {
+		return a, false
+	}
+	if a == math.MinInt64 || b == math.MinInt64 {
+		return -1, true
+	}
+	c := a * b
+	return c, c/b != a
+}
+
+func safeAdd(a, b int64) (int64, bool) {
+	if b > 0 && a > math.MaxInt64-b {
+		return -1, true
+	} else if b < 0 && a < math.MinInt64-b {
+		return -1, true
+	}
+	return a + b, false
+}
+
+func safeSub(a, b int64) (int64, bool) {
+	if b > 0 && a < math.MinInt64+b {
+		return -1, true
+	} else if b < 0 && a > math.MaxInt64+b {
+		return -1, true
+	}
+	return a - b, false
+}
+
+func safeMulClip(a, b int64) int64 {
+	c, overflow := safeMul(a, b)
+	if overflow {
+		if (a < 0 || b < 0) && !(a < 0 && b < 0) {
+			return math.MinInt64
+		}
+		return math.MaxInt64
+	}
+	return c
+}
+
+func safeAddClip(a, b int64) int64 {
+	c, overflow := safeAdd(a, b)
+	if overflow {
+		if b < 0 {
+			return math.MinInt64
+		}
+		return math.MaxInt64
+	}
+	return c
+}
+
+func safeSubClip(a, b int64) int64 {
+	c, overflow := safeSub(a, b)
+	if overflow {
+		if b > 0 {
+			return math.MinInt64
+		}
+		return math.MaxInt64
+	}
+	return c
+}
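
Editor's note: the safeMul/safeAdd/safeSub helpers report overflow, and the *Clip variants saturate at math.MaxInt64/math.MinInt64 instead of wrapping, which is what IncrementAccum and TotalVotingPower now rely on. A self-contained demonstration of the clipping behaviour; the helpers below mirror the ones added above and are not imported from the vendored package.

package main

import (
	"fmt"
	"math"
)

// safeAdd returns a+b and whether the addition overflowed.
func safeAdd(a, b int64) (int64, bool) {
	if b > 0 && a > math.MaxInt64-b {
		return -1, true
	} else if b < 0 && a < math.MinInt64-b {
		return -1, true
	}
	return a + b, false
}

// safeAddClip saturates at the int64 bounds instead of wrapping around.
func safeAddClip(a, b int64) int64 {
	c, overflow := safeAdd(a, b)
	if overflow {
		if b < 0 {
			return math.MinInt64
		}
		return math.MaxInt64
	}
	return c
}

func main() {
	fmt.Println(safeAddClip(math.MaxInt64, 1))  // clips to math.MaxInt64
	fmt.Println(safeAddClip(math.MinInt64, -1)) // clips to math.MinInt64
	fmt.Println(safeAddClip(40, 2))             // 42
}
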
diff --git a/vendor/github.com/tendermint/tendermint/types/vote.go b/vendor/github.com/tendermint/tendermint/types/vote.go
index 4397152c..ceb6e985 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote.go
@@ -4,12 +4,10 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io"
 	"time"
 
-	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/go-wire"
-	"github.com/tendermint/go-wire/data"
+	crypto "github.com/tendermint/go-crypto"
+	"github.com/tendermint/tendermint/wire"
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
@@ -59,9 +57,12 @@ func IsVoteTypeValid(type_ byte) bool {
 	}
 }
 
+// Address is hex bytes. TODO: crypto.Address
+type Address = cmn.HexBytes
+
 // Represents a prevote, precommit, or commit vote from validators for consensus.
 type Vote struct {
-	ValidatorAddress data.Bytes       `json:"validator_address"`
+	ValidatorAddress Address          `json:"validator_address"`
 	ValidatorIndex   int              `json:"validator_index"`
 	Height           int64            `json:"height"`
 	Round            int              `json:"round"`
@@ -71,11 +72,15 @@ type Vote struct {
 	Signature        crypto.Signature `json:"signature"`
 }
 
-func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
-	wire.WriteJSON(CanonicalJSONOnceVote{
+func (vote *Vote) SignBytes(chainID string) []byte {
+	bz, err := wire.MarshalJSON(CanonicalJSONOnceVote{
 		chainID,
 		CanonicalVote(vote),
-	}, w, n, err)
+	})
+	if err != nil {
+		panic(err)
+	}
+	return bz
 }
 
 func (vote *Vote) Copy() *Vote {
@@ -109,7 +114,7 @@ func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
 		return ErrVoteInvalidValidatorAddress
 	}
 
-	if !pubKey.VerifyBytes(SignBytes(chainID, vote), vote.Signature) {
+	if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) {
 		return ErrVoteInvalidSignature
 	}
 	return nil
diff --git a/vendor/github.com/tendermint/tendermint/types/vote_set.go b/vendor/github.com/tendermint/tendermint/types/vote_set.go
index 34f98956..e255488d 100644
--- a/vendor/github.com/tendermint/tendermint/types/vote_set.go
+++ b/vendor/github.com/tendermint/tendermint/types/vote_set.go
@@ -11,6 +11,12 @@ import (
 	cmn "github.com/tendermint/tmlibs/common"
 )
 
+// UNSTABLE
+// XXX: duplicate of p2p.ID to avoid dependence between packages.
+// Perhaps we can have a minimal types package containing this (and other things?)
+// that both `types` and `p2p` import?
+type P2PID string
+
 /*
 	VoteSet helps collect signatures from validators at each height+round for a
 	predefined vote type.
@@ -58,7 +64,7 @@ type VoteSet struct {
 	sum           int64                  // Sum of voting power for seen votes, discounting conflicts
 	maj23         *BlockID               // First 2/3 majority seen
 	votesByBlock  map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes
-	peerMaj23s    map[string]BlockID     // Maj23 for each peer
+	peerMaj23s    map[P2PID]BlockID      // Maj23 for each peer
 }
 
 // Constructs a new VoteSet struct used to accumulate votes for given height/round.
@@ -77,7 +83,7 @@ func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *Val
 		sum:           0,
 		maj23:         nil,
 		votesByBlock:  make(map[string]*blockVotes, valSet.Size()),
-		peerMaj23s:    make(map[string]BlockID),
+		peerMaj23s:    make(map[P2PID]BlockID),
 	}
 }
 
@@ -88,33 +94,29 @@ func (voteSet *VoteSet) ChainID() string {
 func (voteSet *VoteSet) Height() int64 {
 	if voteSet == nil {
 		return 0
-	} else {
-		return voteSet.height
 	}
+	return voteSet.height
 }
 
 func (voteSet *VoteSet) Round() int {
 	if voteSet == nil {
 		return -1
-	} else {
-		return voteSet.round
 	}
+	return voteSet.round
 }
 
 func (voteSet *VoteSet) Type() byte {
 	if voteSet == nil {
 		return 0x00
-	} else {
-		return voteSet.type_
 	}
+	return voteSet.type_
 }
 
 func (voteSet *VoteSet) Size() int {
 	if voteSet == nil {
 		return 0
-	} else {
-		return voteSet.valSet.Size()
 	}
+	return voteSet.valSet.Size()
 }
 
 // Returns added=true if vote is valid and new.
@@ -171,7 +173,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 	// Ensure that the signer has the right address
 	if !bytes.Equal(valAddr, lookupAddr) {
 		return false, errors.Wrapf(ErrVoteInvalidValidatorAddress,
-			"vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)",
+			"vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.",
 			valAddr, lookupAddr, valIndex)
 	}
 
@@ -179,9 +181,8 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 	if existing, ok := voteSet.getVote(valIndex, blockKey); ok {
 		if existing.Signature.Equals(vote.Signature) {
 			return false, nil // duplicate
-		} else {
-			return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote)
 		}
+		return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote)
 	}
 
 	// Check signature.
@@ -193,13 +194,11 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 	added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower)
 	if conflicting != nil {
 		return added, NewConflictingVoteError(val, conflicting, vote)
-	} else {
-		if !added {
-			cmn.PanicSanity("Expected to add non-conflicting vote")
-		}
-		return added, nil
 	}
-
+	if !added {
+		cmn.PanicSanity("Expected to add non-conflicting vote")
+	}
+	return added, nil
 }
 
 // Returns (vote, true) if vote exists for valIndex and blockKey
@@ -251,13 +250,12 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower
 			// ... and there's a conflicting vote.
 			// We're not even tracking this blockKey, so just forget it.
 			return false, conflicting
-		} else {
-			// ... and there's no conflicting vote.
-			// Start tracking this blockKey
-			votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
-			voteSet.votesByBlock[blockKey] = votesByBlock
-			// We'll add the vote in a bit.
 		}
+		// ... and there's no conflicting vote.
+		// Start tracking this blockKey
+		votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
+		voteSet.votesByBlock[blockKey] = votesByBlock
+		// We'll add the vote in a bit.
 	}
 
 	// Before adding to votesByBlock, see if we'll exceed quorum
@@ -290,7 +288,7 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower
 // this can cause memory issues.
 // TODO: implement ability to remove peers too
 // NOTE: VoteSet must not be nil
-func (voteSet *VoteSet) SetPeerMaj23(peerID string, blockID BlockID) {
+func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error {
 	if voteSet == nil {
 		cmn.PanicSanity("SetPeerMaj23() on nil VoteSet")
 	}
@@ -302,10 +300,10 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID string, blockID BlockID) {
 	// Make sure peer hasn't already told us something.
 	if existing, ok := voteSet.peerMaj23s[peerID]; ok {
 		if existing.Equals(blockID) {
-			return // Nothing to do
-		} else {
-			return // TODO bad peer!
+			return nil // Nothing to do
 		}
+		return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v",
+			peerID, blockID, existing)
 	}
 	voteSet.peerMaj23s[peerID] = blockID
 
@@ -313,16 +311,16 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID string, blockID BlockID) {
 	votesByBlock, ok := voteSet.votesByBlock[blockKey]
 	if ok {
 		if votesByBlock.peerMaj23 {
-			return // Nothing to do
-		} else {
-			votesByBlock.peerMaj23 = true
-			// No need to copy votes, already there.
+			return nil // Nothing to do
 		}
+		votesByBlock.peerMaj23 = true
+		// No need to copy votes, already there.
 	} else {
 		votesByBlock = newBlockVotes(true, voteSet.valSet.Size())
 		voteSet.votesByBlock[blockKey] = votesByBlock
 		// No need to copy votes, no votes to copy over.
 	}
+	return nil
 }
 
 func (voteSet *VoteSet) BitArray() *cmn.BitArray {
@@ -414,9 +412,8 @@ func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) {
 	defer voteSet.mtx.Unlock()
 	if voteSet.maj23 != nil {
 		return *voteSet.maj23, true
-	} else {
-		return BlockID{}, false
 	}
+	return BlockID{}, false
 }
 
 func (voteSet *VoteSet) String() string {
diff --git a/vendor/github.com/tendermint/tendermint/version/version.go b/vendor/github.com/tendermint/tendermint/version/version.go
index d328b41d..a8fbfa84 100644
--- a/vendor/github.com/tendermint/tendermint/version/version.go
+++ b/vendor/github.com/tendermint/tendermint/version/version.go
@@ -1,13 +1,13 @@
 package version
 
 const Maj = "0"
-const Min = "15"
+const Min = "18"
 const Fix = "0"
 
 var (
 	// Version is the current version of Tendermint
 	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.15.0"
+	Version = "0.18.0"
 
 	// GitCommit is the current HEAD set using ldflags.
 	GitCommit string
diff --git a/vendor/github.com/tendermint/tendermint/wire/wire.go b/vendor/github.com/tendermint/tendermint/wire/wire.go
new file mode 100644
index 00000000..9d0d2c20
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/wire/wire.go
@@ -0,0 +1,60 @@
+package wire
+
+import (
+	"github.com/tendermint/go-wire"
+)
+
+/*
+// Expose access to a global wire codec
+// TODO: maybe introduce some Context object
+// containing logger, config, codec that can
+// be threaded through everything to avoid this global
+var cdc *wire.Codec
+
+func init() {
+	cdc = wire.NewCodec()
+	crypto.RegisterWire(cdc)
+}
+*/
+
+// Just a flow through to go-wire.
+// To be used later for the global codec
+
+func MarshalBinary(o interface{}) ([]byte, error) {
+	return wire.MarshalBinary(o)
+}
+
+func UnmarshalBinary(bz []byte, ptr interface{}) error {
+	return wire.UnmarshalBinary(bz, ptr)
+}
+
+func MarshalJSON(o interface{}) ([]byte, error) {
+	return wire.MarshalJSON(o)
+}
+
+func UnmarshalJSON(jsonBz []byte, ptr interface{}) error {
+	return wire.UnmarshalJSON(jsonBz, ptr)
+}
+
+type ConcreteType = wire.ConcreteType
+
+func RegisterInterface(o interface{}, ctypes ...ConcreteType) *wire.TypeInfo {
+	return wire.RegisterInterface(o, ctypes...)
+}
+
+const RFC3339Millis = wire.RFC3339Millis
+
+/*
+
+func RegisterInterface(ptr interface{}, opts *wire.InterfaceOptions) {
+	cdc.RegisterInterface(ptr, opts)
+}
+
+func RegisterConcrete(o interface{}, name string, opts *wire.ConcreteOptions) {
+	cdc.RegisterConcrete(o, name, opts)
+}
+
+//-------------------------------
+
+const RFC3339Millis = wire.RFC3339Millis
+*/
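
Editor's note: the new wire package is a flow-through facade: callers import one stable path while the concrete codec behind it (go-wire today, a configured Codec later, per the commented-out block) can change without touching call sites. A hedged sketch of the same idea, using encoding/json as the stand-in codec; names here are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// MarshalJSON forwards to the underlying codec (encoding/json in this sketch).
func MarshalJSON(o interface{}) ([]byte, error) { return json.Marshal(o) }

// UnmarshalJSON forwards likewise.
func UnmarshalJSON(bz []byte, ptr interface{}) error { return json.Unmarshal(bz, ptr) }

func main() {
	bz, _ := MarshalJSON(map[string]int{"height": 1})
	fmt.Println(string(bz)) // {"height":1}

	var out map[string]int
	if err := UnmarshalJSON(bz, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["height"]) // 1
}
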
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go b/vendor/github.com/tendermint/tmlibs/autofile/autofile.go
index 05fb0d67..790be522 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/autofile.go
+++ b/vendor/github.com/tendermint/tmlibs/autofile/autofile.go
@@ -5,7 +5,7 @@ import (
 	"sync"
 	"time"
 
-	. "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 /* AutoFile usage
@@ -44,7 +44,7 @@ type AutoFile struct {
 
 func OpenAutoFile(path string) (af *AutoFile, err error) {
 	af = &AutoFile{
-		ID:     RandStr(12) + ":" + path,
+		ID:     cmn.RandStr(12) + ":" + path,
 		Path:   path,
 		ticker: time.NewTicker(autoFileOpenDuration),
 	}
@@ -129,9 +129,8 @@ func (af *AutoFile) Size() (int64, error) {
 		if err != nil {
 			if err == os.ErrNotExist {
 				return 0, nil
-			} else {
-				return -1, err
 			}
+			return -1, err
 		}
 	}
 	stat, err := af.file.Stat()
diff --git a/vendor/github.com/tendermint/tmlibs/autofile/group.go b/vendor/github.com/tendermint/tmlibs/autofile/group.go
index f2d0f2ba..652c3331 100644
--- a/vendor/github.com/tendermint/tmlibs/autofile/group.go
+++ b/vendor/github.com/tendermint/tmlibs/autofile/group.go
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"
 
-	. "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 const (
@@ -54,7 +54,7 @@ The Group can also be used to binary-search for some line,
 assuming that marker lines are written occasionally.
 */
 type Group struct {
-	BaseService
+	cmn.BaseService
 
 	ID             string
 	Head           *AutoFile // The head AutoFile to write to
@@ -90,7 +90,7 @@ func OpenGroup(headPath string) (g *Group, err error) {
 		minIndex:       0,
 		maxIndex:       0,
 	}
-	g.BaseService = *NewBaseService(nil, "Group", g)
+	g.BaseService = *cmn.NewBaseService(nil, "Group", g)
 
 	gInfo := g.readGroupInfo()
 	g.minIndex = gInfo.MinIndex
@@ -267,7 +267,7 @@ func (g *Group) RotateFile() {
 		panic(err)
 	}
 
-	g.maxIndex += 1
+	g.maxIndex++
 }
 
 // NewReader returns a new group reader.
@@ -277,9 +277,8 @@ func (g *Group) NewReader(index int) (*GroupReader, error) {
 	err := r.SetIndex(index)
 	if err != nil {
 		return nil, err
-	} else {
-		return r, nil
 	}
+	return r, nil
 }
 
 // Returns -1 if line comes after, 0 if found, 1 if line comes before.
@@ -311,9 +310,8 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error
 			if err != nil {
 				r.Close()
 				return nil, false, err
-			} else {
-				return r, match, err
 			}
+			return r, match, err
 		}
 
 		// Read starting roughly at the middle file,
@@ -349,9 +347,8 @@ func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error
 			if err != nil {
 				r.Close()
 				return nil, false, err
-			} else {
-				return r, true, err
 			}
+			return r, true, err
 		} else {
 			// We passed it
 			maxIndex = curIndex - 1
@@ -429,9 +426,8 @@ GROUP_LOOP:
 			if err == io.EOF {
 				if found {
 					return match, found, nil
-				} else {
-					continue GROUP_LOOP
 				}
+				continue GROUP_LOOP
 			} else if err != nil {
 				return "", false, err
 			}
@@ -442,9 +438,8 @@ GROUP_LOOP:
 			if r.CurIndex() > i {
 				if found {
 					return match, found, nil
-				} else {
-					continue GROUP_LOOP
 				}
+				continue GROUP_LOOP
 			}
 		}
 	}
@@ -520,7 +515,7 @@ func (g *Group) readGroupInfo() GroupInfo {
 		minIndex, maxIndex = 0, 0
 	} else {
 		// Otherwise, the head file is 1 greater
-		maxIndex += 1
+		maxIndex++
 	}
 	return GroupInfo{minIndex, maxIndex, totalSize, headSize}
 }
@@ -528,9 +523,8 @@ func (g *Group) readGroupInfo() GroupInfo {
 func filePathForIndex(headPath string, index int, maxIndex int) string {
 	if index == maxIndex {
 		return headPath
-	} else {
-		return fmt.Sprintf("%v.%03d", headPath, index)
 	}
+	return fmt.Sprintf("%v.%03d", headPath, index)
 }
 
 //--------------------------------------------------------------------------------
@@ -567,9 +561,8 @@ func (gr *GroupReader) Close() error {
 		gr.curFile = nil
 		gr.curLine = nil
 		return err
-	} else {
-		return nil
 	}
+	return nil
 }
 
 // Read implements io.Reader, reading bytes from the current Reader
@@ -598,10 +591,10 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) {
 		if err == io.EOF {
 			if n >= lenP {
 				return n, nil
-			} else { // Open the next file
-				if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
-					return n, err1
-				}
+			}
+			// Open the next file
+			if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
+				return n, err1
 			}
 		} else if err != nil {
 			return n, err
@@ -643,10 +636,9 @@ func (gr *GroupReader) ReadLine() (string, error) {
 			}
 			if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') {
 				return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
-			} else {
-				linePrefix += string(bytesRead)
-				continue
 			}
+			linePrefix += string(bytesRead)
+			continue
 		} else if err != nil {
 			return "", err
 		}
@@ -726,11 +718,11 @@ func (gr *GroupReader) SetIndex(index int) error {
 func MakeSimpleSearchFunc(prefix string, target int) SearchFunc {
 	return func(line string) (int, error) {
 		if !strings.HasPrefix(line, prefix) {
-			return -1, errors.New(Fmt("Marker line did not have prefix: %v", prefix))
+			return -1, errors.New(cmn.Fmt("Marker line did not have prefix: %v", prefix))
 		}
 		i, err := strconv.Atoi(line[len(prefix):])
 		if err != nil {
-			return -1, errors.New(Fmt("Failed to parse marker line: %v", err.Error()))
+			return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error()))
 		}
 		if target < i {
 			return 1, nil
diff --git a/vendor/github.com/tendermint/tmlibs/clist/clist.go b/vendor/github.com/tendermint/tmlibs/clist/clist.go
index a52920f8..ccb1f577 100644
--- a/vendor/github.com/tendermint/tmlibs/clist/clist.go
+++ b/vendor/github.com/tendermint/tmlibs/clist/clist.go
@@ -36,12 +36,14 @@ waiting on NextWait() (since it's just a read operation).
 
 */
 type CElement struct {
-	mtx     sync.RWMutex
-	prev    *CElement
-	prevWg  *sync.WaitGroup
-	next    *CElement
-	nextWg  *sync.WaitGroup
-	removed bool
+	mtx        sync.RWMutex
+	prev       *CElement
+	prevWg     *sync.WaitGroup
+	prevWaitCh chan struct{}
+	next       *CElement
+	nextWg     *sync.WaitGroup
+	nextWaitCh chan struct{}
+	removed    bool
 
 	Value interface{} // immutable
 }
@@ -84,6 +86,24 @@ func (e *CElement) PrevWait() *CElement {
 	}
 }
 
+// PrevWaitChan can be used to wait until Prev becomes non-nil. Once it does,
+// the channel will be closed.
+func (e *CElement) PrevWaitChan() <-chan struct{} {
+	e.mtx.RLock()
+	defer e.mtx.RUnlock()
+
+	return e.prevWaitCh
+}
+
+// NextWaitChan can be used to wait until Next becomes non-nil. Once it does,
+// the channel will be closed.
+func (e *CElement) NextWaitChan() <-chan struct{} {
+	e.mtx.RLock()
+	defer e.mtx.RUnlock()
+
+	return e.nextWaitCh
+}
+
 // Nonblocking, may return nil if at the end.
 func (e *CElement) Next() *CElement {
 	e.mtx.RLock()
@@ -142,9 +162,11 @@ func (e *CElement) SetNext(newNext *CElement) {
 		// events, new Add calls must happen after all previous Wait calls have
 		// returned.
 		e.nextWg = waitGroup1() // WaitGroups are difficult to re-use.
+		e.nextWaitCh = make(chan struct{})
 	}
 	if oldNext == nil && newNext != nil {
 		e.nextWg.Done()
+		close(e.nextWaitCh)
 	}
 }
 
@@ -158,9 +180,11 @@ func (e *CElement) SetPrev(newPrev *CElement) {
 	e.prev = newPrev
 	if oldPrev != nil && newPrev == nil {
 		e.prevWg = waitGroup1() // WaitGroups are difficult to re-use.
+		e.prevWaitCh = make(chan struct{})
 	}
 	if oldPrev == nil && newPrev != nil {
 		e.prevWg.Done()
+		close(e.prevWaitCh)
 	}
 }
 
@@ -173,9 +197,11 @@ func (e *CElement) SetRemoved() {
 	// This wakes up anyone waiting in either direction.
 	if e.prev == nil {
 		e.prevWg.Done()
+		close(e.prevWaitCh)
 	}
 	if e.next == nil {
 		e.nextWg.Done()
+		close(e.nextWaitCh)
 	}
 }
 
@@ -185,11 +211,12 @@ func (e *CElement) SetRemoved() {
 // The zero value for CList is an empty list ready to use.
 // Operations are goroutine-safe.
 type CList struct {
-	mtx  sync.RWMutex
-	wg   *sync.WaitGroup
-	head *CElement // first element
-	tail *CElement // last element
-	len  int       // list length
+	mtx    sync.RWMutex
+	wg     *sync.WaitGroup
+	waitCh chan struct{}
+	head   *CElement // first element
+	tail   *CElement // last element
+	len    int       // list length
 }
 
 func (l *CList) Init() *CList {
@@ -197,6 +224,7 @@ func (l *CList) Init() *CList {
 	defer l.mtx.Unlock()
 
 	l.wg = waitGroup1()
+	l.waitCh = make(chan struct{})
 	l.head = nil
 	l.tail = nil
 	l.len = 0
@@ -258,25 +286,37 @@ func (l *CList) BackWait() *CElement {
 	}
 }
 
+// WaitChan can be used to wait until Front or Back becomes non-nil. Once it
+// does, the channel will be closed.
+func (l *CList) WaitChan() <-chan struct{} {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+
+	return l.waitCh
+}
+
 func (l *CList) PushBack(v interface{}) *CElement {
 	l.mtx.Lock()
 	defer l.mtx.Unlock()
 
 	// Construct a new element
 	e := &CElement{
-		prev:    nil,
-		prevWg:  waitGroup1(),
-		next:    nil,
-		nextWg:  waitGroup1(),
-		removed: false,
-		Value:   v,
+		prev:       nil,
+		prevWg:     waitGroup1(),
+		prevWaitCh: make(chan struct{}),
+		next:       nil,
+		nextWg:     waitGroup1(),
+		nextWaitCh: make(chan struct{}),
+		removed:    false,
+		Value:      v,
 	}
 
 	// Release waiters on FrontWait/BackWait maybe
 	if l.len == 0 {
 		l.wg.Done()
+		close(l.waitCh)
 	}
-	l.len += 1
+	l.len++
 
 	// Modify the tail
 	if l.tail == nil {
@@ -313,10 +353,11 @@ func (l *CList) Remove(e *CElement) interface{} {
 	// If we're removing the only item, make CList FrontWait/BackWait wait.
 	if l.len == 1 {
 		l.wg = waitGroup1() // WaitGroups are difficult to re-use.
+		l.waitCh = make(chan struct{})
 	}
 
 	// Update l.len
-	l.len -= 1
+	l.len--
 
 	// Connect next/prev and set head/tail
 	if prev == nil {
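
Editor's note: the new PrevWaitChan/NextWaitChan/WaitChan accessors expose a channel that is closed when the awaited element appears, so callers can select on list readiness together with timeouts or quit signals, which the existing WaitGroup-based *Wait methods cannot offer. A small standalone illustration of the closed-channel wakeup pattern (not the vendored CList itself):

package main

import (
	"fmt"
	"time"
)

func main() {
	ready := make(chan struct{}) // analogous to CList.WaitChan()

	go func() {
		time.Sleep(50 * time.Millisecond) // e.g. the first element is pushed
		close(ready)                      // wake every waiter at once
	}()

	select {
	case <-ready:
		fmt.Println("list became non-empty")
	case <-time.After(time.Second):
		fmt.Println("timed out waiting")
	}
}
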
diff --git a/vendor/github.com/tendermint/tmlibs/common/async.go b/vendor/github.com/tendermint/tmlibs/common/async.go
index 1d302c34..49714d95 100644
--- a/vendor/github.com/tendermint/tmlibs/common/async.go
+++ b/vendor/github.com/tendermint/tmlibs/common/async.go
@@ -1,15 +1,148 @@
 package common
 
-import "sync"
-
-func Parallel(tasks ...func()) {
-	var wg sync.WaitGroup
-	wg.Add(len(tasks))
-	for _, task := range tasks {
-		go func(task func()) {
-			task()
-			wg.Done()
-		}(task)
-	}
-	wg.Wait()
+import (
+	"sync/atomic"
+)
+
+//----------------------------------------
+// Task
+
+// val: the value returned after task execution.
+// err: the error returned during task completion.
+// abort: tells Parallel to return, whether or not all tasks have completed.
+type Task func(i int) (val interface{}, err error, abort bool)
+
+type TaskResult struct {
+	Value interface{}
+	Error error
+}
+
+type TaskResultCh <-chan TaskResult
+
+type taskResultOK struct {
+	TaskResult
+	OK bool
+}
+
+type TaskResultSet struct {
+	chz     []TaskResultCh
+	results []taskResultOK
+}
+
+func newTaskResultSet(chz []TaskResultCh) *TaskResultSet {
+	return &TaskResultSet{
+		chz:     chz,
+		results: nil,
+	}
+}
+
+func (trs *TaskResultSet) Channels() []TaskResultCh {
+	return trs.chz
+}
+
+func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) {
+	if len(trs.results) <= index {
+		return TaskResult{}, false
+	}
+	resultOK := trs.results[index]
+	return resultOK.TaskResult, resultOK.OK
+}
+
+// NOTE: Not concurrency safe.
+func (trs *TaskResultSet) Reap() *TaskResultSet {
+	if trs.results == nil {
+		trs.results = make([]taskResultOK, len(trs.chz))
+	}
+	for i := 0; i < len(trs.results); i++ {
+		var trch = trs.chz[i]
+		select {
+		case result := <-trch:
+			// Overwrite result.
+			trs.results[i] = taskResultOK{
+				TaskResult: result,
+				OK:         true,
+			}
+		default:
+			// Do nothing.
+		}
+	}
+	return trs
+}
+
+// Returns the firstmost (by task index) non-nil value as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstValue() interface{} {
+	for _, result := range trs.results {
+		if result.Value != nil {
+			return result.Value
+		}
+	}
+	return nil
+}
+
+// Returns the firstmost (by task index) error as
+// discovered by all previous Reap() calls.
+func (trs *TaskResultSet) FirstError() error {
+	for _, result := range trs.results {
+		if result.Error != nil {
+			return result.Error
+		}
+	}
+	return nil
+}
+
+//----------------------------------------
+// Parallel
+
+// Run tasks in parallel, with the ability to abort early.
+// Returns ok=false if any of the tasks returned abort=true or panicked.
+// NOTE: Do not implement quit features here.  Instead, provide convenient
+// concurrent quit-like primitives, passed implicitly via Task closures. (e.g.
+// it's not Parallel's concern how you quit/abort your tasks).
+func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) {
+	var taskResultChz = make([]TaskResultCh, len(tasks)) // To return.
+	var taskDoneCh = make(chan bool, len(tasks))         // A "wait group" channel, early abort if any true received.
+	var numPanics = new(int32)                           // Keep track of panics to set ok=false later.
+	ok = true                                            // We will set it to false iff any tasks panic'd or returned abort.
+
+	// Start all tasks in parallel in separate goroutines.
+	// When the task is complete, it will appear in the
+	// respective taskResultCh (associated by task index).
+	for i, task := range tasks {
+		var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result.
+		taskResultChz[i] = taskResultCh
+		go func(i int, task Task, taskResultCh chan TaskResult) {
+			// Recovery
+			defer func() {
+				if pnk := recover(); pnk != nil {
+					atomic.AddInt32(numPanics, 1)
+					taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")}
+					taskDoneCh <- false
+				}
+			}()
+			// Run the task.
+			var val, err, abort = task(i)
+			// Send val/err to taskResultCh.
+			// NOTE: Below this line, nothing must panic.
+			taskResultCh <- TaskResult{val, err}
+			// Decrement waitgroup.
+			taskDoneCh <- abort
+		}(i, task, taskResultCh)
+	}
+
+	// Wait until all tasks are done, or until abort.
+	// DONE_LOOP:
+	for i := 0; i < len(tasks); i++ {
+		abort := <-taskDoneCh
+		if abort {
+			ok = false
+			break
+		}
+	}
+
+	// Ok is also false if there were any panics.
+	// We must do this check here (after DONE_LOOP).
+	ok = ok && (atomic.LoadInt32(numPanics) == 0)
+
+	return newTaskResultSet(taskResultChz).Reap(), ok
 }
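
Editor's note: a possible call site for the reworked Parallel, based only on the signatures added above (Task, TaskResultSet.FirstError, TaskResultSet.LatestResult) and assuming the package imports as github.com/tendermint/tmlibs/common; treat it as a sketch, not documented usage.

package main

import (
	"fmt"

	cmn "github.com/tendermint/tmlibs/common"
)

func main() {
	trs, ok := cmn.Parallel(
		// Each task receives its own index and returns (value, error, abort).
		func(i int) (interface{}, error, bool) { return i * 2, nil, false },
		func(i int) (interface{}, error, bool) { return nil, fmt.Errorf("task %d failed", i), false },
	)

	fmt.Println("ok:", ok)                        // true: no task aborted or panicked
	fmt.Println("first error:", trs.FirstError()) // the error from task 1
	res, found := trs.LatestResult(0)
	fmt.Println(res.Value, found) // 0 true
}
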
diff --git a/vendor/github.com/tendermint/tmlibs/common/bit_array.go b/vendor/github.com/tendermint/tmlibs/common/bit_array.go
index 848763b4..ea6a6ee1 100644
--- a/vendor/github.com/tendermint/tmlibs/common/bit_array.go
+++ b/vendor/github.com/tendermint/tmlibs/common/bit_array.go
@@ -15,7 +15,7 @@ type BitArray struct {
 
 // There is no BitArray whose Size is 0.  Use nil instead.
 func NewBitArray(bits int) *BitArray {
-	if bits == 0 {
+	if bits <= 0 {
 		return nil
 	}
 	return &BitArray{
@@ -99,8 +99,14 @@ func (bA *BitArray) copyBits(bits int) *BitArray {
 
 // Returns a BitArray of larger bits size.
 func (bA *BitArray) Or(o *BitArray) *BitArray {
-	if bA == nil {
-		o.Copy()
+	if bA == nil && o == nil {
+		return nil
+	}
+	if bA == nil && o != nil {
+		return o.Copy()
+	}
+	if o == nil {
+		return bA.Copy()
 	}
 	bA.mtx.Lock()
 	defer bA.mtx.Unlock()
@@ -113,7 +119,7 @@ func (bA *BitArray) Or(o *BitArray) *BitArray {
 
 // Returns a BitArray of smaller bit size.
 func (bA *BitArray) And(o *BitArray) *BitArray {
-	if bA == nil {
+	if bA == nil || o == nil {
 		return nil
 	}
 	bA.mtx.Lock()
@@ -143,7 +149,8 @@ func (bA *BitArray) Not() *BitArray {
 }
 
 func (bA *BitArray) Sub(o *BitArray) *BitArray {
-	if bA == nil {
+	if bA == nil || o == nil {
+		// TODO: Decide if we should do 1's complement here?
 		return nil
 	}
 	bA.mtx.Lock()
@@ -161,9 +168,8 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray {
 			}
 		}
 		return c
-	} else {
-		return bA.and(o.Not()) // Note degenerate case where o == nil
 	}
+	return bA.and(o.Not()) // Note degenerate case where o == nil
 }
 
 func (bA *BitArray) IsEmpty() bool {
@@ -306,7 +312,7 @@ func (bA *BitArray) Bytes() []byte {
 // so if necessary, caller must copy or lock o prior to calling Update.
 // If bA is nil, does nothing.
 func (bA *BitArray) Update(o *BitArray) {
-	if bA == nil {
+	if bA == nil || o == nil {
 		return
 	}
 	bA.mtx.Lock()
diff --git a/vendor/github.com/tendermint/tmlibs/common/bytes.go b/vendor/github.com/tendermint/tmlibs/common/bytes.go
new file mode 100644
index 00000000..711720aa
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/common/bytes.go
@@ -0,0 +1,62 @@
+package common
+
+import (
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// The main purpose of HexBytes is to enable HEX-encoding for json/encoding.
+type HexBytes []byte
+
+// Marshal needed for protobuf compatibility
+func (bz HexBytes) Marshal() ([]byte, error) {
+	return bz, nil
+}
+
+// Unmarshal needed for protobuf compatibility
+func (bz *HexBytes) Unmarshal(data []byte) error {
+	*bz = data
+	return nil
+}
+
+// This is the point of HexBytes.
+func (bz HexBytes) MarshalJSON() ([]byte, error) {
+	s := strings.ToUpper(hex.EncodeToString(bz))
+	jbz := make([]byte, len(s)+2)
+	jbz[0] = '"'
+	copy(jbz[1:], []byte(s))
+	jbz[len(jbz)-1] = '"'
+	return jbz, nil
+}
+
+// This is the point of HexBytes.
+func (bz *HexBytes) UnmarshalJSON(data []byte) error {
+	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+		return fmt.Errorf("Invalid hex string: %s", data)
+	}
+	bz2, err := hex.DecodeString(string(data[1 : len(data)-1]))
+	if err != nil {
+		return err
+	}
+	*bz = bz2
+	return nil
+}
+
+// Allow it to fulfill various interfaces in light-client, etc...
+func (bz HexBytes) Bytes() []byte {
+	return bz
+}
+
+func (bz HexBytes) String() string {
+	return strings.ToUpper(hex.EncodeToString(bz))
+}
+
+func (bz HexBytes) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'p':
+		s.Write([]byte(fmt.Sprintf("%p", bz)))
+	default:
+		s.Write([]byte(fmt.Sprintf("%X", []byte(bz))))
+	}
+}
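
Editor's note: HexBytes replaces go-wire's data.Bytes throughout this patch; its MarshalJSON/UnmarshalJSON render byte slices as upper-case hex strings rather than the base64 Go uses for a plain []byte. A usage sketch, assuming the type imports as cmn.HexBytes from github.com/tendermint/tmlibs/common:

package main

import (
	"encoding/json"
	"fmt"

	cmn "github.com/tendermint/tmlibs/common"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}

	asHex, _ := json.Marshal(cmn.HexBytes(raw))
	asB64, _ := json.Marshal(raw)
	fmt.Println(string(asHex)) // "DEADBEEF"
	fmt.Println(string(asB64)) // "3q2+7w==" (default []byte encoding)

	var back cmn.HexBytes
	if err := json.Unmarshal(asHex, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.String()) // DEADBEEF
}
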
diff --git a/vendor/github.com/tendermint/tmlibs/common/colors.go b/vendor/github.com/tendermint/tmlibs/common/colors.go
index 776b22e2..85e59224 100644
--- a/vendor/github.com/tendermint/tmlibs/common/colors.go
+++ b/vendor/github.com/tendermint/tmlibs/common/colors.go
@@ -38,9 +38,8 @@ const (
 func treat(s string, color string) string {
 	if len(s) > 2 && s[:2] == "\x1b[" {
 		return s
-	} else {
-		return color + s + ANSIReset
 	}
+	return color + s + ANSIReset
 }
 
 func treatAll(color string, args ...interface{}) string {
diff --git a/vendor/github.com/tendermint/tmlibs/common/errors.go b/vendor/github.com/tendermint/tmlibs/common/errors.go
index 039342a6..1ee1fb34 100644
--- a/vendor/github.com/tendermint/tmlibs/common/errors.go
+++ b/vendor/github.com/tendermint/tmlibs/common/errors.go
@@ -2,26 +2,243 @@ package common
 
 import (
 	"fmt"
+	"runtime"
 )
 
-type StackError struct {
-	Err   interface{}
-	Stack []byte
+//----------------------------------------
+// Convenience methods
+
+// ErrorWrap will just call .TraceFrom(), or create a new *cmnError.
+func ErrorWrap(cause interface{}, format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	if causeCmnError, ok := cause.(*cmnError); ok {
+		return causeCmnError.TraceFrom(1, msg)
+	}
+	// NOTE: cause may be nil.
+	// NOTE: do not use causeCmnError here, not the same as nil.
+	return newError(msg, cause, cause).Stacktrace()
+}
+
+//----------------------------------------
+// Error & cmnError
+
+/*
+Usage:
+
+```go
+	// Error construction
+	var someT = errors.New("Some err type")
+	var err1 error = NewErrorWithT(someT, "my message")
+	...
+	// Wrapping
+	var err2 error = ErrorWrap(err1, "another message")
+	if err1 != err2 { panic("should be the same") }
+	...
+	// Error handling
+	switch err2.T() {
+		case someT: ...
+	    default: ...
+	}
+```
+
+*/
+type Error interface {
+	Error() string
+	Message() string
+	Stacktrace() Error
+	Trace(format string, args ...interface{}) Error
+	TraceFrom(offset int, format string, args ...interface{}) Error
+	Cause() interface{}
+	WithT(t interface{}) Error
+	T() interface{}
+	Format(s fmt.State, verb rune)
+}
+
+// New Error with no cause where the type is the format string of the message.
+func NewError(format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return newError(msg, nil, format)
+
+}
+
+// New Error with specified type and message.
+func NewErrorWithT(t interface{}, format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return newError(msg, nil, t)
+}
+
+// NOTE: The name of a function "NewErrorWithCause()" implies that you are
+// creating a new Error, yet, if the cause is an Error, creating a new Error to
+// hold a ref to the old Error is probably *not* what you want to do.
+// So, use ErrorWrap(cause, format, a...) instead, which returns the same error
+// if cause is an Error.
+// IF you must set an Error as the cause of an Error,
+// then you can use the WithCauser interface to do so manually.
+// e.g. (error).(tmlibs.WithCauser).WithCause(causeError)
+
+type WithCauser interface {
+	WithCause(cause interface{}) Error
+}
+
+type cmnError struct {
+	msg        string         // first msg which also appears in msg
+	cause      interface{}    // underlying cause (or panic object)
+	t          interface{}    // for switching on error
+	msgtraces  []msgtraceItem // all messages traced
+	stacktrace []uintptr      // first stack trace
+}
+
+var _ WithCauser = &cmnError{}
+var _ Error = &cmnError{}
+
+// NOTE: do not expose.
+func newError(msg string, cause interface{}, t interface{}) *cmnError {
+	return &cmnError{
+		msg:        msg,
+		cause:      cause,
+		t:          t,
+		msgtraces:  nil,
+		stacktrace: nil,
+	}
+}
+
+func (err *cmnError) Message() string {
+	return err.msg
+}
+
+func (err *cmnError) Error() string {
+	return fmt.Sprintf("%v", err)
+}
+
+// Captures a stacktrace if one was not already captured.
+func (err *cmnError) Stacktrace() Error {
+	if err.stacktrace == nil {
+		var offset = 3
+		var depth = 32
+		err.stacktrace = captureStacktrace(offset, depth)
+	}
+	return err
+}
+
+// Add tracing information with msg.
+func (err *cmnError) Trace(format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return err.doTrace(msg, 0)
+}
+
+// Same as Trace, but traces the line `offset` calls out.
+// If offset == 0, the behavior is identical to Trace().
+func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error {
+	msg := Fmt(format, args...)
+	return err.doTrace(msg, offset)
+}
+
+// Return last known cause.
+// NOTE: The meaning of "cause" is left for the caller to define.
+// There exists no "canonical" definition of "cause".
+// Instead of blaming, try to handle it, or organize it.
+func (err *cmnError) Cause() interface{} {
+	return err.cause
+}
+
+// Overwrites the Error's cause.
+func (err *cmnError) WithCause(cause interface{}) Error {
+	err.cause = cause
+	return err
+}
+
+// Overwrites the Error's type.
+func (err *cmnError) WithT(t interface{}) Error {
+	err.t = t
+	return err
+}
+
+// Return the "type" of this message, primarily for switching
+// to handle this Error.
+func (err *cmnError) T() interface{} {
+	return err.t
+}
+
+func (err *cmnError) doTrace(msg string, n int) Error {
+	pc, _, _, _ := runtime.Caller(n + 2) // +1 for doTrace().  +1 for the caller.
+	// Include file & line number & msg.
+	// Do not include the whole stack trace.
+	err.msgtraces = append(err.msgtraces, msgtraceItem{
+		pc:  pc,
+		msg: msg,
+	})
+	return err
+}
+
+func (err *cmnError) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'p':
+		s.Write([]byte(fmt.Sprintf("%p", &err)))
+	default:
+		if s.Flag('#') {
+			s.Write([]byte("--= Error =--\n"))
+			// Write msg.
+			s.Write([]byte(fmt.Sprintf("Message: %#s\n", err.msg)))
+			// Write cause.
+			s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause)))
+			// Write type.
+			s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t)))
+			// Write msg trace items.
+			s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
+			for i, msgtrace := range err.msgtraces {
+				s.Write([]byte(fmt.Sprintf(" %4d  %s\n", i, msgtrace.String())))
+			}
+			// Write stack trace.
+			if err.stacktrace != nil {
+				s.Write([]byte(fmt.Sprintf("Stack Trace:\n")))
+				for i, pc := range err.stacktrace {
+					fnc := runtime.FuncForPC(pc)
+					file, line := fnc.FileLine(pc)
+					s.Write([]byte(fmt.Sprintf(" %4d  %s:%d\n", i, file, line)))
+				}
+			}
+			s.Write([]byte("--= /Error =--\n"))
+		} else {
+			// Write msg.
+			if err.cause != nil {
+				s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc?
+			} else {
+				s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc?
+			}
+		}
+	}
+}
+
+//----------------------------------------
+// stacktrace & msgtraceItem
+
+func captureStacktrace(offset int, depth int) []uintptr {
+	var pcs = make([]uintptr, depth)
+	n := runtime.Callers(offset, pcs)
+	return pcs[0:n]
 }
 
-func (se StackError) String() string {
-	return fmt.Sprintf("Error: %v\nStack: %s", se.Err, se.Stack)
+type msgtraceItem struct {
+	pc  uintptr
+	msg string
 }
 
-func (se StackError) Error() string {
-	return se.String()
+func (mti msgtraceItem) String() string {
+	fnc := runtime.FuncForPC(mti.pc)
+	file, line := fnc.FileLine(mti.pc)
+	return fmt.Sprintf("%s:%d - %s",
+		file, line,
+		mti.msg,
+	)
 }
 
-//--------------------------------------------------------------------------------------------------
-// panic wrappers
+//----------------------------------------
+// Panic wrappers
+// XXX DEPRECATED
 
 // A panic resulting from a sanity check means there is a programmer error
 // and some guarantee is not satisfied.
+// XXX DEPRECATED
 func PanicSanity(v interface{}) {
 	panic(Fmt("Panicked on a Sanity Check: %v", v))
 }
@@ -29,17 +246,20 @@ func PanicSanity(v interface{}) {
 // A panic here means something has gone horribly wrong, in the form of data corruption or
 // failure of the operating system. In a correct/healthy system, these should never fire.
 // If they do, it's indicative of a much more serious problem.
+// XXX DEPRECATED
 func PanicCrisis(v interface{}) {
 	panic(Fmt("Panicked on a Crisis: %v", v))
 }
 
 // Indicates a failure of consensus. Someone was malicious or something has
 // gone horribly wrong. These should really boot us into an "emergency-recover" mode
+// XXX DEPRECATED
 func PanicConsensus(v interface{}) {
 	panic(Fmt("Panicked on a Consensus Failure: %v", v))
 }
 
 // For those times when we're not sure if we should panic
+// XXX DEPRECATED
 func PanicQ(v interface{}) {
 	panic(Fmt("Panicked questionably: %v", v))
 }
diff --git a/vendor/github.com/tendermint/tmlibs/common/heap.go b/vendor/github.com/tendermint/tmlibs/common/heap.go
index 4a96d7aa..b3bcb9db 100644
--- a/vendor/github.com/tendermint/tmlibs/common/heap.go
+++ b/vendor/github.com/tendermint/tmlibs/common/heap.go
@@ -1,28 +1,25 @@
 package common
 
 import (
+	"bytes"
 	"container/heap"
 )
 
-type Comparable interface {
-	Less(o interface{}) bool
-}
-
-//-----------------------------------------------------------------------------
-
 /*
-Example usage:
+	Example usage:
+
+	```
 	h := NewHeap()
 
-	h.Push(String("msg1"), 1)
-	h.Push(String("msg3"), 3)
-	h.Push(String("msg2"), 2)
+	h.Push("msg1", 1)
+	h.Push("msg3", 3)
+	h.Push("msg2", 2)
 
-	fmt.Println(h.Pop())
-	fmt.Println(h.Pop())
-	fmt.Println(h.Pop())
+	fmt.Println(h.Pop()) // msg1
+	fmt.Println(h.Pop()) // msg2
+	fmt.Println(h.Pop()) // msg3
+	```
 */
-
 type Heap struct {
 	pq priorityQueue
 }
@@ -35,7 +32,15 @@ func (h *Heap) Len() int64 {
 	return int64(len(h.pq))
 }
 
-func (h *Heap) Push(value interface{}, priority Comparable) {
+func (h *Heap) Push(value interface{}, priority int) {
+	heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)})
+}
+
+func (h *Heap) PushBytes(value interface{}, priority []byte) {
+	heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)})
+}
+
+func (h *Heap) PushComparable(value interface{}, priority Comparable) {
 	heap.Push(&h.pq, &pqItem{value: value, priority: priority})
 }
 
@@ -56,8 +61,6 @@ func (h *Heap) Pop() interface{} {
 }
 
 //-----------------------------------------------------------------------------
-
-///////////////////////
 // From: http://golang.org/pkg/container/heap/#example__priorityQueue
 
 type pqItem struct {
@@ -101,3 +104,22 @@ func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Compar
 	item.priority = priority
 	heap.Fix(pq, item.index)
 }
+
+//--------------------------------------------------------------------------------
+// Comparable
+
+type Comparable interface {
+	Less(o interface{}) bool
+}
+
+type cmpInt int
+
+func (i cmpInt) Less(o interface{}) bool {
+	return int(i) < int(o.(cmpInt))
+}
+
+type cmpBytes []byte
+
+func (bz cmpBytes) Less(o interface{}) bool {
+	return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/http.go b/vendor/github.com/tendermint/tmlibs/common/http.go
deleted file mode 100644
index 56b5b6c6..00000000
--- a/vendor/github.com/tendermint/tmlibs/common/http.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package common
-
-import (
-	"encoding/json"
-	"io"
-	"net/http"
-
-	"gopkg.in/go-playground/validator.v9"
-
-	"github.com/pkg/errors"
-)
-
-type ErrorResponse struct {
-	Success bool `json:"success,omitempty"`
-
-	// Err is the error message if Success is false
-	Err string `json:"error,omitempty"`
-
-	// Code is set if Success is false
-	Code int `json:"code,omitempty"`
-}
-
-// ErrorWithCode makes an ErrorResponse with the
-// provided err's Error() content, and status code.
-// It panics if err is nil.
-func ErrorWithCode(err error, code int) *ErrorResponse {
-	return &ErrorResponse{
-		Err:  err.Error(),
-		Code: code,
-	}
-}
-
-// Ensure that ErrorResponse implements error
-var _ error = (*ErrorResponse)(nil)
-
-func (er *ErrorResponse) Error() string {
-	return er.Err
-}
-
-// Ensure that ErrorResponse implements httpCoder
-var _ httpCoder = (*ErrorResponse)(nil)
-
-func (er *ErrorResponse) HTTPCode() int {
-	return er.Code
-}
-
-var errNilBody = errors.Errorf("expecting a non-nil body")
-
-// FparseJSON unmarshals into save, the body of the provided reader.
-// Since it uses json.Unmarshal, save must be of a pointer type
-// or compatible with json.Unmarshal.
-func FparseJSON(r io.Reader, save interface{}) error {
-	if r == nil {
-		return errors.Wrap(errNilBody, "Reader")
-	}
-
-	dec := json.NewDecoder(r)
-	if err := dec.Decode(save); err != nil {
-		return errors.Wrap(err, "Decode/Unmarshal")
-	}
-	return nil
-}
-
-// ParseRequestJSON unmarshals into save, the body of the
-// request. It closes the body of the request after parsing.
-// Since it uses json.Unmarshal, save must be of a pointer type
-// or compatible with json.Unmarshal.
-func ParseRequestJSON(r *http.Request, save interface{}) error {
-	if r == nil || r.Body == nil {
-		return errNilBody
-	}
-	defer r.Body.Close()
-
-	return FparseJSON(r.Body, save)
-}
-
-// ParseRequestAndValidateJSON unmarshals into save, the body of the
-// request and invokes a validator on the saved content. To ensure
-// validation, make sure to set tags "validate" on your struct as
-// per https://godoc.org/gopkg.in/go-playground/validator.v9.
-// It closes the body of the request after parsing.
-// Since it uses json.Unmarshal, save must be of a pointer type
-// or compatible with json.Unmarshal.
-func ParseRequestAndValidateJSON(r *http.Request, save interface{}) error {
-	if r == nil || r.Body == nil {
-		return errNilBody
-	}
-	defer r.Body.Close()
-
-	return FparseAndValidateJSON(r.Body, save)
-}
-
-// FparseAndValidateJSON like FparseJSON unmarshals into save,
-// the body of the provided reader. However, it invokes the validator
-// to check the set validators on your struct fields as per
-// per https://godoc.org/gopkg.in/go-playground/validator.v9.
-// Since it uses json.Unmarshal, save must be of a pointer type
-// or compatible with json.Unmarshal.
-func FparseAndValidateJSON(r io.Reader, save interface{}) error {
-	if err := FparseJSON(r, save); err != nil {
-		return err
-	}
-	return validate(save)
-}
-
-var theValidator = validator.New()
-
-func validate(obj interface{}) error {
-	return errors.Wrap(theValidator.Struct(obj), "Validate")
-}
-
-// WriteSuccess JSON marshals the content provided, to an HTTP
-// response, setting the provided status code and setting header
-// "Content-Type" to "application/json".
-func WriteSuccess(w http.ResponseWriter, data interface{}) {
-	WriteCode(w, data, 200)
-}
-
-// WriteCode JSON marshals content, to an HTTP response,
-// setting the provided status code, and setting header
-// "Content-Type" to "application/json". If JSON marshalling fails
-// with an error, WriteCode instead writes out the error invoking
-// WriteError.
-func WriteCode(w http.ResponseWriter, out interface{}, code int) {
-	blob, err := json.MarshalIndent(out, "", "  ")
-	if err != nil {
-		WriteError(w, err)
-	} else {
-		w.Header().Set("Content-Type", "application/json")
-		w.WriteHeader(code)
-		w.Write(blob)
-	}
-}
-
-type httpCoder interface {
-	HTTPCode() int
-}
-
-// WriteError is a convenience function to write out an
-// error to an http.ResponseWriter, to send out an error
-// that's structured as JSON i.e the form
-//    {"error": sss, "code": ddd}
-// If err implements the interface HTTPCode() int,
-// it will use that status code otherwise, it will
-// set code to be http.StatusBadRequest
-func WriteError(w http.ResponseWriter, err error) {
-	code := http.StatusBadRequest
-	if httpC, ok := err.(httpCoder); ok {
-		code = httpC.HTTPCode()
-	}
-
-	WriteCode(w, ErrorWithCode(err, code), code)
-}
diff --git a/vendor/github.com/tendermint/tmlibs/common/io.go b/vendor/github.com/tendermint/tmlibs/common/io.go
index 378c19fc..fa0443e0 100644
--- a/vendor/github.com/tendermint/tmlibs/common/io.go
+++ b/vendor/github.com/tendermint/tmlibs/common/io.go
@@ -20,9 +20,8 @@ func (pr *PrefixedReader) Read(p []byte) (n int, err error) {
 		read := copy(p, pr.Prefix)
 		pr.Prefix = pr.Prefix[read:]
 		return read, nil
-	} else {
-		return pr.reader.Read(p)
 	}
+	return pr.reader.Read(p)
 }
 
 // NOTE: Not goroutine safe
diff --git a/vendor/github.com/tendermint/tmlibs/common/kvpair.go b/vendor/github.com/tendermint/tmlibs/common/kvpair.go
new file mode 100644
index 00000000..54c3a58c
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/common/kvpair.go
@@ -0,0 +1,67 @@
+package common
+
+import (
+	"bytes"
+	"sort"
+)
+
+//----------------------------------------
+// KVPair
+
+/*
+Defined in types.proto
+
+type KVPair struct {
+	Key   []byte
+	Value []byte
+}
+*/
+
+type KVPairs []KVPair
+
+// Sorting
+func (kvs KVPairs) Len() int { return len(kvs) }
+func (kvs KVPairs) Less(i, j int) bool {
+	switch bytes.Compare(kvs[i].Key, kvs[j].Key) {
+	case -1:
+		return true
+	case 0:
+		return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0
+	case 1:
+		return false
+	default:
+		panic("invalid comparison result")
+	}
+}
+func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
+func (kvs KVPairs) Sort()         { sort.Sort(kvs) }
+
+//----------------------------------------
+// KI64Pair
+
+/*
+Defined in types.proto
+type KI64Pair struct {
+	Key   []byte
+	Value int64
+}
+*/
+
+type KI64Pairs []KI64Pair
+
+// Sorting
+func (kvs KI64Pairs) Len() int { return len(kvs) }
+func (kvs KI64Pairs) Less(i, j int) bool {
+	switch bytes.Compare(kvs[i].Key, kvs[j].Key) {
+	case -1:
+		return true
+	case 0:
+		return kvs[i].Value < kvs[j].Value
+	case 1:
+		return false
+	default:
+		panic("invalid comparison result")
+	}
+}
+func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
+func (kvs KI64Pairs) Sort()         { sort.Sort(kvs) }
diff --git a/vendor/github.com/tendermint/tmlibs/common/nil.go b/vendor/github.com/tendermint/tmlibs/common/nil.go
new file mode 100644
index 00000000..31f75f00
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/common/nil.go
@@ -0,0 +1,29 @@
+package common
+
+import "reflect"
+
+// Go lacks a simple and safe way to see if something is a typed nil.
+// See:
+//  - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2
+//  - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion
+//  - https://github.com/golang/go/issues/21538
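+//
+// For example, a nil *int stored in an interface is not == nil, but it is a
+// typed nil (illustrative):
+//
+//	var p *int
+//	var i interface{} = p
+//	i == nil          // false
+//	IsTypedNil(i)     // true
+//	IsTypedNil(nil)   // false (untyped nil has an invalid Kind)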
+func IsTypedNil(o interface{}) bool {
+	rv := reflect.ValueOf(o)
+	switch rv.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
+
+// IsEmpty returns true if o has zero length.
+func IsEmpty(o interface{}) bool {
+	rv := reflect.ValueOf(o)
+	switch rv.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return rv.Len() == 0
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/os.go b/vendor/github.com/tendermint/tmlibs/common/os.go
index 36fc969f..00f4da57 100644
--- a/vendor/github.com/tendermint/tmlibs/common/os.go
+++ b/vendor/github.com/tendermint/tmlibs/common/os.go
@@ -124,32 +124,35 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) {
 	}
 }
 
-// WriteFileAtomic writes newBytes to temp and atomically moves to filePath
-// when everything else succeeds.
-func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error {
-	dir := filepath.Dir(filePath)
-	f, err := ioutil.TempFile(dir, "")
+// WriteFileAtomic writes data to a temporary file with the given perm and
+// atomically renames it to filename if everything succeeds.
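+//
+// For example (illustrative), WriteFileAtomic("genesis.json", data, 0644)
+// writes data to a temporary "write-file-atomic-*" file in the same directory
+// and then renames it over genesis.json, so readers never observe a partially
+// written file.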
+func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
+	var (
+		dir      = filepath.Dir(filename)
+		tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32))
+		// Overwrite the file if it exists, create it if it doesn't, and force a
+		// kernel flush, which still leaves the potential of lingering disk cache.
+		flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC
+	)
+
+	f, err := os.OpenFile(tempFile, flag, perm)
 	if err != nil {
 		return err
 	}
-	_, err = f.Write(newBytes)
-	if err == nil {
-		err = f.Sync()
-	}
-	if closeErr := f.Close(); err == nil {
-		err = closeErr
-	}
-	if permErr := os.Chmod(f.Name(), mode); err == nil {
-		err = permErr
-	}
-	if err == nil {
-		err = os.Rename(f.Name(), filePath)
-	}
-	// any err should result in full cleanup
-	if err != nil {
-		os.Remove(f.Name())
+	// Clean up in any case. Defer stacking order is last-in-first-out.
+	defer os.Remove(f.Name())
+	defer f.Close()
+
+	if n, err := f.Write(data); err != nil {
+		return err
+	} else if n < len(data) {
+		return io.ErrShortWrite
 	}
-	return err
+	// Close the file before renaming it, otherwise it will cause "The process
+	// cannot access the file because it is being used by another process." on Windows.
+	f.Close()
+
+	return os.Rename(f.Name(), filename)
 }
 
 //--------------------------------------------------------------------------------
@@ -183,11 +186,10 @@ func Prompt(prompt string, defaultValue string) (string, error) {
 	line, err := reader.ReadString('\n')
 	if err != nil {
 		return defaultValue, err
-	} else {
-		line = strings.TrimSpace(line)
-		if line == "" {
-			return defaultValue, nil
-		}
-		return line, nil
 	}
+	line = strings.TrimSpace(line)
+	if line == "" {
+		return defaultValue, nil
+	}
+	return line, nil
 }
diff --git a/vendor/github.com/tendermint/tmlibs/common/random.go b/vendor/github.com/tendermint/tmlibs/common/random.go
index ca71b614..389a32fc 100644
--- a/vendor/github.com/tendermint/tmlibs/common/random.go
+++ b/vendor/github.com/tendermint/tmlibs/common/random.go
@@ -13,34 +13,150 @@ const (
 
 // pseudo random number generator.
 // seeded with OS randomness (crand)
-var prng struct {
+
+type Rand struct {
 	sync.Mutex
-	*mrand.Rand
+	rand *mrand.Rand
+}
+
+var grand *Rand
+
+func init() {
+	grand = NewRand()
+	grand.init()
+}
+
+func NewRand() *Rand {
+	rand := &Rand{}
+	rand.init()
+	return rand
 }
 
-func reset() {
-	b := cRandBytes(8)
+func (r *Rand) init() {
+	bz := cRandBytes(8)
 	var seed uint64
 	for i := 0; i < 8; i++ {
-		seed |= uint64(b[i])
+		seed |= uint64(bz[i])
 		seed <<= 8
 	}
-	prng.Lock()
-	prng.Rand = mrand.New(mrand.NewSource(int64(seed)))
-	prng.Unlock()
+	r.reset(int64(seed))
 }
 
-func init() {
-	reset()
+func (r *Rand) reset(seed int64) {
+	r.rand = mrand.New(mrand.NewSource(seed))
+}
+
+//----------------------------------------
+// Global functions
+
+func Seed(seed int64) {
+	grand.Seed(seed)
+}
+
+func RandStr(length int) string {
+	return grand.Str(length)
+}
+
+func RandUint16() uint16 {
+	return grand.Uint16()
+}
+
+func RandUint32() uint32 {
+	return grand.Uint32()
+}
+
+func RandUint64() uint64 {
+	return grand.Uint64()
+}
+
+func RandUint() uint {
+	return grand.Uint()
+}
+
+func RandInt16() int16 {
+	return grand.Int16()
+}
+
+func RandInt32() int32 {
+	return grand.Int32()
+}
+
+func RandInt64() int64 {
+	return grand.Int64()
+}
+
+func RandInt() int {
+	return grand.Int()
+}
+
+func RandInt31() int32 {
+	return grand.Int31()
+}
+
+func RandInt31n(n int32) int32 {
+	return grand.Int31n(n)
+}
+
+func RandInt63() int64 {
+	return grand.Int63()
+}
+
+func RandInt63n(n int64) int64 {
+	return grand.Int63n(n)
+}
+
+func RandUint16Exp() uint16 {
+	return grand.Uint16Exp()
+}
+
+func RandUint32Exp() uint32 {
+	return grand.Uint32Exp()
+}
+
+func RandUint64Exp() uint64 {
+	return grand.Uint64Exp()
+}
+
+func RandFloat32() float32 {
+	return grand.Float32()
+}
+
+func RandFloat64() float64 {
+	return grand.Float64()
+}
+
+func RandTime() time.Time {
+	return grand.Time()
+}
+
+func RandBytes(n int) []byte {
+	return grand.Bytes(n)
+}
+
+func RandIntn(n int) int {
+	return grand.Intn(n)
+}
+
+func RandPerm(n int) []int {
+	return grand.Perm(n)
+}
+
+//----------------------------------------
+// Rand methods
+
+func (r *Rand) Seed(seed int64) {
+	r.Lock()
+	r.reset(seed)
+	r.Unlock()
 }
 
 // Constructs an alphanumeric string of given length.
 // It is not safe for cryptographic usage.
-func RandStr(length int) string {
+func (r *Rand) Str(length int) string {
 	chars := []byte{}
 MAIN_LOOP:
 	for {
-		val := RandInt63()
+		val := r.Int63()
 		for i := 0; i < 10; i++ {
 			v := int(val & 0x3f) // rightmost 6 bits
 			if v >= 62 {         // only 62 characters in strChars
@@ -60,127 +176,151 @@ MAIN_LOOP:
 }
 
 // It is not safe for cryptographic usage.
-func RandUint16() uint16 {
-	return uint16(RandUint32() & (1<<16 - 1))
+func (r *Rand) Uint16() uint16 {
+	return uint16(r.Uint32() & (1<<16 - 1))
 }
 
 // It is not safe for cryptographic usage.
-func RandUint32() uint32 {
-	prng.Lock()
-	u32 := prng.Uint32()
-	prng.Unlock()
+func (r *Rand) Uint32() uint32 {
+	r.Lock()
+	u32 := r.rand.Uint32()
+	r.Unlock()
 	return u32
 }
 
 // It is not safe for cryptographic usage.
-func RandUint64() uint64 {
-	return uint64(RandUint32())<<32 + uint64(RandUint32())
+func (r *Rand) Uint64() uint64 {
+	return uint64(r.Uint32())<<32 + uint64(r.Uint32())
 }
 
 // It is not safe for cryptographic usage.
-func RandUint() uint {
-	prng.Lock()
-	i := prng.Int()
-	prng.Unlock()
+func (r *Rand) Uint() uint {
+	r.Lock()
+	i := r.rand.Int()
+	r.Unlock()
 	return uint(i)
 }
 
 // It is not safe for cryptographic usage.
-func RandInt16() int16 {
-	return int16(RandUint32() & (1<<16 - 1))
+func (r *Rand) Int16() int16 {
+	return int16(r.Uint32() & (1<<16 - 1))
 }
 
 // It is not safe for cryptographic usage.
-func RandInt32() int32 {
-	return int32(RandUint32())
+func (r *Rand) Int32() int32 {
+	return int32(r.Uint32())
 }
 
 // It is not safe for cryptographic usage.
-func RandInt64() int64 {
-	return int64(RandUint64())
+func (r *Rand) Int64() int64 {
+	return int64(r.Uint64())
 }
 
 // It is not safe for cryptographic usage.
-func RandInt() int {
-	prng.Lock()
-	i := prng.Int()
-	prng.Unlock()
+func (r *Rand) Int() int {
+	r.Lock()
+	i := r.rand.Int()
+	r.Unlock()
 	return i
 }
 
 // It is not safe for cryptographic usage.
-func RandInt31() int32 {
-	prng.Lock()
-	i31 := prng.Int31()
-	prng.Unlock()
+func (r *Rand) Int31() int32 {
+	r.Lock()
+	i31 := r.rand.Int31()
+	r.Unlock()
 	return i31
 }
 
 // It is not safe for cryptographic usage.
-func RandInt63() int64 {
-	prng.Lock()
-	i63 := prng.Int63()
-	prng.Unlock()
+func (r *Rand) Int31n(n int32) int32 {
+	r.Lock()
+	i31n := r.rand.Int31n(n)
+	r.Unlock()
+	return i31n
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Int63() int64 {
+	r.Lock()
+	i63 := r.rand.Int63()
+	r.Unlock()
 	return i63
 }
 
+// It is not safe for cryptographic usage.
+func (r *Rand) Int63n(n int64) int64 {
+	r.Lock()
+	i63n := r.rand.Int63n(n)
+	r.Unlock()
+	return i63n
+}
+
 // Distributed pseudo-exponentially to test for various cases
 // It is not safe for cryptographic usage.
-func RandUint16Exp() uint16 {
-	bits := RandUint32() % 16
+func (r *Rand) Uint16Exp() uint16 {
+	bits := r.Uint32() % 16
 	if bits == 0 {
 		return 0
 	}
 	n := uint16(1 << (bits - 1))
-	n += uint16(RandInt31()) & ((1 << (bits - 1)) - 1)
+	n += uint16(r.Int31()) & ((1 << (bits - 1)) - 1)
 	return n
 }
 
 // Distributed pseudo-exponentially to test for various cases
 // It is not safe for cryptographic usage.
-func RandUint32Exp() uint32 {
-	bits := RandUint32() % 32
+func (r *Rand) Uint32Exp() uint32 {
+	bits := r.Uint32() % 32
 	if bits == 0 {
 		return 0
 	}
 	n := uint32(1 << (bits - 1))
-	n += uint32(RandInt31()) & ((1 << (bits - 1)) - 1)
+	n += uint32(r.Int31()) & ((1 << (bits - 1)) - 1)
 	return n
 }
 
 // Distributed pseudo-exponentially to test for various cases
 // It is not safe for cryptographic usage.
-func RandUint64Exp() uint64 {
-	bits := RandUint32() % 64
+func (r *Rand) Uint64Exp() uint64 {
+	bits := r.Uint32() % 64
 	if bits == 0 {
 		return 0
 	}
 	n := uint64(1 << (bits - 1))
-	n += uint64(RandInt63()) & ((1 << (bits - 1)) - 1)
+	n += uint64(r.Int63()) & ((1 << (bits - 1)) - 1)
 	return n
 }
 
 // It is not safe for cryptographic usage.
-func RandFloat32() float32 {
-	prng.Lock()
-	f32 := prng.Float32()
-	prng.Unlock()
+func (r *Rand) Float32() float32 {
+	r.Lock()
+	f32 := r.rand.Float32()
+	r.Unlock()
 	return f32
 }
 
 // It is not safe for cryptographic usage.
-func RandTime() time.Time {
-	return time.Unix(int64(RandUint64Exp()), 0)
+func (r *Rand) Float64() float64 {
+	r.Lock()
+	f64 := r.rand.Float64()
+	r.Unlock()
+	return f64
+}
+
+// It is not safe for cryptographic usage.
+func (r *Rand) Time() time.Time {
+	return time.Unix(int64(r.Uint64Exp()), 0)
 }
 
 // RandBytes returns n random bytes from the OS's source of entropy ie. via crypto/rand.
 // It is not safe for cryptographic usage.
-func RandBytes(n int) []byte {
+func (r *Rand) Bytes(n int) []byte {
 	// cRandBytes isn't guaranteed to be fast so instead
 	// use random bytes generated from the internal PRNG
 	bs := make([]byte, n)
 	for i := 0; i < len(bs); i++ {
-		bs[i] = byte(RandInt() & 0xFF)
+		bs[i] = byte(r.Int() & 0xFF)
 	}
 	return bs
 }
@@ -188,19 +328,19 @@ func RandBytes(n int) []byte {
 // RandIntn returns, as an int, a non-negative pseudo-random number in [0, n).
 // It panics if n <= 0.
 // It is not safe for cryptographic usage.
-func RandIntn(n int) int {
-	prng.Lock()
-	i := prng.Intn(n)
-	prng.Unlock()
+func (r *Rand) Intn(n int) int {
+	r.Lock()
+	i := r.rand.Intn(n)
+	r.Unlock()
 	return i
 }
 
 // RandPerm returns a pseudo-random permutation of n integers in [0, n).
 // It is not safe for cryptographic usage.
-func RandPerm(n int) []int {
-	prng.Lock()
-	perm := prng.Perm(n)
-	prng.Unlock()
+func (r *Rand) Perm(n int) []int {
+	r.Lock()
+	perm := r.rand.Perm(n)
+	r.Unlock()
 	return perm
 }
 
diff --git a/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go b/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go
index 2e6cb81c..5d049738 100644
--- a/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go
+++ b/vendor/github.com/tendermint/tmlibs/common/repeat_timer.go
@@ -20,15 +20,17 @@ type Ticker interface {
 }
 
 //----------------------------------------
-// defaultTickerMaker
+// defaultTicker
+
+var _ Ticker = (*defaultTicker)(nil)
+
+type defaultTicker time.Ticker
 
 func defaultTickerMaker(dur time.Duration) Ticker {
 	ticker := time.NewTicker(dur)
 	return (*defaultTicker)(ticker)
 }
 
-type defaultTicker time.Ticker
-
 // Implements Ticker
 func (t *defaultTicker) Chan() <-chan time.Time {
 	return t.C
@@ -80,13 +82,11 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) {
 	}
 	// Init `lasttime` end
 
-	timeleft := interval
 	for {
 		select {
 		case newtime := <-source:
 			elapsed := newtime.Sub(lasttime)
-			timeleft -= elapsed
-			if timeleft <= 0 {
+			if interval <= elapsed {
 				// Block for determinism until the ticker is stopped.
 				select {
 				case t.ch <- newtime:
@@ -97,7 +97,7 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) {
 				// Don't try to "catch up" by sending more.
 				// "Ticker adjusts the intervals or drops ticks to make up for
 				// slow receivers" - https://golang.org/pkg/time/#Ticker
-				timeleft = interval
+				lasttime = newtime
 			}
 		case <-t.quit:
 			return // done
@@ -153,11 +153,16 @@ func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMake
 	return t
 }
 
+// receive ticks on ch, send out on t.ch
 func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) {
 	for {
 		select {
-		case t_ := <-ch:
-			t.ch <- t_
+		case tick := <-ch:
+			select {
+			case t.ch <- tick:
+			case <-quit:
+				return
+			}
 		case <-quit: // NOTE: `t.quit` races.
 			return
 		}
@@ -212,7 +217,6 @@ func (t *RepeatTimer) stop() {
 	t.ticker.Stop()
 	t.ticker = nil
 	/*
-		XXX
 		From https://golang.org/pkg/time/#Ticker:
 		"Stop the ticker to release associated resources"
 		"After Stop, no more ticks will be sent"
diff --git a/vendor/github.com/tendermint/tmlibs/common/service.go b/vendor/github.com/tendermint/tmlibs/common/service.go
index d70d16a8..2f90fa4f 100644
--- a/vendor/github.com/tendermint/tmlibs/common/service.go
+++ b/vendor/github.com/tendermint/tmlibs/common/service.go
@@ -35,9 +35,13 @@ type Service interface {
 	// Return true if the service is running
 	IsRunning() bool
 
+	// Quit returns a channel, which is closed once the service is stopped.
+	Quit() <-chan struct{}
+
 	// String representation of the service
 	String() string
 
+	// SetLogger sets a logger.
 	SetLogger(log.Logger)
 }
 
@@ -88,12 +92,13 @@ type BaseService struct {
 	name    string
 	started uint32 // atomic
 	stopped uint32 // atomic
-	Quit    chan struct{}
+	quit    chan struct{}
 
 	// The "subclass" of BaseService
 	impl Service
 }
 
+// NewBaseService creates a new BaseService.
 func NewBaseService(logger log.Logger, name string, impl Service) *BaseService {
 	if logger == nil {
 		logger = log.NewNopLogger()
@@ -102,24 +107,26 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService {
 	return &BaseService{
 		Logger: logger,
 		name:   name,
-		Quit:   make(chan struct{}),
+		quit:   make(chan struct{}),
 		impl:   impl,
 	}
 }
 
+// SetLogger implements Service by setting a logger.
 func (bs *BaseService) SetLogger(l log.Logger) {
 	bs.Logger = l
 }
 
-// Implements Servce
+// Start implements Service by calling OnStart (if defined). An error will be
+// returned if the service is already running or stopped. To restart a
+// stopped service, call Reset first.
 func (bs *BaseService) Start() error {
 	if atomic.CompareAndSwapUint32(&bs.started, 0, 1) {
 		if atomic.LoadUint32(&bs.stopped) == 1 {
 			bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl)
 			return ErrAlreadyStopped
-		} else {
-			bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl)
 		}
+		bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl)
 		err := bs.impl.OnStart()
 		if err != nil {
 			// revert flag
@@ -127,36 +134,36 @@ func (bs *BaseService) Start() error {
 			return err
 		}
 		return nil
-	} else {
-		bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl)
-		return ErrAlreadyStarted
 	}
+	bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl)
+	return ErrAlreadyStarted
 }
 
-// Implements Service
+// OnStart implements Service by doing nothing.
 // NOTE: Do not put anything in here,
 // that way users don't need to call BaseService.OnStart()
 func (bs *BaseService) OnStart() error { return nil }
 
-// Implements Service
+// Stop implements Service by calling OnStop (if defined) and closing the quit
+// channel. An error will be returned if the service is already stopped.
 func (bs *BaseService) Stop() error {
 	if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) {
 		bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl)
 		bs.impl.OnStop()
-		close(bs.Quit)
+		close(bs.quit)
 		return nil
-	} else {
-		bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl)
-		return ErrAlreadyStopped
 	}
+	bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl)
+	return ErrAlreadyStopped
 }
 
-// Implements Service
+// OnStop implements Service by doing nothing.
 // NOTE: Do not put anything in here,
 // that way users don't need to call BaseService.OnStop()
 func (bs *BaseService) OnStop() {}
 
-// Implements Service
+// Reset implements Service by calling the OnReset callback (if defined). An error
+// will be returned if the service is running.
 func (bs *BaseService) Reset() error {
 	if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) {
 		bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl)
@@ -166,41 +173,33 @@ func (bs *BaseService) Reset() error {
 	// whether or not we've started, we can reset
 	atomic.CompareAndSwapUint32(&bs.started, 1, 0)
 
-	bs.Quit = make(chan struct{})
+	bs.quit = make(chan struct{})
 	return bs.impl.OnReset()
 }
 
-// Implements Service
+// OnReset implements Service by panicking.
 func (bs *BaseService) OnReset() error {
 	PanicSanity("The service cannot be reset")
 	return nil
 }
 
-// Implements Service
+// IsRunning implements Service by returning true or false depending on the
+// service's state.
 func (bs *BaseService) IsRunning() bool {
 	return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0
 }
 
+// Wait blocks until the service is stopped.
 func (bs *BaseService) Wait() {
-	<-bs.Quit
+	<-bs.quit
 }
 
-// Implements Servce
+// String implements Service by returning a string representation of the service.
 func (bs *BaseService) String() string {
 	return bs.name
 }
 
-//----------------------------------------
-
-type QuitService struct {
-	BaseService
-}
-
-func NewQuitService(logger log.Logger, name string, impl Service) *QuitService {
-	if logger != nil {
-		logger.Info("QuitService is deprecated, use BaseService instead")
-	}
-	return &QuitService{
-		BaseService: *NewBaseService(logger, name, impl),
-	}
+// Quit implements Service by returning a quit channel.
+func (bs *BaseService) Quit() <-chan struct{} {
+	return bs.quit
 }
diff --git a/vendor/github.com/tendermint/tmlibs/common/string.go b/vendor/github.com/tendermint/tmlibs/common/string.go
index 6924e6a5..0e2231e9 100644
--- a/vendor/github.com/tendermint/tmlibs/common/string.go
+++ b/vendor/github.com/tendermint/tmlibs/common/string.go
@@ -6,30 +6,17 @@ import (
 	"strings"
 )
 
-// Fmt shorthand, XXX DEPRECATED
-var Fmt = fmt.Sprintf
-
-// RightPadString adds spaces to the right of a string to make it length totalLength
-func RightPadString(s string, totalLength int) string {
-	remaining := totalLength - len(s)
-	if remaining > 0 {
-		s = s + strings.Repeat(" ", remaining)
-	}
-	return s
-}
-
-// LeftPadString adds spaces to the left of a string to make it length totalLength
-func LeftPadString(s string, totalLength int) string {
-	remaining := totalLength - len(s)
-	if remaining > 0 {
-		s = strings.Repeat(" ", remaining) + s
+// Like fmt.Sprintf, but skips formatting if args are empty.
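+// For example, Fmt("hello") returns "hello" unchanged, while
+// Fmt("%d%%", 50) returns "50%".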
+var Fmt = func(format string, a ...interface{}) string {
+	if len(a) == 0 {
+		return format
 	}
-	return s
+	return fmt.Sprintf(format, a...)
 }
 
 // IsHex returns true for non-empty hex-string prefixed with "0x"
 func IsHex(s string) bool {
-	if len(s) > 2 && s[:2] == "0x" {
+	if len(s) > 2 && strings.EqualFold(s[:2], "0x") {
 		_, err := hex.DecodeString(s[2:])
 		return err == nil
 	}
@@ -53,3 +40,20 @@ func StringInSlice(a string, list []string) bool {
 	}
 	return false
 }
+
+// SplitAndTrim slices s into all substrings separated by sep and returns a
+// slice of those substrings with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
+// UTF-8 sequence. The split step is equivalent to strings.SplitN with a count
+// of -1.
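+//
+// For example, SplitAndTrim(" a ,b , c ", ",", " ") returns ["a" "b" "c"].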
+func SplitAndTrim(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	for i := 0; i < len(spl); i++ {
+		spl[i] = strings.Trim(spl[i], cutset)
+	}
+	return spl
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/types.pb.go b/vendor/github.com/tendermint/tmlibs/common/types.pb.go
new file mode 100644
index 00000000..047b7aee
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/common/types.pb.go
@@ -0,0 +1,101 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: common/types.proto
+
+/*
+Package common is a generated protocol buffer package.
+
+It is generated from these files:
+	common/types.proto
+
+It has these top-level messages:
+	KVPair
+	KI64Pair
+*/
+//nolint: gas
+package common
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// Define these here for compatibility but use tmlibs/common.KVPair.
+type KVPair struct {
+	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *KVPair) Reset()                    { *m = KVPair{} }
+func (m *KVPair) String() string            { return proto.CompactTextString(m) }
+func (*KVPair) ProtoMessage()               {}
+func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
+func (m *KVPair) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *KVPair) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Define these here for compatibility but use tmlibs/common.KI64Pair.
+type KI64Pair struct {
+	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value int64  `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *KI64Pair) Reset()                    { *m = KI64Pair{} }
+func (m *KI64Pair) String() string            { return proto.CompactTextString(m) }
+func (*KI64Pair) ProtoMessage()               {}
+func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
+func (m *KI64Pair) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *KI64Pair) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*KVPair)(nil), "common.KVPair")
+	proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair")
+}
+
+func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) }
+
+var fileDescriptorTypes = []byte{
+	// 137 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd,
+	0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
+	0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7,
+	0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68,
+	0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e,
+	0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12,
+	0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99,
+	0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
+	0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/tendermint/tmlibs/common/word.go b/vendor/github.com/tendermint/tmlibs/common/word.go
index 4072482b..a5b841f5 100644
--- a/vendor/github.com/tendermint/tmlibs/common/word.go
+++ b/vendor/github.com/tendermint/tmlibs/common/word.go
@@ -72,9 +72,8 @@ func (tuple Tuple256) Compare(other Tuple256) int {
 	firstCompare := tuple.First.Compare(other.First)
 	if firstCompare == 0 {
 		return tuple.Second.Compare(other.Second)
-	} else {
-		return firstCompare
 	}
+	return firstCompare
 }
 
 func Tuple256Split(t Tuple256) (Word256, Word256) {
diff --git a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go b/vendor/github.com/tendermint/tmlibs/db/c_level_db.go
index b1ae49a1..e3e6c1d5 100644
--- a/vendor/github.com/tendermint/tmlibs/db/c_level_db.go
+++ b/vendor/github.com/tendermint/tmlibs/db/c_level_db.go
@@ -3,22 +3,23 @@
 package db
 
 import (
+	"bytes"
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/jmhodges/levigo"
-
-	. "github.com/tendermint/tmlibs/common"
 )
 
 func init() {
 	dbCreator := func(name string, dir string) (DB, error) {
 		return NewCLevelDB(name, dir)
 	}
-	registerDBCreator(LevelDBBackendStr, dbCreator, true)
-	registerDBCreator(CLevelDBBackendStr, dbCreator, false)
+	registerDBCreator(LevelDBBackend, dbCreator, true)
+	registerDBCreator(CLevelDBBackend, dbCreator, false)
 }
 
+var _ DB = (*CLevelDB)(nil)
+
 type CLevelDB struct {
 	db     *levigo.DB
 	ro     *levigo.ReadOptions
@@ -27,7 +28,7 @@ type CLevelDB struct {
 }
 
 func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
-	dbPath := path.Join(dir, name+".db")
+	dbPath := filepath.Join(dir, name+".db")
 
 	opts := levigo.NewOptions()
 	opts.SetCache(levigo.NewLRUCache(1 << 30))
@@ -49,39 +50,56 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
 	return database, nil
 }
 
+// Implements DB.
 func (db *CLevelDB) Get(key []byte) []byte {
+	key = nonNilBytes(key)
 	res, err := db.db.Get(db.ro, key)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 	return res
 }
 
+// Implements DB.
+func (db *CLevelDB) Has(key []byte) bool {
+	return db.Get(key) != nil
+}
+
+// Implements DB.
 func (db *CLevelDB) Set(key []byte, value []byte) {
+	key = nonNilBytes(key)
+	value = nonNilBytes(value)
 	err := db.db.Put(db.wo, key, value)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 }
 
+// Implements DB.
 func (db *CLevelDB) SetSync(key []byte, value []byte) {
+	key = nonNilBytes(key)
+	value = nonNilBytes(value)
 	err := db.db.Put(db.woSync, key, value)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 }
 
+// Implements DB.
 func (db *CLevelDB) Delete(key []byte) {
+	key = nonNilBytes(key)
 	err := db.db.Delete(db.wo, key)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 }
 
+// Implements DB.
 func (db *CLevelDB) DeleteSync(key []byte) {
+	key = nonNilBytes(key)
 	err := db.db.Delete(db.woSync, key)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 }
 
@@ -89,6 +107,7 @@ func (db *CLevelDB) DB() *levigo.DB {
 	return db.db
 }
 
+// Implements DB.
 func (db *CLevelDB) Close() {
 	db.db.Close()
 	db.ro.Close()
@@ -96,57 +115,173 @@ func (db *CLevelDB) Close() {
 	db.woSync.Close()
 }
 
+// Implements DB.
 func (db *CLevelDB) Print() {
-	iter := db.db.NewIterator(db.ro)
-	defer iter.Close()
-	for iter.Seek(nil); iter.Valid(); iter.Next() {
-		key := iter.Key()
-		value := iter.Value()
+	itr := db.Iterator(nil, nil)
+	defer itr.Close()
+	for ; itr.Valid(); itr.Next() {
+		key := itr.Key()
+		value := itr.Value()
 		fmt.Printf("[%X]:\t[%X]\n", key, value)
 	}
 }
 
+// Implements DB.
 func (db *CLevelDB) Stats() map[string]string {
 	// TODO: Find the available properties for the C LevelDB implementation
 	keys := []string{}
 
 	stats := make(map[string]string)
 	for _, key := range keys {
-		str, err := db.db.GetProperty(key)
-		if err == nil {
-			stats[key] = str
-		}
+		str := db.db.PropertyValue(key)
+		stats[key] = str
 	}
 	return stats
 }
 
-func (db *CLevelDB) Iterator() Iterator {
-	return db.db.NewIterator(nil, nil)
-}
+//----------------------------------------
+// Batch
 
+// Implements DB.
 func (db *CLevelDB) NewBatch() Batch {
 	batch := levigo.NewWriteBatch()
 	return &cLevelDBBatch{db, batch}
 }
 
-//--------------------------------------------------------------------------------
-
 type cLevelDBBatch struct {
 	db    *CLevelDB
 	batch *levigo.WriteBatch
 }
 
+// Implements Batch.
 func (mBatch *cLevelDBBatch) Set(key, value []byte) {
 	mBatch.batch.Put(key, value)
 }
 
+// Implements Batch.
 func (mBatch *cLevelDBBatch) Delete(key []byte) {
 	mBatch.batch.Delete(key)
 }
 
+// Implements Batch.
 func (mBatch *cLevelDBBatch) Write() {
 	err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
+	}
+}
+
+// Implements Batch.
+func (mBatch *cLevelDBBatch) WriteSync() {
+	err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch)
+	if err != nil {
+		panic(err)
+	}
+}
+
+//----------------------------------------
+// Iterator
+// NOTE: This is almost identical to db/go_level_db.Iterator.
+// Before creating a third version, refactor.
+
+func (db *CLevelDB) Iterator(start, end []byte) Iterator {
+	itr := db.db.NewIterator(db.ro)
+	return newCLevelDBIterator(itr, start, end, false)
+}
+
+func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
+	panic("not implemented yet") // XXX
+}
+
+var _ Iterator = (*cLevelDBIterator)(nil)
+
+type cLevelDBIterator struct {
+	source     *levigo.Iterator
+	start, end []byte
+	isReverse  bool
+	isInvalid  bool
+}
+
+func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
+	if isReverse {
+		panic("not implemented yet") // XXX
+	}
+	if start != nil {
+		source.Seek(start)
+	} else {
+		source.SeekToFirst()
+	}
+	return &cLevelDBIterator{
+		source:    source,
+		start:     start,
+		end:       end,
+		isReverse: isReverse,
+		isInvalid: false,
+	}
+}
+
+func (itr cLevelDBIterator) Domain() ([]byte, []byte) {
+	return itr.start, itr.end
+}
+
+func (itr cLevelDBIterator) Valid() bool {
+
+	// Once invalid, forever invalid.
+	if itr.isInvalid {
+		return false
+	}
+
+	// Panic on DB error.  No way to recover.
+	itr.assertNoError()
+
+	// If source is invalid, invalid.
+	if !itr.source.Valid() {
+		itr.isInvalid = true
+		return false
+	}
+
+	// If key is end or past it, invalid.
+	var end = itr.end
+	var key = itr.source.Key()
+	if end != nil && bytes.Compare(end, key) <= 0 {
+		itr.isInvalid = true
+		return false
+	}
+
+	// It's valid.
+	return true
+}
+
+func (itr cLevelDBIterator) Key() []byte {
+	itr.assertNoError()
+	itr.assertIsValid()
+	return itr.source.Key()
+}
+
+func (itr cLevelDBIterator) Value() []byte {
+	itr.assertNoError()
+	itr.assertIsValid()
+	return itr.source.Value()
+}
+
+func (itr cLevelDBIterator) Next() {
+	itr.assertNoError()
+	itr.assertIsValid()
+	itr.source.Next()
+}
+
+func (itr cLevelDBIterator) Close() {
+	itr.source.Close()
+}
+
+func (itr cLevelDBIterator) assertNoError() {
+	if err := itr.source.GetError(); err != nil {
+		panic(err)
+	}
+}
+
+func (itr cLevelDBIterator) assertIsValid() {
+	if !itr.Valid() {
+		panic("cLevelDBIterator is invalid")
 	}
 }
diff --git a/vendor/github.com/tendermint/tmlibs/db/db.go b/vendor/github.com/tendermint/tmlibs/db/db.go
index 8156c1e9..86993766 100644
--- a/vendor/github.com/tendermint/tmlibs/db/db.go
+++ b/vendor/github.com/tendermint/tmlibs/db/db.go
@@ -1,53 +1,25 @@
 package db
 
-import . "github.com/tendermint/tmlibs/common"
+import "fmt"
 
-type DB interface {
-	Get([]byte) []byte
-	Set([]byte, []byte)
-	SetSync([]byte, []byte)
-	Delete([]byte)
-	DeleteSync([]byte)
-	Close()
-	NewBatch() Batch
-	Iterator() Iterator
-	IteratorPrefix([]byte) Iterator
+//----------------------------------------
+// Main entry
 
-	// For debugging
-	Print()
-	Stats() map[string]string
-}
-
-type Batch interface {
-	Set(key, value []byte)
-	Delete(key []byte)
-	Write()
-}
-
-type Iterator interface {
-	Next() bool
-
-	Key() []byte
-	Value() []byte
-
-	Release()
-	Error() error
-}
-
-//-----------------------------------------------------------------------------
+type DBBackendType string
 
 const (
-	LevelDBBackendStr   = "leveldb" // legacy, defaults to goleveldb.
-	CLevelDBBackendStr  = "cleveldb"
-	GoLevelDBBackendStr = "goleveldb"
-	MemDBBackendStr     = "memdb"
+	LevelDBBackend   DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc
+	CLevelDBBackend  DBBackendType = "cleveldb"
+	GoLevelDBBackend DBBackendType = "goleveldb"
+	MemDBBackend     DBBackendType = "memdb"
+	FSDBBackend      DBBackendType = "fsdb" // using the filesystem naively
 )
 
 type dbCreator func(name string, dir string) (DB, error)
 
-var backends = map[string]dbCreator{}
+var backends = map[DBBackendType]dbCreator{}
 
-func registerDBCreator(backend string, creator dbCreator, force bool) {
+func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) {
 	_, ok := backends[backend]
 	if !force && ok {
 		return
@@ -55,10 +27,10 @@ func registerDBCreator(backend string, creator dbCreator, force bool) {
 	backends[backend] = creator
 }
 
-func NewDB(name string, backend string, dir string) DB {
+func NewDB(name string, backend DBBackendType, dir string) DB {
 	db, err := backends[backend](name, dir)
 	if err != nil {
-		PanicSanity(Fmt("Error initializing DB: %v", err))
+		panic(fmt.Sprintf("Error initializing DB: %v", err))
 	}
 	return db
 }
diff --git a/vendor/github.com/tendermint/tmlibs/db/debug_db.go b/vendor/github.com/tendermint/tmlibs/db/debug_db.go
new file mode 100644
index 00000000..7a15bc29
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/debug_db.go
@@ -0,0 +1,216 @@
+package db
+
+import (
+	"fmt"
+	"sync"
+)
+
+//----------------------------------------
+// debugDB
+
+type debugDB struct {
+	label string
+	db    DB
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugDB(label string, db DB) debugDB {
+	return debugDB{
+		label: label,
+		db:    db,
+	}
+}
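+
+// For example (illustrative), wrapping an in-memory DB logs every call:
+//
+//	db := NewDebugDB("state", NewMemDB())
+//	db.Set([]byte("k"), []byte("v")) // prints: state.Set(6B, 76)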
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) Mutex() *sync.Mutex { return nil }
+
+// Implements DB.
+func (ddb debugDB) Get(key []byte) (value []byte) {
+	defer fmt.Printf("%v.Get(%X) %X\n", ddb.label, key, value)
+	value = ddb.db.Get(key)
+	return
+}
+
+// Implements DB.
+func (ddb debugDB) Has(key []byte) (has bool) {
+	defer fmt.Printf("%v.Has(%X) %v\n", ddb.label, key, has)
+	return ddb.db.Has(key)
+}
+
+// Implements DB.
+func (ddb debugDB) Set(key []byte, value []byte) {
+	fmt.Printf("%v.Set(%X, %X)\n", ddb.label, key, value)
+	ddb.db.Set(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) SetSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetSync(%X, %X)\n", ddb.label, key, value)
+	ddb.db.SetSync(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLock(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLock(%X, %X)\n", ddb.label, key, value)
+	ddb.db.Set(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
+	fmt.Printf("%v.SetNoLockSync(%X, %X)\n", ddb.label, key, value)
+	ddb.db.SetSync(key, value)
+}
+
+// Implements DB.
+func (ddb debugDB) Delete(key []byte) {
+	fmt.Printf("%v.Delete(%X)\n", ddb.label, key)
+	ddb.db.Delete(key)
+}
+
+// Implements DB.
+func (ddb debugDB) DeleteSync(key []byte) {
+	fmt.Printf("%v.DeleteSync(%X)\n", ddb.label, key)
+	ddb.db.DeleteSync(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLock(key []byte) {
+	fmt.Printf("%v.DeleteNoLock(%X)\n", ddb.label, key)
+	ddb.db.Delete(key)
+}
+
+// Implements atomicSetDeleter.
+func (ddb debugDB) DeleteNoLockSync(key []byte) {
+	fmt.Printf("%v.DeleteNoLockSync(%X)\n", ddb.label, key)
+	ddb.db.DeleteSync(key)
+}
+
+// Implements DB.
+func (ddb debugDB) Iterator(start, end []byte) Iterator {
+	fmt.Printf("%v.Iterator(%X, %X)\n", ddb.label, start, end)
+	return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
+}
+
+// Implements DB.
+func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
+	fmt.Printf("%v.ReverseIterator(%X, %X)\n", ddb.label, start, end)
+	return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
+}
+
+// Implements DB.
+func (ddb debugDB) NewBatch() Batch {
+	fmt.Printf("%v.NewBatch()\n", ddb.label)
+	return NewDebugBatch(ddb.label, ddb.db.NewBatch())
+}
+
+// Implements DB.
+func (ddb debugDB) Close() {
+	fmt.Printf("%v.Close()\n", ddb.label)
+	ddb.db.Close()
+}
+
+// Implements DB.
+func (ddb debugDB) Print() {
+	ddb.db.Print()
+}
+
+// Implements DB.
+func (ddb debugDB) Stats() map[string]string {
+	return ddb.db.Stats()
+}
+
+//----------------------------------------
+// debugIterator
+
+type debugIterator struct {
+	label string
+	itr   Iterator
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugIterator(label string, itr Iterator) debugIterator {
+	return debugIterator{
+		label: label,
+		itr:   itr,
+	}
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Domain() (start []byte, end []byte) {
+	defer fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end)
+	start, end = ditr.itr.Domain()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Valid() (ok bool) {
+	defer fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok)
+	ok = ditr.itr.Valid()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Next() {
+	fmt.Printf("%v.itr.Next()\n", ditr.label)
+	ditr.itr.Next()
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Key() (key []byte) {
+	fmt.Printf("%v.itr.Key() %X\n", ditr.label, key)
+	key = ditr.itr.Key()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Value() (value []byte) {
+	fmt.Printf("%v.itr.Value() %X\n", ditr.label, value)
+	value = ditr.itr.Value()
+	return
+}
+
+// Implements Iterator.
+func (ditr debugIterator) Close() {
+	fmt.Printf("%v.itr.Close()\n", ditr.label)
+	ditr.itr.Close()
+}
+
+//----------------------------------------
+// debugBatch
+
+type debugBatch struct {
+	label string
+	bch   Batch
+}
+
+// For printing all operations to the console for debugging.
+func NewDebugBatch(label string, bch Batch) debugBatch {
+	return debugBatch{
+		label: label,
+		bch:   bch,
+	}
+}
+
+// Implements Batch.
+func (dbch debugBatch) Set(key, value []byte) {
+	fmt.Printf("%v.batch.Set(%X, %X)\n", dbch.label, key, value)
+	dbch.bch.Set(key, value)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Delete(key []byte) {
+	fmt.Printf("%v.batch.Delete(%X)\n", dbch.label, key)
+	dbch.bch.Delete(key)
+}
+
+// Implements Batch.
+func (dbch debugBatch) Write() {
+	fmt.Printf("%v.batch.Write()\n", dbch.label)
+	dbch.bch.Write()
+}
+
+// Implements Batch.
+func (dbch debugBatch) WriteSync() {
+	fmt.Printf("%v.batch.WriteSync()\n", dbch.label)
+	dbch.bch.WriteSync()
+}
diff --git a/vendor/github.com/tendermint/tmlibs/db/fsdb.go b/vendor/github.com/tendermint/tmlibs/db/fsdb.go
new file mode 100644
index 00000000..578c1785
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/fsdb.go
@@ -0,0 +1,254 @@
+package db
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/pkg/errors"
+	cmn "github.com/tendermint/tmlibs/common"
+)
+
+const (
+	keyPerm = os.FileMode(0600)
+	dirPerm = os.FileMode(0700)
+)
+
+func init() {
+	registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) {
+		dbPath := filepath.Join(dir, name+".db")
+		return NewFSDB(dbPath), nil
+	}, false)
+}
+
+var _ DB = (*FSDB)(nil)
+
+// It's slow.
+type FSDB struct {
+	mtx sync.Mutex
+	dir string
+}
+
+func NewFSDB(dir string) *FSDB {
+	err := os.MkdirAll(dir, dirPerm)
+	if err != nil {
+		panic(errors.Wrap(err, "Creating FSDB dir "+dir))
+	}
+	database := &FSDB{
+		dir: dir,
+	}
+	return database
+}
+
+func (db *FSDB) Get(key []byte) []byte {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+	key = escapeKey(key)
+
+	path := db.nameToPath(key)
+	value, err := read(path)
+	if os.IsNotExist(err) {
+		return nil
+	} else if err != nil {
+		panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key))
+	}
+	return value
+}
+
+func (db *FSDB) Has(key []byte) bool {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+	key = escapeKey(key)
+
+	path := db.nameToPath(key)
+	return cmn.FileExists(path)
+}
+
+func (db *FSDB) Set(key []byte, value []byte) {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	db.SetNoLock(key, value)
+}
+
+func (db *FSDB) SetSync(key []byte, value []byte) {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	db.SetNoLock(key, value)
+}
+
+// NOTE: Implements atomicSetDeleter.
+func (db *FSDB) SetNoLock(key []byte, value []byte) {
+	key = escapeKey(key)
+	value = nonNilBytes(value)
+	path := db.nameToPath(key)
+	err := write(path, value)
+	if err != nil {
+		panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key))
+	}
+}
+
+func (db *FSDB) Delete(key []byte) {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	db.DeleteNoLock(key)
+}
+
+func (db *FSDB) DeleteSync(key []byte) {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	db.DeleteNoLock(key)
+}
+
+// NOTE: Implements atomicSetDeleter.
+func (db *FSDB) DeleteNoLock(key []byte) {
+	key = escapeKey(key)
+	path := db.nameToPath(key)
+	err := remove(path)
+	if os.IsNotExist(err) {
+		return
+	} else if err != nil {
+		panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key))
+	}
+}
+
+func (db *FSDB) Close() {
+	// Nothing to do.
+}
+
+func (db *FSDB) Print() {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	panic("FSDB.Print not yet implemented")
+}
+
+func (db *FSDB) Stats() map[string]string {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	panic("FSDB.Stats not yet implemented")
+}
+
+func (db *FSDB) NewBatch() Batch {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	// Not sure we would ever want to try...
+	// It doesn't seem easy for general filesystems.
+	panic("FSDB.NewBatch not yet implemented")
+}
+
+func (db *FSDB) Mutex() *sync.Mutex {
+	return &(db.mtx)
+}
+
+func (db *FSDB) Iterator(start, end []byte) Iterator {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	// We need a copy of all of the keys.
+	// Not the best, but probably not a bottleneck depending on the use case.
+	keys, err := list(db.dir, start, end)
+	if err != nil {
+		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
+	}
+	sort.Strings(keys)
+	return newMemDBIterator(db, keys, start, end)
+}
+
+func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
+	panic("not implemented yet") // XXX
+}
+
+func (db *FSDB) nameToPath(name []byte) string {
+	n := url.PathEscape(string(name))
+	return filepath.Join(db.dir, n)
+}
+
+// Read some bytes from a file.
+// CONTRACT: returns os errors directly without wrapping.
+func read(path string) ([]byte, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	d, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+	return d, nil
+}
+
+// Write some bytes to a file.
+// CONTRACT: returns os errors directly without wrapping.
+func write(path string, d []byte) error {
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.Write(d)
+	if err != nil {
+		return err
+	}
+	err = f.Sync()
+	return err
+}
+
+// Remove a file.
+// CONTRACT: returns os errors directly without wrapping.
+func remove(path string) error {
+	return os.Remove(path)
+}
+
+// List keys in a directory, stripping off escape sequences and dir portions.
+// CONTRACT: returns os errors directly without wrapping.
+func list(dirPath string, start, end []byte) ([]string, error) {
+	dir, err := os.Open(dirPath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+
+	names, err := dir.Readdirnames(0)
+	if err != nil {
+		return nil, err
+	}
+	var keys []string
+	for _, name := range names {
+		n, err := url.PathUnescape(name)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
+		}
+		key := unescapeKey([]byte(n))
+		if IsKeyInDomain(key, start, end, false) {
+			keys = append(keys, string(key))
+		}
+	}
+	return keys, nil
+}
+
+// To support empty or nil keys, while the file system doesn't allow empty
+// filenames.
+func escapeKey(key []byte) []byte {
+	return []byte("k_" + string(key))
+}
+func unescapeKey(escKey []byte) []byte {
+	if len(escKey) < 2 {
+		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+	}
+	if string(escKey[:2]) != "k_" {
+		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
+	}
+	return escKey[2:]
+}
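+
+// Illustrative round trip (editorial note, not part of the upstream file):
+//
+//	esc := escapeKey(nil)    // []byte("k_"): the empty key gets a valid file name
+//	orig := unescapeKey(esc) // []byte{} again
+//
+// nameToPath additionally runs the escaped key through url.PathEscape, so
+// arbitrary key bytes remain safe as file names.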
diff --git a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go b/vendor/github.com/tendermint/tmlibs/db/go_level_db.go
index 4abd7611..9ff162e3 100644
--- a/vendor/github.com/tendermint/tmlibs/db/go_level_db.go
+++ b/vendor/github.com/tendermint/tmlibs/db/go_level_db.go
@@ -1,77 +1,97 @@
 package db
 
 import (
+	"bytes"
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
-	"github.com/syndtr/goleveldb/leveldb/util"
 
-	. "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tmlibs/common"
 )
 
 func init() {
 	dbCreator := func(name string, dir string) (DB, error) {
 		return NewGoLevelDB(name, dir)
 	}
-	registerDBCreator(LevelDBBackendStr, dbCreator, false)
-	registerDBCreator(GoLevelDBBackendStr, dbCreator, false)
+	registerDBCreator(LevelDBBackend, dbCreator, false)
+	registerDBCreator(GoLevelDBBackend, dbCreator, false)
 }
 
+var _ DB = (*GoLevelDB)(nil)
+
 type GoLevelDB struct {
 	db *leveldb.DB
 }
 
 func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
-	dbPath := path.Join(dir, name+".db")
+	dbPath := filepath.Join(dir, name+".db")
 	db, err := leveldb.OpenFile(dbPath, nil)
 	if err != nil {
 		return nil, err
 	}
-	database := &GoLevelDB{db: db}
+	database := &GoLevelDB{
+		db: db,
+	}
 	return database, nil
 }
 
+// Implements DB.
 func (db *GoLevelDB) Get(key []byte) []byte {
+	key = nonNilBytes(key)
 	res, err := db.db.Get(key, nil)
 	if err != nil {
 		if err == errors.ErrNotFound {
 			return nil
-		} else {
-			PanicCrisis(err)
 		}
+		panic(err)
 	}
 	return res
 }
 
+// Implements DB.
+func (db *GoLevelDB) Has(key []byte) bool {
+	return db.Get(key) != nil
+}
+
+// Implements DB.
 func (db *GoLevelDB) Set(key []byte, value []byte) {
+	key = nonNilBytes(key)
+	value = nonNilBytes(value)
 	err := db.db.Put(key, value, nil)
 	if err != nil {
-		PanicCrisis(err)
+		cmn.PanicCrisis(err)
 	}
 }
 
+// Implements DB.
 func (db *GoLevelDB) SetSync(key []byte, value []byte) {
+	key = nonNilBytes(key)
+	value = nonNilBytes(value)
 	err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
 	if err != nil {
-		PanicCrisis(err)
+		cmn.PanicCrisis(err)
 	}
 }
 
+// Implements DB.
 func (db *GoLevelDB) Delete(key []byte) {
+	key = nonNilBytes(key)
 	err := db.db.Delete(key, nil)
 	if err != nil {
-		PanicCrisis(err)
+		cmn.PanicCrisis(err)
 	}
 }
 
+// Implements DB.
 func (db *GoLevelDB) DeleteSync(key []byte) {
+	key = nonNilBytes(key)
 	err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
 	if err != nil {
-		PanicCrisis(err)
+		cmn.PanicCrisis(err)
 	}
 }
 
@@ -79,22 +99,25 @@ func (db *GoLevelDB) DB() *leveldb.DB {
 	return db.db
 }
 
+// Implements DB.
 func (db *GoLevelDB) Close() {
 	db.db.Close()
 }
 
+// Implements DB.
 func (db *GoLevelDB) Print() {
 	str, _ := db.db.GetProperty("leveldb.stats")
 	fmt.Printf("%v\n", str)
 
-	iter := db.db.NewIterator(nil, nil)
-	for iter.Next() {
-		key := iter.Key()
-		value := iter.Value()
+	itr := db.db.NewIterator(nil, nil)
+	for itr.Next() {
+		key := itr.Key()
+		value := itr.Value()
 		fmt.Printf("[%X]:\t[%X]\n", key, value)
 	}
 }
 
+// Implements DB.
 func (db *GoLevelDB) Stats() map[string]string {
 	keys := []string{
 		"leveldb.num-files-at-level{n}",
@@ -117,71 +140,158 @@ func (db *GoLevelDB) Stats() map[string]string {
 	return stats
 }
 
-type goLevelDBIterator struct {
-	source iterator.Iterator
+//----------------------------------------
+// Batch
+
+// Implements DB.
+func (db *GoLevelDB) NewBatch() Batch {
+	batch := new(leveldb.Batch)
+	return &goLevelDBBatch{db, batch}
 }
 
-// Key returns a copy of the current key.
-func (it *goLevelDBIterator) Key() []byte {
-	key := it.source.Key()
-	k := make([]byte, len(key))
-	copy(k, key)
+type goLevelDBBatch struct {
+	db    *GoLevelDB
+	batch *leveldb.Batch
+}
 
-	return k
+// Implements Batch.
+func (mBatch *goLevelDBBatch) Set(key, value []byte) {
+	mBatch.batch.Put(key, value)
+}
+
+// Implements Batch.
+func (mBatch *goLevelDBBatch) Delete(key []byte) {
+	mBatch.batch.Delete(key)
+}
+
+// Implements Batch.
+func (mBatch *goLevelDBBatch) Write() {
+	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false})
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Implements Batch.
+func (mBatch *goLevelDBBatch) WriteSync() {
+	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true})
+	if err != nil {
+		panic(err)
+	}
 }
 
-// Value returns a copy of the current value.
-func (it *goLevelDBIterator) Value() []byte {
-	val := it.source.Value()
-	v := make([]byte, len(val))
-	copy(v, val)
+//----------------------------------------
+// Iterator
+// NOTE This is almost identical to db/c_level_db.Iterator
+// Before creating a third version, refactor.
 
-	return v
+// Implements DB.
+func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
+	itr := db.db.NewIterator(nil, nil)
+	return newGoLevelDBIterator(itr, start, end, false)
 }
 
-func (it *goLevelDBIterator) Error() error {
-	return it.source.Error()
+// Implements DB.
+func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
+	panic("not implemented yet") // XXX
 }
 
-func (it *goLevelDBIterator) Next() bool {
-	return it.source.Next()
+type goLevelDBIterator struct {
+	source    iterator.Iterator
+	start     []byte
+	end       []byte
+	isReverse bool
+	isInvalid bool
 }
 
-func (it *goLevelDBIterator) Release() {
-	it.source.Release()
+var _ Iterator = (*goLevelDBIterator)(nil)
+
+func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
+	if isReverse {
+		panic("not implemented yet") // XXX
+	}
+	source.Seek(start)
+	return &goLevelDBIterator{
+		source:    source,
+		start:     start,
+		end:       end,
+		isReverse: isReverse,
+		isInvalid: false,
+	}
 }
 
-func (db *GoLevelDB) Iterator() Iterator {
-	return &goLevelDBIterator{db.db.NewIterator(nil, nil)}
+// Implements Iterator.
+func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
+	return itr.start, itr.end
 }
 
-func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator {
-	return &goLevelDBIterator{db.db.NewIterator(util.BytesPrefix(prefix), nil)}
+// Implements Iterator.
+func (itr *goLevelDBIterator) Valid() bool {
+
+	// Once invalid, forever invalid.
+	if itr.isInvalid {
+		return false
+	}
+
+	// Panic on DB error.  No way to recover.
+	itr.assertNoError()
+
+	// If source is invalid, invalid.
+	if !itr.source.Valid() {
+		itr.isInvalid = true
+		return false
+	}
+
+	// If key is end or past it, invalid.
+	var end = itr.end
+	var key = itr.source.Key()
+	if end != nil && bytes.Compare(end, key) <= 0 {
+		itr.isInvalid = true
+		return false
+	}
+
+	// Valid
+	return true
 }
 
-func (db *GoLevelDB) NewBatch() Batch {
-	batch := new(leveldb.Batch)
-	return &goLevelDBBatch{db, batch}
+// Implements Iterator.
+func (itr *goLevelDBIterator) Key() []byte {
+	// Key returns a copy of the current key.
+	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
+	itr.assertNoError()
+	itr.assertIsValid()
+	return cp(itr.source.Key())
 }
 
-//--------------------------------------------------------------------------------
+// Implements Iterator.
+func (itr *goLevelDBIterator) Value() []byte {
+	// Value returns a copy of the current value.
+	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
+	itr.assertNoError()
+	itr.assertIsValid()
+	return cp(itr.source.Value())
+}
 
-type goLevelDBBatch struct {
-	db    *GoLevelDB
-	batch *leveldb.Batch
+// Implements Iterator.
+func (itr *goLevelDBIterator) Next() {
+	itr.assertNoError()
+	itr.assertIsValid()
+	itr.source.Next()
 }
 
-func (mBatch *goLevelDBBatch) Set(key, value []byte) {
-	mBatch.batch.Put(key, value)
+// Implements Iterator.
+func (itr *goLevelDBIterator) Close() {
+	itr.source.Release()
 }
 
-func (mBatch *goLevelDBBatch) Delete(key []byte) {
-	mBatch.batch.Delete(key)
+func (itr *goLevelDBIterator) assertNoError() {
+	if err := itr.source.Error(); err != nil {
+		panic(err)
+	}
 }
 
-func (mBatch *goLevelDBBatch) Write() {
-	err := mBatch.db.db.Write(mBatch.batch, nil)
-	if err != nil {
-		PanicCrisis(err)
+func (itr goLevelDBIterator) assertIsValid() {
+	if !itr.Valid() {
+		panic("goLevelDBIterator is invalid")
 	}
 }
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_batch.go b/vendor/github.com/tendermint/tmlibs/db/mem_batch.go
new file mode 100644
index 00000000..81a63d62
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/mem_batch.go
@@ -0,0 +1,71 @@
+package db
+
+import "sync"
+
+type atomicSetDeleter interface {
+	Mutex() *sync.Mutex
+	SetNoLock(key, value []byte)
+	SetNoLockSync(key, value []byte)
+	DeleteNoLock(key []byte)
+	DeleteNoLockSync(key []byte)
+}
+
+type memBatch struct {
+	db  atomicSetDeleter
+	ops []operation
+}
+
+type opType int
+
+const (
+	opTypeSet    opType = 1
+	opTypeDelete opType = 2
+)
+
+type operation struct {
+	opType
+	key   []byte
+	value []byte
+}
+
+func (mBatch *memBatch) Set(key, value []byte) {
+	mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
+}
+
+func (mBatch *memBatch) Delete(key []byte) {
+	mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
+}
+
+func (mBatch *memBatch) Write() {
+	mBatch.write(false)
+}
+
+func (mBatch *memBatch) WriteSync() {
+	mBatch.write(true)
+}
+
+func (mBatch *memBatch) write(doSync bool) {
+	if mtx := mBatch.db.Mutex(); mtx != nil {
+		mtx.Lock()
+		defer mtx.Unlock()
+	}
+
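+	// When doSync is set, only the final operation uses the Sync variant;
+	// all earlier operations go through the regular NoLock methods.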
+	for i, op := range mBatch.ops {
+		if doSync && i == (len(mBatch.ops)-1) {
+			switch op.opType {
+			case opTypeSet:
+				mBatch.db.SetNoLockSync(op.key, op.value)
+			case opTypeDelete:
+				mBatch.db.DeleteNoLockSync(op.key)
+			}
+			break // we're done.
+		}
+		switch op.opType {
+		case opTypeSet:
+			mBatch.db.SetNoLock(op.key, op.value)
+		case opTypeDelete:
+			mBatch.db.DeleteNoLock(op.key)
+		}
+
+	}
+}
diff --git a/vendor/github.com/tendermint/tmlibs/db/mem_db.go b/vendor/github.com/tendermint/tmlibs/db/mem_db.go
index 2f507321..2d802947 100644
--- a/vendor/github.com/tendermint/tmlibs/db/mem_db.go
+++ b/vendor/github.com/tendermint/tmlibs/db/mem_db.go
@@ -3,56 +3,111 @@ package db
 import (
 	"fmt"
 	"sort"
-	"strings"
 	"sync"
 )
 
 func init() {
-	registerDBCreator(MemDBBackendStr, func(name string, dir string) (DB, error) {
+	registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) {
 		return NewMemDB(), nil
 	}, false)
 }
 
+var _ DB = (*MemDB)(nil)
+
 type MemDB struct {
 	mtx sync.Mutex
 	db  map[string][]byte
 }
 
 func NewMemDB() *MemDB {
-	database := &MemDB{db: make(map[string][]byte)}
+	database := &MemDB{
+		db: make(map[string][]byte),
+	}
 	return database
 }
 
+// Implements atomicSetDeleter.
+func (db *MemDB) Mutex() *sync.Mutex {
+	return &(db.mtx)
+}
+
+// Implements DB.
 func (db *MemDB) Get(key []byte) []byte {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
+	key = nonNilBytes(key)
+
 	return db.db[string(key)]
 }
 
+// Implements DB.
+func (db *MemDB) Has(key []byte) bool {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+	key = nonNilBytes(key)
+
+	_, ok := db.db[string(key)]
+	return ok
+}
+
+// Implements DB.
 func (db *MemDB) Set(key []byte, value []byte) {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
-	db.db[string(key)] = value
+
+	db.SetNoLock(key, value)
 }
 
+// Implements DB.
 func (db *MemDB) SetSync(key []byte, value []byte) {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
+
+	db.SetNoLock(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (db *MemDB) SetNoLock(key []byte, value []byte) {
+	db.SetNoLockSync(key, value)
+}
+
+// Implements atomicSetDeleter.
+func (db *MemDB) SetNoLockSync(key []byte, value []byte) {
+	key = nonNilBytes(key)
+	value = nonNilBytes(value)
+
 	db.db[string(key)] = value
 }
 
+// Implements DB.
 func (db *MemDB) Delete(key []byte) {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
-	delete(db.db, string(key))
+
+	db.DeleteNoLock(key)
 }
 
+// Implements DB.
 func (db *MemDB) DeleteSync(key []byte) {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
+
+	db.DeleteNoLock(key)
+}
+
+// Implements atomicSetDeleter.
+func (db *MemDB) DeleteNoLock(key []byte) {
+	db.DeleteNoLockSync(key)
+}
+
+// Implements atomicSetDeleter.
+func (db *MemDB) DeleteNoLockSync(key []byte) {
+	key = nonNilBytes(key)
+
 	delete(db.db, string(key))
 }
 
+// Implements DB.
 func (db *MemDB) Close() {
 	// Close is a noop since for an in-memory
 	// database, we don't have a destination
@@ -61,120 +116,136 @@ func (db *MemDB) Close() {
 	// See the discussion in https://github.com/tendermint/tmlibs/pull/56
 }
 
+// Implements DB.
 func (db *MemDB) Print() {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
+
 	for key, value := range db.db {
 		fmt.Printf("[%X]:\t[%X]\n", []byte(key), value)
 	}
 }
 
+// Implements DB.
 func (db *MemDB) Stats() map[string]string {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
 	stats := make(map[string]string)
 	stats["database.type"] = "memDB"
+	stats["database.size"] = fmt.Sprintf("%d", len(db.db))
 	return stats
 }
 
-type memDBIterator struct {
-	last int
-	keys []string
-	db   *MemDB
-}
-
-func newMemDBIterator() *memDBIterator {
-	return &memDBIterator{}
-}
-
-func (it *memDBIterator) Next() bool {
-	if it.last >= len(it.keys)-1 {
-		return false
-	}
-	it.last++
-	return true
-}
-
-func (it *memDBIterator) Key() []byte {
-	return []byte(it.keys[it.last])
-}
+// Implements DB.
+func (db *MemDB) NewBatch() Batch {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
 
-func (it *memDBIterator) Value() []byte {
-	return it.db.Get(it.Key())
+	return &memBatch{db, nil}
 }
 
-func (it *memDBIterator) Release() {
-	it.db = nil
-	it.keys = nil
-}
+//----------------------------------------
+// Iterator
 
-func (it *memDBIterator) Error() error {
-	return nil
-}
+// Implements DB.
+func (db *MemDB) Iterator(start, end []byte) Iterator {
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
 
-func (db *MemDB) Iterator() Iterator {
-	return db.IteratorPrefix([]byte{})
+	keys := db.getSortedKeys(start, end, false)
+	return newMemDBIterator(db, keys, start, end)
 }
 
-func (db *MemDB) IteratorPrefix(prefix []byte) Iterator {
-	it := newMemDBIterator()
-	it.db = db
-	it.last = -1
-
+// Implements DB.
+func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
 
-	// unfortunately we need a copy of all of the keys
-	for key, _ := range db.db {
-		if strings.HasPrefix(key, string(prefix)) {
-			it.keys = append(it.keys, key)
-		}
-	}
-	// and we need to sort them
-	sort.Strings(it.keys)
-	return it
+	keys := db.getSortedKeys(end, start, true)
+	return newMemDBIterator(db, keys, start, end)
 }
 
-func (db *MemDB) NewBatch() Batch {
-	return &memDBBatch{db, nil}
+// We need a copy of all of the keys.
+// Not the best, but probably not a bottleneck depending on the use case.
+type memDBIterator struct {
+	db    DB
+	cur   int
+	keys  []string
+	start []byte
+	end   []byte
+}
+
+var _ Iterator = (*memDBIterator)(nil)
+
+// The keys slice is expected to be in reverse order for reverse iterators.
+func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
+	return &memDBIterator{
+		db:    db,
+		cur:   0,
+		keys:  keys,
+		start: start,
+		end:   end,
+	}
 }
 
-//--------------------------------------------------------------------------------
+// Implements Iterator.
+func (itr *memDBIterator) Domain() ([]byte, []byte) {
+	return itr.start, itr.end
+}
 
-type memDBBatch struct {
-	db  *MemDB
-	ops []operation
+// Implements Iterator.
+func (itr *memDBIterator) Valid() bool {
+	return 0 <= itr.cur && itr.cur < len(itr.keys)
 }
 
-type opType int
+// Implements Iterator.
+func (itr *memDBIterator) Next() {
+	itr.assertIsValid()
+	itr.cur++
+}
 
-const (
-	opTypeSet    = 1
-	opTypeDelete = 2
-)
+// Implements Iterator.
+func (itr *memDBIterator) Key() []byte {
+	itr.assertIsValid()
+	return []byte(itr.keys[itr.cur])
+}
 
-type operation struct {
-	opType
-	key   []byte
-	value []byte
+// Implements Iterator.
+func (itr *memDBIterator) Value() []byte {
+	itr.assertIsValid()
+	key := []byte(itr.keys[itr.cur])
+	return itr.db.Get(key)
 }
 
-func (mBatch *memDBBatch) Set(key, value []byte) {
-	mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
+// Implements Iterator.
+func (itr *memDBIterator) Close() {
+	itr.keys = nil
+	itr.db = nil
 }
 
-func (mBatch *memDBBatch) Delete(key []byte) {
-	mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
+func (itr *memDBIterator) assertIsValid() {
+	if !itr.Valid() {
+		panic("memDBIterator is invalid")
+	}
 }
 
-func (mBatch *memDBBatch) Write() {
-	mBatch.db.mtx.Lock()
-	defer mBatch.db.mtx.Unlock()
+//----------------------------------------
+// Misc.
 
-	for _, op := range mBatch.ops {
-		if op.opType == opTypeSet {
-			mBatch.db.db[string(op.key)] = op.value
-		} else if op.opType == opTypeDelete {
-			delete(mBatch.db.db, string(op.key))
+func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string {
+	keys := []string{}
+	for key := range db.db {
+		if IsKeyInDomain([]byte(key), start, end, false) {
+			keys = append(keys, key)
 		}
 	}
-
+	sort.Strings(keys)
+	if reverse {
+		nkeys := len(keys)
+		for i := 0; i < nkeys/2; i++ {
+			keys[i], keys[nkeys-i-1] = keys[nkeys-i-1], keys[i]
+		}
+	}
+	return keys
 }
diff --git a/vendor/github.com/tendermint/tmlibs/db/prefix_db.go b/vendor/github.com/tendermint/tmlibs/db/prefix_db.go
new file mode 100644
index 00000000..4381ce07
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/prefix_db.go
@@ -0,0 +1,263 @@
+package db
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+)
+
+// IteratePrefix is a convenience function for iterating over a key domain
+// restricted by prefix.
+func IteratePrefix(db DB, prefix []byte) Iterator {
+	var start, end []byte
+	if len(prefix) == 0 {
+		start = nil
+		end = nil
+	} else {
+		start = cp(prefix)
+		end = cpIncr(prefix)
+	}
+	return db.Iterator(start, end)
+}
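+
+// Illustrative usage (editorial sketch, not part of the upstream file;
+// "account/" is an example prefix and db any DB from this package):
+//
+//	itr := IteratePrefix(db, []byte("account/"))
+//	defer itr.Close()
+//	for ; itr.Valid(); itr.Next() {
+//		k, v := itr.Key(), itr.Value()
+//		// ...
+//	}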
+
+/*
+TODO: Make test, maybe rename.
+// Like IteratePrefix but the iterator strips the prefix from the keys.
+func IteratePrefixStripped(db DB, prefix []byte) Iterator {
+	return newUnprefixIterator(prefix, IteratePrefix(db, prefix))
+}
+*/
+
+//----------------------------------------
+// prefixDB
+
+type prefixDB struct {
+	mtx    sync.Mutex
+	prefix []byte
+	db     DB
+}
+
+// NewPrefixDB lets you namespace multiple DBs within a single DB.
+func NewPrefixDB(db DB, prefix []byte) *prefixDB {
+	return &prefixDB{
+		prefix: prefix,
+		db:     db,
+	}
+}
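+
+// Illustrative usage (editorial sketch, not part of the upstream file):
+// two namespaces sharing one underlying DB.
+//
+//	base := NewMemDB()
+//	users := NewPrefixDB(base, []byte("u/"))
+//	posts := NewPrefixDB(base, []byte("p/"))
+//	users.Set([]byte("alice"), []byte("1")) // stored under "u/alice" in base
+//	posts.Set([]byte("alice"), []byte("2")) // stored under "p/alice" in base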
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) Mutex() *sync.Mutex {
+	return &(pdb.mtx)
+}
+
+// Implements DB.
+func (pdb *prefixDB) Get(key []byte) []byte {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	return pdb.db.Get(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) Has(key []byte) bool {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	return pdb.db.Has(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) Set(key []byte, value []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.Set(pdb.prefixed(key), value)
+}
+
+// Implements DB.
+func (pdb *prefixDB) SetSync(key []byte, value []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.SetSync(pdb.prefixed(key), value)
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) SetNoLock(key []byte, value []byte) {
+	pdb.db.Set(pdb.prefixed(key), value)
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) {
+	pdb.db.SetSync(pdb.prefixed(key), value)
+}
+
+// Implements DB.
+func (pdb *prefixDB) Delete(key []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.Delete(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) DeleteSync(key []byte) {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.DeleteSync(pdb.prefixed(key))
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) DeleteNoLock(key []byte) {
+	pdb.db.Delete(pdb.prefixed(key))
+}
+
+// Implements atomicSetDeleter.
+func (pdb *prefixDB) DeleteNoLockSync(key []byte) {
+	pdb.db.DeleteSync(pdb.prefixed(key))
+}
+
+// Implements DB.
+func (pdb *prefixDB) Iterator(start, end []byte) Iterator {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pstart := append(pdb.prefix, start...)
+	pend := []byte(nil)
+	if end != nil {
+		pend = append(pdb.prefix, end...)
+	}
+	return newUnprefixIterator(
+		pdb.prefix,
+		pdb.db.Iterator(
+			pstart,
+			pend,
+		),
+	)
+}
+
+// Implements DB.
+func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pstart := []byte(nil)
+	if start != nil {
+		pstart = append(pdb.prefix, start...)
+	}
+	pend := []byte(nil)
+	if end != nil {
+		pend = append(pdb.prefix, end...)
+	}
+	return newUnprefixIterator(
+		pdb.prefix,
+		pdb.db.ReverseIterator(
+			pstart,
+			pend,
+		),
+	)
+}
+
+// Implements DB.
+func (pdb *prefixDB) NewBatch() Batch {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	return &memBatch{pdb, nil}
+}
+
+// Implements DB.
+func (pdb *prefixDB) Close() {
+	pdb.mtx.Lock()
+	defer pdb.mtx.Unlock()
+
+	pdb.db.Close()
+}
+
+// Implements DB.
+func (pdb *prefixDB) Print() {
+	fmt.Printf("prefix: %X\n", pdb.prefix)
+
+	itr := pdb.Iterator(nil, nil)
+	defer itr.Close()
+	for ; itr.Valid(); itr.Next() {
+		key := itr.Key()
+		value := itr.Value()
+		fmt.Printf("[%X]:\t[%X]\n", key, value)
+	}
+}
+
+// Implements DB.
+func (pdb *prefixDB) Stats() map[string]string {
+	stats := make(map[string]string)
+	stats["prefixdb.prefix.string"] = string(pdb.prefix)
+	stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix)
+	source := pdb.db.Stats()
+	for key, value := range source {
+		stats["prefixdb.source."+key] = value
+	}
+	return stats
+}
+
+func (pdb *prefixDB) prefixed(key []byte) []byte {
+	return append(pdb.prefix, key...)
+}
+
+//----------------------------------------
+
+// Strips prefix while iterating from Iterator.
+type unprefixIterator struct {
+	prefix []byte
+	source Iterator
+}
+
+func newUnprefixIterator(prefix []byte, source Iterator) unprefixIterator {
+	return unprefixIterator{
+		prefix: prefix,
+		source: source,
+	}
+}
+
+func (itr unprefixIterator) Domain() (start []byte, end []byte) {
+	start, end = itr.source.Domain()
+	if len(start) > 0 {
+		start = stripPrefix(start, itr.prefix)
+	}
+	if len(end) > 0 {
+		end = stripPrefix(end, itr.prefix)
+	}
+	return
+}
+
+func (itr unprefixIterator) Valid() bool {
+	return itr.source.Valid()
+}
+
+func (itr unprefixIterator) Next() {
+	itr.source.Next()
+}
+
+func (itr unprefixIterator) Key() (key []byte) {
+	return stripPrefix(itr.source.Key(), itr.prefix)
+}
+
+func (itr unprefixIterator) Value() (value []byte) {
+	return itr.source.Value()
+}
+
+func (itr unprefixIterator) Close() {
+	itr.source.Close()
+}
+
+//----------------------------------------
+
+func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
+	if len(key) < len(prefix) {
+		panic("should not happen")
+	}
+	if !bytes.Equal(key[:len(prefix)], prefix) {
+		panic("should not happen")
+	}
+	return key[len(prefix):]
+}
diff --git a/vendor/github.com/tendermint/tmlibs/db/types.go b/vendor/github.com/tendermint/tmlibs/db/types.go
new file mode 100644
index 00000000..ad78859a
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/types.go
@@ -0,0 +1,134 @@
+package db
+
+// DBs are goroutine safe.
+type DB interface {
+
+	// Get returns nil iff key doesn't exist.
+	// A nil key is interpreted as an empty byteslice.
+	// CONTRACT: key, value readonly []byte
+	Get([]byte) []byte
+
+	// Has checks if a key exists.
+	// A nil key is interpreted as an empty byteslice.
+	// CONTRACT: key, value readonly []byte
+	Has(key []byte) bool
+
+	// Set sets the key.
+	// A nil key is interpreted as an empty byteslice.
+	// CONTRACT: key, value readonly []byte
+	Set([]byte, []byte)
+	SetSync([]byte, []byte)
+
+	// Delete deletes the key.
+	// A nil key is interpreted as an empty byteslice.
+	// CONTRACT: key readonly []byte
+	Delete([]byte)
+	DeleteSync([]byte)
+
+	// Iterate over a domain of keys in ascending order. End is exclusive.
+	// Start must be less than end, or the Iterator is invalid.
+	// A nil start is interpreted as an empty byteslice.
+	// If end is nil, iterates up to the last item (inclusive).
+	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
+	// CONTRACT: start, end readonly []byte
+	Iterator(start, end []byte) Iterator
+
+	// Iterate over a domain of keys in descending order. End is exclusive.
+	// Start must be greater than end, or the Iterator is invalid.
+	// If start is nil, iterates from the last/greatest item (inclusive).
+	// If end is nil, iterates up to the first/least item (inclusive).
+	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
+	// CONTRACT: start, end readonly []byte
+	ReverseIterator(start, end []byte) Iterator
+
+	// Closes the connection.
+	Close()
+
+	// Creates a batch for atomic updates.
+	NewBatch() Batch
+
+	// For debugging
+	Print()
+
+	// Stats returns a map of property values for all keys and the size of the cache.
+	Stats() map[string]string
+}
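+
+// Illustrative usage of the interface (editorial sketch, not part of the
+// upstream file), backed by the in-memory implementation from this package:
+//
+//	var db DB = NewMemDB()
+//	db.Set([]byte("k"), []byte("v"))
+//	if db.Has([]byte("k")) {
+//		_ = db.Get([]byte("k")) // []byte("v")
+//	}
+//	db.Delete([]byte("k"))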
+
+//----------------------------------------
+// Batch
+
+type Batch interface {
+	SetDeleter
+	Write()
+	WriteSync()
+}
+
+type SetDeleter interface {
+	Set(key, value []byte) // CONTRACT: key, value readonly []byte
+	Delete(key []byte)     // CONTRACT: key readonly []byte
+}
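+
+// Illustrative usage (editorial sketch, not part of the upstream file;
+// db is any DB from this package): operations are staged on the batch and
+// applied together on Write.
+//
+//	batch := db.NewBatch()
+//	batch.Set([]byte("a"), []byte("1"))
+//	batch.Delete([]byte("b"))
+//	batch.Write() // or batch.WriteSync() for a synchronous write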
+
+//----------------------------------------
+// Iterator
+
+/*
+	Usage:
+
+	var itr Iterator = ...
+	defer itr.Close()
+
+	for ; itr.Valid(); itr.Next() {
+	k, v := itr.Key(), itr.Value()
+		// ...
+	}
+*/
+type Iterator interface {
+
+	// The start & end (exclusive) limits to iterate over.
+	// If end < start, then the Iterator goes in reverse order.
+	//
+	// A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate
+	// over anything with the prefix []byte{12, 13}.
+	//
+	// The smallest key is the empty byte array []byte{} - see BeginningKey().
+	// The largest key is the nil byte array []byte(nil) - see EndingKey().
+	// CONTRACT: start, end readonly []byte
+	Domain() (start []byte, end []byte)
+
+	// Valid returns whether the current position is valid.
+	// Once invalid, an Iterator is forever invalid.
+	Valid() bool
+
+	// Next moves the iterator to the next sequential key in the database, as
+	// defined by order of iteration.
+	//
+	// If Valid returns false, this method will panic.
+	Next()
+
+	// Key returns the key of the cursor.
+	// If Valid returns false, this method will panic.
+	// CONTRACT: key readonly []byte
+	Key() (key []byte)
+
+	// Value returns the value of the cursor.
+	// If Valid returns false, this method will panic.
+	// CONTRACT: value readonly []byte
+	Value() (value []byte)
+
+	// Close releases the Iterator.
+	Close()
+}
+
+// For testing convenience.
+func bz(s string) []byte {
+	return []byte(s)
+}
+
+// We defensively turn nil keys or values into []byte{} for
+// most operations.
+func nonNilBytes(bz []byte) []byte {
+	if bz == nil {
+		return []byte{}
+	}
+	return bz
+}
diff --git a/vendor/github.com/tendermint/tmlibs/db/util.go b/vendor/github.com/tendermint/tmlibs/db/util.go
new file mode 100644
index 00000000..1ad5002d
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/db/util.go
@@ -0,0 +1,54 @@
+package db
+
+import (
+	"bytes"
+)
+
+func cp(bz []byte) (ret []byte) {
+	ret = make([]byte, len(bz))
+	copy(ret, bz)
+	return ret
+}
+
+// Returns a slice of the same length (big endian)
+// except incremented by one.
+// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
+// CONTRACT: len(bz) > 0
+func cpIncr(bz []byte) (ret []byte) {
+	if len(bz) == 0 {
+		panic("cpIncr expects non-zero bz length")
+	}
+	ret = cp(bz)
+	for i := len(bz) - 1; i >= 0; i-- {
+		if ret[i] < byte(0xFF) {
+			ret[i]++
+			return
+		}
+		ret[i] = byte(0x00)
+		if i == 0 {
+			// Overflow
+			return nil
+		}
+	}
+	return nil
+}
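+
+// Illustrative values (editorial note, not part of the upstream file):
+//
+//	cpIncr([]byte{0x01, 0xFF}) // []byte{0x02, 0x00}
+//	cpIncr([]byte{0xFF, 0xFF}) // nil (overflow)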
+
+// See DB interface documentation for more information.
+func IsKeyInDomain(key, start, end []byte, isReverse bool) bool {
+	if !isReverse {
+		if bytes.Compare(key, start) < 0 {
+			return false
+		}
+		if end != nil && bytes.Compare(end, key) <= 0 {
+			return false
+		}
+		return true
+	}
+	if start != nil && bytes.Compare(start, key) < 0 {
+		return false
+	}
+	if end != nil && bytes.Compare(key, end) <= 0 {
+		return false
+	}
+	return true
+}
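+
+// Illustrative values (editorial note, not part of the upstream file):
+//
+//	IsKeyInDomain([]byte("b"), []byte("a"), []byte("c"), false) // true
+//	IsKeyInDomain([]byte("c"), []byte("a"), []byte("c"), false) // false: end is exclusive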
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go b/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
new file mode 100644
index 00000000..b59e3b4b
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/merkle/simple_map.go
@@ -0,0 +1,84 @@
+package merkle
+
+import (
+	cmn "github.com/tendermint/tmlibs/common"
+	"golang.org/x/crypto/ripemd160"
+)
+
+type SimpleMap struct {
+	kvs    cmn.KVPairs
+	sorted bool
+}
+
+func NewSimpleMap() *SimpleMap {
+	return &SimpleMap{
+		kvs:    nil,
+		sorted: false,
+	}
+}
+
+func (sm *SimpleMap) Set(key string, value Hasher) {
+	sm.sorted = false
+
+	// Hash the key to blind it... why not?
+	khash := SimpleHashFromBytes([]byte(key))
+
+	// And the value is hashed too, so you can
+	// check for equality with a cached value (say)
+	// and make a determination to fetch or not.
+	vhash := value.Hash()
+
+	sm.kvs = append(sm.kvs, cmn.KVPair{
+		Key:   khash,
+		Value: vhash,
+	})
+}
+
+// Merkle root hash of items sorted by key
+// (UNSTABLE: and by value too if duplicate key).
+func (sm *SimpleMap) Hash() []byte {
+	sm.Sort()
+	return hashKVPairs(sm.kvs)
+}
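+
+// Illustrative usage (editorial sketch, not part of the upstream file;
+// balanceHasher and nonceHasher stand for any values implementing Hasher):
+//
+//	sm := NewSimpleMap()
+//	sm.Set("balance", balanceHasher)
+//	sm.Set("nonce", nonceHasher)
+//	root := sm.Hash() // deterministic: pairs are sorted before hashing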
+
+func (sm *SimpleMap) Sort() {
+	if sm.sorted {
+		return
+	}
+	sm.kvs.Sort()
+	sm.sorted = true
+}
+
+// Returns a copy of sorted KVPairs.
+func (sm *SimpleMap) KVPairs() cmn.KVPairs {
+	sm.Sort()
+	kvs := make(cmn.KVPairs, len(sm.kvs))
+	copy(kvs, sm.kvs)
+	return kvs
+}
+
+//----------------------------------------
+
+// A local extension to KVPair that can be hashed.
+type kvPair cmn.KVPair
+
+func (kv kvPair) Hash() []byte {
+	hasher := ripemd160.New()
+	err := encodeByteSlice(hasher, kv.Key)
+	if err != nil {
+		panic(err)
+	}
+	err = encodeByteSlice(hasher, kv.Value)
+	if err != nil {
+		panic(err)
+	}
+	return hasher.Sum(nil)
+}
+
+func hashKVPairs(kvs cmn.KVPairs) []byte {
+	kvsH := make([]Hasher, 0, len(kvs))
+	for _, kvp := range kvs {
+		kvsH = append(kvsH, kvPair(kvp))
+	}
+	return SimpleHashFromHashers(kvsH)
+}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go b/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
new file mode 100644
index 00000000..c81ed674
--- /dev/null
+++ b/vendor/github.com/tendermint/tmlibs/merkle/simple_proof.go
@@ -0,0 +1,130 @@
+package merkle
+
+import (
+	"bytes"
+	"fmt"
+)
+
+type SimpleProof struct {
+	Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
+}
+
+// proofs[0] is the proof for items[0].
+func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) {
+	trails, rootSPN := trailsFromHashers(items)
+	rootHash = rootSPN.Hash
+	proofs = make([]*SimpleProof, len(items))
+	for i, trail := range trails {
+		proofs[i] = &SimpleProof{
+			Aunts: trail.FlattenAunts(),
+		}
+	}
+	return
+}
+
+// Verify that leafHash is a leaf hash of the simple-merkle-tree
+// which hashes to rootHash.
+func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool {
+	computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts)
+	return computedHash != nil && bytes.Equal(computedHash, rootHash)
+}
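+
+// Illustrative usage (editorial sketch, not part of the upstream file;
+// items is any []Hasher):
+//
+//	rootHash, proofs := SimpleProofsFromHashers(items)
+//	ok := proofs[0].Verify(0, len(items), items[0].Hash(), rootHash) // true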
+
+func (sp *SimpleProof) String() string {
+	return sp.StringIndented("")
+}
+
+func (sp *SimpleProof) StringIndented(indent string) string {
+	return fmt.Sprintf(`SimpleProof{
+%s  Aunts: %X
+%s}`,
+		indent, sp.Aunts,
+		indent)
+}
+
+// Use the leafHash and innerHashes to get the root merkle hash.
+// If the length of the innerHashes slice isn't exactly correct, the result is nil.
+// Recursive impl.
+func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte {
+	if index >= total || index < 0 || total <= 0 {
+		return nil
+	}
+	switch total {
+	case 0:
+		panic("Cannot call computeHashFromAunts() with 0 total")
+	case 1:
+		if len(innerHashes) != 0 {
+			return nil
+		}
+		return leafHash
+	default:
+		if len(innerHashes) == 0 {
+			return nil
+		}
+		numLeft := (total + 1) / 2
+		if index < numLeft {
+			leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+			if leftHash == nil {
+				return nil
+			}
+			return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
+		}
+		rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+		if rightHash == nil {
+			return nil
+		}
+		return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
+	}
+}
+
+// Helper structure to construct merkle proof.
+// The node and the tree is thrown away afterwards.
+// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
+// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
+// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
+type SimpleProofNode struct {
+	Hash   []byte
+	Parent *SimpleProofNode
+	Left   *SimpleProofNode // Left sibling  (only one of Left,Right is set)
+	Right  *SimpleProofNode // Right sibling (only one of Left,Right is set)
+}
+
+// Starting from a leaf SimpleProofNode, FlattenAunts() will return
+// the inner hashes for the item corresponding to the leaf.
+func (spn *SimpleProofNode) FlattenAunts() [][]byte {
+	// Nonrecursive impl.
+	innerHashes := [][]byte{}
+	for spn != nil {
+		if spn.Left != nil {
+			innerHashes = append(innerHashes, spn.Left.Hash)
+		} else if spn.Right != nil {
+			innerHashes = append(innerHashes, spn.Right.Hash)
+		} else {
+			break
+		}
+		spn = spn.Parent
+	}
+	return innerHashes
+}
+
+// trails[0].Hash is the leaf hash for items[0].
+// trails[i].Parent.Parent....Parent == root for all i.
+func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) {
+	// Recursive impl.
+	switch len(items) {
+	case 0:
+		return nil, nil
+	case 1:
+		trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil}
+		return []*SimpleProofNode{trail}, trail
+	default:
+		lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2])
+		rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:])
+		rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
+		root := &SimpleProofNode{rootHash, nil, nil, nil}
+		leftRoot.Parent = root
+		leftRoot.Right = rightRoot
+		rightRoot.Parent = root
+		rightRoot.Left = leftRoot
+		return append(lefts, rights...), root
+	}
+}
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go b/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
index 8106246d..9bdf52cb 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
+++ b/vendor/github.com/tendermint/tmlibs/merkle/simple_tree.go
@@ -25,24 +25,18 @@ For larger datasets, use IAVLTree.
 package merkle
 
 import (
-	"bytes"
-	"fmt"
-	"sort"
-
 	"golang.org/x/crypto/ripemd160"
-
-	"github.com/tendermint/go-wire"
-	. "github.com/tendermint/tmlibs/common"
 )
 
 func SimpleHashFromTwoHashes(left []byte, right []byte) []byte {
-	var n int
-	var err error
 	var hasher = ripemd160.New()
-	wire.WriteByteSlice(left, hasher, &n, &err)
-	wire.WriteByteSlice(right, hasher, &n, &err)
+	err := encodeByteSlice(hasher, left)
+	if err != nil {
+		panic(err)
+	}
+	err = encodeByteSlice(hasher, right)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 	return hasher.Sum(nil)
 }
@@ -61,27 +55,25 @@ func SimpleHashFromHashes(hashes [][]byte) []byte {
 	}
 }
 
-// Convenience for SimpleHashFromHashes.
-func SimpleHashFromBinaries(items []interface{}) []byte {
-	hashes := make([][]byte, len(items))
-	for i, item := range items {
-		hashes[i] = SimpleHashFromBinary(item)
+// NOTE: Do not implement this, use SimpleHashFromByteslices instead.
+// type Byteser interface { Bytes() []byte }
+// func SimpleHashFromBytesers(items []Byteser) []byte { ... }
+
+func SimpleHashFromByteslices(bzs [][]byte) []byte {
+	hashes := make([][]byte, len(bzs))
+	for i, bz := range bzs {
+		hashes[i] = SimpleHashFromBytes(bz)
 	}
 	return SimpleHashFromHashes(hashes)
 }
 
-// General Convenience
-func SimpleHashFromBinary(item interface{}) []byte {
-	hasher, n, err := ripemd160.New(), new(int), new(error)
-	wire.WriteBinary(item, hasher, n, err)
-	if *err != nil {
-		PanicCrisis(err)
-	}
+func SimpleHashFromBytes(bz []byte) []byte {
+	hasher := ripemd160.New()
+	hasher.Write(bz)
 	return hasher.Sum(nil)
 }
 
-// Convenience for SimpleHashFromHashes.
-func SimpleHashFromHashables(items []Hashable) []byte {
+func SimpleHashFromHashers(items []Hasher) []byte {
 	hashes := make([][]byte, len(items))
 	for i, item := range items {
 		hash := item.Hash()
@@ -90,188 +82,10 @@ func SimpleHashFromHashables(items []Hashable) []byte {
 	return SimpleHashFromHashes(hashes)
 }
 
-// Convenience for SimpleHashFromHashes.
-func SimpleHashFromMap(m map[string]interface{}) []byte {
-	kpPairsH := MakeSortedKVPairs(m)
-	return SimpleHashFromHashables(kpPairsH)
-}
-
-//--------------------------------------------------------------------------------
-
-/* Convenience struct for key-value pairs.
-A list of KVPairs is hashed via `SimpleHashFromHashables`.
-NOTE: Each `Value` is encoded for hashing without extra type information,
-so the user is presumed to be aware of the Value types.
-*/
-type KVPair struct {
-	Key   string
-	Value interface{}
-}
-
-func (kv KVPair) Hash() []byte {
-	hasher, n, err := ripemd160.New(), new(int), new(error)
-	wire.WriteString(kv.Key, hasher, n, err)
-	if kvH, ok := kv.Value.(Hashable); ok {
-		wire.WriteByteSlice(kvH.Hash(), hasher, n, err)
-	} else {
-		wire.WriteBinary(kv.Value, hasher, n, err)
-	}
-	if *err != nil {
-		PanicSanity(*err)
-	}
-	return hasher.Sum(nil)
-}
-
-type KVPairs []KVPair
-
-func (kvps KVPairs) Len() int           { return len(kvps) }
-func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key }
-func (kvps KVPairs) Swap(i, j int)      { kvps[i], kvps[j] = kvps[j], kvps[i] }
-func (kvps KVPairs) Sort()              { sort.Sort(kvps) }
-
-func MakeSortedKVPairs(m map[string]interface{}) []Hashable {
-	kvPairs := []KVPair{}
+func SimpleHashFromMap(m map[string]Hasher) []byte {
+	sm := NewSimpleMap()
 	for k, v := range m {
-		kvPairs = append(kvPairs, KVPair{k, v})
-	}
-	KVPairs(kvPairs).Sort()
-	kvPairsH := []Hashable{}
-	for _, kvp := range kvPairs {
-		kvPairsH = append(kvPairsH, kvp)
-	}
-	return kvPairsH
-}
-
-//--------------------------------------------------------------------------------
-
-type SimpleProof struct {
-	Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
-}
-
-// proofs[0] is the proof for items[0].
-func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) {
-	trails, rootSPN := trailsFromHashables(items)
-	rootHash = rootSPN.Hash
-	proofs = make([]*SimpleProof, len(items))
-	for i, trail := range trails {
-		proofs[i] = &SimpleProof{
-			Aunts: trail.FlattenAunts(),
-		}
-	}
-	return
-}
-
-// Verify that leafHash is a leaf hash of the simple-merkle-tree
-// which hashes to rootHash.
-func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool {
-	computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts)
-	if computedHash == nil {
-		return false
-	}
-	if !bytes.Equal(computedHash, rootHash) {
-		return false
-	}
-	return true
-}
-
-func (sp *SimpleProof) String() string {
-	return sp.StringIndented("")
-}
-
-func (sp *SimpleProof) StringIndented(indent string) string {
-	return fmt.Sprintf(`SimpleProof{
-%s  Aunts: %X
-%s}`,
-		indent, sp.Aunts,
-		indent)
-}
-
-// Use the leafHash and innerHashes to get the root merkle hash.
-// If the length of the innerHashes slice isn't exactly correct, the result is nil.
-func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte {
-	// Recursive impl.
-	if index >= total {
-		return nil
-	}
-	switch total {
-	case 0:
-		PanicSanity("Cannot call computeHashFromAunts() with 0 total")
-		return nil
-	case 1:
-		if len(innerHashes) != 0 {
-			return nil
-		}
-		return leafHash
-	default:
-		if len(innerHashes) == 0 {
-			return nil
-		}
-		numLeft := (total + 1) / 2
-		if index < numLeft {
-			leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
-			if leftHash == nil {
-				return nil
-			}
-			return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
-		} else {
-			rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
-			if rightHash == nil {
-				return nil
-			}
-			return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
-		}
-	}
-}
-
-// Helper structure to construct merkle proof.
-// The node and the tree is thrown away afterwards.
-// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
-// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
-// 									  hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
-type SimpleProofNode struct {
-	Hash   []byte
-	Parent *SimpleProofNode
-	Left   *SimpleProofNode // Left sibling  (only one of Left,Right is set)
-	Right  *SimpleProofNode // Right sibling (only one of Left,Right is set)
-}
-
-// Starting from a leaf SimpleProofNode, FlattenAunts() will return
-// the inner hashes for the item corresponding to the leaf.
-func (spn *SimpleProofNode) FlattenAunts() [][]byte {
-	// Nonrecursive impl.
-	innerHashes := [][]byte{}
-	for spn != nil {
-		if spn.Left != nil {
-			innerHashes = append(innerHashes, spn.Left.Hash)
-		} else if spn.Right != nil {
-			innerHashes = append(innerHashes, spn.Right.Hash)
-		} else {
-			break
-		}
-		spn = spn.Parent
-	}
-	return innerHashes
-}
-
-// trails[0].Hash is the leaf hash for items[0].
-// trails[i].Parent.Parent....Parent == root for all i.
-func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) {
-	// Recursive impl.
-	switch len(items) {
-	case 0:
-		return nil, nil
-	case 1:
-		trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil}
-		return []*SimpleProofNode{trail}, trail
-	default:
-		lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2])
-		rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:])
-		rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
-		root := &SimpleProofNode{rootHash, nil, nil, nil}
-		leftRoot.Parent = root
-		leftRoot.Right = rightRoot
-		rightRoot.Parent = root
-		rightRoot.Left = leftRoot
-		return append(lefts, rights...), root
+		sm.Set(k, v)
 	}
+	return sm.Hash()
 }
diff --git a/vendor/github.com/tendermint/tmlibs/merkle/types.go b/vendor/github.com/tendermint/tmlibs/merkle/types.go
index 93541eda..a0c491a7 100644
--- a/vendor/github.com/tendermint/tmlibs/merkle/types.go
+++ b/vendor/github.com/tendermint/tmlibs/merkle/types.go
@@ -1,5 +1,10 @@
 package merkle
 
+import (
+	"encoding/binary"
+	"io"
+)
+
 type Tree interface {
 	Size() (size int)
 	Height() (height int8)
@@ -18,6 +23,25 @@ type Tree interface {
 	IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool)
 }
 
-type Hashable interface {
+type Hasher interface {
 	Hash() []byte
 }
+
+//-----------------------------------------------------------------------
+// NOTE: these are duplicated from go-amino so we don't need go-amino as a dep
+
+func encodeByteSlice(w io.Writer, bz []byte) (err error) {
+	err = encodeUvarint(w, uint64(len(bz)))
+	if err != nil {
+		return
+	}
+	_, err = w.Write(bz)
+	return
+}
+
+func encodeUvarint(w io.Writer, i uint64) (err error) {
+	var buf [10]byte
+	n := binary.PutUvarint(buf[:], i)
+	_, err = w.Write(buf[0:n])
+	return
+}
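+
+// Illustrative encoding (editorial note, not part of the upstream file):
+// encodeByteSlice writes a uvarint length prefix followed by the raw bytes,
+// so the one-byte slice {0xAB} is written as the two bytes 0x01 0xAB.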
diff --git a/vendor/github.com/tendermint/tmlibs/pubsub/pubsub.go b/vendor/github.com/tendermint/tmlibs/pubsub/pubsub.go
index 54a4b8ae..90f6e4ae 100644
--- a/vendor/github.com/tendermint/tmlibs/pubsub/pubsub.go
+++ b/vendor/github.com/tendermint/tmlibs/pubsub/pubsub.go
@@ -28,6 +28,16 @@ const (
 	shutdown
 )
 
+var (
+	// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
+	// from a subscription that does not exist.
+	ErrSubscriptionNotFound = errors.New("subscription not found")
+
+	// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
+	// more using the same query.
+	ErrAlreadySubscribed = errors.New("already subscribed")
+)
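+
+// Illustrative usage (editorial sketch, not part of the upstream file;
+// s, ctx, clientID, q and out stand for an existing Server, context,
+// client ID, Query and output channel):
+//
+//	if err := s.Subscribe(ctx, clientID, q, out); err == ErrAlreadySubscribed {
+//		// the client already has a subscription for this query
+//	}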
+
 type cmd struct {
 	op       operation
 	query    Query
@@ -52,7 +62,7 @@ type Server struct {
 	cmdsCap int
 
 	mtx           sync.RWMutex
-	subscriptions map[string]map[string]struct{} // subscriber -> query -> struct{}
+	subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
 }
 
 // Option sets a parameter for the server.
@@ -63,7 +73,7 @@ type Option func(*Server)
 // provided, the resulting server's queue is unbuffered.
 func NewServer(options ...Option) *Server {
 	s := &Server{
-		subscriptions: make(map[string]map[string]struct{}),
+		subscriptions: make(map[string]map[string]Query),
 	}
 	s.BaseService = *cmn.NewBaseService(nil, "PubSub", s)
 
@@ -106,16 +116,16 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
 	}
 	s.mtx.RUnlock()
 	if ok {
-		return errors.New("already subscribed")
+		return ErrAlreadySubscribed
 	}
 
 	select {
 	case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
 		s.mtx.Lock()
 		if _, ok = s.subscriptions[clientID]; !ok {
-			s.subscriptions[clientID] = make(map[string]struct{})
+			s.subscriptions[clientID] = make(map[string]Query)
 		}
-		s.subscriptions[clientID][query.String()] = struct{}{}
+		s.subscriptions[clientID][query.String()] = query
 		s.mtx.Unlock()
 		return nil
 	case <-ctx.Done():
@@ -127,18 +137,20 @@ func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, ou
 // returned to the caller if the context is canceled or if subscription does
 // not exist.
 func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error {
+	var origQuery Query
 	s.mtx.RLock()
 	clientSubscriptions, ok := s.subscriptions[clientID]
 	if ok {
-		_, ok = clientSubscriptions[query.String()]
+		origQuery, ok = clientSubscriptions[query.String()]
 	}
 	s.mtx.RUnlock()
 	if !ok {
-		return errors.New("subscription not found")
+		return ErrSubscriptionNotFound
 	}
 
+	// original query is used here because we're using pointers as map keys
 	select {
-	case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}:
+	case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}:
 		s.mtx.Lock()
 		delete(clientSubscriptions, query.String())
 		s.mtx.Unlock()
@@ -155,7 +167,7 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
 	_, ok := s.subscriptions[clientID]
 	s.mtx.RUnlock()
 	if !ok {
-		return errors.New("subscription not found")
+		return ErrSubscriptionNotFound
 	}
 
 	select {
@@ -209,6 +221,11 @@ func (s *Server) OnStart() error {
 	return nil
 }
 
+// OnReset implements Service.OnReset
+func (s *Server) OnReset() error {
+	return nil
+}
+
 func (s *Server) loop(state state) {
 loop:
 	for cmd := range s.cmds {
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go
index cf7b48d9..b61a8180 100644
--- a/vendor/github.com/ugorji/go/codec/0doc.go
+++ b/vendor/github.com/ugorji/go/codec/0doc.go
@@ -225,7 +225,7 @@ with some caveats. See Encode documentation.
 package codec
 
 // TODO:
-//   - In Go 1.10, when mid-stack inlining is enabled,
+//   - For Go 1.11, when mid-stack inlining is enabled,
 //     we should use committed functions for writeXXX and readXXX calls.
 //     This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
 //     and using those (decReaderSwitch and encWriterSwitch) in all handles
diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go
index 39fd7d4a..a3c96fe7 100644
--- a/vendor/github.com/ugorji/go/codec/binc.go
+++ b/vendor/github.com/ugorji/go/codec/binc.go
@@ -55,6 +55,50 @@ const (
 	// others not currently supported
 )
 
+func bincdesc(vd, vs byte) string {
+	switch vd {
+	case bincVdSpecial:
+		switch vs {
+		case bincSpNil:
+			return "nil"
+		case bincSpFalse:
+			return "false"
+		case bincSpTrue:
+			return "true"
+		case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat:
+			return "float"
+		case bincSpZero:
+			return "uint"
+		case bincSpNegOne:
+			return "int"
+		default:
+			return "unknown"
+		}
+	case bincVdSmallInt, bincVdPosInt:
+		return "uint"
+	case bincVdNegInt:
+		return "int"
+	case bincVdFloat:
+		return "float"
+	case bincVdSymbol:
+		return "string"
+	case bincVdString:
+		return "string"
+	case bincVdByteArray:
+		return "bytes"
+	case bincVdTimestamp:
+		return "time"
+	case bincVdCustomExt:
+		return "ext"
+	case bincVdArray:
+		return "array"
+	case bincVdMap:
+		return "map"
+	default:
+		return "unknown"
+	}
+}
+
 type bincEncDriver struct {
 	e *Encoder
 	h *BincHandle
@@ -405,7 +449,7 @@ func (d *bincDecDriver) DecodeTime() (t time.Time) {
 		return
 	}
 	if d.vd != bincVdTimestamp {
-		d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+		d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	t, err := bincDecodeTime(d.r.readx(int(d.vs)))
@@ -422,7 +466,7 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
 	} else {
 		l := d.r.readn1()
 		if l > 8 {
-			d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
+			d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l)
 			return
 		}
 		for i := l; i < 8; i++ {
@@ -441,7 +485,7 @@ func (d *bincDecDriver) decFloat() (f float64) {
 		d.decFloatPre(d.vs, 8)
 		f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
 	} else {
-		d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
+		d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	return
@@ -498,7 +542,8 @@ func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
 			neg = true
 			ui = 1
 		} else {
-			d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+			d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s",
+				d.vd, d.vs, bincdesc(d.vd, d.vs))
 			return
 		}
 	} else {
@@ -521,7 +566,7 @@ func (d *bincDecDriver) DecodeInt64() (i int64) {
 func (d *bincDecDriver) DecodeUint64() (ui uint64) {
 	ui, neg := d.decCheckInteger()
 	if neg {
-		d.d.errorf("Assigning negative signed value to unsigned type")
+		d.d.errorf("assigning negative signed value to unsigned integer type")
 		return
 	}
 	d.bdRead = false
@@ -544,7 +589,8 @@ func (d *bincDecDriver) DecodeFloat64() (f float64) {
 		} else if vs == bincSpNegInf {
 			return math.Inf(-1)
 		} else {
-			d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
+			d.d.errorf("float - invalid special value from descriptor %x-%x/%s",
+				d.vd, d.vs, bincdesc(d.vd, d.vs))
 			return
 		}
 	} else if vd == bincVdFloat {
@@ -566,7 +612,7 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
 	} else if bd == (bincVdSpecial | bincSpTrue) {
 		b = true
 	} else {
-		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+		d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	d.bdRead = false
@@ -578,7 +624,7 @@ func (d *bincDecDriver) ReadMapStart() (length int) {
 		d.readNextBd()
 	}
 	if d.vd != bincVdMap {
-		d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+		d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	length = d.decLen()
@@ -591,7 +637,7 @@ func (d *bincDecDriver) ReadArrayStart() (length int) {
 		d.readNextBd()
 	}
 	if d.vd != bincVdArray {
-		d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+		d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	length = d.decLen()
@@ -704,8 +750,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
 			d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
 		}
 	default:
-		d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
-			bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
+		d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	d.bdRead = false
@@ -742,8 +787,7 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 	if d.vd == bincVdString || d.vd == bincVdByteArray {
 		clen = d.decLen()
 	} else {
-		d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
-			bincVdString, bincVdByteArray, d.vd)
+		d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	d.bdRead = false
@@ -759,7 +803,7 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 
 func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
 	if xtag > 0xff {
-		d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
 		return
 	}
 	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
@@ -782,14 +826,14 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
 		l := d.decLen()
 		xtag = d.r.readn1()
 		if verifyTag && xtag != tag {
-			d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+			d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
 			return
 		}
 		xbs = d.r.readx(l)
 	} else if d.vd == bincVdByteArray {
 		xbs = d.DecodeBytes(nil, true)
 	} else {
-		d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+		d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 		return
 	}
 	d.bdRead = false
@@ -834,7 +878,7 @@ func (d *bincDecDriver) DecodeNaked() {
 			n.v = valueTypeInt
 			n.i = int64(-1) // int8(-1)
 		default:
-			d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
+			d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
 		}
 	case bincVdSmallInt:
 		n.v = valueTypeUint
@@ -876,7 +920,7 @@ func (d *bincDecDriver) DecodeNaked() {
 		n.v = valueTypeMap
 		decodeFurther = true
 	default:
-		d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
+		d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
 	}
 
 	if !decodeFurther {
@@ -928,7 +972,7 @@ type BincHandle struct {
 	// - n: none
 	// - a: all: same as m, s, ...
 
-	_ [1]uint64 // padding
+	// _ [1]uint64 // padding
 }
 
 // Name returns the name of the handle: binc
diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go
index be01e19e..7633c04a 100644
--- a/vendor/github.com/ugorji/go/codec/cbor.go
+++ b/vendor/github.com/ugorji/go/codec/cbor.go
@@ -60,6 +60,46 @@ const (
 	cborBaseSimple      = 0xe0
 )
 
+func cbordesc(bd byte) string {
+	switch bd {
+	case cborBdNil:
+		return "nil"
+	case cborBdFalse:
+		return "false"
+	case cborBdTrue:
+		return "true"
+	case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+		return "float"
+	case cborBdIndefiniteBytes:
+		return "bytes*"
+	case cborBdIndefiniteString:
+		return "string*"
+	case cborBdIndefiniteArray:
+		return "array*"
+	case cborBdIndefiniteMap:
+		return "map*"
+	default:
+		switch {
+		case bd >= cborBaseUint && bd < cborBaseNegInt:
+			return "(u)int"
+		case bd >= cborBaseNegInt && bd < cborBaseBytes:
+			return "int"
+		case bd >= cborBaseBytes && bd < cborBaseString:
+			return "bytes"
+		case bd >= cborBaseString && bd < cborBaseArray:
+			return "string"
+		case bd >= cborBaseArray && bd < cborBaseMap:
+			return "array"
+		case bd >= cborBaseMap && bd < cborBaseTag:
+			return "map"
+		case bd >= cborBaseTag && bd < cborBaseSimple:
+			return "ext"
+		default:
+			return "unknown"
+		}
+	}
+}
+
 // -------------------
 
 type cborEncDriver struct {
@@ -326,7 +366,7 @@ func (d *cborDecDriver) decUint() (ui uint64) {
 		} else if v == 0x1b {
 			ui = uint64(bigen.Uint64(d.r.readx(8)))
 		} else {
-			d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
+			d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
 			return
 		}
 	}
@@ -342,7 +382,7 @@ func (d *cborDecDriver) decCheckInteger() (neg bool) {
 	} else if major == cborMajorNegInt {
 		neg = true
 	} else {
-		d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
+		d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
 		return
 	}
 	return
@@ -363,7 +403,7 @@ func (d *cborDecDriver) DecodeInt64() (i int64) {
 
 func (d *cborDecDriver) DecodeUint64() (ui uint64) {
 	if d.decCheckInteger() {
-		d.d.errorf("Assigning negative signed value to unsigned type")
+		d.d.errorf("assigning negative signed value to unsigned type")
 		return
 	}
 	ui = d.decUint()
@@ -384,7 +424,7 @@ func (d *cborDecDriver) DecodeFloat64() (f float64) {
 	} else if bd >= cborBaseUint && bd < cborBaseBytes {
 		f = float64(d.DecodeInt64())
 	} else {
-		d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
+		d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
 		return
 	}
 	d.bdRead = false
@@ -400,7 +440,7 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
 		b = true
 	} else if bd == cborBdFalse {
 	} else {
-		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+		d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
 		return
 	}
 	d.bdRead = false
@@ -441,7 +481,7 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
 		}
 		if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
 			d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
-				" got: %v, byte: %v", major, d.bd)
+				" got major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
 			return nil
 		}
 		n := d.decLen()
@@ -684,7 +724,7 @@ type CborHandle struct {
 	// If unset, we encode time.Time using seconds past epoch.
 	TimeRFC3339 bool
 
-	_ [1]uint64 // padding
+	// _ [1]uint64 // padding
 }
 
 // Name returns the name of the handle: cbor
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
index 148c609c..1c0817aa 100644
--- a/vendor/github.com/ugorji/go/codec/decode.go
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -16,10 +16,14 @@ import (
 
 // Some tagging information for error messages.
 const (
-	msgBadDesc            = "Unrecognized descriptor byte"
+	msgBadDesc            = "unrecognized descriptor byte"
 	msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
 )
 
+const decDefSliceCap = 8
+const decDefChanCap = 64 // should be large, as cap cannot be expanded
+const decScratchByteArrayLen = cacheLineSize - 8
+
 var (
 	errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct"
 	errstrCannotDecodeIntoNil               = "cannot decode into nil"
@@ -1237,7 +1241,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 	// This way, the order can be kept (as order is lost with map).
 	ti := f.ti
 	if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 {
-		d.errorf("receive-only channel cannot be used for sending byte(s)")
+		d.errorf("receive-only channel cannot be decoded")
 	}
 	dd := d.d
 	rtelem0 := ti.elem
@@ -1356,14 +1360,17 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 		if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
 			if hasLen {
 				rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size)
+			} else if f.seq == seqTypeSlice {
+				rvlen = decDefSliceCap
 			} else {
-				rvlen = 8
+				rvlen = decDefChanCap
 			}
 			if rvCanset {
 				if f.seq == seqTypeSlice {
 					rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
 					rvChanged = true
 				} else { // chan
+					// xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen)
 					rv = reflect.MakeChan(ti.rt, rvlen)
 					rvChanged = true
 				}
@@ -1385,6 +1392,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 				fn = d.cf.get(rtelem, true, true)
 			}
 			d.decodeValue(rv9, fn, true)
+			// xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap())
 			rv.Send(rv9)
 		} else {
 			// if indefinite, etc, then expand the slice if necessary
@@ -1734,7 +1742,7 @@ type decReaderSwitch struct {
 	esep  bool // has elem separators
 }
 
-// TODO: Uncomment after mid-stack inlining enabled in go 1.10
+// TODO: Uncomment after mid-stack inlining enabled in go 1.11
 //
 // func (z *decReaderSwitch) unreadn1() {
 // 	if z.bytes {
@@ -1800,8 +1808,6 @@ type decReaderSwitch struct {
 // 	return z.ri.readUntil(in, stop)
 // }
 
-const decScratchByteArrayLen = cacheLineSize - 8
-
 // A Decoder reads and decodes an object from an input stream in the codec format.
 type Decoder struct {
 	panicHdl
@@ -2002,9 +2008,7 @@ func (d *Decoder) naked() *decNaked {
 // Note: we allow nil values in the stream anywhere except for map keys.
 // A nil value in the encoded stream where a map key is expected is treated as an error.
 func (d *Decoder) Decode(v interface{}) (err error) {
-	// need to call defer directly, else it seems the recover is not fully handled
-	defer panicToErrs2(d, &d.err, &err)
-	defer d.alwaysAtEnd()
+	defer d.deferred(&err)
 	d.MustDecode(v)
 	return
 }
@@ -2025,11 +2029,15 @@ func (d *Decoder) MustDecode(v interface{}) {
 	// xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
 }
 
-// // this is not a smart swallow, as it allocates objects and does unnecessary work.
-// func (d *Decoder) swallowViaHammer() {
-// 	var blank interface{}
-// 	d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
-// }
+func (d *Decoder) deferred(err1 *error) {
+	d.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(d, x, err1)
+			panicValToErr(d, x, &d.err)
+		}
+	}
+}
 
 func (d *Decoder) alwaysAtEnd() {
 	if d.n != nil {
@@ -2040,6 +2048,12 @@ func (d *Decoder) alwaysAtEnd() {
 	d.codecFnPooler.alwaysAtEnd()
 }
 
+// // this is not a smart swallow, as it allocates objects and does unnecessary work.
+// func (d *Decoder) swallowViaHammer() {
+// 	var blank interface{}
+// 	d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
+// }
+
 func (d *Decoder) swallow() {
 	// smarter decode that just swallows the content
 	dd := d.d
diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go
index 48053d24..ef465294 100644
--- a/vendor/github.com/ugorji/go/codec/encode.go
+++ b/vendor/github.com/ugorji/go/codec/encode.go
@@ -103,7 +103,15 @@ type EncodeOptions struct {
 	// if > 0, we use a smart buffer internally for performance purposes.
 	WriterBufferSize int
 
-	// Encode a struct as an array, and not as a map
+	// ChanRecvTimeout is the timeout used when selecting from a chan.
+	//
+	// Configuring this controls how we receive from a chan during the encoding process.
+	//   - If ==0, we only consume the elements currently available in the chan.
+	//   - If  <0, we consume until the chan is closed.
+	//   - If  >0, we consume until this timeout.
+	ChanRecvTimeout time.Duration
+
+	// StructToArray specifies to encode a struct as an array, and not as a map
 	StructToArray bool
 
 	// Canonical representation means that encoding a value will always result in the same
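The ChanRecvTimeout option introduced in this hunk determines how many elements are drained from a channel while encoding it. A minimal usage sketch, assuming the package's CborHandle (the handle choice and channel contents are illustrative, not part of this patch):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	// 0 drains only what is already buffered; <0 reads until the channel is closed;
	// >0 reads until the timer fires.
	h.ChanRecvTimeout = 0

	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(ch); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out) // the drained elements, encoded as an array
}

With a zero timeout the encoder never blocks on the channel, so Encode does not hang when no producer is running.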
@@ -219,7 +227,9 @@ func (z *ioEncWriter) writen2(b1, b2 byte) {
 
 func (z *ioEncWriter) atEndOfEncode() {
 	if z.fw != nil {
-		z.fw.Flush()
+		if err := z.fw.Flush(); err != nil {
+			panic(err)
+		}
 	}
 }
 
@@ -312,18 +322,19 @@ func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 		}
 	}
 	if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 {
-		e.errorf("send-only channel cannot be used for receiving byte(s)")
+		e.errorf("send-only channel cannot be encoded")
 	}
 	elemsep := e.esep
-	l := rv.Len()
 	rtelem := ti.elem
 	rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8
+	var l int
 	// if a slice, array or chan of bytes, treat specially
 	if rtelemIsByte {
 		switch f.seq {
 		case seqTypeSlice:
 			ee.EncodeStringBytes(cRAW, rv.Bytes())
 		case seqTypeArray:
+			l = rv.Len()
 			if rv.CanAddr() {
 				ee.EncodeStringBytes(cRAW, rv.Slice(0, l).Bytes())
 			} else {
@@ -337,24 +348,89 @@ func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 				ee.EncodeStringBytes(cRAW, bs)
 			}
 		case seqTypeChan:
-			bs := e.b[:0]
 			// do not use range, so that the number of elements encoded
 			// does not change, and encoding does not hang waiting on someone to close chan.
 			// for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) }
 			// ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte.
+
+			if rv.IsNil() {
+				ee.EncodeNil()
+				break
+			}
+			bs := e.b[:0]
 			irv := rv2i(rv)
 			ch, ok := irv.(<-chan byte)
 			if !ok {
 				ch = irv.(chan byte)
 			}
-			for i := 0; i < l; i++ {
-				bs = append(bs, <-ch)
+
+		L1:
+			switch timeout := e.h.ChanRecvTimeout; {
+			case timeout == 0: // only consume available
+				for {
+					select {
+					case b := <-ch:
+						bs = append(bs, b)
+					default:
+						break L1
+					}
+				}
+			case timeout > 0: // consume until timeout
+				tt := time.NewTimer(timeout)
+				for {
+					select {
+					case b := <-ch:
+						bs = append(bs, b)
+					case <-tt.C:
+						// close(tt.C)
+						break L1
+					}
+				}
+			default: // consume until close
+				for b := range ch {
+					bs = append(bs, b)
+				}
 			}
+
 			ee.EncodeStringBytes(cRAW, bs)
 		}
 		return
 	}
 
+	// if chan, consume chan into a slice, and work off that slice.
+	var rvcs reflect.Value
+	if f.seq == seqTypeChan {
+		rvcs = reflect.Zero(reflect.SliceOf(rtelem))
+		timeout := e.h.ChanRecvTimeout
+		if timeout < 0 { // consume until close
+			for {
+				recv, recvOk := rv.Recv()
+				if !recvOk {
+					break
+				}
+				rvcs = reflect.Append(rvcs, recv)
+			}
+		} else {
+			cases := make([]reflect.SelectCase, 2)
+			cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
+			if timeout == 0 {
+				cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
+			} else {
+				tt := time.NewTimer(timeout)
+				cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
+			}
+			for {
+				chosen, recv, recvOk := reflect.Select(cases)
+				if chosen == 1 || !recvOk {
+					break
+				}
+				rvcs = reflect.Append(rvcs, recv)
+			}
+		}
+		rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected
+	}
+
+	l = rv.Len()
 	if ti.mbs {
 		if l%2 == 1 {
 			e.errorf("mapBySlice requires even slice length, but got %v", l)
@@ -388,15 +464,7 @@ func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 					ee.WriteArrayElem()
 				}
 			}
-			if f.seq == seqTypeChan {
-				if rv2, ok2 := rv.Recv(); ok2 {
-					e.encodeValue(rv2, fn, true)
-				} else {
-					ee.EncodeNil() // WE HAVE TO DO SOMETHING, so nil if nothing received.
-				}
-			} else {
-				e.encodeValue(rv.Index(j), fn, true)
-			}
+			e.encodeValue(rv.Index(j), fn, true)
 		}
 	}
 
@@ -835,7 +903,7 @@ type encWriterSwitch struct {
 	isas bool // whether e.as != nil
 }
 
-// // TODO: Uncomment after mid-stack inlining enabled in go 1.10
+// // TODO: Uncomment after mid-stack inlining enabled in go 1.11
 
 // func (z *encWriterSwitch) writeb(s []byte) {
 // 	if z.wx {
@@ -995,9 +1063,12 @@ func (e *Encoder) ResetBytes(out *[]byte) {
 // Encode writes an object into a stream.
 //
 // Encoding can be configured via the struct tag for the fields.
-// The "codec" key in struct field's tag value is the key name,
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in the struct field's tag value is the key name,
 // followed by an optional comma and options.
-// Note that the "json" key is used in the absence of the "codec" key.
 //
 // To set an option on all fields (e.g. omitempty on all fields), you
 // can create a field called _struct, and set flags on it. The options
@@ -1073,8 +1144,7 @@ func (e *Encoder) ResetBytes(out *[]byte) {
 // Some formats support symbols (e.g. binc) and will properly encode the string
 // only once in the stream, and use a tag to refer to it thereafter.
 func (e *Encoder) Encode(v interface{}) (err error) {
-	defer panicToErrs2(e, &e.err, &err)
-	defer e.alwaysAtEnd()
+	defer e.deferred(&err)
 	e.MustEncode(v)
 	return
 }
@@ -1091,6 +1161,16 @@ func (e *Encoder) MustEncode(v interface{}) {
 	e.alwaysAtEnd()
 }
 
+func (e *Encoder) deferred(err1 *error) {
+	e.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(e, x, err1)
+			panicValToErr(e, x, &e.err)
+		}
+	}
+}
+
 // func (e *Encoder) alwaysAtEnd() {
 // 	e.codecFnPooler.alwaysAtEnd()
 // }
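The rewritten Encode documentation above spells out the struct-tag lookup: the "codec" key is consulted first, then "json". An illustrative type following that convention (the type and field names are made up for this sketch, not taken from the patch):

type person struct {
	Name  string `codec:"name"`
	Email string `json:"email,omitempty"` // honored because the field has no "codec" key

	_struct bool `codec:",omitempty"` // sets omitempty on all fields, as described above
}

Only the tag key differs between the two forms; the value syntax (key name, comma, options) is the same.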
diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go
index 799bc76e..240ba9f8 100644
--- a/vendor/github.com/ugorji/go/codec/gen.generated.go
+++ b/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -64,13 +64,14 @@ if {{var "l"}} == 0 {
 	} else if len({{var "v"}}) != 0 {
 		{{var "v"}} = {{var "v"}}[:0]
 		{{var "c"}} = true
-	} {{end}} {{if isChan }}if {{var "v"}} == nil {
+	} {{else if isChan }}if {{var "v"}} == nil {
 		{{var "v"}} = make({{ .CTyp }}, 0)
 		{{var "c"}} = true
 	} {{end}}
 } else {
 	{{var "hl"}} := {{var "l"}} > 0
-	var {{var "rl"}} int; _ =  {{var "rl"}}
+	var {{var "rl"}} int
+	_ =  {{var "rl"}}
 	{{if isSlice }} if {{var "hl"}} {
 	if {{var "l"}} > cap({{var "v"}}) {
 		{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
@@ -88,25 +89,26 @@ if {{var "l"}} == 0 {
 	var {{var "j"}} int 
     // var {{var "dn"}} bool 
 	for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
-		{{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
+		{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
 			if {{var "hl"}} {
 				{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
 			} else {
-				{{var "rl"}} = 8
+				{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
 			}
-			{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+			{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
 			{{var "c"}} = true 
 		}{{end}}
 		{{var "h"}}.ElemContainerState({{var "j"}})
-        {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}
-        {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
+        {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
+        {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
 		{{ decLineVar $x }}
 		{{var "v"}} <- {{ $x }}
-        {{else}}
-		// if indefinite, etc, then expand the slice if necessary
+        // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
+        {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
 		var {{var "db"}} bool
 		if {{var "j"}} >= len({{var "v"}}) {
-			{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true
+			{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+			{{var "c"}} = true
 			{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
 			{{end}}
 		}
@@ -129,5 +131,34 @@ if {{var "l"}} == 0 {
 {{if not isArray }}if {{var "c"}} { 
 	*{{ .Varname }} = {{var "v"}}
 }{{end}}
+`
 
+const genEncChanTmpl = `
+{{.Label}}:
+switch timeout{{.Sfx}} :=  z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+		default:
+			break {{.Label}}
+		}
+	}
+case timeout{{.Sfx}} > 0: // consume until timeout
+	tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+		case <-tt{{.Sfx}}.C:
+			// close(tt.C)
+			break {{.Label}}
+		}
+	}
+default: // consume until close
+	for b{{.Sfx}} := range {{.Chan}} {
+		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+	}
+}
 `
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
index d1dcdab3..b4c4031f 100644
--- a/vendor/github.com/ugorji/go/codec/gen.go
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -542,7 +542,6 @@ func (x *genRunner) selfer(encode bool) {
 	if encode {
 		x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
 		x.genRequiredMethodVars(true)
-		// x.enc(genTopLevelVarName, t)
 		x.encVar(genTopLevelVarName, t)
 	} else {
 		x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
@@ -649,7 +648,7 @@ func (x *genRunner) encVar(varname string, t reflect.Type) {
 	case reflect.Ptr:
 		telem := t.Elem()
 		tek := telem.Kind()
-		if tek == reflect.Array || (tek == reflect.Struct && t != timeTyp) {
+		if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) {
 			x.enc(varname, genNonPtr(t))
 			break
 		}
@@ -1083,28 +1082,49 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 }
 
 func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+	elemBytes := t.Elem().Kind() == reflect.Uint8
 	if t.AssignableTo(uint8SliceTyp) {
 		x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, varname)
 		return
 	}
-	if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
+	if t.Kind() == reflect.Array && elemBytes {
 		x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, ((*[%d]byte)(%s))[:])", x.xs, t.Len(), varname)
 		return
 	}
 	i := x.varsfx()
-	g := genTempVarPfx
-	x.line("r.WriteArrayStart(len(" + varname + "))")
 	if t.Kind() == reflect.Chan {
-		x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
-		x.line("r.WriteArrayElem()")
-		x.linef("%sv%s := <-%s", g, i, varname)
-	} else {
-		x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
-		x.line("r.WriteArrayElem()")
+		type ts struct {
+			Label, Chan, Slice, Sfx string
+		}
+		tm, err := template.New("").Parse(genEncChanTmpl)
+		if err != nil {
+			panic(err)
+		}
+		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+		x.linef("var sch%s []%s", i, x.genTypeName(t.Elem()))
+		err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i})
+		if err != nil {
+			panic(err)
+		}
+		// x.linef("%s = sch%s", varname, i)
+		if elemBytes {
+			x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, "sch"+i)
+			x.line("}")
+			return
+		}
+		varname = "sch" + i
 	}
+
+	x.line("r.WriteArrayStart(len(" + varname + "))")
+	x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+	x.line("r.WriteArrayElem()")
+
 	x.encVar(genTempVarPfx+"v"+i, t.Elem())
 	x.line("}")
 	x.line("r.WriteArrayEnd()")
+	if t.Kind() == reflect.Chan {
+		x.line("}")
+	}
 }
 
 func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go
index e6994a91..bd29895b 100644
--- a/vendor/github.com/ugorji/go/codec/helper.go
+++ b/vendor/github.com/ugorji/go/codec/helper.go
@@ -391,6 +391,10 @@ var immutableKindsSet = [32]bool{
 // Any type which implements Selfer will be able to encode or decode itself.
 // Consequently, during (en|de)code, this takes precedence over
 // (text|binary)(M|Unm)arshal or extension support.
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check whether the next set of bytes
+// represent nil, and if so, we just set the value to nil.
 type Selfer interface {
 	CodecEncodeSelf(*Encoder)
 	CodecDecodeSelf(*Decoder)
@@ -1543,6 +1547,8 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool)
 // }
 
 func panicToErr(h errstrDecorator, err *error) {
+	// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
+	// else it seems the recover is not fully handled
 	if recoverPanicToErr {
 		if x := recover(); x != nil {
 			// fmt.Printf("panic'ing with: %v\n", x)
@@ -1552,15 +1558,6 @@ func panicToErr(h errstrDecorator, err *error) {
 	}
 }
 
-func panicToErrs2(h errstrDecorator, err1, err2 *error) {
-	if recoverPanicToErr {
-		if x := recover(); x != nil {
-			panicValToErr(h, x, err1)
-			panicValToErr(h, x, err2)
-		}
-	}
-}
-
 func panicValToErr(h errstrDecorator, v interface{}, err *error) {
 	switch xerr := v.(type) {
 	case nil:
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
index 21aa4db7..e3df60ab 100644
--- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -157,7 +157,8 @@ func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) b
 		}
 		return isnil
 	case reflect.Ptr:
-		isnil := urv.ptr == nil
+		// isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type)
+		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
 		if deref {
 			if isnil {
 				return true
@@ -175,25 +176,31 @@ func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) b
 
 // --------------------------
 
+// atomicTypeInfoSlice contains length and pointer to the array for a slice.
+// It is expected to be 2 words.
+//
+// Previously, we atomically loaded and stored the length and array pointer separately,
+// which could lead to some races.
+// We now just atomically store and load the pointer to the value directly.
+
 type atomicTypeInfoSlice struct { // expected to be 2 words
+	l int            // length of the data array (must be first in struct, for 64-bit alignment necessary for 386)
 	v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
-	l int64          // length of the data array
 }
 
 func (x *atomicTypeInfoSlice) load() []rtid2ti {
-	l := int(atomic.LoadInt64(&x.l))
-	if l == 0 {
+	xp := unsafe.Pointer(x)
+	x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
+	if x2.l == 0 {
 		return nil
 	}
-	return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: atomic.LoadPointer(&x.v), Len: l, Cap: l}))
-	// return (*[]rtid2ti)(atomic.LoadPointer(&x.v))
+	return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
 }
 
 func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
 	s := (*unsafeSlice)(unsafe.Pointer(&p))
-	atomic.StorePointer(&x.v, s.Data)
-	atomic.StoreInt64(&x.l, int64(s.Len))
-	// atomic.StorePointer(&x.v, unsafe.Pointer(p))
+	xp := unsafe.Pointer(x)
+	atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
 }
 
 // --------------------------
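The comment added above describes the move from two separately loaded atomic words to one atomically swapped header. A generic sketch of that publish-a-fresh-header pattern (type and variable names are illustrative; this is not the patch's exact implementation):

package main

import (
	"sync/atomic"
	"unsafe"
)

type header struct{ data []int }

// cur is always nil or a *header; it is replaced wholesale, never mutated in place.
var cur unsafe.Pointer

func load() []int {
	p := (*header)(atomic.LoadPointer(&cur))
	if p == nil {
		return nil
	}
	return p.data
}

func store(s []int) {
	// Publishing a single pointer means readers can never observe a
	// half-updated length/pointer pair, which is the race the old layout allowed.
	atomic.StorePointer(&cur, unsafe.Pointer(&header{data: s}))
}

func main() {
	store([]int{1, 2, 3})
	_ = load()
}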
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
index ec7b0d65..bdd19966 100644
--- a/vendor/github.com/ugorji/go/codec/json.go
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -606,7 +606,7 @@ func (d *jsonDecDriver) ReadMapStart() int {
 	}
 	const xc uint8 = '{'
 	if d.tok != xc {
-		d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+		d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok)
 	}
 	d.tok = 0
 	d.c = containerMapStart
@@ -619,7 +619,7 @@ func (d *jsonDecDriver) ReadArrayStart() int {
 	}
 	const xc uint8 = '['
 	if d.tok != xc {
-		d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+		d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok)
 	}
 	d.tok = 0
 	d.c = containerArrayStart
@@ -638,9 +638,10 @@ func (d *jsonDecDriver) CheckBreak() bool {
 // - ReadArrayElem would become:
 //   readContainerState(containerArrayElem, ',', d.c != containerArrayStart)
 //
-// However, until mid-stack inlining (go 1.10?) comes, supporting inlining of
-// oneliners, we explicitly write them all 5 out to elide the extra func call.
-// TODO: For Go 1.10, if inlined, consider consolidating these.
+// However, until mid-stack inlining comes in go1.11, which supports inlining of
+// one-liners, we explicitly write all 5 of them out to elide the extra func call.
+//
+// TODO: For Go 1.11, if inlined, consider consolidating these.
 
 func (d *jsonDecDriver) ReadArrayElem() {
 	const xc uint8 = ','
@@ -649,7 +650,7 @@ func (d *jsonDecDriver) ReadArrayElem() {
 	}
 	if d.c != containerArrayStart {
 		if d.tok != xc {
-			d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+			d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok)
 		}
 		d.tok = 0
 	}
@@ -662,7 +663,7 @@ func (d *jsonDecDriver) ReadArrayEnd() {
 		d.tok = d.r.skip(&jsonCharWhitespaceSet)
 	}
 	if d.tok != xc {
-		d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+		d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok)
 	}
 	d.tok = 0
 	d.c = containerArrayEnd
@@ -675,7 +676,7 @@ func (d *jsonDecDriver) ReadMapElemKey() {
 	}
 	if d.c != containerMapStart {
 		if d.tok != xc {
-			d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+			d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok)
 		}
 		d.tok = 0
 	}
@@ -688,7 +689,7 @@ func (d *jsonDecDriver) ReadMapElemValue() {
 		d.tok = d.r.skip(&jsonCharWhitespaceSet)
 	}
 	if d.tok != xc {
-		d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+		d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok)
 	}
 	d.tok = 0
 	d.c = containerMapValue
@@ -700,7 +701,7 @@ func (d *jsonDecDriver) ReadMapEnd() {
 		d.tok = d.r.skip(&jsonCharWhitespaceSet)
 	}
 	if d.tok != xc {
-		d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+		d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok)
 	}
 	d.tok = 0
 	d.c = containerMapEnd
@@ -1267,7 +1268,7 @@ type JsonHandle struct {
 	// If not configured, raw bytes are encoded to/from base64 text.
 	RawBytesExt InterfaceExt
 
-	_ [3]uint64 // padding
+	_ [2]uint64 // padding
 }
 
 // Name returns the name of the handle: json
diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go
index 31265cc6..3271579a 100644
--- a/vendor/github.com/ugorji/go/codec/msgpack.go
+++ b/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -82,6 +82,86 @@ const (
 var mpTimeExtTag int8 = -1
 var mpTimeExtTagU = uint8(mpTimeExtTag)
 
+// var mpdesc = map[byte]string{
+// 	mpPosFixNumMin: "PosFixNumMin",
+// 	mpPosFixNumMax: "PosFixNumMax",
+// 	mpFixMapMin:    "FixMapMin",
+// 	mpFixMapMax:    "FixMapMax",
+// 	mpFixArrayMin:  "FixArrayMin",
+// 	mpFixArrayMax:  "FixArrayMax",
+// 	mpFixStrMin:    "FixStrMin",
+// 	mpFixStrMax:    "FixStrMax",
+// 	mpNil:          "Nil",
+// 	mpFalse:        "False",
+// 	mpTrue:         "True",
+// 	mpFloat:        "Float",
+// 	mpDouble:       "Double",
+// 	mpUint8:        "Uint8",
+// 	mpUint16:       "Uint16",
+// 	mpUint32:       "Uint32",
+// 	mpUint64:       "Uint64",
+// 	mpInt8:         "Int8",
+// 	mpInt16:        "Int16",
+// 	mpInt32:        "Int32",
+// 	mpInt64:        "Int64",
+// 	mpBin8:         "Bin8",
+// 	mpBin16:        "Bin16",
+// 	mpBin32:        "Bin32",
+// 	mpExt8:         "Ext8",
+// 	mpExt16:        "Ext16",
+// 	mpExt32:        "Ext32",
+// 	mpFixExt1:      "FixExt1",
+// 	mpFixExt2:      "FixExt2",
+// 	mpFixExt4:      "FixExt4",
+// 	mpFixExt8:      "FixExt8",
+// 	mpFixExt16:     "FixExt16",
+// 	mpStr8:         "Str8",
+// 	mpStr16:        "Str16",
+// 	mpStr32:        "Str32",
+// 	mpArray16:      "Array16",
+// 	mpArray32:      "Array32",
+// 	mpMap16:        "Map16",
+// 	mpMap32:        "Map32",
+// 	mpNegFixNumMin: "NegFixNumMin",
+// 	mpNegFixNumMax: "NegFixNumMax",
+// }
+
+func mpdesc(bd byte) string {
+	switch bd {
+	case mpNil:
+		return "nil"
+	case mpFalse:
+		return "false"
+	case mpTrue:
+		return "true"
+	case mpFloat, mpDouble:
+		return "float"
+	case mpUint8, mpUint16, mpUint32, mpUint64:
+		return "uint"
+	case mpInt8, mpInt16, mpInt32, mpInt64:
+		return "int"
+	default:
+		switch {
+		case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+			return "int"
+		case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+			return "int"
+		case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+			return "string|bytes"
+		case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+			return "bytes"
+		case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+			return "array"
+		case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+			return "map"
+		case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+			return "ext"
+		default:
+			return "unknown"
+		}
+	}
+}
+
 // MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
 // that the backend RPC service takes multiple arguments, which have been arranged
 // in sequence in the slice.
@@ -442,7 +522,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
 				n.l = d.r.readx(clen)
 			}
 		default:
-			d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+			d.d.errorf("cannot infer value: %s: 0x%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
 		}
 	}
 	if !decodeFurther {
@@ -484,7 +564,7 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) {
 		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
 			i = int64(int8(d.bd))
 		default:
-			d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+			d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
 			return
 		}
 	}
@@ -510,28 +590,28 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
 		if i := int64(int8(d.r.readn1())); i >= 0 {
 			ui = uint64(i)
 		} else {
-			d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
 			return
 		}
 	case mpInt16:
 		if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
 			ui = uint64(i)
 		} else {
-			d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
 			return
 		}
 	case mpInt32:
 		if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
 			ui = uint64(i)
 		} else {
-			d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
 			return
 		}
 	case mpInt64:
 		if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
 			ui = uint64(i)
 		} else {
-			d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
 			return
 		}
 	default:
@@ -539,10 +619,10 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
 		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
 			ui = uint64(d.bd)
 		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
-			d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+			d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd))
 			return
 		default:
-			d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+			d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
 			return
 		}
 	}
@@ -576,7 +656,7 @@ func (d *msgpackDecDriver) DecodeBool() (b bool) {
 	} else if d.bd == mpTrue || d.bd == 1 {
 		b = true
 	} else {
-		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+		d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
 		return
 	}
 	d.bdRead = false
@@ -699,7 +779,7 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
 	} else if (ct.bFixMin & bd) == ct.bFixMin {
 		clen = int(ct.bFixMin ^ bd)
 	} else {
-		d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+		d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
 		return
 	}
 	d.bdRead = false
@@ -800,7 +880,7 @@ func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
 
 func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
 	if xtag > 0xff {
-		d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
 		return
 	}
 	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
@@ -829,7 +909,7 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
 		clen := d.readExtLen()
 		xtag = d.r.readn1()
 		if verifyTag && xtag != tag {
-			d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+			d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag)
 			return
 		}
 		xbs = d.r.readx(clen)
@@ -865,7 +945,7 @@ type MsgpackHandle struct {
 	binaryEncodingType
 	noElemSeparators
 
-	_ [1]uint64 // padding
+	// _ [1]uint64 // padding
 }
 
 // Name returns the name of the handle: msgpack
@@ -970,13 +1050,13 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint
 
 	var b = ba[0]
 	if b != fia {
-		err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
+		err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
 	} else {
 		err = c.read(&b)
 		if err == nil {
 			if b != expectTypeByte {
-				err = fmt.Errorf("Unexpected byte descriptor. Expecting %v; Received %v",
-					expectTypeByte, b)
+				err = fmt.Errorf("%s - expecting %v but got %x/%s",
+					msgBadDesc, expectTypeByte, b, mpdesc(b))
 			} else {
 				err = c.read(msgid)
 				if err == nil {
diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go
deleted file mode 100644
index 424bd49d..00000000
--- a/vendor/github.com/ugorji/go/codec/noop.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-// +build ignore
-
-package codec
-
-import (
-	"math/rand"
-	"time"
-)
-
-// NoopHandle returns a no-op handle. It basically does nothing.
-// It is only useful for benchmarking, as it gives an idea of the
-// overhead from the codec framework.
-//
-// LIBRARY USERS: *** DO NOT USE ***
-func NoopHandle(slen int) *noopHandle {
-	h := noopHandle{}
-	h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
-	h.B = make([][]byte, slen)
-	h.S = make([]string, slen)
-	for i := 0; i < len(h.S); i++ {
-		b := make([]byte, i+1)
-		for j := 0; j < len(b); j++ {
-			b[j] = 'a' + byte(i)
-		}
-		h.B[i] = b
-		h.S[i] = string(b)
-	}
-	return &h
-}
-
-// noopHandle does nothing.
-// It is used to simulate the overhead of the codec framework.
-type noopHandle struct {
-	BasicHandle
-	binaryEncodingType
-	noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
-}
-
-type noopDrv struct {
-	d    *Decoder
-	e    *Encoder
-	i    int
-	S    []string
-	B    [][]byte
-	mks  []bool    // stack. if map (true), else if array (false)
-	mk   bool      // top of stack. what container are we on? map or array?
-	ct   valueType // last response for IsContainerType.
-	cb   int       // counter for ContainerType
-	rand *rand.Rand
-}
-
-func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
-func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
-
-func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
-func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
-
-func (h *noopDrv) reset()       {}
-func (h *noopDrv) uncacheRead() {}
-
-// --- encDriver
-
-// stack functions (for map and array)
-func (h *noopDrv) start(b bool) {
-	// println("start", len(h.mks)+1)
-	h.mks = append(h.mks, b)
-	h.mk = b
-}
-func (h *noopDrv) end() {
-	// println("end: ", len(h.mks)-1)
-	h.mks = h.mks[:len(h.mks)-1]
-	if len(h.mks) > 0 {
-		h.mk = h.mks[len(h.mks)-1]
-	} else {
-		h.mk = false
-	}
-}
-
-func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
-func (h *noopDrv) EncodeNil()                              {}
-func (h *noopDrv) EncodeInt(i int64)                       {}
-func (h *noopDrv) EncodeUint(i uint64)                     {}
-func (h *noopDrv) EncodeBool(b bool)                       {}
-func (h *noopDrv) EncodeFloat32(f float32)                 {}
-func (h *noopDrv) EncodeFloat64(f float64)                 {}
-func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder)     {}
-func (h *noopDrv) EncodeArrayStart(length int)             { h.start(true) }
-func (h *noopDrv) EncodeMapStart(length int)               { h.start(false) }
-func (h *noopDrv) EncodeEnd()                              { h.end() }
-
-func (h *noopDrv) EncodeString(c charEncoding, v string) {}
-
-// func (h *noopDrv) EncodeSymbol(v string)                      {}
-func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
-
-func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
-
-// ---- decDriver
-func (h *noopDrv) initReadNext()                              {}
-func (h *noopDrv) CheckBreak() bool                           { return false }
-func (h *noopDrv) IsBuiltinType(rt uintptr) bool              { return false }
-func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{})    {}
-func (h *noopDrv) DecodeInt(bitsize uint8) (i int64)          { return int64(h.m(15)) }
-func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64)       { return uint64(h.m(35)) }
-func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
-func (h *noopDrv) DecodeBool() (b bool)                       { return h.m(2) == 0 }
-func (h *noopDrv) DecodeString() (s string)                   { return h.S[h.m(8)] }
-func (h *noopDrv) DecodeStringAsBytes() []byte                { return h.DecodeBytes(nil, true) }
-
-func (h *noopDrv) DecodeBytes(bs []byte, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
-
-func (h *noopDrv) ReadEnd() { h.end() }
-
-// toggle map/slice
-func (h *noopDrv) ReadMapStart() int   { h.start(true); return h.m(10) }
-func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
-
-func (h *noopDrv) ContainerType() (vt valueType) {
-	// return h.m(2) == 0
-	// handle kStruct, which will bomb is it calls this and
-	// doesn't get back a map or array.
-	// consequently, if the return value is not map or array,
-	// reset it to one of them based on h.m(7) % 2
-	// for kstruct: at least one out of every 2 times,
-	// return one of valueTypeMap or Array (else kstruct bombs)
-	// however, every 10th time it is called, we just return something else.
-	var vals = [...]valueType{valueTypeArray, valueTypeMap}
-	//  ------------ TAKE ------------
-	// if h.cb%2 == 0 {
-	// 	if h.ct == valueTypeMap || h.ct == valueTypeArray {
-	// 	} else {
-	// 		h.ct = vals[h.m(2)]
-	// 	}
-	// } else if h.cb%5 == 0 {
-	// 	h.ct = valueType(h.m(8))
-	// } else {
-	// 	h.ct = vals[h.m(2)]
-	// }
-	//  ------------ TAKE ------------
-	// if h.cb%16 == 0 {
-	// 	h.ct = valueType(h.cb % 8)
-	// } else {
-	// 	h.ct = vals[h.cb%2]
-	// }
-	h.ct = vals[h.cb%2]
-	h.cb++
-	return h.ct
-
-	// if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
-	// 	return h.ct
-	// }
-	// return valueTypeUnset
-	// TODO: may need to tweak this so it works.
-	// if h.ct == valueTypeMap && vt == valueTypeArray ||
-	// 	h.ct == valueTypeArray && vt == valueTypeMap {
-	// 	h.cb = !h.cb
-	// 	h.ct = vt
-	// 	return h.cb
-	// }
-	// // go in a loop and check it.
-	// h.ct = vt
-	// h.cb = h.m(7) == 0
-	// return h.cb
-}
-func (h *noopDrv) TryDecodeAsNil() bool {
-	if h.mk {
-		return false
-	} else {
-		return h.m(8) == 0
-	}
-}
-func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
-	return 0
-}
-
-func (h *noopDrv) DecodeNaked() {
-	// use h.r (random) not h.m() because h.m() could cause the same value to be given.
-	var sk int
-	if h.mk {
-		// if mapkey, do not support values of nil OR bytes, array, map or rawext
-		sk = h.r(7) + 1
-	} else {
-		sk = h.r(12)
-	}
-	n := &h.d.n
-	switch sk {
-	case 0:
-		n.v = valueTypeNil
-	case 1:
-		n.v, n.b = valueTypeBool, false
-	case 2:
-		n.v, n.b = valueTypeBool, true
-	case 3:
-		n.v, n.i = valueTypeInt, h.DecodeInt(64)
-	case 4:
-		n.v, n.u = valueTypeUint, h.DecodeUint(64)
-	case 5:
-		n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
-	case 6:
-		n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
-	case 7:
-		n.v, n.s = valueTypeString, h.DecodeString()
-	case 8:
-		n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
-	case 9:
-		n.v = valueTypeArray
-	case 10:
-		n.v = valueTypeMap
-	default:
-		n.v = valueTypeExt
-		n.u = h.DecodeUint(64)
-		n.l = h.B[h.m(len(h.B))]
-	}
-	h.ct = n.v
-	return
-}
diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go
index 7c3069e7..9fb3c014 100644
--- a/vendor/github.com/ugorji/go/codec/rpc.go
+++ b/vendor/github.com/ugorji/go/codec/rpc.go
@@ -104,7 +104,7 @@ func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
 		if err == nil {
 			err = c.f.Flush()
 		} else {
-			c.f.Flush()
+			_ = c.f.Flush() // swallow flush error, so we maintain prior error on write
 		}
 	}
 	return
@@ -144,15 +144,6 @@ func (c *rpcCodec) Close() error {
 	}
 	c.clsmu.Lock()
 	c.cls = true
-	// var fErr error
-	// if c.f != nil {
-	// 	fErr = c.f.Flush()
-	// }
-	// _ = fErr
-	// c.clsErr = c.c.Close()
-	// if c.clsErr == nil && fErr != nil {
-	// 	c.clsErr = fErr
-	// }
 	c.clsErr = c.c.Close()
 	c.clsmu.Unlock()
 	return c.clsErr
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
index a839d810..f1e181ef 100644
--- a/vendor/github.com/ugorji/go/codec/simple.go
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -290,7 +290,7 @@ func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
 		ui = uint64(bigen.Uint64(d.r.readx(8)))
 		neg = true
 	default:
-		d.d.errorf("Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+		d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
 		return
 	}
 	// don't do this check, because callers may only want the unsigned value.
@@ -314,7 +314,7 @@ func (d *simpleDecDriver) DecodeInt64() (i int64) {
 func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
 	ui, neg := d.decCheckInteger()
 	if neg {
-		d.d.errorf("Assigning negative signed value to unsigned type")
+		d.d.errorf("assigning negative signed value to unsigned type")
 		return
 	}
 	d.bdRead = false
@@ -333,7 +333,7 @@ func (d *simpleDecDriver) DecodeFloat64() (f float64) {
 		if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
 			f = float64(d.DecodeInt64())
 		} else {
-			d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+			d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd)
 			return
 		}
 	}
@@ -350,7 +350,7 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
 		b = true
 	} else if d.bd == simpleVdFalse {
 	} else {
-		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+		d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
 		return
 	}
 	d.bdRead = false
@@ -418,7 +418,7 @@ func (d *simpleDecDriver) decLen() int {
 		}
 		return int(ui)
 	}
-	d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
+	d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
 	return -1
 }
 
@@ -482,7 +482,7 @@ func (d *simpleDecDriver) DecodeTime() (t time.Time) {
 
 func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
 	if xtag > 0xff {
-		d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
 		return
 	}
 	realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
@@ -506,7 +506,7 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
 		l := d.decLen()
 		xtag = d.r.readn1()
 		if verifyTag && xtag != tag {
-			d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+			d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
 			return
 		}
 		xbs = d.r.readx(l)
@@ -514,7 +514,7 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
 		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
 		xbs = d.DecodeBytes(nil, true)
 	default:
-		d.d.errorf("Invalid descriptor - expecting extensions/bytearray, got: 0x%x", d.bd)
+		d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
 		return
 	}
 	d.bdRead = false
@@ -579,7 +579,7 @@ func (d *simpleDecDriver) DecodeNaked() {
 		n.v = valueTypeMap
 		decodeFurther = true
 	default:
-		d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+		d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
 	}
 
 	if !decodeFurther {
@@ -616,7 +616,7 @@ type SimpleHandle struct {
 	// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
 	EncZeroValuesAsNil bool
 
-	_ [1]uint64 // padding
+	// _ [1]uint64 // padding
 }
 
 // Name returns the name of the handle: simple
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
index 4f26b49b..a57771a1 100644
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -171,9 +171,16 @@ func Verify(publicKey PublicKey, message, sig []byte) bool {
 	edwards25519.ScReduce(&hReduced, &digest)
 
 	var R edwards25519.ProjectiveGroupElement
-	var b [32]byte
-	copy(b[:], sig[32:])
-	edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
+	var s [32]byte
+	copy(s[:], sig[32:])
+
+	// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+	// the range [0, order) in order to prevent signature malleability.
+	if !edwards25519.ScMinimal(&s) {
+		return false
+	}
+
+	edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
 
 	var checkR [32]byte
 	R.ToBytes(&checkR)
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
index 5f8b9947..fd03c252 100644
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -4,6 +4,8 @@
 
 package edwards25519
 
+import "encoding/binary"
+
 // This code is a port of the public domain, “ref10” implementation of ed25519
 // from SUPERCOP.
 
@@ -1769,3 +1771,23 @@ func ScReduce(out *[32]byte, s *[64]byte) {
 	out[30] = byte(s11 >> 9)
 	out[31] = byte(s11 >> 17)
 }
+
+// order is the order of Curve25519 in little-endian form.
+var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
+
+// ScMinimal returns true if the given scalar is less than the order of the
+// curve.
+func ScMinimal(scalar *[32]byte) bool {
+	for i := 3; ; i-- {
+		v := binary.LittleEndian.Uint64(scalar[i*8:])
+		if v > order[i] {
+			return false
+		} else if v < order[i] {
+			break
+		} else if i == 0 {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
index 7bc8e6c4..e0edc02f 100644
--- a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
@@ -8,6 +8,10 @@
 
 package ripemd160
 
+import (
+	"math/bits"
+)
+
 // work buffer indices and roll amounts for one line
 var _n = [80]uint{
 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -59,16 +63,16 @@ func _Block(md *digest, p []byte) int {
 		i := 0
 		for i < 16 {
 			alpha = a + (b ^ c ^ d) + x[_n[i]]
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
+			s := int(_r[i])
+			alpha = bits.RotateLeft32(alpha, s) + e
+			beta = bits.RotateLeft32(c, 10)
 			a, b, c, d, e = e, alpha, b, beta, d
 
 			// parallel line
 			alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
+			s = int(r_[i])
+			alpha = bits.RotateLeft32(alpha, s) + ee
+			beta = bits.RotateLeft32(cc, 10)
 			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
 
 			i++
@@ -77,16 +81,16 @@ func _Block(md *digest, p []byte) int {
 		// round 2
 		for i < 32 {
 			alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
+			s := int(_r[i])
+			alpha = bits.RotateLeft32(alpha, s) + e
+			beta = bits.RotateLeft32(c, 10)
 			a, b, c, d, e = e, alpha, b, beta, d
 
 			// parallel line
 			alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
+			s = int(r_[i])
+			alpha = bits.RotateLeft32(alpha, s) + ee
+			beta = bits.RotateLeft32(cc, 10)
 			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
 
 			i++
@@ -95,16 +99,16 @@ func _Block(md *digest, p []byte) int {
 		// round 3
 		for i < 48 {
 			alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
+			s := int(_r[i])
+			alpha = bits.RotateLeft32(alpha, s) + e
+			beta = bits.RotateLeft32(c, 10)
 			a, b, c, d, e = e, alpha, b, beta, d
 
 			// parallel line
 			alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
+			s = int(r_[i])
+			alpha = bits.RotateLeft32(alpha, s) + ee
+			beta = bits.RotateLeft32(cc, 10)
 			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
 
 			i++
@@ -113,16 +117,16 @@ func _Block(md *digest, p []byte) int {
 		// round 4
 		for i < 64 {
 			alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
+			s := int(_r[i])
+			alpha = bits.RotateLeft32(alpha, s) + e
+			beta = bits.RotateLeft32(c, 10)
 			a, b, c, d, e = e, alpha, b, beta, d
 
 			// parallel line
 			alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
+			s = int(r_[i])
+			alpha = bits.RotateLeft32(alpha, s) + ee
+			beta = bits.RotateLeft32(cc, 10)
 			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
 
 			i++
@@ -131,16 +135,16 @@ func _Block(md *digest, p []byte) int {
 		// round 5
 		for i < 80 {
 			alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
+			s := int(_r[i])
+			alpha = bits.RotateLeft32(alpha, s) + e
+			beta = bits.RotateLeft32(c, 10)
 			a, b, c, d, e = e, alpha, b, beta, d
 
 			// parallel line
 			alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
+			s = int(r_[i])
+			alpha = bits.RotateLeft32(alpha, s) + ee
+			beta = bits.RotateLeft32(cc, 10)
 			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
 
 			i++
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index 54726c2a..1565cf27 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -206,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte {
 }
 
 // appendHpackString appends s, as encoded in "String Literal"
-// representation, to dst and returns the the extended buffer.
+// representation, to dst and returns the extended buffer.
 //
 // s will be encoded in Huffman codes only when it produces strictly
 // shorter byte string.
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index d565f40e..71db28a8 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -312,7 +312,7 @@ func mustUint31(v int32) uint32 {
 }
 
 // bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 2616, section 4.4.
+// permits a body. See RFC 7230, section 3.3.
 func bodyAllowedForStatus(status int) bool {
 	switch {
 	case status >= 100 && status <= 199:
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 460ede03..39ed755a 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -406,7 +406,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 			// addresses during development.
 			//
 			// TODO: optionally enforce? Or enforce at the time we receive
-			// a new request, and verify the the ServerName matches the :authority?
+			// a new request, and verify the ServerName matches the :authority?
 			// But that precludes proxy situations, perhaps.
 			//
 			// So for now, do nothing here again.
@@ -2285,7 +2285,7 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) !=
 func (rws *responseWriterState) declareTrailer(k string) {
 	k = http.CanonicalHeaderKey(k)
 	if !ValidTrailerHeader(k) {
-		// Forbidden by RFC 2616 14.40.
+		// Forbidden by RFC 7230, section 4.1.2.
 		rws.conn.logf("ignoring invalid trailer %q", k)
 		return
 	}
@@ -2406,7 +2406,7 @@ const TrailerPrefix = "Trailer:"
 // after the header has already been flushed. Because the Go
 // ResponseWriter interface has no way to set Trailers (only the
 // Header), and because we didn't want to expand the ResponseWriter
-// interface, and because nobody used trailers, and because RFC 2616
+// interface, and because nobody used trailers, and because RFC 7230
 // says you SHOULD (but not must) predeclare any trailers in the
 // header, the official ResponseWriter rules said trailers in Go must
 // be predeclared, and then we reuse the same ResponseWriter.Header()
@@ -2790,7 +2790,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
 }
 
 // foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+// in RFC 7230 section 7 and calls fn for each non-empty element.
 func foreachHeaderElement(v string, fn func(string)) {
 	v = textproto.TrimString(v)
 	if v == "" {
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
index bb72a527..a46ee0ea 100644
--- a/vendor/golang.org/x/net/trace/trace.go
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -368,7 +368,11 @@ func New(family, title string) Trace {
 }
 
 func (tr *trace) Finish() {
-	tr.Elapsed = time.Now().Sub(tr.Start)
+	elapsed := time.Now().Sub(tr.Start)
+	tr.mu.Lock()
+	tr.Elapsed = elapsed
+	tr.mu.Unlock()
+
 	if DebugUseAfterFinish {
 		buf := make([]byte, 4<<10) // 4 KB should be enough
 		n := runtime.Stack(buf, false)
@@ -381,14 +385,17 @@ func (tr *trace) Finish() {
 	m.Remove(tr)
 
 	f := getFamily(tr.Family, true)
+	tr.mu.RLock() // protects tr fields in Cond.match calls
 	for _, b := range f.Buckets {
 		if b.Cond.match(tr) {
 			b.Add(tr)
 		}
 	}
+	tr.mu.RUnlock()
+
 	// Add a sample of elapsed time as microseconds to the family's timeseries
 	h := new(histogram)
-	h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+	h.addMeasurement(elapsed.Nanoseconds() / 1e3)
 	f.LatencyMu.Lock()
 	f.Latency.Add(h)
 	f.LatencyMu.Unlock()
@@ -684,25 +691,20 @@ type trace struct {
 	// Title is the title of this trace.
 	Title string
 
-	// Timing information.
-	Start   time.Time
-	Elapsed time.Duration // zero while active
-
-	// Trace information if non-zero.
-	traceID uint64
-	spanID  uint64
-
-	// Whether this trace resulted in an error.
-	IsError bool
+	// Start time of this trace.
+	Start time.Time
 
-	// Append-only sequence of events (modulo discards).
 	mu        sync.RWMutex
-	events    []event
+	events    []event // Append-only sequence of events (modulo discards).
 	maxEvents int
+	recycler  func(interface{})
+	IsError   bool          // Whether this trace resulted in an error.
+	Elapsed   time.Duration // Elapsed time for this trace, zero while active.
+	traceID   uint64        // Trace information if non-zero.
+	spanID    uint64
 
-	refs     int32 // how many buckets this is in
-	recycler func(interface{})
-	disc     discarded // scratch space to avoid allocation
+	refs int32     // how many buckets this is in
+	disc discarded // scratch space to avoid allocation
 
 	finishStack []byte // where finish was called, if DebugUseAfterFinish is set
 
@@ -714,14 +716,18 @@ func (tr *trace) reset() {
 	tr.Family = ""
 	tr.Title = ""
 	tr.Start = time.Time{}
+
+	tr.mu.Lock()
 	tr.Elapsed = 0
 	tr.traceID = 0
 	tr.spanID = 0
 	tr.IsError = false
 	tr.maxEvents = 0
 	tr.events = nil
-	tr.refs = 0
 	tr.recycler = nil
+	tr.mu.Unlock()
+
+	tr.refs = 0
 	tr.disc = 0
 	tr.finishStack = nil
 	for i := range tr.eventsBuf {
@@ -801,21 +807,31 @@ func (tr *trace) LazyPrintf(format string, a ...interface{}) {
 	tr.addEvent(&lazySprintf{format, a}, false, false)
 }
 
-func (tr *trace) SetError() { tr.IsError = true }
+func (tr *trace) SetError() {
+	tr.mu.Lock()
+	tr.IsError = true
+	tr.mu.Unlock()
+}
 
 func (tr *trace) SetRecycler(f func(interface{})) {
+	tr.mu.Lock()
 	tr.recycler = f
+	tr.mu.Unlock()
 }
 
 func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+	tr.mu.Lock()
 	tr.traceID, tr.spanID = traceID, spanID
+	tr.mu.Unlock()
 }
 
 func (tr *trace) SetMaxEvents(m int) {
+	tr.mu.Lock()
 	// Always keep at least three events: first, discarded count, last.
 	if len(tr.events) == 0 && m > 3 {
 		tr.maxEvents = m
 	}
+	tr.mu.Unlock()
 }
 
 func (tr *trace) ref() {
@@ -824,6 +840,7 @@ func (tr *trace) ref() {
 
 func (tr *trace) unref() {
 	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
 		if tr.recycler != nil {
 			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
 			go func(f func(interface{}), es []event) {
@@ -834,6 +851,7 @@ func (tr *trace) unref() {
 				}
 			}(tr.recycler, tr.events)
 		}
+		tr.mu.RUnlock()
 
 		freeTrace(tr)
 	}
@@ -844,7 +862,10 @@ func (tr *trace) When() string {
 }
 
 func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
 	t := tr.Elapsed
+	tr.mu.RUnlock()
+
 	if t == 0 {
 		// Active trace.
 		t = time.Since(tr.Start)
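
The trace.go changes above all serve one goal: Elapsed, IsError, traceID, spanID, maxEvents and recycler now live behind tr.mu, so Finish and the setters can run concurrently with readers such as the /debug/requests handler without a data race. A minimal sketch of the same pattern, using illustrative names rather than the vendored ones:

package main

import (
	"fmt"
	"sync"
	"time"
)

type span struct {
	start time.Time

	mu      sync.RWMutex
	elapsed time.Duration // zero while the span is still active
}

func (s *span) finish() {
	d := time.Since(s.start)
	s.mu.Lock() // writer side, mirrors trace.Finish
	s.elapsed = d
	s.mu.Unlock()
}

func (s *span) elapsedOrLive() time.Duration {
	s.mu.RLock() // reader side, mirrors trace.ElapsedTime
	d := s.elapsed
	s.mu.RUnlock()
	if d == 0 {
		d = time.Since(s.start)
	}
	return d
}

func main() {
	s := &span{start: time.Now()}
	go s.finish()
	fmt.Println(s.elapsedOrLive() >= 0) // safe to call while finish runs
}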
diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/fcntl.go
similarity index 74%
rename from vendor/golang.org/x/sys/unix/flock.go
rename to vendor/golang.org/x/sys/unix/fcntl.go
index 2994ce75..0c58c7e1 100644
--- a/vendor/golang.org/x/sys/unix/flock.go
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -12,6 +12,12 @@ import "unsafe"
 // systems by flock_linux_32bit.go to be SYS_FCNTL64.
 var fcntl64Syscall uintptr = SYS_FCNTL
 
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+	valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
+	return int(valptr), err
+}
+
 // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
 func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
 	_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
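
FcntlInt fills the gap between FcntlFlock, which takes a struct argument, and a raw Syscall for the many fcntl commands that take a plain integer. A hypothetical use, assuming fd is an already-open descriptor:

// Read fd's file-status flags via the new helper (illustrative only).
flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
_ = err // error handling elided in this sketch
nonblocking := flags&unix.O_NONBLOCK != 0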
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
similarity index 100%
rename from vendor/golang.org/x/sys/unix/flock_linux_32bit.go
rename to vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 857d2a42..ef35fce8 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -11,24 +11,27 @@
 // system, set $GOOS and $GOARCH to the desired system. For example, if
 // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
 // to freebsd and $GOARCH to arm.
+//
 // The primary use of this package is inside other packages that provide a more
 // portable interface to the system, such as "os", "time" and "net".  Use
 // those packages rather than this one if you can.
+//
 // For details of the functions and data types in this package consult
 // the manuals for the appropriate operating system.
+//
 // These calls return err == nil to indicate success; otherwise
 // err represents an operating system error describing the failure and
 // holds a value of type syscall.Errno.
 package unix // import "golang.org/x/sys/unix"
 
+import "strings"
+
 // ByteSliceFromString returns a NUL-terminated slice of bytes
 // containing the text of s. If s contains a NUL byte at any
 // location, it returns (nil, EINVAL).
 func ByteSliceFromString(s string) ([]byte, error) {
-	for i := 0; i < len(s); i++ {
-		if s[i] == 0 {
-			return nil, EINVAL
-		}
+	if strings.IndexByte(s, 0) != -1 {
+		return nil, EINVAL
 	}
 	a := make([]byte, len(s)+1)
 	copy(a, s)
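
Replacing the manual loop with strings.IndexByte keeps the behaviour of ByteSliceFromString identical while using the optimized byte search from the standard library: a NUL anywhere in s still yields EINVAL, otherwise the result is s plus a trailing NUL. A hypothetical demonstration of that contract:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	b, err := unix.ByteSliceFromString("/tmp/app.sock")
	fmt.Println(len(b), b[len(b)-1], err) // 14 0 <nil>

	_, err = unix.ByteSliceFromString("bad\x00name")
	fmt.Println(err) // invalid argument (EINVAL)
}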
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index d3903ede..53fb8518 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -311,47 +311,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 
 //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
 
-func GetsockoptByte(fd, level, opt int) (value byte, err error) {
-	var n byte
-	vallen := _Socklen(1)
-	err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
-	return n, err
-}
-
-func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
-	vallen := _Socklen(4)
-	err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
-	return value, err
-}
-
-func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
-	var value IPMreq
-	vallen := _Socklen(SizeofIPMreq)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
-func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
-	var value IPv6Mreq
-	vallen := _Socklen(SizeofIPv6Mreq)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
-func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
-	var value IPv6MTUInfo
-	vallen := _Socklen(SizeofIPv6MTUInfo)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
-func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
-	var value ICMPv6Filter
-	vallen := _Socklen(SizeofICMPv6Filter)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
 // GetsockoptString returns the string value of the socket option opt for the
 // socket associated with fd at the given socket level.
 func GetsockoptString(fd, level, opt int) (string, error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index b9598694..006e21f5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -330,6 +330,7 @@ func Uname(uname *Utsname) error {
 //sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
 //sys	Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 777860bf..b5072de2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -251,10 +251,12 @@ func Uname(uname *Utsname) error {
 //sys	Fchdir(fd int) (err error)
 //sys	Fchflags(fd int, flags int) (err error)
 //sys	Fchmod(fd int, mode uint32) (err error)
+//sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
 //sys	Fstatfs(fd int, stat *Statfs_t) (err error)
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index 89f2c3fc..ba9df4ac 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -12,7 +12,10 @@
 
 package unix
 
-import "unsafe"
+import (
+	"strings"
+	"unsafe"
+)
 
 // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
 type SockaddrDatalink struct {
@@ -134,14 +137,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error {
 // Derive extattr namespace and attribute name
 
 func xattrnamespace(fullattr string) (ns int, attr string, err error) {
-	s := -1
-	for idx, val := range fullattr {
-		if val == '.' {
-			s = idx
-			break
-		}
-	}
-
+	s := strings.IndexByte(fullattr, '.')
 	if s == -1 {
 		return -1, "", ENOATTR
 	}
@@ -482,6 +478,7 @@ func Uname(uname *Utsname) error {
 //sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
 //sys	Fstatfs(fd int, stat *Statfs_t) (err error)
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 76cf81f5..a24ba5fb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -782,19 +782,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	return anyToSockaddr(&rsa)
 }
 
-func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
-	vallen := _Socklen(4)
-	err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
-	return value, err
-}
-
-func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
-	var value IPMreq
-	vallen := _Socklen(SizeofIPMreq)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
 func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
 	var value IPMreqn
 	vallen := _Socklen(SizeofIPMreqn)
@@ -802,27 +789,6 @@ func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
 	return &value, err
 }
 
-func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
-	var value IPv6Mreq
-	vallen := _Socklen(SizeofIPv6Mreq)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
-func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
-	var value IPv6MTUInfo
-	vallen := _Socklen(SizeofIPv6MTUInfo)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
-func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
-	var value ICMPv6Filter
-	vallen := _Socklen(SizeofICMPv6Filter)
-	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
-	return &value, err
-}
-
 func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
 	var value Ucred
 	vallen := _Socklen(SizeofUcred)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index 9a8e6e41..a1e8a609 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -7,6 +7,7 @@
 package unix
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
 //sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
@@ -23,8 +24,11 @@ package unix
 //sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
 
 func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
-	return Pselect(nfd, r, w, e, &ts, nil)
+	var ts *Timespec
+	if timeout != nil {
+		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+	}
+	return Pselect(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go
new file mode 100644
index 00000000..df9c1237
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build gccgo
+// +build 386 arm
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) {
+	offsetLow := uint32(offset & 0xffffffff)
+	offsetHigh := uint32((offset >> 32) & 0xffffffff)
+	_, _, err = Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0)
+	return newoffset, err
+}
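
The gccgo-only seek shim above splits the 64-bit offset across two arguments because _llseek on 32-bit Linux takes the high and low halves separately; the kernel writes the combined result back through the newoffset pointer. The split and its inverse, as a quick standalone check:

package main

import "fmt"

func main() {
	off := int64(1<<32 + 5) // just past 4 GiB
	low := uint32(off & 0xffffffff)
	high := uint32((off >> 32) & 0xffffffff)
	recombined := int64(high)<<32 | int64(low)
	fmt.Println(low, high, recombined == off) // 5 1 true
}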
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index 46aa4ff9..090ed404 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -9,6 +9,7 @@ package unix
 
 //sys	Dup2(oldfd int, newfd int) (err error)
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
 //sys	Fstatfs(fd int, buf *Statfs_t) (err error)
@@ -26,8 +27,11 @@ package unix
 //sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
 
 func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
-	return Pselect(nfd, r, w, e, &ts, nil)
+	var ts *Timespec
+	if timeout != nil {
+		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+	}
+	return Pselect(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index 40b8e4f0..3d5817f6 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -15,6 +15,7 @@ import (
 func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
 
 //sys	Dup2(oldfd int, newfd int) (err error)
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
 //sysnb	Getegid() (egid int)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 17c9116e..6fb8733d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -9,6 +9,7 @@ package unix
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
 //sys	Dup2(oldfd int, newfd int) (err error)
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
 //sys	Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index a00f9927..78c1e0df 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -7,6 +7,7 @@
 package unix
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
 //sys	Dup2(oldfd int, newfd int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 71b70783..e1a3baa2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -233,13 +233,16 @@ func Uname(uname *Utsname) error {
 //sys	Dup(fd int) (nfd int, err error)
 //sys	Dup2(from int, to int) (err error)
 //sys	Exit(code int)
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE
 //sys	Fchdir(fd int) (err error)
 //sys	Fchflags(fd int, flags int) (err error)
 //sys	Fchmod(fd int, mode uint32) (err error)
+//sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
 //sysnb	Getegid() (egid int)
@@ -320,7 +323,6 @@ func Uname(uname *Utsname) error {
 // __msync13
 // __ntp_gettime30
 // __posix_chown
-// __posix_fadvise50
 // __posix_fchown
 // __posix_lchown
 // __posix_rename
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 37556e77..387e1cfc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -204,10 +204,12 @@ func Uname(uname *Utsname) error {
 //sys	Fchdir(fd int) (err error)
 //sys	Fchflags(fd int, flags int) (err error)
 //sys	Fchmod(fd int, mode uint32) (err error)
+//sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
 //sys	Fstatfs(fd int, stat *Statfs_t) (err error)
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index eca8d1d0..b7629529 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -312,6 +312,12 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
 
 //sys	fcntl(fd int, cmd int, arg int) (val int, err error)
 
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+	valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
+	return int(valptr), err
+}
+
 // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
 func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0)
@@ -595,9 +601,10 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
 //sys	Fdatasync(fd int) (err error)
-//sys Flock(fd int, how int) (err error)
+//sys	Flock(fd int, how int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
 //sys	Fstatvfs(fd int, vfsstat *Statvfs_t) (err error)
 //sys	Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
 //sysnb	Getgid() (gid int)
@@ -675,6 +682,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
 //sys	connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_connect
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
+//sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = libsendfile.sendfile
 //sys	sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_sendto
 //sys	socket(domain int, typ int, proto int) (fd int, err error) = libsocket.__xnet_socket
 //sysnb	socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.__xnet_socketpair
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
index 9d4e7a67..91c32ddf 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -21,8 +21,3 @@ func (iov *Iovec) SetLen(length int) {
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
-
-func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
-	// TODO(aram): implement this, see issue 5847.
-	panic("unimplemented")
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index cd8f3a9c..262dc520 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -7,6 +7,7 @@
 package unix
 
 import (
+	"bytes"
 	"runtime"
 	"sync"
 	"syscall"
@@ -52,12 +53,11 @@ func errnoErr(e syscall.Errno) error {
 
 // clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
 func clen(n []byte) int {
-	for i := 0; i < len(n); i++ {
-		if n[i] == 0 {
-			return i
-		}
+	i := bytes.IndexByte(n, 0)
+	if i == -1 {
+		i = len(n)
 	}
-	return len(n)
+	return i
 }
 
 // Mmap manager, for use by operating system-specific implementations.
@@ -199,6 +199,13 @@ func Getpeername(fd int) (sa Sockaddr, err error) {
 	return anyToSockaddr(&rsa)
 }
 
+func GetsockoptByte(fd, level, opt int) (value byte, err error) {
+	var n byte
+	vallen := _Socklen(1)
+	err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+	return n, err
+}
+
 func GetsockoptInt(fd, level, opt int) (value int, err error) {
 	var n int32
 	vallen := _Socklen(4)
@@ -206,6 +213,54 @@ func GetsockoptInt(fd, level, opt int) (value int, err error) {
 	return int(n), err
 }
 
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+	vallen := _Socklen(4)
+	err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+	var value IPMreq
+	vallen := _Socklen(SizeofIPMreq)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+	return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+	var value IPv6Mreq
+	vallen := _Socklen(SizeofIPv6Mreq)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+	return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+	var value IPv6MTUInfo
+	vallen := _Socklen(SizeofIPv6MTUInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+	return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+	var value ICMPv6Filter
+	vallen := _Socklen(SizeofICMPv6Filter)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+	return &value, err
+}
+
+func GetsockoptLinger(fd, level, opt int) (*Linger, error) {
+	var linger Linger
+	vallen := _Socklen(SizeofLinger)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen)
+	return &linger, err
+}
+
+func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) {
+	var tv Timeval
+	vallen := _Socklen(unsafe.Sizeof(tv))
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen)
+	return &tv, err
+}
+
 func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
 	var rsa RawSockaddrAny
 	var len _Socklen = SizeofSockaddrAny
@@ -305,3 +360,12 @@ func SetNonblock(fd int, nonblocking bool) (err error) {
 	_, err = fcntl(fd, F_SETFL, flag)
 	return err
 }
+
+// Exec calls execve(2), which replaces the calling executable in the process
+// tree. argv0 should be the full path to an executable ("/bin/ls") and the
+// executable name should also be the first argument in argv (["ls", "-l"]).
+// envv are the environment variables that should be passed to the new
+// process (["USER=go", "PWD=/tmp"]).
+func Exec(argv0 string, argv []string, envv []string) error {
+	return syscall.Exec(argv0, argv, envv)
+}
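
The per-OS Getsockopt* helpers deleted from syscall_bsd.go and syscall_linux.go reappear here once, in syscall_unix.go, alongside new Linger and Timeval variants and an Exec wrapper around syscall.Exec. A hypothetical use of the wrapper, which replaces the current process image on success:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Nothing after a successful Exec runs; the process becomes /bin/ls.
	if err := unix.Exec("/bin/ls", []string{"ls", "-l"}, os.Environ()); err != nil {
		log.Fatal(err)
	}
}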
diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go
index 10aa9b3a..1494aafc 100644
--- a/vendor/golang.org/x/sys/unix/types_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/types_netbsd.go
@@ -118,6 +118,17 @@ const (
 	PathMax = C.PATH_MAX
 )
 
+// Advice to Fadvise
+
+const (
+	FADV_NORMAL     = C.POSIX_FADV_NORMAL
+	FADV_RANDOM     = C.POSIX_FADV_RANDOM
+	FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
+	FADV_WILLNEED   = C.POSIX_FADV_WILLNEED
+	FADV_DONTNEED   = C.POSIX_FADV_DONTNEED
+	FADV_NOREUSE    = C.POSIX_FADV_NOREUSE
+)
+
 // Sockets
 
 type RawSockaddrInet4 C.struct_sockaddr_in
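
The new FADV_* constants back the Fadvise wrappers added in the syscall_*.go files above (SYS_POSIX_FADVISE on NetBSD, SYS_FADVISE64 on the Linux ports). A hypothetical call hinting sequential access over a whole file; the path is illustrative:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/var/log/messages") // any readable file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Offset 0 and length 0 mean "the entire file".
	if err := unix.Fadvise(int(f.Fd()), 0, 0, unix.FADV_SEQUENTIAL); err != nil {
		log.Printf("fadvise: %v", err)
	}
}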
diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
index d9601550..474441b8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
@@ -980,7 +980,10 @@ const (
 	RLIMIT_CPU                        = 0x0
 	RLIMIT_DATA                       = 0x2
 	RLIMIT_FSIZE                      = 0x1
+	RLIMIT_MEMLOCK                    = 0x6
 	RLIMIT_NOFILE                     = 0x8
+	RLIMIT_NPROC                      = 0x7
+	RLIMIT_RSS                        = 0x5
 	RLIMIT_STACK                      = 0x3
 	RLIM_INFINITY                     = 0x7fffffffffffffff
 	RTAX_AUTHOR                       = 0x6
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 4fba476e..68de61b8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_32BIT                            = 0x40
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x80042407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x40042409
 	PERF_EVENT_IOC_PERIOD                = 0x40082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc004240a
 	PERF_EVENT_IOC_REFRESH               = 0x2402
 	PERF_EVENT_IOC_RESET                 = 0x2403
 	PERF_EVENT_IOC_SET_BPF               = 0x40042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1247,6 +1275,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETFPXREGS                    = 0x13
@@ -1282,6 +1311,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1292,7 +1322,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1566,6 +1596,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1634,6 +1665,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1730,6 +1762,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1750,6 +1784,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1849,6 +1884,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x800454d7
 	TUNGETVNETLE                         = 0x800454dd
 	TUNSETDEBUG                          = 0x400454c9
+	TUNSETFILTEREBPF                     = 0x800454e1
 	TUNSETGROUP                          = 0x400454ce
 	TUNSETIFF                            = 0x400454ca
 	TUNSETIFINDEX                        = 0x400454da
@@ -1859,6 +1895,7 @@ const (
 	TUNSETPERSIST                        = 0x400454cb
 	TUNSETQUEUE                          = 0x400454d9
 	TUNSETSNDBUF                         = 0x400454d4
+	TUNSETSTEERINGEBPF                   = 0x800454e0
 	TUNSETTXFILTER                       = 0x400454d1
 	TUNSETVNETBE                         = 0x400454de
 	TUNSETVNETHDRSZ                      = 0x400454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 7e2a108d..a5748ae9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_32BIT                            = 0x40
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x80082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x40042409
 	PERF_EVENT_IOC_PERIOD                = 0x40082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x2402
 	PERF_EVENT_IOC_RESET                 = 0x2403
 	PERF_EVENT_IOC_SET_BPF               = 0x40042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1248,6 +1276,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETFPXREGS                    = 0x13
@@ -1283,6 +1312,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1293,7 +1323,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1567,6 +1597,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1635,6 +1666,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1731,6 +1763,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1751,6 +1785,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1850,6 +1885,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x800454d7
 	TUNGETVNETLE                         = 0x800454dd
 	TUNSETDEBUG                          = 0x400454c9
+	TUNSETFILTEREBPF                     = 0x800454e1
 	TUNSETGROUP                          = 0x400454ce
 	TUNSETIFF                            = 0x400454ca
 	TUNSETIFINDEX                        = 0x400454da
@@ -1860,6 +1896,7 @@ const (
 	TUNSETPERSIST                        = 0x400454cb
 	TUNSETQUEUE                          = 0x400454d9
 	TUNSETSNDBUF                         = 0x400454d4
+	TUNSETSTEERINGEBPF                   = 0x800454e0
 	TUNSETTXFILTER                       = 0x400454d1
 	TUNSETVNETBE                         = 0x400454de
 	TUNSETVNETHDRSZ                      = 0x400454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 250841bd..6d9fc7e9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
 	MAP_DENYWRITE                        = 0x800
@@ -898,6 +918,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -997,6 +1018,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1095,6 +1117,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x80042407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x40042409
 	PERF_EVENT_IOC_PERIOD                = 0x40082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc004240a
 	PERF_EVENT_IOC_REFRESH               = 0x2402
 	PERF_EVENT_IOC_RESET                 = 0x2403
 	PERF_EVENT_IOC_SET_BPF               = 0x40042408
@@ -1195,6 +1218,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1216,6 +1244,9 @@ const (
 	PTRACE_EVENT_VFORK_DONE              = 0x5
 	PTRACE_GETCRUNCHREGS                 = 0x19
 	PTRACE_GETEVENTMSG                   = 0x4201
+	PTRACE_GETFDPIC                      = 0x1f
+	PTRACE_GETFDPIC_EXEC                 = 0x0
+	PTRACE_GETFDPIC_INTERP               = 0x1
 	PTRACE_GETFPREGS                     = 0xe
 	PTRACE_GETHBPREGS                    = 0x1d
 	PTRACE_GETREGS                       = 0xc
@@ -1249,6 +1280,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETCRUNCHREGS                 = 0x1a
 	PTRACE_SETFPREGS                     = 0xf
@@ -1287,6 +1319,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1297,7 +1330,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1571,6 +1604,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1639,6 +1673,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1735,6 +1770,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1755,6 +1792,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1854,6 +1892,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x800454d7
 	TUNGETVNETLE                         = 0x800454dd
 	TUNSETDEBUG                          = 0x400454c9
+	TUNSETFILTEREBPF                     = 0x800454e1
 	TUNSETGROUP                          = 0x400454ce
 	TUNSETIFF                            = 0x400454ca
 	TUNSETIFINDEX                        = 0x400454da
@@ -1864,6 +1903,7 @@ const (
 	TUNSETPERSIST                        = 0x400454cb
 	TUNSETQUEUE                          = 0x400454d9
 	TUNSETSNDBUF                         = 0x400454d4
+	TUNSETSTEERINGEBPF                   = 0x800454e0
 	TUNSETTXFILTER                       = 0x400454d1
 	TUNSETVNETBE                         = 0x400454de
 	TUNSETVNETHDRSZ                      = 0x400454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index f5d78561..0253ba34 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -391,6 +392,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -399,6 +402,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -409,11 +413,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -478,6 +484,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -490,6 +497,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -497,6 +507,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -508,6 +522,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -563,6 +579,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -847,6 +865,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -855,6 +874,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
 	MAP_DENYWRITE                        = 0x800
@@ -900,6 +920,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -999,6 +1020,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1097,6 +1119,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x80082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x40042409
 	PERF_EVENT_IOC_PERIOD                = 0x40082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x2402
 	PERF_EVENT_IOC_RESET                 = 0x2403
 	PERF_EVENT_IOC_SET_BPF               = 0x40042408
@@ -1197,6 +1220,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1244,6 +1272,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETOPTIONS                    = 0x4200
 	PTRACE_SETREGS                       = 0xd
@@ -1273,6 +1302,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1283,7 +1313,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1557,6 +1587,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1625,6 +1656,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1721,6 +1753,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1741,6 +1775,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1840,6 +1875,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x800454d7
 	TUNGETVNETLE                         = 0x800454dd
 	TUNSETDEBUG                          = 0x400454c9
+	TUNSETFILTEREBPF                     = 0x800454e1
 	TUNSETGROUP                          = 0x400454ce
 	TUNSETIFF                            = 0x400454ca
 	TUNSETIFINDEX                        = 0x400454da
@@ -1850,6 +1886,7 @@ const (
 	TUNSETPERSIST                        = 0x400454cb
 	TUNSETQUEUE                          = 0x400454d9
 	TUNSETSNDBUF                         = 0x400454d4
+	TUNSETSTEERINGEBPF                   = 0x800454e0
 	TUNSETTXFILTER                       = 0x400454d1
 	TUNSETVNETBE                         = 0x400454de
 	TUNSETVNETHDRSZ                      = 0x400454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index f45492db..4cef12d8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x800
 	MAP_ANONYMOUS                        = 0x800
 	MAP_DENYWRITE                        = 0x2000
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40042407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc004240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1252,6 +1280,7 @@ const (
 	PTRACE_POKETEXT_3264                 = 0xc2
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETOPTIONS                    = 0x4200
@@ -1284,6 +1313,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1294,7 +1324,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1568,6 +1598,7 @@ const (
 	SOL_SOCKET                           = 0xffff
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1009
@@ -1637,6 +1668,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1732,6 +1764,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1752,6 +1786,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x5410
@@ -1853,6 +1888,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1863,6 +1899,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index f5a64fba..4782b3ef 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x800
 	MAP_ANONYMOUS                        = 0x800
 	MAP_DENYWRITE                        = 0x2000
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1252,6 +1280,7 @@ const (
 	PTRACE_POKETEXT_3264                 = 0xc2
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETOPTIONS                    = 0x4200
@@ -1284,6 +1313,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1294,7 +1324,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1568,6 +1598,7 @@ const (
 	SOL_SOCKET                           = 0xffff
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1009
@@ -1637,6 +1668,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1732,6 +1764,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1752,6 +1786,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x5410
@@ -1853,6 +1888,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1863,6 +1899,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index db6d556b..86eb3063 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x800
 	MAP_ANONYMOUS                        = 0x800
 	MAP_DENYWRITE                        = 0x2000
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1252,6 +1280,7 @@ const (
 	PTRACE_POKETEXT_3264                 = 0xc2
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETOPTIONS                    = 0x4200
@@ -1284,6 +1313,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1294,7 +1324,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1568,6 +1598,7 @@ const (
 	SOL_SOCKET                           = 0xffff
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1009
@@ -1637,6 +1668,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1732,6 +1764,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1752,6 +1786,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x5410
@@ -1853,6 +1888,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1863,6 +1899,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 4a62a550..2168ece9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x800
 	MAP_ANONYMOUS                        = 0x800
 	MAP_DENYWRITE                        = 0x2000
@@ -899,6 +919,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -998,6 +1019,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1096,6 +1118,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40042407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc004240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1196,6 +1219,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1252,6 +1280,7 @@ const (
 	PTRACE_POKETEXT_3264                 = 0xc2
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETFPREGS                     = 0xf
 	PTRACE_SETOPTIONS                    = 0x4200
@@ -1284,6 +1313,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1294,7 +1324,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1568,6 +1598,7 @@ const (
 	SOL_SOCKET                           = 0xffff
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1009
@@ -1637,6 +1668,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1732,6 +1764,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1752,6 +1786,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x5410
@@ -1853,6 +1888,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1863,6 +1899,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 5e1e81e0..18e48b4f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
 	MAP_DENYWRITE                        = 0x800
@@ -898,6 +918,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -999,6 +1020,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1097,6 +1119,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1198,6 +1221,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1251,6 +1279,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETEVRREGS                    = 0x15
 	PTRACE_SETFPREGS                     = 0xf
@@ -1340,6 +1369,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1350,7 +1380,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1624,6 +1654,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1692,6 +1723,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1786,6 +1818,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1806,6 +1840,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1911,6 +1946,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1921,6 +1957,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 6a803243..c069f8fa 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
 	MAP_DENYWRITE                        = 0x800
@@ -898,6 +918,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -999,6 +1020,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1097,6 +1119,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x40082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x80042409
 	PERF_EVENT_IOC_PERIOD                = 0x80082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x20002402
 	PERF_EVENT_IOC_RESET                 = 0x20002403
 	PERF_EVENT_IOC_SET_BPF               = 0x80042408
@@ -1198,6 +1221,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1251,6 +1279,7 @@ const (
 	PTRACE_POKETEXT                      = 0x4
 	PTRACE_POKEUSR                       = 0x6
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETEVRREGS                    = 0x15
 	PTRACE_SETFPREGS                     = 0xf
@@ -1340,6 +1369,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1350,7 +1380,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1624,6 +1654,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1692,6 +1723,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1786,6 +1818,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1806,6 +1840,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1911,6 +1946,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x400454d7
 	TUNGETVNETLE                         = 0x400454dd
 	TUNSETDEBUG                          = 0x800454c9
+	TUNSETFILTEREBPF                     = 0x400454e1
 	TUNSETGROUP                          = 0x800454ce
 	TUNSETIFF                            = 0x800454ca
 	TUNSETIFINDEX                        = 0x800454da
@@ -1921,6 +1957,7 @@ const (
 	TUNSETPERSIST                        = 0x800454cb
 	TUNSETQUEUE                          = 0x800454d9
 	TUNSETSNDBUF                         = 0x800454d4
+	TUNSETSTEERINGEBPF                   = 0x400454e0
 	TUNSETTXFILTER                       = 0x800454d1
 	TUNSETVNETBE                         = 0x800454de
 	TUNSETVNETHDRSZ                      = 0x800454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index af5a8950..e3f0e27e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -121,6 +121,7 @@ const (
 	ARPHRD_PPP                           = 0x200
 	ARPHRD_PRONET                        = 0x4
 	ARPHRD_RAWHDLC                       = 0x206
+	ARPHRD_RAWIP                         = 0x207
 	ARPHRD_ROSE                          = 0x10e
 	ARPHRD_RSRVD                         = 0x104
 	ARPHRD_SIT                           = 0x308
@@ -390,6 +391,8 @@ const (
 	ETH_P_DSA                            = 0x1b
 	ETH_P_ECONET                         = 0x18
 	ETH_P_EDSA                           = 0xdada
+	ETH_P_ERSPAN                         = 0x88be
+	ETH_P_ERSPAN2                        = 0x22eb
 	ETH_P_FCOE                           = 0x8906
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
@@ -398,6 +401,7 @@ const (
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
+	ETH_P_IFE                            = 0xed3e
 	ETH_P_IP                             = 0x800
 	ETH_P_IPV6                           = 0x86dd
 	ETH_P_IPX                            = 0x8137
@@ -408,11 +412,13 @@ const (
 	ETH_P_LOOP                           = 0x60
 	ETH_P_LOOPBACK                       = 0x9000
 	ETH_P_MACSEC                         = 0x88e5
+	ETH_P_MAP                            = 0xf9
 	ETH_P_MOBITEX                        = 0x15
 	ETH_P_MPLS_MC                        = 0x8848
 	ETH_P_MPLS_UC                        = 0x8847
 	ETH_P_MVRP                           = 0x88f5
 	ETH_P_NCSI                           = 0x88f8
+	ETH_P_NSH                            = 0x894f
 	ETH_P_PAE                            = 0x888e
 	ETH_P_PAUSE                          = 0x8808
 	ETH_P_PHONET                         = 0xf5
@@ -476,6 +482,7 @@ const (
 	FS_POLICY_FLAGS_PAD_8                = 0x1
 	FS_POLICY_FLAGS_PAD_MASK             = 0x3
 	FS_POLICY_FLAGS_VALID                = 0x3
+	F_ADD_SEALS                          = 0x409
 	F_DUPFD                              = 0x0
 	F_DUPFD_CLOEXEC                      = 0x406
 	F_EXLCK                              = 0x4
@@ -488,6 +495,9 @@ const (
 	F_GETOWN_EX                          = 0x10
 	F_GETPIPE_SZ                         = 0x408
 	F_GETSIG                             = 0xb
+	F_GET_FILE_RW_HINT                   = 0x40d
+	F_GET_RW_HINT                        = 0x40b
+	F_GET_SEALS                          = 0x40a
 	F_LOCK                               = 0x1
 	F_NOTIFY                             = 0x402
 	F_OFD_GETLK                          = 0x24
@@ -495,6 +505,10 @@ const (
 	F_OFD_SETLKW                         = 0x26
 	F_OK                                 = 0x0
 	F_RDLCK                              = 0x0
+	F_SEAL_GROW                          = 0x4
+	F_SEAL_SEAL                          = 0x1
+	F_SEAL_SHRINK                        = 0x2
+	F_SEAL_WRITE                         = 0x8
 	F_SETFD                              = 0x2
 	F_SETFL                              = 0x4
 	F_SETLEASE                           = 0x400
@@ -506,6 +520,8 @@ const (
 	F_SETOWN_EX                          = 0xf
 	F_SETPIPE_SZ                         = 0x407
 	F_SETSIG                             = 0xa
+	F_SET_FILE_RW_HINT                   = 0x40e
+	F_SET_RW_HINT                        = 0x40c
 	F_SHLCK                              = 0x8
 	F_TEST                               = 0x3
 	F_TLOCK                              = 0x2
@@ -561,6 +577,8 @@ const (
 	IFF_MASTER                           = 0x400
 	IFF_MULTICAST                        = 0x1000
 	IFF_MULTI_QUEUE                      = 0x100
+	IFF_NAPI                             = 0x10
+	IFF_NAPI_FRAGS                       = 0x20
 	IFF_NOARP                            = 0x80
 	IFF_NOFILTER                         = 0x1000
 	IFF_NOTRAILERS                       = 0x20
@@ -845,6 +863,7 @@ const (
 	MADV_FREE                            = 0x8
 	MADV_HUGEPAGE                        = 0xe
 	MADV_HWPOISON                        = 0x64
+	MADV_KEEPONFORK                      = 0x13
 	MADV_MERGEABLE                       = 0xc
 	MADV_NOHUGEPAGE                      = 0xf
 	MADV_NORMAL                          = 0x0
@@ -853,6 +872,7 @@ const (
 	MADV_SEQUENTIAL                      = 0x2
 	MADV_UNMERGEABLE                     = 0xd
 	MADV_WILLNEED                        = 0x3
+	MADV_WIPEONFORK                      = 0x12
 	MAP_ANON                             = 0x20
 	MAP_ANONYMOUS                        = 0x20
 	MAP_DENYWRITE                        = 0x800
@@ -898,6 +918,7 @@ const (
 	MSG_TRYHARD                          = 0x4
 	MSG_WAITALL                          = 0x100
 	MSG_WAITFORONE                       = 0x10000
+	MSG_ZEROCOPY                         = 0x4000000
 	MS_ACTIVE                            = 0x40000000
 	MS_ASYNC                             = 0x1
 	MS_BIND                              = 0x1000
@@ -997,6 +1018,7 @@ const (
 	NLM_F_EXCL                           = 0x200
 	NLM_F_MATCH                          = 0x200
 	NLM_F_MULTI                          = 0x2
+	NLM_F_NONREC                         = 0x100
 	NLM_F_REPLACE                        = 0x100
 	NLM_F_REQUEST                        = 0x1
 	NLM_F_ROOT                           = 0x100
@@ -1095,6 +1117,7 @@ const (
 	PERF_EVENT_IOC_ID                    = 0x80082407
 	PERF_EVENT_IOC_PAUSE_OUTPUT          = 0x40042409
 	PERF_EVENT_IOC_PERIOD                = 0x40082404
+	PERF_EVENT_IOC_QUERY_BPF             = 0xc008240a
 	PERF_EVENT_IOC_REFRESH               = 0x2402
 	PERF_EVENT_IOC_RESET                 = 0x2403
 	PERF_EVENT_IOC_SET_BPF               = 0x40042408
@@ -1195,6 +1218,11 @@ const (
 	PR_SET_TIMING                        = 0xe
 	PR_SET_TSC                           = 0x1a
 	PR_SET_UNALIGN                       = 0x6
+	PR_SVE_GET_VL                        = 0x33
+	PR_SVE_SET_VL                        = 0x32
+	PR_SVE_SET_VL_ONEXEC                 = 0x40000
+	PR_SVE_VL_INHERIT                    = 0x20000
+	PR_SVE_VL_LEN_MASK                   = 0xffff
 	PR_TASK_PERF_EVENTS_DISABLE          = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE           = 0x20
 	PR_TIMING_STATISTICAL                = 0x0
@@ -1255,6 +1283,7 @@ const (
 	PTRACE_POKE_SYSTEM_CALL              = 0x5008
 	PTRACE_PROT                          = 0x15
 	PTRACE_SECCOMP_GET_FILTER            = 0x420c
+	PTRACE_SECCOMP_GET_METADATA          = 0x420d
 	PTRACE_SEIZE                         = 0x4206
 	PTRACE_SETOPTIONS                    = 0x4200
 	PTRACE_SETREGS                       = 0xd
@@ -1344,6 +1373,7 @@ const (
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
+	RTAX_FASTOPEN_NO_COOKIE              = 0x11
 	RTAX_FEATURES                        = 0xc
 	RTAX_FEATURE_ALLFRAG                 = 0x8
 	RTAX_FEATURE_ECN                     = 0x1
@@ -1354,7 +1384,7 @@ const (
 	RTAX_INITCWND                        = 0xb
 	RTAX_INITRWND                        = 0xe
 	RTAX_LOCK                            = 0x1
-	RTAX_MAX                             = 0x10
+	RTAX_MAX                             = 0x11
 	RTAX_MTU                             = 0x2
 	RTAX_QUICKACK                        = 0xf
 	RTAX_REORDERING                      = 0x9
@@ -1628,6 +1658,7 @@ const (
 	SOL_SOCKET                           = 0x1
 	SOL_TCP                              = 0x6
 	SOL_TIPC                             = 0x10f
+	SOL_TLS                              = 0x11a
 	SOL_X25                              = 0x106
 	SOMAXCONN                            = 0x80
 	SO_ACCEPTCONN                        = 0x1e
@@ -1696,6 +1727,7 @@ const (
 	SO_VM_SOCKETS_PEER_HOST_VM_ID        = 0x3
 	SO_VM_SOCKETS_TRUSTED                = 0x5
 	SO_WIFI_STATUS                       = 0x29
+	SO_ZEROCOPY                          = 0x3c
 	SPLICE_F_GIFT                        = 0x8
 	SPLICE_F_MORE                        = 0x4
 	SPLICE_F_MOVE                        = 0x1
@@ -1792,6 +1824,8 @@ const (
 	TCP_MAXWIN                           = 0xffff
 	TCP_MAX_WINSHIFT                     = 0xe
 	TCP_MD5SIG                           = 0xe
+	TCP_MD5SIG_EXT                       = 0x20
+	TCP_MD5SIG_FLAG_PREFIX               = 0x1
 	TCP_MD5SIG_MAXKEYLEN                 = 0x50
 	TCP_MSS                              = 0x200
 	TCP_MSS_DEFAULT                      = 0x218
@@ -1812,6 +1846,7 @@ const (
 	TCP_THIN_DUPACK                      = 0x11
 	TCP_THIN_LINEAR_TIMEOUTS             = 0x10
 	TCP_TIMESTAMP                        = 0x18
+	TCP_ULP                              = 0x1f
 	TCP_USER_TIMEOUT                     = 0x12
 	TCP_WINDOW_CLAMP                     = 0xa
 	TCSAFLUSH                            = 0x2
@@ -1911,6 +1946,7 @@ const (
 	TUNGETVNETHDRSZ                      = 0x800454d7
 	TUNGETVNETLE                         = 0x800454dd
 	TUNSETDEBUG                          = 0x400454c9
+	TUNSETFILTEREBPF                     = 0x800454e1
 	TUNSETGROUP                          = 0x400454ce
 	TUNSETIFF                            = 0x400454ca
 	TUNSETIFINDEX                        = 0x400454da
@@ -1921,6 +1957,7 @@ const (
 	TUNSETPERSIST                        = 0x400454cb
 	TUNSETQUEUE                          = 0x400454d9
 	TUNSETSNDBUF                         = 0x400454d4
+	TUNSETSTEERINGEBPF                   = 0x800454e0
 	TUNSETTXFILTER                       = 0x400454d1
 	TUNSETVNETBE                         = 0x400454de
 	TUNSETVNETHDRSZ                      = 0x400454d8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
index 1612b660..3eef63f5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
@@ -159,6 +159,7 @@ const (
 	CLONE_VFORK                       = 0x4000
 	CLONE_VM                          = 0x100
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
index c994ab61..40c870be 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
@@ -159,6 +159,7 @@ const (
 	CLONE_VFORK                       = 0x4000
 	CLONE_VM                          = 0x100
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
index a8f9efed..43c4add5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
@@ -151,6 +151,7 @@ const (
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index 04e4f331..f47536dc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -147,6 +147,7 @@ const (
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index c80ff981..c96ca653 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -147,6 +147,7 @@ const (
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index 4c320495..4c027352 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -147,6 +147,7 @@ const (
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
 	CREAD                             = 0x800
+	CRTSCTS                           = 0x10000
 	CS5                               = 0x0
 	CS6                               = 0x100
 	CS7                               = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
index 763ae4fb..4c9f7275 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
@@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index d6808e07..25623777 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
index 6ae95e6b..4ae787e4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
@@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index ca6a7ea8..14ed6886 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index a0241de1..91f36e9e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -618,6 +618,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -659,6 +674,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index fd9ca5a4..a86434a7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index a9f18b22..040e2f76 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 9823e18a..cddc5e86 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index 8f276d65..8c9e26a0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -1541,6 +1541,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index 61169b33..8dc2b58f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -1534,6 +1534,16 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 4cb59b4a..e8beef85 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -1551,6 +1551,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index 0b547ae3..899e4403 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -1551,6 +1551,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index cd94d3a8..7a477cbd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -1534,6 +1534,16 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index cdad555a..9dc4c7d6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -1551,6 +1551,16 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 38f4e44b..f0d1ee12 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -1551,6 +1551,16 @@ func Dup2(oldfd int, newfd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index 2dd98434..c01b3b6b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -1222,6 +1222,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Dup2(oldfd int, newfd int) (err error) {
 	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index 62eadff1..fb4b9627 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -571,6 +571,16 @@ func Exit(code int) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchdir(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index 307f4e99..beac82ef 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -571,6 +571,16 @@ func Exit(code int) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchdir(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index 61109313..7bd5f60b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -571,6 +571,16 @@ func Exit(code int) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+	_, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchdir(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 003f820e..5c09c075 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index ba0e8f32..54ccc935 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 2ce02c7c..59258b0a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index f5d01b3a..39789630 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -50,6 +50,7 @@ import (
 //go:cgo_import_dynamic libc_flock flock "libc.so"
 //go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
 //go:cgo_import_dynamic libc_fstat fstat "libc.so"
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
 //go:cgo_import_dynamic libc_fstatvfs fstatvfs "libc.so"
 //go:cgo_import_dynamic libc_getdents getdents "libc.so"
 //go:cgo_import_dynamic libc_getgid getgid "libc.so"
@@ -127,6 +128,7 @@ import (
 //go:cgo_import_dynamic libc___xnet_connect __xnet_connect "libsocket.so"
 //go:cgo_import_dynamic libc_mmap mmap "libc.so"
 //go:cgo_import_dynamic libc_munmap munmap "libc.so"
+//go:cgo_import_dynamic libc_sendfile sendfile "libsendfile.so"
 //go:cgo_import_dynamic libc___xnet_sendto __xnet_sendto "libsocket.so"
 //go:cgo_import_dynamic libc___xnet_socket __xnet_socket "libsocket.so"
 //go:cgo_import_dynamic libc___xnet_socketpair __xnet_socketpair "libsocket.so"
@@ -176,6 +178,7 @@ import (
 //go:linkname procFlock libc_flock
 //go:linkname procFpathconf libc_fpathconf
 //go:linkname procFstat libc_fstat
+//go:linkname procFstatat libc_fstatat
 //go:linkname procFstatvfs libc_fstatvfs
 //go:linkname procGetdents libc_getdents
 //go:linkname procGetgid libc_getgid
@@ -253,6 +256,7 @@ import (
 //go:linkname proc__xnet_connect libc___xnet_connect
 //go:linkname procmmap libc_mmap
 //go:linkname procmunmap libc_munmap
+//go:linkname procsendfile libc_sendfile
 //go:linkname proc__xnet_sendto libc___xnet_sendto
 //go:linkname proc__xnet_socket libc___xnet_socket
 //go:linkname proc__xnet_socketpair libc___xnet_socketpair
@@ -303,6 +307,7 @@ var (
 	procFlock,
 	procFpathconf,
 	procFstat,
+	procFstatat,
 	procFstatvfs,
 	procGetdents,
 	procGetgid,
@@ -380,6 +385,7 @@ var (
 	proc__xnet_connect,
 	procmmap,
 	procmunmap,
+	procsendfile,
 	proc__xnet_sendto,
 	proc__xnet_socket,
 	proc__xnet_socketpair,
@@ -772,6 +778,19 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 	return
 }
 
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0)
 	if e1 != 0 {
@@ -1573,6 +1592,15 @@ func munmap(addr uintptr, length uintptr) (err error) {
 	return
 }
 
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+	written = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
 	var _p0 *byte
 	if len(buf) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index dfe5dab6..384d49bf 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -367,4 +367,7 @@ const (
 	SYS_PWRITEV2               = 381
 	SYS_KEXEC_FILE_LOAD        = 382
 	SYS_STATX                  = 383
+	SYS_PKEY_ALLOC             = 384
+	SYS_PKEY_FREE              = 385
+	SYS_PKEY_MPROTECT          = 386
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index eca97f73..9623248a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -367,4 +367,7 @@ const (
 	SYS_PWRITEV2               = 381
 	SYS_KEXEC_FILE_LOAD        = 382
 	SYS_STATX                  = 383
+	SYS_PKEY_ALLOC             = 384
+	SYS_PKEY_FREE              = 385
+	SYS_PKEY_MPROTECT          = 386
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 8ea18e6c..ed92409d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -109,6 +109,7 @@ const (
 	SYS_PERSONALITY            = 136
 	SYS_AFS_SYSCALL            = 137
 	SYS_GETDENTS               = 141
+	SYS_SELECT                 = 142
 	SYS_FLOCK                  = 143
 	SYS_MSYNC                  = 144
 	SYS_READV                  = 145
@@ -151,6 +152,26 @@ const (
 	SYS_GETPMSG                = 188
 	SYS_PUTPMSG                = 189
 	SYS_VFORK                  = 190
+	SYS_GETRLIMIT              = 191
+	SYS_LCHOWN                 = 198
+	SYS_GETUID                 = 199
+	SYS_GETGID                 = 200
+	SYS_GETEUID                = 201
+	SYS_GETEGID                = 202
+	SYS_SETREUID               = 203
+	SYS_SETREGID               = 204
+	SYS_GETGROUPS              = 205
+	SYS_SETGROUPS              = 206
+	SYS_FCHOWN                 = 207
+	SYS_SETRESUID              = 208
+	SYS_GETRESUID              = 209
+	SYS_SETRESGID              = 210
+	SYS_GETRESGID              = 211
+	SYS_CHOWN                  = 212
+	SYS_SETUID                 = 213
+	SYS_SETGID                 = 214
+	SYS_SETFSUID               = 215
+	SYS_SETFSGID               = 216
 	SYS_PIVOT_ROOT             = 217
 	SYS_MINCORE                = 218
 	SYS_MADVISE                = 219
@@ -222,6 +243,7 @@ const (
 	SYS_MKNODAT                = 290
 	SYS_FCHOWNAT               = 291
 	SYS_FUTIMESAT              = 292
+	SYS_NEWFSTATAT             = 293
 	SYS_UNLINKAT               = 294
 	SYS_RENAMEAT               = 295
 	SYS_LINKAT                 = 296
@@ -308,26 +330,5 @@ const (
 	SYS_PWRITEV2               = 377
 	SYS_S390_GUARDED_STORAGE   = 378
 	SYS_STATX                  = 379
-	SYS_SELECT                 = 142
-	SYS_GETRLIMIT              = 191
-	SYS_LCHOWN                 = 198
-	SYS_GETUID                 = 199
-	SYS_GETGID                 = 200
-	SYS_GETEUID                = 201
-	SYS_GETEGID                = 202
-	SYS_SETREUID               = 203
-	SYS_SETREGID               = 204
-	SYS_GETGROUPS              = 205
-	SYS_SETGROUPS              = 206
-	SYS_FCHOWN                 = 207
-	SYS_SETRESUID              = 208
-	SYS_GETRESUID              = 209
-	SYS_SETRESGID              = 210
-	SYS_GETRESGID              = 211
-	SYS_CHOWN                  = 212
-	SYS_SETUID                 = 213
-	SYS_SETGID                 = 214
-	SYS_SETFSUID               = 215
-	SYS_SETFSGID               = 216
-	SYS_NEWFSTATAT             = 293
+	SYS_S390_STHYI             = 380
 )
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
index bc4bc89f..327af5fb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
@@ -136,13 +136,13 @@ type Fsid struct {
 }
 
 type Dirent struct {
-	Ino       uint64
-	Seekoff   uint64
-	Reclen    uint16
-	Namlen    uint16
-	Type      uint8
-	Name      [1024]int8
-	Pad_cgo_0 [3]byte
+	Ino     uint64
+	Seekoff uint64
+	Reclen  uint16
+	Namlen  uint16
+	Type    uint8
+	Name    [1024]int8
+	_       [3]byte
 }
 
 type RawSockaddrInet4 struct {
@@ -295,14 +295,14 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
@@ -338,51 +338,51 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type IfmaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
 }
 
 type IfmaMsghdr2 struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Refcount  int32
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Addrs    int32
+	Flags    int32
+	Index    uint16
+	_        [2]byte
+	Refcount int32
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint32
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint32
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -430,11 +430,11 @@ type BpfInsn struct {
 }
 
 type BpfHdr struct {
-	Tstamp    Timeval
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [2]byte
+	Tstamp  Timeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [2]byte
 }
 
 type Termios struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index d8abcab1..116e6e07 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -26,9 +26,9 @@ type Timespec struct {
 }
 
 type Timeval struct {
-	Sec       int64
-	Usec      int32
-	Pad_cgo_0 [4]byte
+	Sec  int64
+	Usec int32
+	_    [4]byte
 }
 
 type Timeval32 struct {
@@ -70,7 +70,7 @@ type Stat_t struct {
 	Uid           uint32
 	Gid           uint32
 	Rdev          int32
-	Pad_cgo_0     [4]byte
+	_             [4]byte
 	Atimespec     Timespec
 	Mtimespec     Timespec
 	Ctimespec     Timespec
@@ -120,9 +120,9 @@ type Fstore_t struct {
 }
 
 type Radvisory_t struct {
-	Offset    int64
-	Count     int32
-	Pad_cgo_0 [4]byte
+	Offset int64
+	Count  int32
+	_      [4]byte
 }
 
 type Fbootstraptransfer_t struct {
@@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct {
 }
 
 type Log2phys_t struct {
-	Flags     uint32
-	Pad_cgo_0 [8]byte
-	Pad_cgo_1 [8]byte
+	Flags uint32
+	_     [8]byte
+	_     [8]byte
 }
 
 type Fsid struct {
@@ -142,13 +142,13 @@ type Fsid struct {
 }
 
 type Dirent struct {
-	Ino       uint64
-	Seekoff   uint64
-	Reclen    uint16
-	Namlen    uint16
-	Type      uint8
-	Name      [1024]int8
-	Pad_cgo_0 [3]byte
+	Ino     uint64
+	Seekoff uint64
+	Reclen  uint16
+	Namlen  uint16
+	Type    uint8
+	Name    [1024]int8
+	_       [3]byte
 }
 
 type RawSockaddrInet4 struct {
@@ -221,10 +221,10 @@ type IPv6Mreq struct {
 type Msghdr struct {
 	Name       *byte
 	Namelen    uint32
-	Pad_cgo_0  [4]byte
+	_          [4]byte
 	Iov        *Iovec
 	Iovlen     int32
-	Pad_cgo_1  [4]byte
+	_          [4]byte
 	Control    *byte
 	Controllen uint32
 	Flags      int32
@@ -303,14 +303,14 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
@@ -346,51 +346,51 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type IfmaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
 }
 
 type IfmaMsghdr2 struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Refcount  int32
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Addrs    int32
+	Flags    int32
+	Index    uint16
+	_        [2]byte
+	Refcount int32
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint32
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint32
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -426,9 +426,9 @@ type BpfStat struct {
 }
 
 type BpfProgram struct {
-	Len       uint32
-	Pad_cgo_0 [4]byte
-	Insns     *BpfInsn
+	Len   uint32
+	_     [4]byte
+	Insns *BpfInsn
 }
 
 type BpfInsn struct {
@@ -439,22 +439,22 @@ type BpfInsn struct {
 }
 
 type BpfHdr struct {
-	Tstamp    Timeval32
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [2]byte
+	Tstamp  Timeval32
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [2]byte
 }
 
 type Termios struct {
-	Iflag     uint64
-	Oflag     uint64
-	Cflag     uint64
-	Lflag     uint64
-	Cc        [20]uint8
-	Pad_cgo_0 [4]byte
-	Ispeed    uint64
-	Ospeed    uint64
+	Iflag  uint64
+	Oflag  uint64
+	Cflag  uint64
+	Lflag  uint64
+	Cc     [20]uint8
+	_      [4]byte
+	Ispeed uint64
+	Ospeed uint64
 }
 
 type Winsize struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
index 9749c9f7..2750ad76 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
@@ -137,13 +137,13 @@ type Fsid struct {
 }
 
 type Dirent struct {
-	Ino       uint64
-	Seekoff   uint64
-	Reclen    uint16
-	Namlen    uint16
-	Type      uint8
-	Name      [1024]int8
-	Pad_cgo_0 [3]byte
+	Ino     uint64
+	Seekoff uint64
+	Reclen  uint16
+	Namlen  uint16
+	Type    uint8
+	Name    [1024]int8
+	_       [3]byte
 }
 
 type RawSockaddrInet4 struct {
@@ -296,14 +296,14 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
@@ -339,51 +339,51 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type IfmaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
 }
 
 type IfmaMsghdr2 struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Refcount  int32
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Addrs    int32
+	Flags    int32
+	Index    uint16
+	_        [2]byte
+	Refcount int32
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint32
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint32
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -431,11 +431,11 @@ type BpfInsn struct {
 }
 
 type BpfHdr struct {
-	Tstamp    Timeval
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [2]byte
+	Tstamp  Timeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [2]byte
 }
 
 type Termios struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 810b0bd4..8cead099 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -26,9 +26,9 @@ type Timespec struct {
 }
 
 type Timeval struct {
-	Sec       int64
-	Usec      int32
-	Pad_cgo_0 [4]byte
+	Sec  int64
+	Usec int32
+	_    [4]byte
 }
 
 type Timeval32 struct {
@@ -70,7 +70,7 @@ type Stat_t struct {
 	Uid           uint32
 	Gid           uint32
 	Rdev          int32
-	Pad_cgo_0     [4]byte
+	_             [4]byte
 	Atimespec     Timespec
 	Mtimespec     Timespec
 	Ctimespec     Timespec
@@ -120,9 +120,9 @@ type Fstore_t struct {
 }
 
 type Radvisory_t struct {
-	Offset    int64
-	Count     int32
-	Pad_cgo_0 [4]byte
+	Offset int64
+	Count  int32
+	_      [4]byte
 }
 
 type Fbootstraptransfer_t struct {
@@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct {
 }
 
 type Log2phys_t struct {
-	Flags     uint32
-	Pad_cgo_0 [8]byte
-	Pad_cgo_1 [8]byte
+	Flags uint32
+	_     [8]byte
+	_     [8]byte
 }
 
 type Fsid struct {
@@ -142,13 +142,13 @@ type Fsid struct {
 }
 
 type Dirent struct {
-	Ino       uint64
-	Seekoff   uint64
-	Reclen    uint16
-	Namlen    uint16
-	Type      uint8
-	Name      [1024]int8
-	Pad_cgo_0 [3]byte
+	Ino     uint64
+	Seekoff uint64
+	Reclen  uint16
+	Namlen  uint16
+	Type    uint8
+	Name    [1024]int8
+	_       [3]byte
 }
 
 type RawSockaddrInet4 struct {
@@ -221,10 +221,10 @@ type IPv6Mreq struct {
 type Msghdr struct {
 	Name       *byte
 	Namelen    uint32
-	Pad_cgo_0  [4]byte
+	_          [4]byte
 	Iov        *Iovec
 	Iovlen     int32
-	Pad_cgo_1  [4]byte
+	_          [4]byte
 	Control    *byte
 	Controllen uint32
 	Flags      int32
@@ -303,14 +303,14 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
@@ -346,51 +346,51 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type IfmaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
 }
 
 type IfmaMsghdr2 struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Refcount  int32
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Addrs    int32
+	Flags    int32
+	Index    uint16
+	_        [2]byte
+	Refcount int32
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint32
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint32
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -426,9 +426,9 @@ type BpfStat struct {
 }
 
 type BpfProgram struct {
-	Len       uint32
-	Pad_cgo_0 [4]byte
-	Insns     *BpfInsn
+	Len   uint32
+	_     [4]byte
+	Insns *BpfInsn
 }
 
 type BpfInsn struct {
@@ -439,22 +439,22 @@ type BpfInsn struct {
 }
 
 type BpfHdr struct {
-	Tstamp    Timeval32
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [2]byte
+	Tstamp  Timeval32
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [2]byte
 }
 
 type Termios struct {
-	Iflag     uint64
-	Oflag     uint64
-	Cflag     uint64
-	Lflag     uint64
-	Cc        [20]uint8
-	Pad_cgo_0 [4]byte
-	Ispeed    uint64
-	Ospeed    uint64
+	Iflag  uint64
+	Oflag  uint64
+	Cflag  uint64
+	Lflag  uint64
+	Cc     [20]uint8
+	_      [4]byte
+	Ispeed uint64
+	Ospeed uint64
 }
 
 type Winsize struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
index e3b8ebb0..315a553b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
@@ -108,7 +108,7 @@ type Statfs_t struct {
 	Owner       uint32
 	Type        int32
 	Flags       int32
-	Pad_cgo_0   [4]byte
+	_           [4]byte
 	Syncwrites  int64
 	Asyncwrites int64
 	Fstypename  [16]int8
@@ -118,7 +118,7 @@ type Statfs_t struct {
 	Spares1     int16
 	Mntfromname [80]int8
 	Spares2     int16
-	Pad_cgo_1   [4]byte
+	_           [4]byte
 	Spare       [2]int64
 }
 
@@ -219,10 +219,10 @@ type IPv6Mreq struct {
 type Msghdr struct {
 	Name       *byte
 	Namelen    uint32
-	Pad_cgo_0  [4]byte
+	_          [4]byte
 	Iov        *Iovec
 	Iovlen     int32
-	Pad_cgo_1  [4]byte
+	_          [4]byte
 	Control    *byte
 	Controllen uint32
 	Flags      int32
@@ -294,14 +294,14 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
@@ -311,7 +311,7 @@ type IfData struct {
 	Hdrlen     uint8
 	Recvquota  uint8
 	Xmitquota  uint8
-	Pad_cgo_0  [2]byte
+	_          [2]byte
 	Mtu        uint64
 	Metric     uint64
 	Link_state uint64
@@ -333,24 +333,24 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type IfmaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
 }
 
 type IfAnnounceMsghdr struct {
@@ -363,19 +363,19 @@ type IfAnnounceMsghdr struct {
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint64
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint64
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -391,7 +391,7 @@ type RtMetrics struct {
 	Hopcount  uint64
 	Mssopt    uint16
 	Pad       uint16
-	Pad_cgo_0 [4]byte
+	_         [4]byte
 	Msl       uint64
 	Iwmaxsegs uint64
 	Iwcapsegs uint64
@@ -416,9 +416,9 @@ type BpfStat struct {
 }
 
 type BpfProgram struct {
-	Len       uint32
-	Pad_cgo_0 [4]byte
-	Insns     *BpfInsn
+	Len   uint32
+	_     [4]byte
+	Insns *BpfInsn
 }
 
 type BpfInsn struct {
@@ -429,11 +429,11 @@ type BpfInsn struct {
 }
 
 type BpfHdr struct {
-	Tstamp    Timeval
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [6]byte
+	Tstamp  Timeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [6]byte
 }
 
 type Termios struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 7aa206e3..3c56b207 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -436,97 +436,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index abb3d89a..92f1c8fe 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -440,97 +440,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 11654174..cee5459a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -440,97 +440,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 0d0de46f..5ce7cfe8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -441,97 +441,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index a9087c52..2860b3e2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -439,97 +439,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 01e8f65c..17b881ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -441,97 +441,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 6f9452d8..ec802dc8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -441,97 +441,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 6de721f7..4a996485 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -439,97 +439,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index cb2701fd..63a0dbe9 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -442,97 +442,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index fa5b15be..f20a6359 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -442,97 +442,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 64952cb7..30dc1058 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -441,97 +441,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2c
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x31
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 9dbbb1ce..8e7384b8 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -376,97 +376,123 @@ const (
 )
 
 const (
-	IFA_UNSPEC          = 0x0
-	IFA_ADDRESS         = 0x1
-	IFA_LOCAL           = 0x2
-	IFA_LABEL           = 0x3
-	IFA_BROADCAST       = 0x4
-	IFA_ANYCAST         = 0x5
-	IFA_CACHEINFO       = 0x6
-	IFA_MULTICAST       = 0x7
-	IFLA_UNSPEC         = 0x0
-	IFLA_ADDRESS        = 0x1
-	IFLA_BROADCAST      = 0x2
-	IFLA_IFNAME         = 0x3
-	IFLA_MTU            = 0x4
-	IFLA_LINK           = 0x5
-	IFLA_QDISC          = 0x6
-	IFLA_STATS          = 0x7
-	IFLA_COST           = 0x8
-	IFLA_PRIORITY       = 0x9
-	IFLA_MASTER         = 0xa
-	IFLA_WIRELESS       = 0xb
-	IFLA_PROTINFO       = 0xc
-	IFLA_TXQLEN         = 0xd
-	IFLA_MAP            = 0xe
-	IFLA_WEIGHT         = 0xf
-	IFLA_OPERSTATE      = 0x10
-	IFLA_LINKMODE       = 0x11
-	IFLA_LINKINFO       = 0x12
-	IFLA_NET_NS_PID     = 0x13
-	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2a
-	RT_SCOPE_UNIVERSE   = 0x0
-	RT_SCOPE_SITE       = 0xc8
-	RT_SCOPE_LINK       = 0xfd
-	RT_SCOPE_HOST       = 0xfe
-	RT_SCOPE_NOWHERE    = 0xff
-	RT_TABLE_UNSPEC     = 0x0
-	RT_TABLE_COMPAT     = 0xfc
-	RT_TABLE_DEFAULT    = 0xfd
-	RT_TABLE_MAIN       = 0xfe
-	RT_TABLE_LOCAL      = 0xff
-	RT_TABLE_MAX        = 0xffffffff
-	RTA_UNSPEC          = 0x0
-	RTA_DST             = 0x1
-	RTA_SRC             = 0x2
-	RTA_IIF             = 0x3
-	RTA_OIF             = 0x4
-	RTA_GATEWAY         = 0x5
-	RTA_PRIORITY        = 0x6
-	RTA_PREFSRC         = 0x7
-	RTA_METRICS         = 0x8
-	RTA_MULTIPATH       = 0x9
-	RTA_FLOW            = 0xb
-	RTA_CACHEINFO       = 0xc
-	RTA_TABLE           = 0xf
-	RTN_UNSPEC          = 0x0
-	RTN_UNICAST         = 0x1
-	RTN_LOCAL           = 0x2
-	RTN_BROADCAST       = 0x3
-	RTN_ANYCAST         = 0x4
-	RTN_MULTICAST       = 0x5
-	RTN_BLACKHOLE       = 0x6
-	RTN_UNREACHABLE     = 0x7
-	RTN_PROHIBIT        = 0x8
-	RTN_THROW           = 0x9
-	RTN_NAT             = 0xa
-	RTN_XRESOLVE        = 0xb
-	RTNLGRP_NONE        = 0x0
-	RTNLGRP_LINK        = 0x1
-	RTNLGRP_NOTIFY      = 0x2
-	RTNLGRP_NEIGH       = 0x3
-	RTNLGRP_TC          = 0x4
-	RTNLGRP_IPV4_IFADDR = 0x5
-	RTNLGRP_IPV4_MROUTE = 0x6
-	RTNLGRP_IPV4_ROUTE  = 0x7
-	RTNLGRP_IPV4_RULE   = 0x8
-	RTNLGRP_IPV6_IFADDR = 0x9
-	RTNLGRP_IPV6_MROUTE = 0xa
-	RTNLGRP_IPV6_ROUTE  = 0xb
-	RTNLGRP_IPV6_IFINFO = 0xc
-	RTNLGRP_IPV6_PREFIX = 0x12
-	RTNLGRP_IPV6_RULE   = 0x13
-	RTNLGRP_ND_USEROPT  = 0x14
-	SizeofNlMsghdr      = 0x10
-	SizeofNlMsgerr      = 0x14
-	SizeofRtGenmsg      = 0x1
-	SizeofNlAttr        = 0x4
-	SizeofRtAttr        = 0x4
-	SizeofIfInfomsg     = 0x10
-	SizeofIfAddrmsg     = 0x8
-	SizeofRtMsg         = 0xc
-	SizeofRtNexthop     = 0x8
+	IFA_UNSPEC           = 0x0
+	IFA_ADDRESS          = 0x1
+	IFA_LOCAL            = 0x2
+	IFA_LABEL            = 0x3
+	IFA_BROADCAST        = 0x4
+	IFA_ANYCAST          = 0x5
+	IFA_CACHEINFO        = 0x6
+	IFA_MULTICAST        = 0x7
+	IFLA_UNSPEC          = 0x0
+	IFLA_ADDRESS         = 0x1
+	IFLA_BROADCAST       = 0x2
+	IFLA_IFNAME          = 0x3
+	IFLA_MTU             = 0x4
+	IFLA_LINK            = 0x5
+	IFLA_QDISC           = 0x6
+	IFLA_STATS           = 0x7
+	IFLA_COST            = 0x8
+	IFLA_PRIORITY        = 0x9
+	IFLA_MASTER          = 0xa
+	IFLA_WIRELESS        = 0xb
+	IFLA_PROTINFO        = 0xc
+	IFLA_TXQLEN          = 0xd
+	IFLA_MAP             = 0xe
+	IFLA_WEIGHT          = 0xf
+	IFLA_OPERSTATE       = 0x10
+	IFLA_LINKMODE        = 0x11
+	IFLA_LINKINFO        = 0x12
+	IFLA_NET_NS_PID      = 0x13
+	IFLA_IFALIAS         = 0x14
+	IFLA_NUM_VF          = 0x15
+	IFLA_VFINFO_LIST     = 0x16
+	IFLA_STATS64         = 0x17
+	IFLA_VF_PORTS        = 0x18
+	IFLA_PORT_SELF       = 0x19
+	IFLA_AF_SPEC         = 0x1a
+	IFLA_GROUP           = 0x1b
+	IFLA_NET_NS_FD       = 0x1c
+	IFLA_EXT_MASK        = 0x1d
+	IFLA_PROMISCUITY     = 0x1e
+	IFLA_NUM_TX_QUEUES   = 0x1f
+	IFLA_NUM_RX_QUEUES   = 0x20
+	IFLA_CARRIER         = 0x21
+	IFLA_PHYS_PORT_ID    = 0x22
+	IFLA_CARRIER_CHANGES = 0x23
+	IFLA_PHYS_SWITCH_ID  = 0x24
+	IFLA_LINK_NETNSID    = 0x25
+	IFLA_PHYS_PORT_NAME  = 0x26
+	IFLA_PROTO_DOWN      = 0x27
+	IFLA_GSO_MAX_SEGS    = 0x28
+	IFLA_GSO_MAX_SIZE    = 0x29
+	IFLA_PAD             = 0x2a
+	IFLA_XDP             = 0x2b
+	IFLA_EVENT           = 0x2c
+	IFLA_NEW_NETNSID     = 0x2d
+	IFLA_IF_NETNSID      = 0x2e
+	IFLA_MAX             = 0x2e
+	RT_SCOPE_UNIVERSE    = 0x0
+	RT_SCOPE_SITE        = 0xc8
+	RT_SCOPE_LINK        = 0xfd
+	RT_SCOPE_HOST        = 0xfe
+	RT_SCOPE_NOWHERE     = 0xff
+	RT_TABLE_UNSPEC      = 0x0
+	RT_TABLE_COMPAT      = 0xfc
+	RT_TABLE_DEFAULT     = 0xfd
+	RT_TABLE_MAIN        = 0xfe
+	RT_TABLE_LOCAL       = 0xff
+	RT_TABLE_MAX         = 0xffffffff
+	RTA_UNSPEC           = 0x0
+	RTA_DST              = 0x1
+	RTA_SRC              = 0x2
+	RTA_IIF              = 0x3
+	RTA_OIF              = 0x4
+	RTA_GATEWAY          = 0x5
+	RTA_PRIORITY         = 0x6
+	RTA_PREFSRC          = 0x7
+	RTA_METRICS          = 0x8
+	RTA_MULTIPATH        = 0x9
+	RTA_FLOW             = 0xb
+	RTA_CACHEINFO        = 0xc
+	RTA_TABLE            = 0xf
+	RTN_UNSPEC           = 0x0
+	RTN_UNICAST          = 0x1
+	RTN_LOCAL            = 0x2
+	RTN_BROADCAST        = 0x3
+	RTN_ANYCAST          = 0x4
+	RTN_MULTICAST        = 0x5
+	RTN_BLACKHOLE        = 0x6
+	RTN_UNREACHABLE      = 0x7
+	RTN_PROHIBIT         = 0x8
+	RTN_THROW            = 0x9
+	RTN_NAT              = 0xa
+	RTN_XRESOLVE         = 0xb
+	RTNLGRP_NONE         = 0x0
+	RTNLGRP_LINK         = 0x1
+	RTNLGRP_NOTIFY       = 0x2
+	RTNLGRP_NEIGH        = 0x3
+	RTNLGRP_TC           = 0x4
+	RTNLGRP_IPV4_IFADDR  = 0x5
+	RTNLGRP_IPV4_MROUTE  = 0x6
+	RTNLGRP_IPV4_ROUTE   = 0x7
+	RTNLGRP_IPV4_RULE    = 0x8
+	RTNLGRP_IPV6_IFADDR  = 0x9
+	RTNLGRP_IPV6_MROUTE  = 0xa
+	RTNLGRP_IPV6_ROUTE   = 0xb
+	RTNLGRP_IPV6_IFINFO  = 0xc
+	RTNLGRP_IPV6_PREFIX  = 0x12
+	RTNLGRP_IPV6_RULE    = 0x13
+	RTNLGRP_ND_USEROPT   = 0x14
+	SizeofNlMsghdr       = 0x10
+	SizeofNlMsgerr       = 0x14
+	SizeofRtGenmsg       = 0x1
+	SizeofNlAttr         = 0x4
+	SizeofRtAttr         = 0x4
+	SizeofIfInfomsg      = 0x10
+	SizeofIfAddrmsg      = 0x8
+	SizeofRtMsg          = 0xc
+	SizeofRtNexthop      = 0x8
 )
 
 type NlMsghdr struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index da70faa8..4b86fb2b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -103,6 +103,15 @@ const (
 	PathMax = 0x400
 )
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Len    uint8
 	Family uint8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index 0963ab8c..9048a509 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -107,6 +107,15 @@ const (
 	PathMax = 0x400
 )
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Len    uint8
 	Family uint8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 211f6419..00525e7b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -108,6 +108,15 @@ const (
 	PathMax = 0x400
 )
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Len    uint8
 	Family uint8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index d4454524..2248598d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -93,40 +93,40 @@ const (
 )
 
 type Stat_t struct {
-	Dev       uint64
-	Ino       uint64
-	Mode      uint32
-	Nlink     uint32
-	Uid       uint32
-	Gid       uint32
-	Rdev      uint64
-	Size      int64
-	Atim      Timespec
-	Mtim      Timespec
-	Ctim      Timespec
-	Blksize   int32
-	Pad_cgo_0 [4]byte
-	Blocks    int64
-	Fstype    [16]int8
+	Dev     uint64
+	Ino     uint64
+	Mode    uint32
+	Nlink   uint32
+	Uid     uint32
+	Gid     uint32
+	Rdev    uint64
+	Size    int64
+	Atim    Timespec
+	Mtim    Timespec
+	Ctim    Timespec
+	Blksize int32
+	_       [4]byte
+	Blocks  int64
+	Fstype  [16]int8
 }
 
 type Flock_t struct {
-	Type      int16
-	Whence    int16
-	Pad_cgo_0 [4]byte
-	Start     int64
-	Len       int64
-	Sysid     int32
-	Pid       int32
-	Pad       [4]int64
+	Type   int16
+	Whence int16
+	_      [4]byte
+	Start  int64
+	Len    int64
+	Sysid  int32
+	Pid    int32
+	Pad    [4]int64
 }
 
 type Dirent struct {
-	Ino       uint64
-	Off       int64
-	Reclen    uint16
-	Name      [1]int8
-	Pad_cgo_0 [5]byte
+	Ino    uint64
+	Off    int64
+	Reclen uint16
+	Name   [1]int8
+	_      [5]byte
 }
 
 type _Fsblkcnt_t uint64
@@ -213,13 +213,13 @@ type IPv6Mreq struct {
 type Msghdr struct {
 	Name         *byte
 	Namelen      uint32
-	Pad_cgo_0    [4]byte
+	_            [4]byte
 	Iov          *Iovec
 	Iovlen       int32
-	Pad_cgo_1    [4]byte
+	_            [4]byte
 	Accrights    *int8
 	Accrightslen int32
-	Pad_cgo_2    [4]byte
+	_            [4]byte
 }
 
 type Cmsghdr struct {
@@ -271,11 +271,11 @@ type Utsname struct {
 }
 
 type Ustat_t struct {
-	Tfree     int64
-	Tinode    uint64
-	Fname     [6]int8
-	Fpack     [6]int8
-	Pad_cgo_0 [4]byte
+	Tfree  int64
+	Tinode uint64
+	Fname  [6]int8
+	Fpack  [6]int8
+	_      [4]byte
 }
 
 const (
@@ -295,21 +295,21 @@ const (
 )
 
 type IfMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Data      IfData
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Data    IfData
 }
 
 type IfData struct {
 	Type       uint8
 	Addrlen    uint8
 	Hdrlen     uint8
-	Pad_cgo_0  [1]byte
+	_          [1]byte
 	Mtu        uint32
 	Metric     uint32
 	Baudrate   uint32
@@ -328,30 +328,30 @@ type IfData struct {
 }
 
 type IfaMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Addrs     int32
-	Flags     int32
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Metric    int32
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Addrs   int32
+	Flags   int32
+	Index   uint16
+	_       [2]byte
+	Metric  int32
 }
 
 type RtMsghdr struct {
-	Msglen    uint16
-	Version   uint8
-	Type      uint8
-	Index     uint16
-	Pad_cgo_0 [2]byte
-	Flags     int32
-	Addrs     int32
-	Pid       int32
-	Seq       int32
-	Errno     int32
-	Use       int32
-	Inits     uint32
-	Rmx       RtMetrics
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Index   uint16
+	_       [2]byte
+	Flags   int32
+	Addrs   int32
+	Pid     int32
+	Seq     int32
+	Errno   int32
+	Use     int32
+	Inits   uint32
+	Rmx     RtMetrics
 }
 
 type RtMetrics struct {
@@ -388,9 +388,9 @@ type BpfStat struct {
 }
 
 type BpfProgram struct {
-	Len       uint32
-	Pad_cgo_0 [4]byte
-	Insns     *BpfInsn
+	Len   uint32
+	_     [4]byte
+	Insns *BpfInsn
 }
 
 type BpfInsn struct {
@@ -406,30 +406,30 @@ type BpfTimeval struct {
 }
 
 type BpfHdr struct {
-	Tstamp    BpfTimeval
-	Caplen    uint32
-	Datalen   uint32
-	Hdrlen    uint16
-	Pad_cgo_0 [2]byte
+	Tstamp  BpfTimeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	_       [2]byte
 }
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Cc        [19]uint8
-	Pad_cgo_0 [1]byte
+	Iflag uint32
+	Oflag uint32
+	Cflag uint32
+	Lflag uint32
+	Cc    [19]uint8
+	_     [1]byte
 }
 
 type Termio struct {
-	Iflag     uint16
-	Oflag     uint16
-	Cflag     uint16
-	Lflag     uint16
-	Line      int8
-	Cc        [8]uint8
-	Pad_cgo_0 [1]byte
+	Iflag uint16
+	Oflag uint16
+	Cflag uint16
+	Lflag uint16
+	Line  int8
+	Cc    [8]uint8
+	_     [1]byte
 }
 
 type Winsize struct {
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index c40facce..090fbe87 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -25,12 +25,14 @@ import (
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-var DefaultBackoffConfig = BackoffConfig{
-	MaxDelay:  120 * time.Second,
-	baseDelay: 1.0 * time.Second,
-	factor:    1.6,
-	jitter:    0.2,
-}
+var (
+	DefaultBackoffConfig = BackoffConfig{
+		MaxDelay:  120 * time.Second,
+		baseDelay: 1.0 * time.Second,
+		factor:    1.6,
+		jitter:    0.2,
+	}
+)
 
 // backoffStrategy defines the methodology for backing off after a grpc
 // connection failure.
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index 300da6c5..ab65049d 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -28,7 +28,6 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/naming"
-	"google.golang.org/grpc/status"
 )
 
 // Address represents a server the client connects to.
@@ -311,7 +310,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
 	if !opts.BlockingWait {
 		if len(rr.addrs) == 0 {
 			rr.mu.Unlock()
-			err = status.Errorf(codes.Unavailable, "there is no address available")
+			err = Errorf(codes.Unavailable, "there is no address available")
 			return
 		}
 		// Returns the next addr on rr.addrs for failfast RPCs.
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 219a2940..84e10b63 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -23,7 +23,6 @@ package balancer
 import (
 	"errors"
 	"net"
-	"strings"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/connectivity"
@@ -34,23 +33,24 @@ import (
 var (
 	// m is a map from name to balancer builder.
 	m = make(map[string]Builder)
+	// defaultBuilder is the default balancer to use.
+	defaultBuilder Builder // TODO(bar) install pickfirst as default.
 )
 
 // Register registers the balancer builder to the balancer map.
-// b.Name (lowercased) will be used as the name registered with
-// this builder.
+// b.Name will be used as the name registered with this builder.
 func Register(b Builder) {
-	m[strings.ToLower(b.Name())] = b
+	m[b.Name()] = b
 }
 
 // Get returns the resolver builder registered with the given name.
-// Note that the compare is done in a case-insenstive fashion.
-// If no builder is register with the name, nil will be returned.
+// If no builder is register with the name, the default pickfirst will
+// be used.
 func Get(name string) Builder {
-	if b, ok := m[strings.ToLower(name)]; ok {
+	if b, ok := m[name]; ok {
 		return b
 	}
-	return nil
+	return defaultBuilder
 }
 
 // SubConn represents a gRPC sub connection.
@@ -66,11 +66,6 @@ func Get(name string) Builder {
 // When the connection encounters an error, it will reconnect immediately.
 // When the connection becomes IDLE, it will not reconnect unless Connect is
 // called.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
 type SubConn interface {
 	// UpdateAddresses updates the addresses used in this SubConn.
 	// gRPC checks if currently-connected address is still in the new list.
@@ -88,11 +83,6 @@ type SubConn interface {
 type NewSubConnOptions struct{}
 
 // ClientConn represents a gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
 type ClientConn interface {
 	// NewSubConn is called by balancer to create a new SubConn.
 	// It doesn't block and wait for the connections to be established.
@@ -109,9 +99,6 @@ type ClientConn interface {
 	// on the new picker to pick new SubConn.
 	UpdateBalancerState(s connectivity.State, p Picker)
 
-	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
-	ResolveNow(resolver.ResolveNowOption)
-
 	// Target returns the dial target for this ClientConn.
 	Target() string
 }
@@ -144,10 +131,6 @@ type PickOptions struct{}
 type DoneInfo struct {
 	// Err is the rpc error the RPC finished with. It could be nil.
 	Err error
-	// BytesSent indicates if any bytes have been sent to the server.
-	BytesSent bool
-	// BytesReceived indicates if any byte has been received from the server.
-	BytesReceived bool
 }
 
 var (
@@ -178,7 +161,7 @@ type Picker interface {
 	// If a SubConn is returned:
 	// - If it is READY, gRPC will send the RPC on it;
 	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
-	//   until UpdateBalancerState() is called and will call pick on the new picker.
+	//   this call until a new picker is updated and will call pick on the new picker.
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
deleted file mode 100644
index 1e962b72..00000000
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package base
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-type baseBuilder struct {
-	name          string
-	pickerBuilder PickerBuilder
-}
-
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &baseBalancer{
-		cc:            cc,
-		pickerBuilder: bb.pickerBuilder,
-
-		subConns: make(map[resolver.Address]balancer.SubConn),
-		scStates: make(map[balancer.SubConn]connectivity.State),
-		csEvltr:  &connectivityStateEvaluator{},
-		// Initialize picker to a picker that always return
-		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
-		// may call UpdateBalancerState with this picker.
-		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
-	}
-}
-
-func (bb *baseBuilder) Name() string {
-	return bb.name
-}
-
-type baseBalancer struct {
-	cc            balancer.ClientConn
-	pickerBuilder PickerBuilder
-
-	csEvltr *connectivityStateEvaluator
-	state   connectivity.State
-
-	subConns map[resolver.Address]balancer.SubConn
-	scStates map[balancer.SubConn]connectivity.State
-	picker   balancer.Picker
-}
-
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
-		return
-	}
-	grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
-	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
-	addrsSet := make(map[resolver.Address]struct{})
-	for _, a := range addrs {
-		addrsSet[a] = struct{}{}
-		if _, ok := b.subConns[a]; !ok {
-			// a is a new address (not existing in b.subConns).
-			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-			if err != nil {
-				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
-				continue
-			}
-			b.subConns[a] = sc
-			b.scStates[sc] = connectivity.Idle
-			sc.Connect()
-		}
-	}
-	for a, sc := range b.subConns {
-		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
-			b.cc.RemoveSubConn(sc)
-			delete(b.subConns, a)
-			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-		}
-	}
-}
-
-// regeneratePicker takes a snapshot of the balancer, and generates a picker
-// from it. The picker is
-//  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
-//  - built by the pickerBuilder with all READY SubConns otherwise.
-func (b *baseBalancer) regeneratePicker() {
-	if b.state == connectivity.TransientFailure {
-		b.picker = NewErrPicker(balancer.ErrTransientFailure)
-		return
-	}
-	readySCs := make(map[resolver.Address]balancer.SubConn)
-
-	// Filter out all ready SCs from full subConn map.
-	for addr, sc := range b.subConns {
-		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-			readySCs[addr] = sc
-		}
-	}
-	b.picker = b.pickerBuilder.Build(readySCs)
-}
-
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
-	oldS, ok := b.scStates[sc]
-	if !ok {
-		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
-		return
-	}
-	b.scStates[sc] = s
-	switch s {
-	case connectivity.Idle:
-		sc.Connect()
-	case connectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scStates. Remove state for this sc here.
-		delete(b.scStates, sc)
-	}
-
-	oldAggrState := b.state
-	b.state = b.csEvltr.recordTransition(oldS, s)
-
-	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
-		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
-		b.regeneratePicker()
-	}
-
-	b.cc.UpdateBalancerState(b.state, b.picker)
-	return
-}
-
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
-func (b *baseBalancer) Close() {
-}
-
-// NewErrPicker returns a picker that always returns err on Pick().
-func NewErrPicker(err error) balancer.Picker {
-	return &errPicker{err: err}
-}
-
-type errPicker struct {
-	err error // Pick() always returns this err.
-}
-
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
-}
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. The other
-// states, Idle and Shutdown, are transitioned into by ClientConn: at the beginning of the
-// connection, before any subConn is created, ClientConn is in the Idle state, and when
-// ClientConn closes it is in the Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
-}
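
For context on the connectivityStateEvaluator removed above: its evaluation step reduces to a fixed precedence of Ready over Connecting over TransientFailure. A minimal, self-contained sketch of just that rule (the State type and names below are illustrative stand-ins for the connectivity package, not part of this patch):

package main

import "fmt"

// State stands in for connectivity.State; only the three aggregated states matter here.
type State int

const (
	Ready State = iota
	Connecting
	TransientFailure
)

// aggregate mirrors the evaluation step of recordTransition: any Ready SubConn
// makes the ClientConn Ready; otherwise any Connecting SubConn makes it
// Connecting; otherwise it is TransientFailure.
func aggregate(numReady, numConnecting uint64) State {
	if numReady > 0 {
		return Ready
	}
	if numConnecting > 0 {
		return Connecting
	}
	return TransientFailure
}

func main() {
	names := []string{"Ready", "Connecting", "TransientFailure"}
	fmt.Println(names[aggregate(0, 2)]) // Connecting: nothing Ready yet
	fmt.Println(names[aggregate(1, 2)]) // Ready: a single Ready SubConn wins
	fmt.Println(names[aggregate(0, 0)]) // TransientFailure: nothing usable
}
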
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
deleted file mode 100644
index 012ace2f..00000000
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package base defines a balancer base that can be used to build balancers with
-// different picking algorithms.
-//
-// The base balancer creates a new SubConn for each resolved address. The
-// provided picker will only be notified about READY SubConns.
-//
-// This package is the base of the round_robin balancer; its purpose is to be used
-// to build round_robin-like balancers with complex picking algorithms.
-// Balancers with more complicated logic should try to implement a balancer
-// builder from scratch.
-//
-// All APIs in this package are experimental.
-package base
-
-import (
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// PickerBuilder creates balancer.Picker.
-type PickerBuilder interface {
-	// Build takes a slice of ready SubConns, and returns a picker that will be
-	// used by gRPC to pick a SubConn.
-	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
-}
-
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
-	return &baseBuilder{
-		name:          name,
-		pickerBuilder: pb,
-	}
-}
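
The removed base package exposes only two hooks, PickerBuilder and NewBalancerBuilder. As a hedged sketch (not part of this patch), this is roughly how a custom picking strategy could be plugged into that API; the package name, balancer name, and random strategy are purely illustrative:

package randomexample

import (
	"math/rand"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// pickerBuilder implements base.PickerBuilder: it is handed only the READY
// SubConns and returns the Picker gRPC will use for subsequent RPCs.
type pickerBuilder struct{}

func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &picker{subConns: scs}
}

// picker chooses a READY SubConn uniformly at random on every Pick.
type picker struct {
	subConns []balancer.SubConn
}

func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.subConns[rand.Intn(len(p.subConns))], nil, nil
}

func init() {
	// Register under a hypothetical name, mirroring what roundrobin.go does below.
	balancer.Register(base.NewBalancerBuilder("example_random", &pickerBuilder{}))
}
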
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
deleted file mode 100644
index 2eda0a1c..00000000
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
-// installed as one of the default balancers in gRPC; users don't need to
-// explicitly install this balancer.
-package roundrobin
-
-import (
-	"sync"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/balancer/base"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-// Name is the name of round_robin balancer.
-const Name = "round_robin"
-
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
-}
-
-func init() {
-	balancer.Register(newBuilder())
-}
-
-type rrPickerBuilder struct{}
-
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
-	var scs []balancer.SubConn
-	for _, sc := range readySCs {
-		scs = append(scs, sc)
-	}
-	return &rrPicker{
-		subConns: scs,
-	}
-}
-
-type rrPicker struct {
-	// subConns is the snapshot of the roundrobin balancer when this picker was
-	// created. The slice is immutable. Each Pick() will do a round robin
-	// selection from it and return the selected SubConn.
-	subConns []balancer.SubConn
-
-	mu   sync.Mutex
-	next int
-}
-
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if len(p.subConns) <= 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
-	p.mu.Lock()
-	sc := p.subConns[p.next]
-	p.next = (p.next + 1) % len(p.subConns)
-	p.mu.Unlock()
-	return sc, nil, nil
-}
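
The rrPicker deleted above just advances an index modulo the length of its immutable snapshot, under a mutex. A stripped-down sketch of that stepping logic, detached from the gRPC types:

package main

import (
	"fmt"
	"sync"
)

// rr cycles through a fixed snapshot of backends, like rrPicker above.
type rr struct {
	mu    sync.Mutex
	next  int
	items []string
}

func (r *rr) pick() string {
	r.mu.Lock()
	defer r.mu.Unlock()
	it := r.items[r.next]
	r.next = (r.next + 1) % len(r.items)
	return it
}

func main() {
	r := &rr{items: []string{"a", "b", "c"}}
	for i := 0; i < 5; i++ {
		fmt.Print(r.pick(), " ") // a b c a b
	}
	fmt.Println()
}
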
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index db6f0ae3..f5dbc4ba 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"fmt"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -74,7 +73,7 @@ func (b *scStateUpdateBuffer) load() {
 	}
 }
 
-// get returns the channel that the scStateUpdate will be sent to.
+// get returns the channel that receives a scStateUpdate in the buffer.
 //
 // Upon receiving, the caller should call load to send another
 // scStateChangeTuple onto the channel if there is any.
@@ -97,9 +96,6 @@ type ccBalancerWrapper struct {
 	stateChangeQueue *scStateUpdateBuffer
 	resolverUpdateCh chan *resolverUpdate
 	done             chan struct{}
-
-	mu       sync.Mutex
-	subConns map[*acBalancerWrapper]struct{}
 }
 
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
@@ -108,7 +104,6 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 		stateChangeQueue: newSCStateUpdateBuffer(),
 		resolverUpdateCh: make(chan *resolverUpdate, 1),
 		done:             make(chan struct{}),
-		subConns:         make(map[*acBalancerWrapper]struct{}),
 	}
 	go ccb.watcher()
 	ccb.balancer = b.Build(ccb, bopts)
@@ -122,20 +117,8 @@ func (ccb *ccBalancerWrapper) watcher() {
 		select {
 		case t := <-ccb.stateChangeQueue.get():
 			ccb.stateChangeQueue.load()
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
-			}
 			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
 		case t := <-ccb.resolverUpdateCh:
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
-			}
 			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
 		case <-ccb.done:
 		}
@@ -143,13 +126,6 @@ func (ccb *ccBalancerWrapper) watcher() {
 		select {
 		case <-ccb.done:
 			ccb.balancer.Close()
-			ccb.mu.Lock()
-			scs := ccb.subConns
-			ccb.subConns = nil
-			ccb.mu.Unlock()
-			for acbw := range scs {
-				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-			}
 			return
 		default:
 		}
@@ -189,54 +165,33 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if len(addrs) <= 0 {
-		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
-	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
-	}
+	grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs)
 	ac, err := ccb.cc.newAddrConn(addrs)
 	if err != nil {
 		return nil, err
 	}
 	acbw := &acBalancerWrapper{ac: ac}
-	acbw.ac.mu.Lock()
+	ac.mu.Lock()
 	ac.acbw = acbw
-	acbw.ac.mu.Unlock()
-	ccb.subConns[acbw] = struct{}{}
+	ac.mu.Unlock()
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	grpclog.Infof("ccBalancerWrapper: removing subconn")
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
 	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	delete(ccb.subConns, acbw)
 	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 }
 
 func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
+	grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p)
 	ccb.cc.csMgr.updateState(s)
 	ccb.cc.blockingpicker.updatePicker(p)
 }
 
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
-	ccb.cc.resolveNow(o)
-}
-
 func (ccb *ccBalancerWrapper) Target() string {
 	return ccb.cc.target
 }
@@ -249,12 +204,9 @@ type acBalancerWrapper struct {
 }
 
 func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs)
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
-	if len(addrs) <= 0 {
-		acbw.ac.tearDown(errConnDrain)
-		return
-	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
 		cc := acbw.ac.cc
 		acbw.ac.mu.Lock()
@@ -282,7 +234,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 		ac.acbw = acbw
 		ac.mu.Unlock()
 		if acState != connectivity.Idle {
-			ac.connect()
+			ac.connect(false)
 		}
 	}
 }
@@ -290,7 +242,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 func (acbw *acBalancerWrapper) Connect() {
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
-	acbw.ac.connect()
+	acbw.ac.connect(false)
 }
 
 func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index faabf87d..9d061608 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"strings"
 	"sync"
 
 	"golang.org/x/net/context"
@@ -28,7 +27,6 @@ import (
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/status"
 )
 
 type balancerWrapperBuilder struct {
@@ -36,27 +34,20 @@ type balancerWrapperBuilder struct {
 }
 
 func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	targetAddr := cc.Target()
-	targetSplitted := strings.Split(targetAddr, ":///")
-	if len(targetSplitted) >= 2 {
-		targetAddr = targetSplitted[1]
-	}
-
-	bwb.b.Start(targetAddr, BalancerConfig{
+	bwb.b.Start(cc.Target(), BalancerConfig{
 		DialCreds: opts.DialCreds,
 		Dialer:    opts.Dialer,
 	})
 	_, pickfirst := bwb.b.(*pickFirst)
 	bw := &balancerWrapper{
-		balancer:   bwb.b,
-		pickfirst:  pickfirst,
-		cc:         cc,
-		targetAddr: targetAddr,
-		startCh:    make(chan struct{}),
-		conns:      make(map[resolver.Address]balancer.SubConn),
-		connSt:     make(map[balancer.SubConn]*scState),
-		csEvltr:    &connectivityStateEvaluator{},
-		state:      connectivity.Idle,
+		balancer:  bwb.b,
+		pickfirst: pickfirst,
+		cc:        cc,
+		startCh:   make(chan struct{}),
+		conns:     make(map[resolver.Address]balancer.SubConn),
+		connSt:    make(map[balancer.SubConn]*scState),
+		csEvltr:   &connectivityStateEvaluator{},
+		state:     connectivity.Idle,
 	}
 	cc.UpdateBalancerState(connectivity.Idle, bw)
 	go bw.lbWatcher()
@@ -77,8 +68,7 @@ type balancerWrapper struct {
 	balancer  Balancer // The v1 balancer.
 	pickfirst bool
 
-	cc         balancer.ClientConn
-	targetAddr string // Target without the scheme.
+	cc balancer.ClientConn
 
 	// To aggregate the connectivity state.
 	csEvltr *connectivityStateEvaluator
@@ -98,11 +88,12 @@ type balancerWrapper struct {
 // connections accordingly.
 func (bw *balancerWrapper) lbWatcher() {
 	<-bw.startCh
+	grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst)
 	notifyCh := bw.balancer.Notify()
 	if notifyCh == nil {
 		// There's no resolver in the balancer. Connect directly.
 		a := resolver.Address{
-			Addr: bw.targetAddr,
+			Addr: bw.cc.Target(),
 			Type: resolver.Backend,
 		}
 		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
@@ -112,7 +103,7 @@ func (bw *balancerWrapper) lbWatcher() {
 			bw.mu.Lock()
 			bw.conns[a] = sc
 			bw.connSt[sc] = &scState{
-				addr: Address{Addr: bw.targetAddr},
+				addr: Address{Addr: bw.cc.Target()},
 				s:    connectivity.Idle,
 			}
 			bw.mu.Unlock()
@@ -174,10 +165,10 @@ func (bw *balancerWrapper) lbWatcher() {
 					sc.Connect()
 				}
 			} else {
+				oldSC.UpdateAddresses(newAddrs)
 				bw.mu.Lock()
 				bw.connSt[oldSC].addr = addrs[0]
 				bw.mu.Unlock()
-				oldSC.UpdateAddresses(newAddrs)
 			}
 		} else {
 			var (
@@ -230,6 +221,7 @@ func (bw *balancerWrapper) lbWatcher() {
 }
 
 func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s)
 	bw.mu.Lock()
 	defer bw.mu.Unlock()
 	scSt, ok := bw.connSt[sc]
@@ -318,12 +310,12 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
 			Metadata:   a.Metadata,
 		}]
 		if !ok && failfast {
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
 		}
 		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
 			// If the returned sc is not ready and RPC is failfast,
 			// return error, and this RPC will fail.
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
 		}
 	}
 
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 13cf8b13..1ef2507c 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"bytes"
 	"io"
 	"time"
 
@@ -26,7 +27,6 @@ import (
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
@@ -60,19 +60,9 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
 	}
 	for {
 		if c.maxReceiveMessageSize == nil {
-			return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
 		}
-
-		// Set dc if it exists and matches the message compression type used,
-		// otherwise set comp if a registered compressor exists for it.
-		var comp encoding.Compressor
-		var dc Decompressor
-		if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
-			dc = dopts.dc
-		} else if rc != "" && rc != encoding.Identity {
-			comp = encoding.GetCompressor(rc)
-		}
-		if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
+		if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
 			if err == io.EOF {
 				break
 			}
@@ -99,33 +89,26 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
 		}
 	}()
 	var (
+		cbuf       *bytes.Buffer
 		outPayload *stats.OutPayload
 	)
+	if compressor != nil {
+		cbuf = new(bytes.Buffer)
+	}
 	if dopts.copts.StatsHandler != nil {
 		outPayload = &stats.OutPayload{
 			Client: true,
 		}
 	}
-	// Set comp and clear compressor if a registered compressor matches the type
-	// specified via UseCompressor.  (And error if a matching compressor is not
-	// registered.)
-	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" && ct != encoding.Identity {
-		compressor = nil // Disable the legacy compressor.
-		comp = encoding.GetCompressor(ct)
-		if comp == nil {
-			return status.Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
-		}
-	}
-	hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
+	hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
 	if err != nil {
 		return err
 	}
 	if c.maxSendMessageSize == nil {
-		return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
 	}
 	if len(data) > *c.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
+		return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
 	}
 	err = t.Write(stream, hdr, data, opts)
 	if err == nil && outPayload != nil {
@@ -142,23 +125,16 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
 	return nil
 }
 
-// Invoke sends the RPC request on the wire and returns after response is
-// received.  This is typically called by generated code.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+// Invoke sends the RPC request on the wire and returns after response is received.
+// Invoke is called by generated code. Also users can call Invoke directly when it
+// is really needed in their use cases.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
 	if cc.dopts.unaryInt != nil {
 		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
 	}
 	return invoke(ctx, method, args, reply, cc, opts...)
 }
 
-// Invoke sends the RPC request on the wire and returns after response is
-// received.  This is typically called by generated code.
-//
-// DEPRECATED: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
-	return cc.Invoke(ctx, method, args, reply, opts...)
-}
-
 func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 	c := defaultCallInfo()
 	mc := cc.GetMethodConfig(method)
@@ -226,45 +202,57 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
 		Last:  true,
 		Delay: false,
 	}
-	callHdr := &transport.CallHdr{
-		Host:   cc.authority,
-		Method: method,
-	}
-	if c.creds != nil {
-		callHdr.Creds = c.creds
-	}
-	if c.compressorType != "" {
-		callHdr.SendCompress = c.compressorType
-	} else if cc.dopts.cp != nil {
-		callHdr.SendCompress = cc.dopts.cp.Type()
-	}
-	firstAttempt := true
-
 	for {
-		// Check to make sure the context has expired.  This will prevent us from
-		// looping forever if an error occurs for wait-for-ready RPCs where no data
-		// is sent on the wire.
-		select {
-		case <-ctx.Done():
-			return toRPCErr(ctx.Err())
-		default:
+		var (
+			err    error
+			t      transport.ClientTransport
+			stream *transport.Stream
+			// Record the done handler from Balancer.Get(...). It is called once the
+			// RPC has completed or failed.
+			done func(balancer.DoneInfo)
+		)
+		// TODO(zhaoq): Need a formal spec of fail-fast.
+		callHdr := &transport.CallHdr{
+			Host:   cc.authority,
+			Method: method,
+		}
+		if cc.dopts.cp != nil {
+			callHdr.SendCompress = cc.dopts.cp.Type()
+		}
+		if c.creds != nil {
+			callHdr.Creds = c.creds
 		}
 
-		// Record the done handler from Balancer.Get(...). It is called once the
-		// RPC has completed or failed.
-		t, done, err := cc.getTransport(ctx, c.failFast)
+		t, done, err = cc.getTransport(ctx, c.failFast)
 		if err != nil {
-			return err
+			// TODO(zhaoq): Probably revisit the error handling.
+			if _, ok := status.FromError(err); ok {
+				return err
+			}
+			if err == errConnClosing || err == errConnUnavailable {
+				if c.failFast {
+					return Errorf(codes.Unavailable, "%v", err)
+				}
+				continue
+			}
+			// All the other errors are treated as Internal errors.
+			return Errorf(codes.Internal, "%v", err)
 		}
-		stream, err := t.NewStream(ctx, callHdr)
+		if c.traceInfo.tr != nil {
+			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
+		}
+		stream, err = t.NewStream(ctx, callHdr)
 		if err != nil {
 			if done != nil {
+				if _, ok := err.(transport.ConnectionError); ok {
+					// If the error is a connection error, the transport was sending data on
+					// the wire, and we are not sure if anything has been sent on the wire.
+					// If the error is not a connection error, we are sure nothing has been sent.
+					updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
+				}
 				done(balancer.DoneInfo{Err: err})
 			}
-			// In the event of any error from NewStream, we never attempted to write
-			// anything to the wire, so we can retry indefinitely for non-fail-fast
-			// RPCs.
-			if !c.failFast {
+			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
 				continue
 			}
 			return toRPCErr(err)
@@ -272,51 +260,34 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
 		if peer, ok := peer.FromContext(stream.Context()); ok {
 			c.peer = peer
 		}
-		if c.traceInfo.tr != nil {
-			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
-		}
 		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
 		if err != nil {
 			if done != nil {
-				done(balancer.DoneInfo{
-					Err:           err,
-					BytesSent:     true,
-					BytesReceived: stream.BytesReceived(),
+				updateRPCInfoInContext(ctx, rpcInfo{
+					bytesSent:     stream.BytesSent(),
+					bytesReceived: stream.BytesReceived(),
 				})
+				done(balancer.DoneInfo{Err: err})
 			}
 			// Retry a non-failfast RPC when
-			// i) the server started to drain before this RPC was initiated.
-			// ii) the server refused the stream.
-			if !c.failFast && stream.Unprocessed() {
-				// In this case, the server did not receive the data, but we still
-				// created wire traffic, so we should not retry indefinitely.
-				if firstAttempt {
-					// TODO: Add a field to header for grpc-transparent-retry-attempts
-					firstAttempt = false
-					continue
-				}
-				// Otherwise, give up and return an error anyway.
+			// i) there is a connection error; or
+			// ii) the server started to drain before this RPC was initiated.
+			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+				continue
 			}
 			return toRPCErr(err)
 		}
 		err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
 		if err != nil {
 			if done != nil {
-				done(balancer.DoneInfo{
-					Err:           err,
-					BytesSent:     true,
-					BytesReceived: stream.BytesReceived(),
+				updateRPCInfoInContext(ctx, rpcInfo{
+					bytesSent:     stream.BytesSent(),
+					bytesReceived: stream.BytesReceived(),
 				})
+				done(balancer.DoneInfo{Err: err})
 			}
-			if !c.failFast && stream.Unprocessed() {
-				// In these cases, the server did not receive the data, but we still
-				// created wire traffic, so we should not retry indefinitely.
-				if firstAttempt {
-					// TODO: Add a field to header for grpc-transparent-retry-attempts
-					firstAttempt = false
-					continue
-				}
-				// Otherwise, give up and return an error anyway.
+			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+				continue
 			}
 			return toRPCErr(err)
 		}
@@ -324,23 +295,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
 			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
 		}
 		t.CloseStream(stream, nil)
-		err = stream.Status().Err()
 		if done != nil {
-			done(balancer.DoneInfo{
-				Err:           err,
-				BytesSent:     true,
-				BytesReceived: stream.BytesReceived(),
+			updateRPCInfoInContext(ctx, rpcInfo{
+				bytesSent:     stream.BytesSent(),
+				bytesReceived: stream.BytesReceived(),
 			})
+			done(balancer.DoneInfo{Err: err})
 		}
-		if !c.failFast && stream.Unprocessed() {
-			// In these cases, the server did not receive the data, but we still
-			// created wire traffic, so we should not retry indefinitely.
-			if firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-		}
-		return err
+		return stream.Status().Err()
 	}
 }
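
Note that this file also reverts Invoke from a ClientConn method back to the package-level function, so call sites pass the connection as an argument again. A hedged sketch of that call shape (the method path and the req/reply values are hypothetical; real ones come from generated protobuf code):

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
)

// callSay shows only the call-site shape under the reverted, package-level
// Invoke signature: (ctx, method, args, reply, cc, opts...).
func callSay(ctx context.Context, conn *grpc.ClientConn, req, reply interface{}) error {
	return grpc.Invoke(ctx, "/example.Echo/Say", req, reply, conn, grpc.FailFast(false))
}
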
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index bfbef362..71de2e50 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -31,14 +31,11 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
-	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
-	_ "google.golang.org/grpc/resolver/dns"         // To register dns resolver.
-	_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/transport"
 )
@@ -51,20 +48,7 @@ var (
 	// underlying connections within the specified timeout.
 	// DEPRECATED: Please use context.DeadlineExceeded instead.
 	ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
-	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
-	errConnDrain = errors.New("grpc: the connection is drained")
-	// errConnClosing indicates that the connection is closing.
-	errConnClosing = errors.New("grpc: the connection is closing")
-	// errConnUnavailable indicates that the connection is unavailable.
-	errConnUnavailable = errors.New("grpc: the connection is unavailable")
-	// errBalancerClosed indicates that the balancer is closed.
-	errBalancerClosed = errors.New("grpc: balancer is closed")
-	// minimum time to give a connection to complete
-	minConnectTimeout = 20 * time.Second
-)
 
-// The following errors are returned from Dial and DialContext
-var (
 	// errNoTransportSecurity indicates that there is no transport security
 	// being set for ClientConn. Users should either set one or explicitly
 	// call WithInsecure DialOption to disable security.
@@ -78,6 +62,16 @@ var (
 	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
 	// errNetworkIO indicates that the connection is down due to some network I/O error.
 	errNetworkIO = errors.New("grpc: failed with network I/O error")
+	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+	errConnDrain = errors.New("grpc: the connection is drained")
+	// errConnClosing indicates that the connection is closing.
+	errConnClosing = errors.New("grpc: the connection is closing")
+	// errConnUnavailable indicates that the connection is unavailable.
+	errConnUnavailable = errors.New("grpc: the connection is unavailable")
+	// errBalancerClosed indicates that the balancer is closed.
+	errBalancerClosed = errors.New("grpc: balancer is closed")
+	// minimum time to give a connection to complete
+	minConnectTimeout = 20 * time.Second
 )
 
 // dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -95,14 +89,8 @@ type dialOptions struct {
 	scChan      <-chan ServiceConfig
 	copts       transport.ConnectOptions
 	callOptions []CallOption
-	// This is used by v1 balancer dial option WithBalancer to support v1
-	// balancer, and also by WithBalancerName dial option.
+	// This is to support v1 balancer.
 	balancerBuilder balancer.Builder
-	// This is to support grpclb.
-	resolverBuilder resolver.Builder
-	// Custom user options for resolver.Build.
-	resolverBuildUserOptions interface{}
-	waitForHandshake         bool
 }
 
 const (
@@ -113,15 +101,6 @@ const (
 // DialOption configures how we set up the connection.
 type DialOption func(*dialOptions)
 
-// WithWaitForHandshake blocks until the initial settings frame is received from the
-// server before assigning RPCs to the connection.
-// Experimental API.
-func WithWaitForHandshake() DialOption {
-	return func(o *dialOptions) {
-		o.waitForHandshake = true
-	}
-}
-
 // WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
 // before doing a write on the wire.
 func WithWriteBufferSize(s int) DialOption {
@@ -173,26 +152,16 @@ func WithCodec(c Codec) DialOption {
 	}
 }
 
-// WithCompressor returns a DialOption which sets a Compressor to use for
-// message compression. It has lower priority than the compressor set by
-// the UseCompressor CallOption.
-//
-// Deprecated: use UseCompressor instead.
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression.
 func WithCompressor(cp Compressor) DialOption {
 	return func(o *dialOptions) {
 		o.cp = cp
 	}
 }
 
-// WithDecompressor returns a DialOption which sets a Decompressor to use for
-// incoming message decompression.  If incoming response messages are encoded
-// using the decompressor's Type(), it will be used.  Otherwise, the message
-// encoding will be used to look up the compressor registered via
-// encoding.RegisterCompressor, which will then be used to decompress the
-// message.  If no compressor is registered for the encoding, an Unimplemented
-// status error will be returned.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression.
 func WithDecompressor(dc Decompressor) DialOption {
 	return func(o *dialOptions) {
 		o.dc = dc
@@ -201,8 +170,7 @@ func WithDecompressor(dc Decompressor) DialOption {
 
 // WithBalancer returns a DialOption which sets a load balancer with the v1 API.
 // Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
+// Deprecated: use the new balancer APIs in balancer package instead.
 func WithBalancer(b Balancer) DialOption {
 	return func(o *dialOptions) {
 		o.balancerBuilder = &balancerWrapperBuilder{
@@ -211,42 +179,16 @@ func WithBalancer(b Balancer) DialOption {
 	}
 }
 
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// This is an EXPERIMENTAL API.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return func(o *dialOptions) {
-		o.balancerBuilder = builder
-	}
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
+// WithBalancerBuilder is for testing only. Users using custom balancers should
+// register their balancer and use service config to choose the balancer to use.
+func WithBalancerBuilder(b balancer.Builder) DialOption {
+	// TODO(bar) remove this when switching balancer is done.
 	return func(o *dialOptions) {
-		o.resolverBuilder = b
-	}
-}
-
-// WithResolverUserOptions returns a DialOption which sets the UserOptions
-// field of resolver's BuildOption.
-func WithResolverUserOptions(userOpt interface{}) DialOption {
-	return func(o *dialOptions) {
-		o.resolverBuildUserOptions = userOpt
+		o.balancerBuilder = b
 	}
 }
 
 // WithServiceConfig returns a DialOption which has a channel to read the service configuration.
-// DEPRECATED: service config should be received through name resolver, as specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
 func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	return func(o *dialOptions) {
 		o.scChan = c
@@ -271,7 +213,7 @@ func WithBackoffConfig(b BackoffConfig) DialOption {
 	return withBackoff(b)
 }
 
-// withBackoff sets the backoff strategy used for connectRetryNum after a
+// withBackoff sets the backoff strategy used for retries after a
 // failed connection attempt.
 //
 // This can be exported if arbitrary backoff strategies are allowed by gRPC.
@@ -323,23 +265,18 @@ func WithTimeout(d time.Duration) DialOption {
 	}
 }
 
-func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
-	return func(o *dialOptions) {
-		o.copts.Dialer = f
-	}
-}
-
 // WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
 // If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
 // Temporary() method to decide if it should try to reconnect to the network address.
 func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
-	return withContextDialer(
-		func(ctx context.Context, addr string) (net.Conn, error) {
+	return func(o *dialOptions) {
+		o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
 			if deadline, ok := ctx.Deadline(); ok {
 				return f(addr, deadline.Sub(time.Now()))
 			}
 			return f(addr, 0)
-		})
+		}
+	}
 }
 
 // WithStatsHandler returns a DialOption that specifies the stats handler
@@ -441,7 +378,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if cc.dopts.copts.Dialer == nil {
 		cc.dopts.copts.Dialer = newProxyDialer(
 			func(ctx context.Context, addr string) (net.Conn, error) {
-				return dialContext(ctx, "tcp", addr)
+				return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
 			},
 		)
 	}
@@ -489,18 +426,51 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if cc.dopts.bs == nil {
 		cc.dopts.bs = DefaultBackoffConfig
 	}
-	cc.parsedTarget = parseTarget(cc.target)
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
 		cc.authority = creds.Info().ServerName
 	} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
 		cc.authority = cc.dopts.copts.Authority
 	} else {
-		// Use endpoint from "scheme://authority/endpoint" as the default
-		// authority for ClientConn.
-		cc.authority = cc.parsedTarget.Endpoint
+		cc.authority = target
 	}
 
+	if cc.dopts.balancerBuilder != nil {
+		var credsClone credentials.TransportCredentials
+		if creds != nil {
+			credsClone = creds.Clone()
+		}
+		buildOpts := balancer.BuildOptions{
+			DialCreds: credsClone,
+			Dialer:    cc.dopts.copts.Dialer,
+		}
+		// Build should not take long time. So it's ok to not have a goroutine for it.
+		// TODO(bar) init balancer after first resolver result to support service config balancer.
+		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts)
+	} else {
+		waitC := make(chan error, 1)
+		go func() {
+			defer close(waitC)
+			// No balancer, or no resolver within the balancer.  Connect directly.
+			ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}})
+			if err != nil {
+				waitC <- err
+				return
+			}
+			if err := ac.connect(cc.dopts.block); err != nil {
+				waitC <- err
+				return
+			}
+		}()
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case err := <-waitC:
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
 	if cc.dopts.scChan != nil && !scSet {
 		// Blocking wait for the initial service config.
 		select {
@@ -516,28 +486,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		go cc.scWatcher()
 	}
 
-	var credsClone credentials.TransportCredentials
-	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
-		credsClone = creds.Clone()
-	}
-	cc.balancerBuildOpts = balancer.BuildOptions{
-		DialCreds: credsClone,
-		Dialer:    cc.dopts.copts.Dialer,
-	}
-
 	// Build the resolver.
 	cc.resolverWrapper, err = newCCResolverWrapper(cc)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
-	// Start the resolver wrapper goroutine after resolverWrapper is created.
-	//
-	// If the goroutine is started before resolverWrapper is ready, the
-	// following may happen: The goroutine sends updates to cc. cc forwards
-	// those to balancer. Balancer creates new addrConn. addrConn fails to
-	// connect, and calls resolveNow(). resolveNow() tries to use the non-ready
-	// resolverWrapper.
-	cc.resolverWrapper.start()
+
+	if cc.balancerWrapper != nil && cc.resolverWrapper == nil {
+		// TODO(bar) there should always be a resolver (DNS as the default).
+		// Unblock balancer initialization with a fake resolver update if there's no resolver.
+		// The balancer wrapper will not read the addresses, so an empty list works.
+		// TODO(bar) remove this after the real resolver is started.
+		cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil)
+	}
 
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
@@ -604,26 +565,21 @@ type ClientConn struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 
-	target       string
-	parsedTarget resolver.Target
-	authority    string
-	dopts        dialOptions
-	csMgr        *connectivityStateManager
+	target    string
+	authority string
+	dopts     dialOptions
+	csMgr     *connectivityStateManager
 
-	balancerBuildOpts balancer.BuildOptions
-	resolverWrapper   *ccResolverWrapper
-	blockingpicker    *pickerWrapper
+	balancerWrapper *ccBalancerWrapper
+	resolverWrapper *ccResolverWrapper
+
+	blockingpicker *pickerWrapper
 
 	mu    sync.RWMutex
 	sc    ServiceConfig
-	scRaw string
 	conns map[*addrConn]struct{}
 	// Keepalive parameter can be updated if a GoAway is received.
-	mkp             keepalive.ClientParameters
-	curBalancerName string
-	preBalancerName string // previous balancer name.
-	curAddresses    []resolver.Address
-	balancerWrapper *ccBalancerWrapper
+	mkp keepalive.ClientParameters
 }
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -659,7 +615,6 @@ func (cc *ClientConn) scWatcher() {
 			// TODO: load balance policy runtime change is ignored.
 			// We may revist this decision in the future.
 			cc.sc = sc
-			cc.scRaw = ""
 			cc.mu.Unlock()
 		case <-cc.ctx.Done():
 			return
@@ -667,113 +622,7 @@ func (cc *ClientConn) scWatcher() {
 	}
 }
 
-func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	if cc.conns == nil {
-		// cc was closed.
-		return
-	}
-
-	if reflect.DeepEqual(cc.curAddresses, addrs) {
-		return
-	}
-
-	cc.curAddresses = addrs
-
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var isGRPCLB bool
-		for _, a := range addrs {
-			if a.Type == resolver.GRPCLB {
-				isGRPCLB = true
-				break
-			}
-		}
-		var newBalancerName string
-		if isGRPCLB {
-			newBalancerName = grpclbName
-		} else {
-			// Address list doesn't contain grpclb address. Try to pick a
-			// non-grpclb balancer.
-			newBalancerName = cc.curBalancerName
-			// If current balancer is grpclb, switch to the previous one.
-			if newBalancerName == grpclbName {
-				newBalancerName = cc.preBalancerName
-			}
-			// The following could be true in two cases:
-			// - the first time handling resolved addresses
-			//   (curBalancerName="")
-			// - the first time handling non-grpclb addresses
-			//   (curBalancerName="grpclb", preBalancerName="")
-			if newBalancerName == "" {
-				newBalancerName = PickFirstBalancerName
-			}
-		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
-	}
-
-	cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
-}
-
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
-	if cc.conns == nil {
-		return
-	}
-
-	if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
-		return
-	}
-
-	grpclog.Infof("ClientConn switching balancer to %q", name)
-	if cc.dopts.balancerBuilder != nil {
-		grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
-		return
-	}
-	// TODO(bar switching) change this to two steps: drain and close.
-	// Keep track of sc in wrapper.
-	if cc.balancerWrapper != nil {
-		cc.balancerWrapper.close()
-	}
-
-	builder := balancer.Get(name)
-	if builder == nil {
-		grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
-		builder = newPickfirstBuilder()
-	}
-	cc.preBalancerName = cc.curBalancerName
-	cc.curBalancerName = builder.Name()
-	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
-}
-
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return
-	}
-	// TODO(bar switching) send updates to all balancer wrappers when balancer
-	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s)
-	cc.mu.Unlock()
-}
-
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
-//
-// Caller needs to make sure len(addrs) > 0.
 func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
 	ac := &addrConn{
 		cc:    cc,
@@ -810,7 +659,7 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
 // It does nothing if the ac is not IDLE.
 // TODO(bar) Move this to the addrConn section.
 // This was part of resetAddrConn, keep it here to make the diff look clean.
-func (ac *addrConn) connect() error {
+func (ac *addrConn) connect(block bool) error {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
 		ac.mu.Unlock()
@@ -821,21 +670,39 @@ func (ac *addrConn) connect() error {
 		return nil
 	}
 	ac.state = connectivity.Connecting
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	if ac.cc.balancerWrapper != nil {
+		ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+	} else {
+		ac.cc.csMgr.updateState(ac.state)
+	}
 	ac.mu.Unlock()
 
-	// Start a goroutine connecting to the server asynchronously.
-	go func() {
+	if block {
 		if err := ac.resetTransport(); err != nil {
-			grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
 			if err != errConnClosing {
-				// Keep this ac in cc.conns, to get the reason it's torn down.
 				ac.tearDown(err)
 			}
-			return
+			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+				return e.Origin()
+			}
+			return err
 		}
-		ac.transportMonitor()
-	}()
+		// Start to monitor the error status of transport.
+		go ac.transportMonitor()
+	} else {
+		// Start a goroutine connecting to the server asynchronously.
+		go func() {
+			if err := ac.resetTransport(); err != nil {
+				grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
+				if err != errConnClosing {
+					// Keep this ac in cc.conns, to get the reason it's torn down.
+					ac.tearDown(err)
+				}
+				return
+			}
+			ac.transportMonitor()
+		}()
+	}
 	return nil
 }
 
@@ -864,7 +731,6 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 	grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
 	if curAddrFound {
 		ac.addrs = addrs
-		ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
 	}
 
 	return curAddrFound
@@ -890,6 +756,31 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 }
 
 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+	if cc.balancerWrapper == nil {
+		// If balancer is nil, there should be only one addrConn available.
+		cc.mu.RLock()
+		if cc.conns == nil {
+			cc.mu.RUnlock()
+			// TODO this function returns toRPCErr and non-toRPCErr. Clean up
+			// the errors in ClientConn.
+			return nil, nil, toRPCErr(ErrClientConnClosing)
+		}
+		var ac *addrConn
+		for ac = range cc.conns {
+			// Break after the first iteration to get the first addrConn.
+			break
+		}
+		cc.mu.RUnlock()
+		if ac == nil {
+			return nil, nil, errConnClosing
+		}
+		t, err := ac.wait(ctx, false /*hasBalancer*/, failfast)
+		if err != nil {
+			return nil, nil, err
+		}
+		return t, nil, nil
+	}
+
 	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
 	if err != nil {
 		return nil, nil, toRPCErr(err)
@@ -897,43 +788,6 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transpor
 	return t, done, nil
 }
 
-// handleServiceConfig parses the service config string in JSON format to Go native
-// struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
-func (cc *ClientConn) handleServiceConfig(js string) error {
-	sc, err := parseServiceConfig(js)
-	if err != nil {
-		return err
-	}
-	cc.mu.Lock()
-	cc.scRaw = js
-	cc.sc = sc
-	if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
-		if cc.curBalancerName == grpclbName {
-			// If current balancer is grpclb, there's at least one grpclb
-			// balancer address in the resolved list. Don't switch the balancer,
-			// but change the previous balancer name, so if a new resolved
-			// address list doesn't contain grpclb address, balancer will be
-			// switched to *sc.LB.
-			cc.preBalancerName = *sc.LB
-		} else {
-			cc.switchBalancer(*sc.LB)
-			cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
-		}
-	}
-	cc.mu.Unlock()
-	return nil
-}
-
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
-	cc.mu.Lock()
-	r := cc.resolverWrapper
-	cc.mu.Unlock()
-	if r == nil {
-		return
-	}
-	go r.resolveNow(o)
-}
-
 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
 	cc.cancel()
@@ -946,18 +800,13 @@ func (cc *ClientConn) Close() error {
 	conns := cc.conns
 	cc.conns = nil
 	cc.csMgr.updateState(connectivity.Shutdown)
-
-	rWrapper := cc.resolverWrapper
-	cc.resolverWrapper = nil
-	bWrapper := cc.balancerWrapper
-	cc.balancerWrapper = nil
 	cc.mu.Unlock()
 	cc.blockingpicker.close()
-	if rWrapper != nil {
-		rWrapper.close()
+	if cc.resolverWrapper != nil {
+		cc.resolverWrapper.close()
 	}
-	if bWrapper != nil {
-		bWrapper.close()
+	if cc.balancerWrapper != nil {
+		cc.balancerWrapper.close()
 	}
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
@@ -970,16 +819,15 @@ type addrConn struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 
-	cc     *ClientConn
-	addrs  []resolver.Address
-	dopts  dialOptions
-	events trace.EventLog
-	acbw   balancer.SubConn
+	cc      *ClientConn
+	curAddr resolver.Address
+	addrs   []resolver.Address
+	dopts   dialOptions
+	events  trace.EventLog
+	acbw    balancer.SubConn
 
-	mu           sync.Mutex
-	curAddr      resolver.Address
-	reconnectIdx int // The index in addrs list to start reconnecting from.
-	state        connectivity.State
+	mu    sync.Mutex
+	state connectivity.State
 	// ready is closed and becomes nil when a new transport is up or failed
 	// due to timeout.
 	ready     chan struct{}
@@ -987,21 +835,13 @@ type addrConn struct {
 
 	// The reason this addrConn is torn down.
 	tearDownErr error
-
-	connectRetryNum int
-	// backoffDeadline is the time until which resetTransport needs to
-	// wait before increasing connectRetryNum count.
-	backoffDeadline time.Time
-	// connectDeadline is the time by which all connection
-	// negotiations must complete.
-	connectDeadline time.Time
 }
 
 // adjustParams updates parameters used to create transports upon
 // receiving a GoAway.
 func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 	switch r {
-	case transport.GoAwayTooManyPings:
+	case transport.TooManyPings:
 		v := 2 * ac.dopts.copts.KeepaliveParams.Time
 		ac.cc.mu.Lock()
 		if v > ac.cc.mkp.Time {
@@ -1029,15 +869,6 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
 
 // resetTransport recreates a transport to the address for ac.  The old
 // transport will close itself on error or when the clientconn is closed.
-// The created transport must receive initial settings frame from the server.
-// In case that doesn't happen, transportMonitor will kill the newly created
-// transport after connectDeadline has expired.
-// In case there was an error on the transport before the settings frame was
-// received, resetTransport resumes connecting to backends after the one that
-// was previously connected to. In case end of the list is reached, resetTransport
-// backs off until the original deadline.
-// If the DialOption WithWaitForHandshake was set, resetTrasport returns
-// successfully only after server settings are received.
 //
 // TODO(bar) make sure all state transitions are valid.
 func (ac *addrConn) resetTransport() error {
@@ -1051,38 +882,19 @@ func (ac *addrConn) resetTransport() error {
 		ac.ready = nil
 	}
 	ac.transport = nil
-	ridx := ac.reconnectIdx
+	ac.curAddr = resolver.Address{}
 	ac.mu.Unlock()
 	ac.cc.mu.RLock()
 	ac.dopts.copts.KeepaliveParams = ac.cc.mkp
 	ac.cc.mu.RUnlock()
-	var backoffDeadline, connectDeadline time.Time
-	for connectRetryNum := 0; ; connectRetryNum++ {
+	for retries := 0; ; retries++ {
+		sleepTime := ac.dopts.bs.backoff(retries)
+		timeout := minConnectTimeout
 		ac.mu.Lock()
-		if ac.backoffDeadline.IsZero() {
-			// This means either a successful HTTP2 connection was established
-			// or this is the first time this addrConn is trying to establish a
-			// connection.
-			backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
-			// This will be the duration that dial gets to finish.
-			dialDuration := minConnectTimeout
-			if backoffFor > dialDuration {
-				// Give dial more time as we keep failing to connect.
-				dialDuration = backoffFor
-			}
-			start := time.Now()
-			backoffDeadline = start.Add(backoffFor)
-			connectDeadline = start.Add(dialDuration)
-			ridx = 0 // Start connecting from the beginning.
-		} else {
-			// Continue trying to connect with the same deadlines.
-			connectRetryNum = ac.connectRetryNum
-			backoffDeadline = ac.backoffDeadline
-			connectDeadline = ac.connectDeadline
-			ac.backoffDeadline = time.Time{}
-			ac.connectDeadline = time.Time{}
-			ac.connectRetryNum = 0
+		if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) {
+			timeout = time.Duration(int(sleepTime) / len(ac.addrs))
 		}
+		connectTime := time.Now()
 		if ac.state == connectivity.Shutdown {
 			ac.mu.Unlock()
 			return errConnClosing
@@ -1090,166 +902,116 @@ func (ac *addrConn) resetTransport() error {
 		ac.printf("connecting")
 		if ac.state != connectivity.Connecting {
 			ac.state = connectivity.Connecting
-			ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+			// TODO(bar) remove condition once we always have a balancer.
+			if ac.cc.balancerWrapper != nil {
+				ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+			} else {
+				ac.cc.csMgr.updateState(ac.state)
+			}
 		}
 		// copy ac.addrs in case of race
 		addrsIter := make([]resolver.Address, len(ac.addrs))
 		copy(addrsIter, ac.addrs)
 		copts := ac.dopts.copts
 		ac.mu.Unlock()
-		connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
-		if err != nil {
-			return err
-		}
-		if connected {
-			return nil
-		}
-	}
-}
-
-// createTransport creates a connection to one of the backends in addrs.
-// It returns true if a connection was established.
-func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
-	for i := ridx; i < len(addrs); i++ {
-		addr := addrs[i]
-		target := transport.TargetInfo{
-			Addr:      addr.Addr,
-			Metadata:  addr.Metadata,
-			Authority: ac.cc.authority,
-		}
-		done := make(chan struct{})
-		onPrefaceReceipt := func() {
+		for _, addr := range addrsIter {
 			ac.mu.Lock()
-			close(done)
-			if !ac.backoffDeadline.IsZero() {
-				// If we haven't already started reconnecting to
-				// other backends.
-				// Note, this can happen when writer notices an error
-				// and triggers resetTransport while at the same time
-				// reader receives the preface and invokes this closure.
-				ac.backoffDeadline = time.Time{}
-				ac.connectDeadline = time.Time{}
-				ac.connectRetryNum = 0
+			if ac.state == connectivity.Shutdown {
+				// ac.tearDown(...) has been invoked.
+				ac.mu.Unlock()
+				return errConnClosing
 			}
 			ac.mu.Unlock()
-		}
-		// Do not cancel in the success path because of
-		// this issue in Go1.6: https://github.com/golang/go/issues/15078.
-		connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
-		newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
-		if err != nil {
-			cancel()
-			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+			sinfo := transport.TargetInfo{
+				Addr:     addr.Addr,
+				Metadata: addr.Metadata,
+			}
+			newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout)
+			if err != nil {
+				if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+					ac.mu.Lock()
+					if ac.state != connectivity.Shutdown {
+						ac.state = connectivity.TransientFailure
+						if ac.cc.balancerWrapper != nil {
+							ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+						} else {
+							ac.cc.csMgr.updateState(ac.state)
+						}
+					}
+					ac.mu.Unlock()
+					return err
+				}
+				grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr)
 				ac.mu.Lock()
-				if ac.state != connectivity.Shutdown {
-					ac.state = connectivity.TransientFailure
-					ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+				if ac.state == connectivity.Shutdown {
+					// ac.tearDown(...) has been invoked.
+					ac.mu.Unlock()
+					return errConnClosing
 				}
 				ac.mu.Unlock()
-				return false, err
+				continue
 			}
 			ac.mu.Lock()
+			ac.printf("ready")
 			if ac.state == connectivity.Shutdown {
 				// ac.tearDown(...) has been invoked.
 				ac.mu.Unlock()
-				return false, errConnClosing
+				newTransport.Close()
+				return errConnClosing
 			}
-			ac.mu.Unlock()
-			grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
-			continue
-		}
-		if ac.dopts.waitForHandshake {
-			select {
-			case <-done:
-			case <-connectCtx.Done():
-				// Didn't receive server preface, must kill this new transport now.
-				grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
-				newTr.Close()
-				break
-			case <-ac.ctx.Done():
+			ac.state = connectivity.Ready
+			if ac.cc.balancerWrapper != nil {
+				ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+			} else {
+				ac.cc.csMgr.updateState(ac.state)
+			}
+			t := ac.transport
+			ac.transport = newTransport
+			if t != nil {
+				t.Close()
+			}
+			ac.curAddr = addr
+			if ac.ready != nil {
+				close(ac.ready)
+				ac.ready = nil
 			}
+			ac.mu.Unlock()
+			return nil
 		}
 		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			// ac.tearDonn(...) has been invoked.
-			newTr.Close()
-			return false, errConnClosing
+		ac.state = connectivity.TransientFailure
+		if ac.cc.balancerWrapper != nil {
+			ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+		} else {
+			ac.cc.csMgr.updateState(ac.state)
 		}
-		ac.printf("ready")
-		ac.state = connectivity.Ready
-		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-		ac.transport = newTr
-		ac.curAddr = addr
 		if ac.ready != nil {
 			close(ac.ready)
 			ac.ready = nil
 		}
+		ac.mu.Unlock()
+		timer := time.NewTimer(sleepTime - time.Since(connectTime))
 		select {
-		case <-done:
-			// If the server has responded back with preface already,
-			// don't set the reconnect parameters.
-		default:
-			ac.connectRetryNum = connectRetryNum
-			ac.backoffDeadline = backoffDeadline
-			ac.connectDeadline = connectDeadline
-			ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
+		case <-timer.C:
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return ac.ctx.Err()
 		}
-		ac.mu.Unlock()
-		return true, nil
-	}
-	ac.mu.Lock()
-	ac.state = connectivity.TransientFailure
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-	ac.cc.resolveNow(resolver.ResolveNowOption{})
-	if ac.ready != nil {
-		close(ac.ready)
-		ac.ready = nil
-	}
-	ac.mu.Unlock()
-	timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
-	select {
-	case <-timer.C:
-	case <-ac.ctx.Done():
 		timer.Stop()
-		return false, ac.ctx.Err()
 	}
-	return false, nil
 }
 
 // Run in a goroutine to track the error in transport and create the
 // new transport if an error happens. It returns when the channel is closing.
 func (ac *addrConn) transportMonitor() {
 	for {
-		var timer *time.Timer
-		var cdeadline <-chan time.Time
 		ac.mu.Lock()
 		t := ac.transport
-		if !ac.connectDeadline.IsZero() {
-			timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
-			cdeadline = timer.C
-		}
 		ac.mu.Unlock()
 		// Block until we receive a goaway or an error occurs.
 		select {
 		case <-t.GoAway():
 		case <-t.Error():
-		case <-cdeadline:
-			ac.mu.Lock()
-			// This implies that client received server preface.
-			if ac.backoffDeadline.IsZero() {
-				ac.mu.Unlock()
-				continue
-			}
-			ac.mu.Unlock()
-			timer = nil
-			// No server preface received until deadline.
-			// Kill the connection.
-			grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
-			t.Close()
-		}
-		if timer != nil {
-			timer.Stop()
 		}
 		// If a GoAway happened, regardless of error, adjust our keepalive
 		// parameters as appropriate.
@@ -1266,8 +1028,11 @@ func (ac *addrConn) transportMonitor() {
 		// Set connectivity state to TransientFailure before calling
 		// resetTransport. Transition READY->CONNECTING is not valid.
 		ac.state = connectivity.TransientFailure
-		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-		ac.cc.resolveNow(resolver.ResolveNowOption{})
+		if ac.cc.balancerWrapper != nil {
+			ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+		} else {
+			ac.cc.csMgr.updateState(ac.state)
+		}
 		ac.curAddr = resolver.Address{}
 		ac.mu.Unlock()
 		if err := ac.resetTransport(); err != nil {
@@ -1341,7 +1106,7 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
 	ac.mu.Unlock()
 	// Trigger idle ac to connect.
 	if idle {
-		ac.connect()
+		ac.connect(false)
 	}
 	return nil, false
 }
@@ -1354,11 +1119,8 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
 func (ac *addrConn) tearDown(err error) {
 	ac.cancel()
 	ac.mu.Lock()
-	defer ac.mu.Unlock()
-	if ac.state == connectivity.Shutdown {
-		return
-	}
 	ac.curAddr = resolver.Address{}
+	defer ac.mu.Unlock()
 	if err == errConnDrain && ac.transport != nil {
 		// GracefulClose(...) may be executed multiple times when
 		// i) receiving multiple GoAway frames from the server; or
@@ -1366,9 +1128,16 @@ func (ac *addrConn) tearDown(err error) {
 		// address removal and GoAway.
 		ac.transport.GracefulClose()
 	}
+	if ac.state == connectivity.Shutdown {
+		return
+	}
 	ac.state = connectivity.Shutdown
 	ac.tearDownErr = err
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	if ac.cc.balancerWrapper != nil {
+		ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
+	} else {
+		ac.cc.csMgr.updateState(ac.state)
+	}
 	if ac.events != nil {
 		ac.events.Finish()
 		ac.events = nil
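The rewritten resetTransport above derives a per-address dial timeout from the backoff for the current retry count and sleeps out the remainder of that backoff before the next pass over the address list. A minimal standalone sketch of that loop follows; backoff and dialAddr are illustrative stand-ins, not the vendored grpc APIs.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// backoff is a stand-in for the DialOption-configured backoff strategy.
func backoff(retries int) time.Duration {
	if retries > 5 {
		return 3 * time.Second
	}
	return 100 * time.Millisecond << uint(retries)
}

// dialAddr is a placeholder for transport.NewClientTransport; it always
// fails so the retry loop is exercised.
func dialAddr(ctx context.Context, addr string) error {
	return errors.New("connection refused")
}

func resetTransport(ctx context.Context, addrs []string) error {
	const minConnectTimeout = 20 * time.Second
	for retries := 0; ; retries++ {
		sleep := backoff(retries)
		timeout := minConnectTimeout
		// Give each address at least an equal share of the backoff window.
		if share := sleep / time.Duration(len(addrs)); timeout < share {
			timeout = share
		}
		start := time.Now()
		for _, addr := range addrs {
			dctx, cancel := context.WithTimeout(ctx, timeout)
			err := dialAddr(dctx, addr)
			cancel()
			if err == nil {
				return nil // connected; the real code would swap in the new transport here
			}
			fmt.Printf("dial %s failed: %v\n", addr, err)
		}
		// Sleep out whatever is left of the backoff window, unless torn down.
		select {
		case <-time.After(sleep - time.Since(start)):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println(resetTransport(ctx, []string{"10.0.0.1:50051", "10.0.0.2:50051"}))
}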
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 43d81ed2..905b048e 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -69,11 +69,6 @@ func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error
 }
 
 func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
-	if pm, ok := v.(proto.Marshaler); ok {
-		// object can marshal itself, no need for buffer
-		return pm.Marshal()
-	}
-
 	cb := protoBufferPool.Get().(*cachedProtoBuffer)
 	out, err := p.marshal(v, cb)
 
@@ -84,17 +79,10 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
 }
 
 func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
-	protoMsg := v.(proto.Message)
-	protoMsg.Reset()
-
-	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
-		// object can unmarshal itself, no need for buffer
-		return pu.Unmarshal(data)
-	}
-
 	cb := protoBufferPool.Get().(*cachedProtoBuffer)
 	cb.SetBuf(data)
-	err := cb.Unmarshal(protoMsg)
+	v.(proto.Message).Reset()
+	err := cb.Unmarshal(v.(proto.Message))
 	cb.SetBuf(nil)
 	protoBufferPool.Put(cb)
 	return err
@@ -104,11 +92,13 @@ func (protoCodec) String() string {
 	return "proto"
 }
 
-var protoBufferPool = &sync.Pool{
-	New: func() interface{} {
-		return &cachedProtoBuffer{
-			Buffer:            proto.Buffer{},
-			lastMarshaledSize: 16,
-		}
-	},
-}
+var (
+	protoBufferPool = &sync.Pool{
+		New: func() interface{} {
+			return &cachedProtoBuffer{
+				Buffer:            proto.Buffer{},
+				lastMarshaledSize: 16,
+			}
+		},
+	}
+)
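The codec change routes every message through the pooled proto.Buffer, dropping the fast path for self-marshaling messages. The underlying sync.Pool pattern, shown here with a plain bytes.Buffer instead of the gRPC-specific cachedProtoBuffer, is roughly:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// encode borrows a buffer from the pool, uses it, and returns it,
// avoiding a fresh allocation per message.
func encode(msg string) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.WriteString(msg)
	out := append([]byte(nil), buf.Bytes()...) // copy before returning the buffer
	bufPool.Put(buf)
	return out
}

func main() {
	fmt.Printf("%q\n", encode("hello"))
}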
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
index 0b206a57..25983706 100644
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -1,62 +1,16 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
+// Code generated by "stringer -type=Code"; DO NOT EDIT.
 
 package codes
 
-import "strconv"
+import "fmt"
 
-func (c Code) String() string {
-	switch c {
-	case OK:
-		return "OK"
-	case Canceled:
-		return "Canceled"
-	case Unknown:
-		return "Unknown"
-	case InvalidArgument:
-		return "InvalidArgument"
-	case DeadlineExceeded:
-		return "DeadlineExceeded"
-	case NotFound:
-		return "NotFound"
-	case AlreadyExists:
-		return "AlreadyExists"
-	case PermissionDenied:
-		return "PermissionDenied"
-	case ResourceExhausted:
-		return "ResourceExhausted"
-	case FailedPrecondition:
-		return "FailedPrecondition"
-	case Aborted:
-		return "Aborted"
-	case OutOfRange:
-		return "OutOfRange"
-	case Unimplemented:
-		return "Unimplemented"
-	case Internal:
-		return "Internal"
-	case Unavailable:
-		return "Unavailable"
-	case DataLoss:
-		return "DataLoss"
-	case Unauthenticated:
-		return "Unauthenticated"
-	default:
-		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
+const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
+
+var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
+
+func (i Code) String() string {
+	if i >= Code(len(_Code_index)-1) {
+		return fmt.Sprintf("Code(%d)", i)
 	}
+	return _Code_name[_Code_index[i]:_Code_index[i+1]]
 }
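The generated String() above packs all code names into one string and slices it by cumulative byte offsets. The same table technique, applied to a small hypothetical enum so the mechanics are visible:

package main

import "fmt"

type Color uint8

const (
	Red Color = iota
	Green
	Blue
)

// Same layout stringer emits: one concatenated name string plus
// cumulative byte offsets into it.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c >= Color(len(colorIndex)-1) {
		return fmt.Sprintf("Color(%d)", c)
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(7)) // Red Green Blue Color(7)
}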
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index f3719d56..21e7733a 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -19,13 +19,12 @@
 // Package codes defines the canonical error codes used by gRPC. It is
 // consistent across various languages.
 package codes // import "google.golang.org/grpc/codes"
-import (
-	"fmt"
-)
 
 // A Code is an unsigned 32-bit error code as defined in the gRPC spec.
 type Code uint32
 
+//go:generate stringer -type=Code
+
 const (
 	// OK is returned on success.
 	OK Code = 0
@@ -143,41 +142,3 @@ const (
 	// DataLoss indicates unrecoverable data loss or corruption.
 	DataLoss Code = 15
 )
-
-var strToCode = map[string]Code{
-	`"OK"`: OK,
-	`"CANCELLED"`:/* [sic] */ Canceled,
-	`"UNKNOWN"`:             Unknown,
-	`"INVALID_ARGUMENT"`:    InvalidArgument,
-	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
-	`"NOT_FOUND"`:           NotFound,
-	`"ALREADY_EXISTS"`:      AlreadyExists,
-	`"PERMISSION_DENIED"`:   PermissionDenied,
-	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
-	`"FAILED_PRECONDITION"`: FailedPrecondition,
-	`"ABORTED"`:             Aborted,
-	`"OUT_OF_RANGE"`:        OutOfRange,
-	`"UNIMPLEMENTED"`:       Unimplemented,
-	`"INTERNAL"`:            Internal,
-	`"UNAVAILABLE"`:         Unavailable,
-	`"DATA_LOSS"`:           DataLoss,
-	`"UNAUTHENTICATED"`:     Unauthenticated,
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-func (c *Code) UnmarshalJSON(b []byte) error {
-	// From json.Unmarshaler: By convention, to approximate the behavior of
-	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
-	// a no-op.
-	if string(b) == "null" {
-		return nil
-	}
-	if c == nil {
-		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
-	}
-	if jc, ok := strToCode[string(b)]; ok {
-		*c = jc
-		return nil
-	}
-	return fmt.Errorf("invalid code: %q", string(b))
-}
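With UnmarshalJSON gone, the codes package is reduced to the constants plus the generated String(). These codes are typically consumed through the status package; a usage sketch (not part of this diff) might be:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookup(id string) error {
	if id == "" {
		return status.Error(codes.InvalidArgument, "id must not be empty")
	}
	return status.Errorf(codes.NotFound, "no record for %q", id)
}

func main() {
	err := lookup("42")
	// status.Code extracts the canonical code carried by the error.
	fmt.Println(status.Code(err), err)
}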
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 1d2e864f..946aa1f2 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -34,8 +34,10 @@ import (
 	"golang.org/x/net/context"
 )
 
-// alpnProtoStr are the specified application level protocols for gRPC.
-var alpnProtoStr = []string{"h2"}
+var (
+	// alpnProtoStr are the specified application level protocols for gRPC.
+	alpnProtoStr = []string{"h2"}
+)
 
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
@@ -72,9 +74,11 @@ type AuthInfo interface {
 	AuthType() string
 }
 
-// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
-// and the caller should not close rawConn.
-var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+var (
+	// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+	// and the caller should not close rawConn.
+	ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+)
 
 // TransportCredentials defines the common interface for all the live gRPC wire
 // protocols and supported transport security protocols (e.g., TLS, SSL).
@@ -131,15 +135,15 @@ func (c tlsCreds) Info() ProtocolInfo {
 	}
 }
 
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
 	// use local cfg to avoid clobbering ServerName if using multiple endpoints
 	cfg := cloneTLSConfig(c.config)
 	if cfg.ServerName == "" {
-		colonPos := strings.LastIndex(authority, ":")
+		colonPos := strings.LastIndex(addr, ":")
 		if colonPos == -1 {
-			colonPos = len(authority)
+			colonPos = len(addr)
 		}
-		cfg.ServerName = authority[:colonPos]
+		cfg.ServerName = addr[:colonPos]
 	}
 	conn := tls.Client(rawConn, cfg)
 	errChannel := make(chan error, 1)
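ClientHandshake keeps the same host-extraction logic under the new addr parameter name: take everything before the final colon, or the whole string when no port is present. Pulled out on its own, the step behaves like:

package main

import (
	"fmt"
	"strings"
)

// serverNameFromAddr mirrors the host-extraction step shown above:
// everything up to the final colon, or the whole string when no port
// is present.
func serverNameFromAddr(addr string) string {
	colonPos := strings.LastIndex(addr, ":")
	if colonPos == -1 {
		colonPos = len(addr)
	}
	return addr[:colonPos]
}

func main() {
	fmt.Println(serverNameFromAddr("example.com:443")) // example.com
	fmt.Println(serverNameFromAddr("example.com"))     // example.com
}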
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
deleted file mode 100644
index 47d10b07..00000000
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package encoding defines the interface for the compressor and the functions
-// to register and get the compossor.
-// This package is EXPERIMENTAL.
-package encoding
-
-import (
-	"io"
-)
-
-var registerCompressor = make(map[string]Compressor)
-
-// Compressor is used for compressing and decompressing when sending or receiving messages.
-type Compressor interface {
-	// Compress writes the data written to wc to w after compressing it.  If an error
-	// occurs while initializing the compressor, that error is returned instead.
-	Compress(w io.Writer) (io.WriteCloser, error)
-	// Decompress reads data from r, decompresses it, and provides the uncompressed data
-	// via the returned io.Reader.  If an error occurs while initializing the decompressor, that error
-	// is returned instead.
-	Decompress(r io.Reader) (io.Reader, error)
-	// Name is the name of the compression codec and is used to set the content coding header.
-	Name() string
-}
-
-// RegisterCompressor registers the compressor with gRPC by its name.  It can be activated when
-// sending an RPC via grpc.UseCompressor().  It will be automatically accessed when receiving a
-// message based on the content coding header.  Servers also use it to send a response with the
-// same encoding as the request.
-//
-// NOTE: this function must only be called during initialization time (i.e. in an init() function).  If
-// multiple Compressors are registered with the same name, the one registered last will take effect.
-func RegisterCompressor(c Compressor) {
-	registerCompressor[c.Name()] = c
-}
-
-// GetCompressor returns Compressor for the given compressor name.
-func GetCompressor(name string) Compressor {
-	return registerCompressor[name]
-}
-
-// Identity specifies the optional encoding for uncompressed streams.
-// It is intended for grpc internal use only.
-const Identity = "identity"
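The deleted encoding package was essentially a name-keyed registry populated from init functions. A stripped-down sketch of that registration pattern, with a trivial Codec interface standing in for the removed Compressor interface:

package main

import "fmt"

// Codec stands in for the removed Compressor interface.
type Codec interface {
	Name() string
}

var registry = make(map[string]Codec)

// Register is meant to be called from init(); the last registration for a
// name wins, matching the behaviour documented for RegisterCompressor above.
func Register(c Codec) { registry[c.Name()] = c }

// Get returns the codec registered under name, or nil.
func Get(name string) Codec { return registry[name] }

type gzipCodec struct{}

func (gzipCodec) Name() string { return "gzip" }

func main() {
	Register(gzipCodec{})
	fmt.Println(Get("gzip") != nil, Get("identity") != nil) // true false
}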
diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go
deleted file mode 100644
index f3dbf217..00000000
--- a/vendor/google.golang.org/grpc/go16.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"os"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req.Cancel = ctx.Done()
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go
deleted file mode 100644
index de23098e..00000000
--- a/vendor/google.golang.org/grpc/go17.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"os"
-
-	netctx "golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req = req.WithContext(ctx)
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded, netctx.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled, netctx.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled, netctx.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
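Both deleted files carried the same classification of ordinary Go errors into canonical codes; only the context plumbing differed between the go1.6 and go1.7 variants. A free-standing copy of a subset of that mapping, shown here purely as a worked example:

package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"google.golang.org/grpc/codes"
)

// convertCode classifies a plain Go error into a canonical gRPC code,
// following the shape of the deleted helper above.
func convertCode(err error) codes.Code {
	switch err {
	case nil:
		return codes.OK
	case io.EOF:
		return codes.OutOfRange
	case context.Canceled:
		return codes.Canceled
	case context.DeadlineExceeded:
		return codes.DeadlineExceeded
	}
	if os.IsNotExist(err) {
		return codes.NotFound
	}
	return codes.Unknown
}

func main() {
	_, err := os.Open("/no/such/file")
	fmt.Println(convertCode(err), convertCode(io.EOF), convertCode(nil))
}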
diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go
index d14a5d40..db56ff36 100644
--- a/vendor/google.golang.org/grpc/grpclb.go
+++ b/vendor/google.golang.org/grpc/grpclb.go
@@ -19,32 +19,21 @@
 package grpc
 
 import (
-	"strconv"
-	"strings"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net"
 	"sync"
 	"time"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+	"google.golang.org/grpc/codes"
+	lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/naming"
 )
 
-const (
-	lbTokeyKey             = "lb-token"
-	defaultFallbackTimeout = 10 * time.Second
-	grpclbName             = "grpclb"
-)
-
-func convertDuration(d *lbpb.Duration) time.Duration {
-	if d == nil {
-		return 0
-	}
-	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
-}
-
 // Client API for LoadBalancer service.
 // Mostly copied from generated pb.go file.
 // To avoid circular dependency.
@@ -70,273 +59,646 @@ type balanceLoadClientStream struct {
 	ClientStream
 }
 
-func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
+func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
 	return x.ClientStream.SendMsg(m)
 }
 
-func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
-	m := new(lbpb.LoadBalanceResponse)
+func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
+	m := new(lbmpb.LoadBalanceResponse)
 	if err := x.ClientStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
 	return m, nil
 }
 
-func init() {
-	balancer.Register(newLBBuilder())
-}
-
-// newLBBuilder creates a builder for grpclb.
-func newLBBuilder() balancer.Builder {
-	return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
-}
-
-// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
-// fallbackTimeout. If no response is received from the remote balancer within
-// fallbackTimeout, the backend addresses from the resolved address list will be
-// used.
-//
-// Only call this function when a non-default fallback timeout is needed.
-func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
-	return &lbBuilder{
-		fallbackTimeout: fallbackTimeout,
+// NewGRPCLBBalancer creates a grpclb load balancer.
+func NewGRPCLBBalancer(r naming.Resolver) Balancer {
+	return &grpclbBalancer{
+		r: r,
 	}
 }
 
-type lbBuilder struct {
-	fallbackTimeout time.Duration
+type remoteBalancerInfo struct {
+	addr string
+	// the server name used for authentication with the remote LB server.
+	name string
 }
 
-func (b *lbBuilder) Name() string {
-	return grpclbName
+// grpclbAddrInfo consists of the information of a backend server.
+type grpclbAddrInfo struct {
+	addr      Address
+	connected bool
+	// dropForRateLimiting indicates whether this particular request should be
+	// dropped by the client for rate limiting.
+	dropForRateLimiting bool
+	// dropForLoadBalancing indicates whether this particular request should be
+	// dropped by the client for load balancing.
+	dropForLoadBalancing bool
 }
 
-func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	// This generates a manual resolver builder with a random scheme. This
-	// scheme will be used to dial to remote LB, so we can send filtered address
-	// updates to remote LB ClientConn using this manual resolver.
-	scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
-	r := &lbManualResolver{scheme: scheme, ccb: cc}
+type grpclbBalancer struct {
+	r      naming.Resolver
+	target string
+	mu     sync.Mutex
+	seq    int // a sequence number to make sure addrCh does not get stale addresses.
+	w      naming.Watcher
+	addrCh chan []Address
+	rbs    []remoteBalancerInfo
+	addrs  []*grpclbAddrInfo
+	next   int
+	waitCh chan struct{}
+	done   bool
+	rand   *rand.Rand
+
+	clientStats lbmpb.ClientStats
+}
 
-	var target string
-	targetSplitted := strings.Split(cc.Target(), ":///")
-	if len(targetSplitted) < 2 {
-		target = cc.Target()
-	} else {
-		target = targetSplitted[1]
+func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
+	updates, err := w.Next()
+	if err != nil {
+		grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
+		return err
 	}
-
-	lb := &lbBalancer{
-		cc:              cc,
-		target:          target,
-		opt:             opt,
-		fallbackTimeout: b.fallbackTimeout,
-		doneCh:          make(chan struct{}),
-
-		manualResolver: r,
-		csEvltr:        &connectivityStateEvaluator{},
-		subConns:       make(map[resolver.Address]balancer.SubConn),
-		scStates:       make(map[balancer.SubConn]connectivity.State),
-		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
-		clientStats:    &rpcStats{},
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done {
+		return ErrClientConnClosing
 	}
-
-	return lb
+	for _, update := range updates {
+		switch update.Op {
+		case naming.Add:
+			var exist bool
+			for _, v := range b.rbs {
+				// TODO: Is the same addr with different server name a different balancer?
+				if update.Addr == v.addr {
+					exist = true
+					break
+				}
+			}
+			if exist {
+				continue
+			}
+			md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
+			if !ok {
+				// TODO: Revisit the handling here and possibly introduce a fallback mechanism.
+				grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
+				continue
+			}
+			switch md.AddrType {
+			case naming.Backend:
+				// TODO: Revisit the handling here and possibly introduce a fallback mechanism.
+				grpclog.Errorf("The name resolution does not give grpclb addresses")
+				continue
+			case naming.GRPCLB:
+				b.rbs = append(b.rbs, remoteBalancerInfo{
+					addr: update.Addr,
+					name: md.ServerName,
+				})
+			default:
+				grpclog.Errorf("Received unknown address type %d", md.AddrType)
+				continue
+			}
+		case naming.Delete:
+			for i, v := range b.rbs {
+				if update.Addr == v.addr {
+					copy(b.rbs[i:], b.rbs[i+1:])
+					b.rbs = b.rbs[:len(b.rbs)-1]
+					break
+				}
+			}
+		default:
+			grpclog.Errorf("Unknown update.Op %v", update.Op)
+		}
+	}
+	// TODO: Fall back to the basic round-robin load balancing if the resulting address is
+	// not a load balancer.
+	select {
+	case <-ch:
+	default:
+	}
+	ch <- b.rbs
+	return nil
 }
 
-type lbBalancer struct {
-	cc              balancer.ClientConn
-	target          string
-	opt             balancer.BuildOptions
-	fallbackTimeout time.Duration
-	doneCh          chan struct{}
-
-	// manualResolver is used in the remote LB ClientConn inside grpclb. When
-	// resolved address updates are received by grpclb, filtered updates will be
-	// send to remote LB ClientConn through this resolver.
-	manualResolver *lbManualResolver
-	// The ClientConn to talk to the remote balancer.
-	ccRemoteLB *ClientConn
-
-	// Support client side load reporting. Each picker gets a reference to this,
-	// and will update its content.
-	clientStats *rpcStats
-
-	mu sync.Mutex // guards everything following.
-	// The full server list including drops, used to check if the newly received
-	// serverList contains anything new. Each generate picker will also have
-	// reference to this list to do the first layer pick.
-	fullServerList []*lbpb.Server
-	// All backends addresses, with metadata set to nil. This list contains all
-	// backend addresses in the same order and with the same duplicates as in
-	// serverlist. When generating picker, a SubConn slice with the same order
-	// but with only READY SCs will be gerenated.
-	backendAddrs []resolver.Address
-	// Roundrobin functionalities.
-	csEvltr  *connectivityStateEvaluator
-	state    connectivity.State
-	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
-	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
-	picker   balancer.Picker
-	// Support fallback to resolved backend addresses if there's no response
-	// from remote balancer within fallbackTimeout.
-	fallbackTimerExpired bool
-	serverListReceived   bool
-	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
-	// when resolved address updates are received, and read in the goroutine
-	// handling fallback.
-	resolvedBackendAddrs []resolver.Address
+func convertDuration(d *lbmpb.Duration) time.Duration {
+	if d == nil {
+		return 0
+	}
+	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
 }
 
-// regeneratePicker takes a snapshot of the balancer, and generates a picker from
-// it. The picker
-//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
-//  - does two layer roundrobin pick otherwise.
-// Caller must hold lb.mu.
-func (lb *lbBalancer) regeneratePicker() {
-	if lb.state == connectivity.TransientFailure {
-		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
+	if l == nil {
 		return
 	}
-	var readySCs []balancer.SubConn
-	for _, a := range lb.backendAddrs {
-		if sc, ok := lb.subConns[a]; ok {
-			if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs = append(readySCs, sc)
-			}
+	servers := l.GetServers()
+	var (
+		sl    []*grpclbAddrInfo
+		addrs []Address
+	)
+	for _, s := range servers {
+		md := metadata.Pairs("lb-token", s.LoadBalanceToken)
+		ip := net.IP(s.IpAddress)
+		ipStr := ip.String()
+		if ip.To4() == nil {
+			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
+			// net.SplitHostPort() will return a "too many colons" error.
+			ipStr = fmt.Sprintf("[%s]", ipStr)
 		}
-	}
-
-	if len(lb.fullServerList) <= 0 {
-		if len(readySCs) <= 0 {
-			lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
-			return
+		addr := Address{
+			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
+			Metadata: &md,
 		}
-		lb.picker = &rrPicker{subConns: readySCs}
+		sl = append(sl, &grpclbAddrInfo{
+			addr:                 addr,
+			dropForRateLimiting:  s.DropForRateLimiting,
+			dropForLoadBalancing: s.DropForLoadBalancing,
+		})
+		addrs = append(addrs, addr)
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done || seq < b.seq {
 		return
 	}
-	lb.picker = &lbPicker{
-		serverList: lb.fullServerList,
-		subConns:   readySCs,
-		stats:      lb.clientStats,
+	if len(sl) > 0 {
+		// reset b.next to 0 when replacing the server list.
+		b.next = 0
+		b.addrs = sl
+		b.addrCh <- addrs
 	}
 	return
 }
 
-func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
-	lb.mu.Lock()
-	defer lb.mu.Unlock()
+func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+		case <-done:
+			return
+		}
+		b.mu.Lock()
+		stats := b.clientStats
+		b.clientStats = lbmpb.ClientStats{} // Clear the stats.
+		b.mu.Unlock()
+		t := time.Now()
+		stats.Timestamp = &lbmpb.Timestamp{
+			Seconds: t.Unix(),
+			Nanos:   int32(t.Nanosecond()),
+		}
+		if err := s.Send(&lbmpb.LoadBalanceRequest{
+			LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
+				ClientStats: &stats,
+			},
+		}); err != nil {
+			grpclog.Errorf("grpclb: failed to send load report: %v", err)
+			return
+		}
+	}
+}
 
-	oldS, ok := lb.scStates[sc]
-	if !ok {
-		grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	stream, err := lbc.BalanceLoad(ctx)
+	if err != nil {
+		grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+		return
+	}
+	b.mu.Lock()
+	if b.done {
+		b.mu.Unlock()
 		return
 	}
-	lb.scStates[sc] = s
-	switch s {
-	case connectivity.Idle:
-		sc.Connect()
-	case connectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scStates. Remove state for this sc here.
-		delete(lb.scStates, sc)
+	b.mu.Unlock()
+	initReq := &lbmpb.LoadBalanceRequest{
+		LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
+			InitialRequest: &lbmpb.InitialLoadBalanceRequest{
+				Name: b.target,
+			},
+		},
 	}
+	if err := stream.Send(initReq); err != nil {
+		grpclog.Errorf("grpclb: failed to send init request: %v", err)
+		// TODO: backoff on retry?
+		return true
+	}
+	reply, err := stream.Recv()
+	if err != nil {
+		grpclog.Errorf("grpclb: failed to recv init response: %v", err)
+		// TODO: backoff on retry?
+		return true
+	}
+	initResp := reply.GetInitialResponse()
+	if initResp == nil {
+		grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
+		return
+	}
+	// TODO: Support delegation.
+	if initResp.LoadBalancerDelegate != "" {
+		// delegation
+		grpclog.Errorf("TODO: Delegation is not supported yet.")
+		return
+	}
+	streamDone := make(chan struct{})
+	defer close(streamDone)
+	b.mu.Lock()
+	b.clientStats = lbmpb.ClientStats{} // Clear client stats.
+	b.mu.Unlock()
+	if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+		go b.sendLoadReport(stream, d, streamDone)
+	}
+	// Retrieve the server list.
+	for {
+		reply, err := stream.Recv()
+		if err != nil {
+			grpclog.Errorf("grpclb: failed to recv server list: %v", err)
+			break
+		}
+		b.mu.Lock()
+		if b.done || seq < b.seq {
+			b.mu.Unlock()
+			return
+		}
+		b.seq++ // tick when receiving a new list of servers.
+		seq = b.seq
+		b.mu.Unlock()
+		if serverList := reply.GetServerList(); serverList != nil {
+			b.processServerList(serverList, seq)
+		}
+	}
+	return true
+}
 
-	oldAggrState := lb.state
-	lb.state = lb.csEvltr.recordTransition(oldS, s)
-
-	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
-		(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
-		lb.regeneratePicker()
+func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
+	b.rand = rand.New(rand.NewSource(time.Now().Unix()))
+	// TODO: Fall back to the basic direct connection if there is no name resolver.
+	if b.r == nil {
+		return errors.New("there is no name resolver installed")
+	}
+	b.target = target
+	b.mu.Lock()
+	if b.done {
+		b.mu.Unlock()
+		return ErrClientConnClosing
+	}
+	b.addrCh = make(chan []Address)
+	w, err := b.r.Resolve(target)
+	if err != nil {
+		b.mu.Unlock()
+		grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
+		return err
 	}
+	b.w = w
+	b.mu.Unlock()
+	balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
+	// Spawn a goroutine to monitor the name resolution of remote load balancer.
+	go func() {
+		for {
+			if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
+				grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
+				close(balancerAddrsCh)
+				return
+			}
+		}
+	}()
+	// Spawn a goroutine to talk to the remote load balancer.
+	go func() {
+		var (
+			cc *ClientConn
+			// ccError is closed when there is an error in the current cc.
+			// A new rb should be picked from rbs and connected.
+			ccError chan struct{}
+			rb      *remoteBalancerInfo
+			rbs     []remoteBalancerInfo
+			rbIdx   int
+		)
+
+		defer func() {
+			if ccError != nil {
+				select {
+				case <-ccError:
+				default:
+					close(ccError)
+				}
+			}
+			if cc != nil {
+				cc.Close()
+			}
+		}()
+
+		for {
+			var ok bool
+			select {
+			case rbs, ok = <-balancerAddrsCh:
+				if !ok {
+					return
+				}
+				foundIdx := -1
+				if rb != nil {
+					for i, trb := range rbs {
+						if trb == *rb {
+							foundIdx = i
+							break
+						}
+					}
+				}
+				if foundIdx >= 0 {
+					if foundIdx >= 1 {
+						// Move the address in use to the beginning of the list.
+						b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
+						rbIdx = 0
+					}
+					continue // If found, don't dial new cc.
+				} else if len(rbs) > 0 {
+					// Pick a random one from the list, instead of always using the first one.
+					if l := len(rbs); l > 1 && rb != nil {
+						tmpIdx := b.rand.Intn(l - 1)
+						b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
+					}
+					rbIdx = 0
+					rb = &rbs[0]
+				} else {
+					// foundIdx < 0 && len(rbs) <= 0.
+					rb = nil
+				}
+			case <-ccError:
+				ccError = nil
+				if rbIdx < len(rbs)-1 {
+					rbIdx++
+					rb = &rbs[rbIdx]
+				} else {
+					rb = nil
+				}
+			}
 
-	lb.cc.UpdateBalancerState(lb.state, lb.picker)
-	return
+			if rb == nil {
+				continue
+			}
+
+			if cc != nil {
+				cc.Close()
+			}
+			// Talk to the remote load balancer to get the server list.
+			var (
+				err   error
+				dopts []DialOption
+			)
+			if creds := config.DialCreds; creds != nil {
+				if rb.name != "" {
+					if err := creds.OverrideServerName(rb.name); err != nil {
+						grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
+						continue
+					}
+				}
+				dopts = append(dopts, WithTransportCredentials(creds))
+			} else {
+				dopts = append(dopts, WithInsecure())
+			}
+			if dialer := config.Dialer; dialer != nil {
+				// WithDialer takes a different type of function, so we instead use a special DialOption here.
+				dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
+			}
+			dopts = append(dopts, WithBlock())
+			ccError = make(chan struct{})
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			cc, err = DialContext(ctx, rb.addr, dopts...)
+			cancel()
+			if err != nil {
+				grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
+				close(ccError)
+				continue
+			}
+			b.mu.Lock()
+			b.seq++ // tick when getting a new balancer address
+			seq := b.seq
+			b.next = 0
+			b.mu.Unlock()
+			go func(cc *ClientConn, ccError chan struct{}) {
+				lbc := &loadBalancerClient{cc}
+				b.callRemoteBalancer(lbc, seq)
+				cc.Close()
+				select {
+				case <-ccError:
+				default:
+					close(ccError)
+				}
+			}(cc, ccError)
+		}
+	}()
+	return nil
 }
 
-// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
-// resolved backends (backends received from resolver, not from remote balancer)
-// if no connection to remote balancers was successful.
-func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
-	timer := time.NewTimer(fallbackTimeout)
-	defer timer.Stop()
-	select {
-	case <-timer.C:
-	case <-lb.doneCh:
-		return
+func (b *grpclbBalancer) down(addr Address, err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	for _, a := range b.addrs {
+		if addr == a.addr {
+			a.connected = false
+			break
+		}
 	}
-	lb.mu.Lock()
-	if lb.serverListReceived {
-		lb.mu.Unlock()
-		return
+}
+
+func (b *grpclbBalancer) Up(addr Address) func(error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done {
+		return nil
+	}
+	var cnt int
+	for _, a := range b.addrs {
+		if a.addr == addr {
+			if a.connected {
+				return nil
+			}
+			a.connected = true
+		}
+		if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
+			cnt++
+		}
+	}
+	// addr is the only one which is connected. Notify the Get() callers who are blocking.
+	if cnt == 1 && b.waitCh != nil {
+		close(b.waitCh)
+		b.waitCh = nil
+	}
+	return func(err error) {
+		b.down(addr, err)
 	}
-	lb.fallbackTimerExpired = true
-	lb.refreshSubConns(lb.resolvedBackendAddrs)
-	lb.mu.Unlock()
 }
 
-// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
-// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
-// connections.
-func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
-	if len(addrs) <= 0 {
+func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
+	var ch chan struct{}
+	b.mu.Lock()
+	if b.done {
+		b.mu.Unlock()
+		err = ErrClientConnClosing
 		return
 	}
+	seq := b.seq
 
-	var remoteBalancerAddrs, backendAddrs []resolver.Address
-	for _, a := range addrs {
-		if a.Type == resolver.GRPCLB {
-			remoteBalancerAddrs = append(remoteBalancerAddrs, a)
-		} else {
-			backendAddrs = append(backendAddrs, a)
+	defer func() {
+		if err != nil {
+			return
 		}
-	}
+		put = func() {
+			s, ok := rpcInfoFromContext(ctx)
+			if !ok {
+				return
+			}
+			b.mu.Lock()
+			defer b.mu.Unlock()
+			if b.done || seq < b.seq {
+				return
+			}
+			b.clientStats.NumCallsFinished++
+			if !s.bytesSent {
+				b.clientStats.NumCallsFinishedWithClientFailedToSend++
+			} else if s.bytesReceived {
+				b.clientStats.NumCallsFinishedKnownReceived++
+			}
+		}
+	}()
 
-	if lb.ccRemoteLB == nil {
-		if len(remoteBalancerAddrs) <= 0 {
-			grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
-			return
+	b.clientStats.NumCallsStarted++
+	if len(b.addrs) > 0 {
+		if b.next >= len(b.addrs) {
+			b.next = 0
 		}
-		// First time receiving resolved addresses, create a cc to remote
-		// balancers.
-		lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
-		// Start the fallback goroutine.
-		go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
+		next := b.next
+		for {
+			a := b.addrs[next]
+			next = (next + 1) % len(b.addrs)
+			if a.connected {
+				if !a.dropForRateLimiting && !a.dropForLoadBalancing {
+					addr = a.addr
+					b.next = next
+					b.mu.Unlock()
+					return
+				}
+				if !opts.BlockingWait {
+					b.next = next
+					if a.dropForLoadBalancing {
+						b.clientStats.NumCallsFinished++
+						b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
+					} else if a.dropForRateLimiting {
+						b.clientStats.NumCallsFinished++
+						b.clientStats.NumCallsFinishedWithDropForRateLimiting++
+					}
+					b.mu.Unlock()
+					err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
+					return
+				}
+			}
+			if next == b.next {
+				// Iterated over all possible addresses, but none is connected.
+				break
+			}
+		}
+	}
+	if !opts.BlockingWait {
+		b.clientStats.NumCallsFinished++
+		b.clientStats.NumCallsFinishedWithClientFailedToSend++
+		b.mu.Unlock()
+		err = Errorf(codes.Unavailable, "there is no address available")
+		return
 	}
+	// Wait on b.waitCh for non-failfast RPCs.
+	if b.waitCh == nil {
+		ch = make(chan struct{})
+		b.waitCh = ch
+	} else {
+		ch = b.waitCh
+	}
+	b.mu.Unlock()
+	for {
+		select {
+		case <-ctx.Done():
+			b.mu.Lock()
+			b.clientStats.NumCallsFinished++
+			b.clientStats.NumCallsFinishedWithClientFailedToSend++
+			b.mu.Unlock()
+			err = ctx.Err()
+			return
+		case <-ch:
+			b.mu.Lock()
+			if b.done {
+				b.clientStats.NumCallsFinished++
+				b.clientStats.NumCallsFinishedWithClientFailedToSend++
+				b.mu.Unlock()
+				err = ErrClientConnClosing
+				return
+			}
 
-	// cc to remote balancers uses lb.manualResolver. Send the updated remote
-	// balancer addresses to it through manualResolver.
-	lb.manualResolver.NewAddress(remoteBalancerAddrs)
+			if len(b.addrs) > 0 {
+				if b.next >= len(b.addrs) {
+					b.next = 0
+				}
+				next := b.next
+				for {
+					a := b.addrs[next]
+					next = (next + 1) % len(b.addrs)
+					if a.connected {
+						if !a.dropForRateLimiting && !a.dropForLoadBalancing {
+							addr = a.addr
+							b.next = next
+							b.mu.Unlock()
+							return
+						}
+						if !opts.BlockingWait {
+							b.next = next
+							if a.dropForLoadBalancing {
+								b.clientStats.NumCallsFinished++
+								b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
+							} else if a.dropForRateLimiting {
+								b.clientStats.NumCallsFinished++
+								b.clientStats.NumCallsFinishedWithDropForRateLimiting++
+							}
+							b.mu.Unlock()
+							err = Errorf(codes.Unavailable, "drop requests for the address %s", a.addr.Addr)
+							return
+						}
+					}
+					if next == b.next {
+						// Iterated over all possible addresses, but none is connected.
+						break
+					}
+				}
+			}
+			// The newly added addr got removed by Down() again.
+			if b.waitCh == nil {
+				ch = make(chan struct{})
+				b.waitCh = ch
+			} else {
+				ch = b.waitCh
+			}
+			b.mu.Unlock()
+		}
+	}
+}
 
-	lb.mu.Lock()
-	lb.resolvedBackendAddrs = backendAddrs
-	// If serverListReceived is true, connection to remote balancer was
-	// successful and there's no need to do fallback anymore.
-	// If fallbackTimerExpired is false, fallback hasn't happened yet.
-	if !lb.serverListReceived && lb.fallbackTimerExpired {
-		// This means we received a new list of resolved backends, and we are
-		// still in fallback mode. Need to update the list of backends we are
-		// using to the new list of backends.
-		lb.refreshSubConns(lb.resolvedBackendAddrs)
-	}
-	lb.mu.Unlock()
+func (b *grpclbBalancer) Notify() <-chan []Address {
+	return b.addrCh
 }
 
-func (lb *lbBalancer) Close() {
-	select {
-	case <-lb.doneCh:
-		return
-	default:
+func (b *grpclbBalancer) Close() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done {
+		return errBalancerClosed
+	}
+	b.done = true
+	if b.waitCh != nil {
+		close(b.waitCh)
+	}
+	if b.addrCh != nil {
+		close(b.addrCh)
 	}
-	close(lb.doneCh)
-	if lb.ccRemoteLB != nil {
-		lb.ccRemoteLB.Close()
+	if b.w != nil {
+		b.w.Close()
 	}
+	return nil
 }
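Get() above walks b.addrs round-robin from b.next, skipping entries that are not connected or are flagged for dropping, and stops after one full lap. The core of that selection, detached from the balancer and its locking (names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// addrInfo mirrors the fields of grpclbAddrInfo that matter for picking.
type addrInfo struct {
	addr      string
	connected bool
	drop      bool
}

// pick walks the list round-robin starting at *next, returning the first
// connected, non-drop address, and advances *next past it.
func pick(addrs []addrInfo, next *int) (string, error) {
	if len(addrs) == 0 {
		return "", errors.New("no address available")
	}
	start := *next % len(addrs)
	i := start
	for {
		a := addrs[i]
		i = (i + 1) % len(addrs)
		if a.connected && !a.drop {
			*next = i
			return a.addr, nil
		}
		if i == start {
			return "", errors.New("no connected address")
		}
	}
}

func main() {
	addrs := []addrInfo{
		{addr: "10.0.0.1:80", connected: false},
		{addr: "10.0.0.2:80", connected: true},
		{addr: "10.0.0.3:80", connected: true, drop: true},
	}
	next := 0
	for i := 0; i < 3; i++ {
		fmt.Println(pick(addrs, &next))
	}
}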
diff --git a/vendor/google.golang.org/grpc/grpclb_picker.go b/vendor/google.golang.org/grpc/grpclb_picker.go
deleted file mode 100644
index 872c7cce..00000000
--- a/vendor/google.golang.org/grpc/grpclb_picker.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-	"google.golang.org/grpc/status"
-)
-
-type rpcStats struct {
-	NumCallsStarted                          int64
-	NumCallsFinished                         int64
-	NumCallsFinishedWithDropForRateLimiting  int64
-	NumCallsFinishedWithDropForLoadBalancing int64
-	NumCallsFinishedWithClientFailedToSend   int64
-	NumCallsFinishedKnownReceived            int64
-}
-
-// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
-func (s *rpcStats) toClientStats() *lbpb.ClientStats {
-	stats := &lbpb.ClientStats{
-		NumCallsStarted:                          atomic.SwapInt64(&s.NumCallsStarted, 0),
-		NumCallsFinished:                         atomic.SwapInt64(&s.NumCallsFinished, 0),
-		NumCallsFinishedWithDropForRateLimiting:  atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
-		NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
-		NumCallsFinishedWithClientFailedToSend:   atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
-		NumCallsFinishedKnownReceived:            atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
-	}
-	return stats
-}
-
-func (s *rpcStats) dropForRateLimiting() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) dropForLoadBalancing() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) failedToSend() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) knownReceived() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-type errPicker struct {
-	// Pick always returns this err.
-	err error
-}
-
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
-}
-
-// rrPicker does roundrobin on subConns. It's typically used when there's no
-// response from remote balancer, and grpclb falls back to the resolved
-// backends.
-//
-// It guaranteed that len(subConns) > 0.
-type rrPicker struct {
-	mu           sync.Mutex
-	subConns     []balancer.SubConn // The subConns that were READY when taking the snapshot.
-	subConnsNext int
-}
-
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	sc := p.subConns[p.subConnsNext]
-	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
-	return sc, nil, nil
-}
-
-// lbPicker does two layers of picks:
-//
-// First layer: roundrobin on all servers in serverList, including drops and backends.
-// - If it picks a drop, the RPC will fail as being dropped.
-// - If it picks a backend, do a second layer pick to pick the real backend.
-//
-// Second layer: roundrobin on all READY backends.
-//
-// It's guaranteed that len(serverList) > 0.
-type lbPicker struct {
-	mu             sync.Mutex
-	serverList     []*lbpb.Server
-	serverListNext int
-	subConns       []balancer.SubConn // The subConns that were READY when taking the snapshot.
-	subConnsNext   int
-
-	stats *rpcStats
-}
-
-func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	// Layer one roundrobin on serverList.
-	s := p.serverList[p.serverListNext]
-	p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
-
-	// If it's a drop, return an error and fail the RPC.
-	if s.DropForRateLimiting {
-		p.stats.dropForRateLimiting()
-		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
-	}
-	if s.DropForLoadBalancing {
-		p.stats.dropForLoadBalancing()
-		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
-	}
-
-	// If not a drop but there's no ready subConns.
-	if len(p.subConns) <= 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
-	// Return the next ready subConn in the list, also collect rpc stats.
-	sc := p.subConns[p.subConnsNext]
-	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
-	done := func(info balancer.DoneInfo) {
-		if !info.BytesSent {
-			p.stats.failedToSend()
-		} else if info.BytesReceived {
-			p.stats.knownReceived()
-		}
-	}
-	return sc, done, nil
-}
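The removed lbPicker did two layers of round-robin: first over the full server list, where a drop entry fails the RPC outright, then over the READY connections. A compact sketch of that two-layer scheme with illustrative types:

package main

import (
	"errors"
	"fmt"
)

type server struct {
	addr string
	drop bool // a drop entry fails the RPC instead of routing it
}

type picker struct {
	servers    []server
	serverNext int
	ready      []string // addresses of READY connections
	readyNext  int
}

// pick does the two-layer selection: first over the full server list,
// then over whatever is currently READY.
func (p *picker) pick() (string, error) {
	s := p.servers[p.serverNext]
	p.serverNext = (p.serverNext + 1) % len(p.servers)
	if s.drop {
		return "", errors.New("request dropped by load balancer")
	}
	if len(p.ready) == 0 {
		return "", errors.New("no ready connection")
	}
	c := p.ready[p.readyNext]
	p.readyNext = (p.readyNext + 1) % len(p.ready)
	return c, nil
}

func main() {
	p := &picker{
		servers: []server{{addr: "a"}, {addr: "b", drop: true}, {addr: "c"}},
		ready:   []string{"10.0.0.1:80", "10.0.0.3:80"},
	}
	for i := 0; i < 4; i++ {
		fmt.Println(p.pick())
	}
}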
diff --git a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
deleted file mode 100644
index 1b580df2..00000000
--- a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"net"
-	"reflect"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/resolver"
-)
-
-// processServerList updates the balancer's internal state, creates/removes SubConns,
-// and regenerates the picker using the received serverList.
-func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
-	grpclog.Infof("lbBalancer: processing server list: %+v", l)
-	lb.mu.Lock()
-	defer lb.mu.Unlock()
-
-	// Set serverListReceived to true so fallback will not take effect if it has
-	// not hit timeout.
-	lb.serverListReceived = true
-
-	// If the new server list == old server list, do nothing.
-	if reflect.DeepEqual(lb.fullServerList, l.Servers) {
-		grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
-		return
-	}
-	lb.fullServerList = l.Servers
-
-	var backendAddrs []resolver.Address
-	for _, s := range l.Servers {
-		if s.DropForLoadBalancing || s.DropForRateLimiting {
-			continue
-		}
-
-		md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
-		ip := net.IP(s.IpAddress)
-		ipStr := ip.String()
-		if ip.To4() == nil {
-			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
-			// net.SplitHostPort() will return too many colons error.
-			ipStr = fmt.Sprintf("[%s]", ipStr)
-		}
-		addr := resolver.Address{
-			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
-			Metadata: &md,
-		}
-
-		backendAddrs = append(backendAddrs, addr)
-	}
-
-	// Call refreshSubConns to create/remove SubConns.
-	backendsUpdated := lb.refreshSubConns(backendAddrs)
-	// If no backend was updated, no SubConn will be created/removed. But since
-	// the full serverList was different, there might be updates in drops or
-	// pick weights (a different number of duplicates). We need to update the
-	// picker with the full list.
-	if !backendsUpdated {
-		lb.regeneratePicker()
-		lb.cc.UpdateBalancerState(lb.state, lb.picker)
-	}
-}
-
-// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
-// indicating whether the backendAddrs are different from the cached
-// backendAddrs (whether any SubConn was created/removed).
-// Caller must hold lb.mu.
-func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
-	lb.backendAddrs = nil
-	var backendsUpdated bool
-	// addrsSet is the set converted from backendAddrs; it's used for quick
-	// lookup of an address.
-	addrsSet := make(map[resolver.Address]struct{})
-	// Create new SubConns.
-	for _, addr := range backendAddrs {
-		addrWithoutMD := addr
-		addrWithoutMD.Metadata = nil
-		addrsSet[addrWithoutMD] = struct{}{}
-		lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
-
-		if _, ok := lb.subConns[addrWithoutMD]; !ok {
-			backendsUpdated = true
-
-			// Use addrWithMD to create the SubConn.
-			sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
-			if err != nil {
-				grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
-				continue
-			}
-			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
-			lb.scStates[sc] = connectivity.Idle
-			sc.Connect()
-		}
-	}
-
-	for a, sc := range lb.subConns {
-		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
-			backendsUpdated = true
-
-			lb.cc.RemoveSubConn(sc)
-			delete(lb.subConns, a)
-			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-		}
-	}
-
-	return backendsUpdated
-}
-
-func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
-	for {
-		reply, err := s.Recv()
-		if err != nil {
-			return fmt.Errorf("grpclb: failed to recv server list: %v", err)
-		}
-		if serverList := reply.GetServerList(); serverList != nil {
-			lb.processServerList(serverList)
-		}
-	}
-}
-
-func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
-	ticker := time.NewTicker(interval)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ticker.C:
-		case <-s.Context().Done():
-			return
-		}
-		stats := lb.clientStats.toClientStats()
-		t := time.Now()
-		stats.Timestamp = &lbpb.Timestamp{
-			Seconds: t.Unix(),
-			Nanos:   int32(t.Nanosecond()),
-		}
-		if err := s.Send(&lbpb.LoadBalanceRequest{
-			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
-				ClientStats: stats,
-			},
-		}); err != nil {
-			return
-		}
-	}
-}
-func (lb *lbBalancer) callRemoteBalancer() error {
-	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
-	if err != nil {
-		return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
-	}
-
-	// grpclb handshake on the stream.
-	initReq := &lbpb.LoadBalanceRequest{
-		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
-			InitialRequest: &lbpb.InitialLoadBalanceRequest{
-				Name: lb.target,
-			},
-		},
-	}
-	if err := stream.Send(initReq); err != nil {
-		return fmt.Errorf("grpclb: failed to send init request: %v", err)
-	}
-	reply, err := stream.Recv()
-	if err != nil {
-		return fmt.Errorf("grpclb: failed to recv init response: %v", err)
-	}
-	initResp := reply.GetInitialResponse()
-	if initResp == nil {
-		return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
-	}
-	if initResp.LoadBalancerDelegate != "" {
-		return fmt.Errorf("grpclb: Delegation is not supported")
-	}
-
-	go func() {
-		if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
-			lb.sendLoadReport(stream, d)
-		}
-	}()
-	return lb.readServerList(stream)
-}
-
-func (lb *lbBalancer) watchRemoteBalancer() {
-	for {
-		err := lb.callRemoteBalancer()
-		select {
-		case <-lb.doneCh:
-			return
-		default:
-			if err != nil {
-				grpclog.Error(err)
-			}
-		}
-
-	}
-}
-
-func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
-	var dopts []DialOption
-	if creds := lb.opt.DialCreds; creds != nil {
-		if err := creds.OverrideServerName(remoteLBName); err == nil {
-			dopts = append(dopts, WithTransportCredentials(creds))
-		} else {
-			grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
-			dopts = append(dopts, WithInsecure())
-		}
-	} else {
-		dopts = append(dopts, WithInsecure())
-	}
-	if lb.opt.Dialer != nil {
-		// WithDialer takes a different type of function, so we instead use a
-		// special DialOption here.
-		dopts = append(dopts, withContextDialer(lb.opt.Dialer))
-	}
-	// Explicitly set pickfirst as the balancer.
-	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
-	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
-	// Dial using manualResolver.Scheme, which is a random scheme generated
-	// when init grpclb. The target name is not important.
-	cc, err := Dial("grpclb:///grpclb.server", dopts...)
-	if err != nil {
-		grpclog.Fatalf("failed to dial: %v", err)
-	}
-	lb.ccRemoteLB = cc
-	go lb.watchRemoteBalancer()
-}
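refreshSubConns above is a set diff keyed on the address without metadata: new addresses get a SubConn created, addresses that disappeared get theirs removed, and the boolean result records whether anything changed. A small sketch of that bookkeeping, assuming plain strings as addresses and a hypothetical diffAddrs helper:

package main

import "fmt"

// diffAddrs mirrors the refreshSubConns bookkeeping: build a set from the
// new address list, report entries missing from current as added, report
// entries the new list no longer contains as removed, and record whether
// anything changed at all.
func diffAddrs(current map[string]bool, next []string) (added, removed []string, changed bool) {
	nextSet := make(map[string]bool, len(next))
	for _, a := range next {
		nextSet[a] = true
		if !current[a] {
			added = append(added, a)
			changed = true
		}
	}
	for a := range current {
		if !nextSet[a] {
			removed = append(removed, a)
			changed = true
		}
	}
	return added, removed, changed
}

func main() {
	current := map[string]bool{"10.0.0.1:80": true, "10.0.0.2:80": true}
	added, removed, changed := diffAddrs(current, []string{"10.0.0.2:80", "10.0.0.3:80"})
	fmt.Println(added, removed, changed) // [10.0.0.3:80] [10.0.0.1:80] true
}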
diff --git a/vendor/google.golang.org/grpc/grpclb_util.go b/vendor/google.golang.org/grpc/grpclb_util.go
deleted file mode 100644
index 93ab2db3..00000000
--- a/vendor/google.golang.org/grpc/grpclb_util.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// The parent ClientConn should re-resolve when grpclb loses connection to the
-// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
-// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
-// ResolveNow, and eventually results in re-resolve happening in parent
-// ClientConn's resolver (DNS for example).
-//
-//                          parent
-//                          ClientConn
-//  +-----------------------------------------------------------------+
-//  |             parent          +---------------------------------+ |
-//  | DNS         ClientConn      |  grpclb                         | |
-//  | resolver    balancerWrapper |                                 | |
-//  | +              +            |    grpclb          grpclb       | |
-//  | |              |            |    ManualResolver  ClientConn   | |
-//  | |              |            |     +              +            | |
-//  | |              |            |     |              | Transient  | |
-//  | |              |            |     |              | Failure    | |
-//  | |              |            |     |  <---------  |            | |
-//  | |              | <--------------- |  ResolveNow  |            | |
-//  | |  <---------  | ResolveNow |     |              |            | |
-//  | |  ResolveNow  |            |     |              |            | |
-//  | |              |            |     |              |            | |
-//  | +              +            |     +              +            | |
-//  |                             +---------------------------------+ |
-//  +-----------------------------------------------------------------+
-
-// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
-// resolver with a special ResolveNow() function.
-//
-// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
-// so when the grpclb client loses contact with remote balancers, the parent
-// ClientConn's resolver will re-resolve.
-type lbManualResolver struct {
-	scheme string
-	ccr    resolver.ClientConn
-
-	ccb balancer.ClientConn
-}
-
-func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
-	r.ccr = cc
-	return r, nil
-}
-
-func (r *lbManualResolver) Scheme() string {
-	return r.scheme
-}
-
-// ResolveNow calls resolveNow on the parent ClientConn.
-func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
-	r.ccb.ResolveNow(o)
-}
-
-// Close is a noop for Resolver.
-func (*lbManualResolver) Close() {}
-
-// NewAddress calls cc.NewAddress.
-func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
-	r.ccr.NewAddress(addrs)
-}
-
-// NewServiceConfig calls cc.NewServiceConfig.
-func (r *lbManualResolver) NewServiceConfig(sc string) {
-	r.ccr.NewServiceConfig(sc)
-}
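The forwarding in ResolveNow is the whole point of lbManualResolver: a ResolveNow on the inner grpclb ClientConn is turned into a ResolveNow on the parent ClientConn, which re-runs the parent resolver (DNS, for example). A tiny sketch of that pattern, with hypothetical parentConn and manualResolver types standing in for the real balancer.ClientConn and resolver wiring:

package main

import "fmt"

// resolveNower is a hypothetical stand-in for the parent connection's
// ResolveNow hook (balancer.ClientConn in the real code).
type resolveNower interface {
	ResolveNow()
}

type parentConn struct{}

func (parentConn) ResolveNow() { fmt.Println("parent: re-resolving target") }

// manualResolver forwards ResolveNow calls from the inner grpclb ClientConn
// to the parent, so a TransientFailure inside grpclb triggers a fresh DNS
// lookup in the parent resolver.
type manualResolver struct {
	parent resolveNower
}

func (r *manualResolver) ResolveNow() { r.parent.ResolveNow() }

func main() {
	r := &manualResolver{parent: parentConn{}}
	r.ResolveNow()
}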
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 53f17752..07083832 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -19,6 +19,13 @@
 // the godoc of the top-level grpc package.
 package internal
 
+// TestingCloseConns closes all existing transports but keeps
+// grpcServer.lis accepting new connections.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingCloseConns func(grpcServer interface{})
+
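TestingCloseConns follows a common Go pattern for breaking an import cycle: the low-level package exposes an untyped function variable, and the higher-level package assigns it at init time and asserts the concrete type inside. A hedged single-file sketch of that pattern, with a hypothetical Server type standing in for *grpc.Server:

package main

import "fmt"

// Server is a hypothetical type standing in for *grpc.Server.
type Server struct{ name string }

// closeConnsHook mirrors the internal.TestingCloseConns shape: an untyped
// package-level hook that a higher-level package assigns at init time, so
// the lower-level package can expose behaviour without importing the type.
var closeConnsHook func(srv interface{})

func init() {
	closeConnsHook = func(srv interface{}) {
		s := srv.(*Server) // the hook's owner knows the concrete type
		fmt.Println("closing transports on", s.name)
	}
}

func main() {
	closeConnsHook(&Server{name: "test-server"})
}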
 // TestingUseHandlerImpl enables the http.Handler-based server implementation.
 // It must be called before Serve and requires TLS credentials.
 //
diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go
index 57b65d7b..8bdf21e7 100644
--- a/vendor/google.golang.org/grpc/naming/go17.go
+++ b/vendor/google.golang.org/grpc/naming/go17.go
@@ -1,4 +1,4 @@
-// +build go1.6,!go1.8
+// +build go1.7, !go1.8
 
 /*
  *
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index db82bfb3..9085dbc9 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -97,7 +97,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 		p = bp.picker
 		bp.mu.Unlock()
 
-		subConn, done, err := p.Pick(ctx, opts)
+		subConn, put, err := p.Pick(ctx, opts)
 
 		if err != nil {
 			switch err {
@@ -120,7 +120,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
-			return t, done, nil
+			return t, put, nil
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index bf659d49..7f993ef5 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -26,9 +26,6 @@ import (
 	"google.golang.org/grpc/resolver"
 )
 
-// PickFirstBalancerName is the name of the pick_first balancer.
-const PickFirstBalancerName = "pick_first"
-
 func newPickfirstBuilder() balancer.Builder {
 	return &pickfirstBuilder{}
 }
@@ -40,7 +37,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions
 }
 
 func (*pickfirstBuilder) Name() string {
-	return PickFirstBalancerName
+	return "pickfirst"
 }
 
 type pickfirstBalancer struct {
@@ -60,20 +57,14 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
 			return
 		}
 		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
-		b.sc.Connect()
 	} else {
 		b.sc.UpdateAddresses(addrs)
-		b.sc.Connect()
 	}
 }
 
 func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
 	grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
-	if b.sc != sc {
-		grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
-		return
-	}
-	if s == connectivity.Shutdown {
+	if b.sc != sc || s == connectivity.Shutdown {
 		b.sc = nil
 		return
 	}
@@ -102,7 +93,3 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.
 	}
 	return p.sc, nil, nil
 }
-
-func init() {
-	balancer.Register(newPickfirstBuilder())
-}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
index 2d40236e..3e17efec 100644
--- a/vendor/google.golang.org/grpc/proxy.go
+++ b/vendor/google.golang.org/grpc/proxy.go
@@ -82,7 +82,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
 		Header: map[string][]string{"User-Agent": {grpcUA}},
 	})
 
-	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
 		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
 	}
 
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
deleted file mode 100644
index a543a709..00000000
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package dns implements a dns resolver to be installed as the default resolver
-// in grpc.
-package dns
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"math/rand"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-func init() {
-	resolver.Register(NewBuilder())
-}
-
-const (
-	defaultPort = "443"
-	defaultFreq = time.Minute * 30
-	golang      = "GO"
-	// In DNS, service config is encoded in a TXT record via the mechanism
-	// described in RFC-1464 using the attribute name grpc_config.
-	txtAttribute = "grpc_config="
-)
-
-var errMissingAddr = errors.New("missing address")
-
-// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
-func NewBuilder() resolver.Builder {
-	return &dnsBuilder{freq: defaultFreq}
-}
-
-type dnsBuilder struct {
-	// frequency of polling the DNS server.
-	freq time.Duration
-}
-
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	host, port, err := parseTarget(target.Endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	// IP address.
-	if net.ParseIP(host) != nil {
-		host, _ = formatIP(host)
-		addr := []resolver.Address{{Addr: host + ":" + port}}
-		i := &ipResolver{
-			cc: cc,
-			ip: addr,
-			rn: make(chan struct{}, 1),
-			q:  make(chan struct{}),
-		}
-		cc.NewAddress(addr)
-		go i.watcher()
-		return i, nil
-	}
-
-	// DNS address (non-IP).
-	ctx, cancel := context.WithCancel(context.Background())
-	d := &dnsResolver{
-		freq:   b.freq,
-		host:   host,
-		port:   port,
-		ctx:    ctx,
-		cancel: cancel,
-		cc:     cc,
-		t:      time.NewTimer(0),
-		rn:     make(chan struct{}, 1),
-	}
-
-	d.wg.Add(1)
-	go d.watcher()
-	return d, nil
-}
-
-// Scheme returns the naming scheme of this resolver builder, which is "dns".
-func (b *dnsBuilder) Scheme() string {
-	return "dns"
-}
-
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
-	cc resolver.ClientConn
-	ip []resolver.Address
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
-	rn chan struct{}
-	q  chan struct{}
-}
-
-// ResolveNow resends the address it stores; no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
-	select {
-	case i.rn <- struct{}{}:
-	default:
-	}
-}
-
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
-	close(i.q)
-}
-
-func (i *ipResolver) watcher() {
-	for {
-		select {
-		case <-i.rn:
-			i.cc.NewAddress(i.ip)
-		case <-i.q:
-			return
-		}
-	}
-}
-
-// dnsResolver watches for the name resolution update for a non-IP target.
-type dnsResolver struct {
-	freq   time.Duration
-	host   string
-	port   string
-	ctx    context.Context
-	cancel context.CancelFunc
-	cc     resolver.ClientConn
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
-	rn chan struct{}
-	t  *time.Timer
-	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
-	// Otherwise, a data race is possible. [Race example] In dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
-	// sometimes warns that lookup (READ of the lookup function pointers) inside the
-	// watcher() goroutine races with replaceNetFunc (WRITE of the lookup function pointers).
-	wg sync.WaitGroup
-}
-
-// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
-	select {
-	case d.rn <- struct{}{}:
-	default:
-	}
-}
-
-// Close closes the dnsResolver.
-func (d *dnsResolver) Close() {
-	d.cancel()
-	d.wg.Wait()
-	d.t.Stop()
-}
-
-func (d *dnsResolver) watcher() {
-	defer d.wg.Done()
-	for {
-		select {
-		case <-d.ctx.Done():
-			return
-		case <-d.t.C:
-		case <-d.rn:
-		}
-		result, sc := d.lookup()
-		// Next lookup should happen after an interval defined by d.freq.
-		d.t.Reset(d.freq)
-		d.cc.NewServiceConfig(string(sc))
-		d.cc.NewAddress(result)
-	}
-}
-
-func (d *dnsResolver) lookupSRV() []resolver.Address {
-	var newAddrs []resolver.Address
-	_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
-	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, s := range srvs {
-		lbAddrs, err := lookupHost(d.ctx, s.Target)
-		if err != nil {
-			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-				continue
-			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
-			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
-		}
-	}
-	return newAddrs
-}
-
-func (d *dnsResolver) lookupTXT() string {
-	ss, err := lookupTXT(d.ctx, d.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
-		return ""
-	}
-	var res string
-	for _, s := range ss {
-		res += s
-	}
-
-	// TXT record must have "grpc_config=" attribute in order to be used as service config.
-	if !strings.HasPrefix(res, txtAttribute) {
-		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
-		return ""
-	}
-	return strings.TrimPrefix(res, txtAttribute)
-}
-
-func (d *dnsResolver) lookupHost() []resolver.Address {
-	var newAddrs []resolver.Address
-	addrs, err := lookupHost(d.ctx, d.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, a := range addrs {
-		a, ok := formatIP(a)
-		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
-		}
-		addr := a + ":" + d.port
-		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
-	}
-	return newAddrs
-}
-
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	var newAddrs []resolver.Address
-	newAddrs = d.lookupSRV()
-	// Support fallback to non-balancer address.
-	newAddrs = append(newAddrs, d.lookupHost()...)
-	sc := d.lookupTXT()
-	return newAddrs, canaryingSC(sc)
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
-	}
-	if ip.To4() != nil {
-		return addr, true
-	}
-	return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
-	if target == "" {
-		return "", "", errMissingAddr
-	}
-	if ip := net.ParseIP(target); ip != nil {
-		// target is an IPv4 or IPv6(without brackets) address
-		return target, defaultPort, nil
-	}
-	if host, port, err = net.SplitHostPort(target); err == nil {
-		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
-		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
-			host = "localhost"
-		}
-		if port == "" {
-			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
-			port = defaultPort
-		}
-		return host, port, nil
-	}
-	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
-		// target doesn't have port
-		return host, port, nil
-	}
-	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
-}
-
-type rawChoice struct {
-	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
-	Percentage     *int             `json:"percentage,omitempty"`
-	ClientHostName *[]string        `json:"clientHostName,omitempty"`
-	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
-}
-
-func containsString(a *[]string, b string) bool {
-	if a == nil {
-		return true
-	}
-	for _, c := range *a {
-		if c == b {
-			return true
-		}
-	}
-	return false
-}
-
-func chosenByPercentage(a *int) bool {
-	if a == nil {
-		return true
-	}
-	s := rand.NewSource(time.Now().UnixNano())
-	r := rand.New(s)
-	if r.Intn(100)+1 > *a {
-		return false
-	}
-	return true
-}
-
-func canaryingSC(js string) string {
-	if js == "" {
-		return ""
-	}
-	var rcs []rawChoice
-	err := json.Unmarshal([]byte(js), &rcs)
-	if err != nil {
-		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
-		return ""
-	}
-	cliHostname, err := os.Hostname()
-	if err != nil {
-		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
-		return ""
-	}
-	var sc string
-	for _, c := range rcs {
-		if !containsString(c.ClientLanguage, golang) ||
-			!chosenByPercentage(c.Percentage) ||
-			!containsString(c.ClientHostName, cliHostname) ||
-			c.ServiceConfig == nil {
-			continue
-		}
-		sc = string(*c.ServiceConfig)
-		break
-	}
-	return sc
-}
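The canarying above selects the first rawChoice whose clientLanguage, percentage and clientHostName gates all pass and returns its serviceConfig. A small sketch of a hypothetical grpc_config payload in that shape (a 10% canary for Go clients followed by a catch-all default) and a simplified selection over it:

package main

import (
	"encoding/json"
	"fmt"
)

type choice struct {
	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
	Percentage     *int             `json:"percentage,omitempty"`
	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
}

func main() {
	// Hypothetical TXT payload (after stripping the "grpc_config=" prefix):
	// a 10% canary for Go clients, then an unconditional default.
	js := `[
		{"clientLanguage": ["GO"], "percentage": 10, "serviceConfig": {"loadBalancingPolicy": "round_robin"}},
		{"serviceConfig": {}}
	]`
	var cs []choice
	if err := json.Unmarshal([]byte(js), &cs); err != nil {
		panic(err)
	}
	for _, c := range cs {
		// The real selection also checks clientHostName and rolls the
		// percentage dice; here we just take the first entry that carries
		// a service config.
		if c.ServiceConfig != nil {
			fmt.Println(string(*c.ServiceConfig))
			break
		}
	}
}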
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go
deleted file mode 100644
index b466bc8f..00000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go17.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.6, !go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
-	"net"
-
-	"golang.org/x/net/context"
-)
-
-var (
-	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
-	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-		return net.LookupSRV(service, proto, name)
-	}
-	lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
-)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go
deleted file mode 100644
index fa34f14c..00000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go18.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import "net"
-
-var (
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-	lookupTXT  = net.DefaultResolver.LookupTXT
-)
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
deleted file mode 100644
index b76010d7..00000000
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package passthrough implements a pass-through resolver. It sends the target
-// name without scheme back to gRPC as resolved address.
-package passthrough
-
-import "google.golang.org/grpc/resolver"
-
-const scheme = "passthrough"
-
-type passthroughBuilder struct{}
-
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	r := &passthroughResolver{
-		target: target,
-		cc:     cc,
-	}
-	r.start()
-	return r, nil
-}
-
-func (*passthroughBuilder) Scheme() string {
-	return scheme
-}
-
-type passthroughResolver struct {
-	target resolver.Target
-	cc     resolver.ClientConn
-}
-
-func (r *passthroughResolver) start() {
-	r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
-}
-
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
-
-func (*passthroughResolver) Close() {}
-
-func init() {
-	resolver.Register(&passthroughBuilder{})
-}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index df097eed..49307e8f 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -24,7 +24,7 @@ var (
 	// m is a map from scheme to resolver builder.
 	m = make(map[string]Builder)
 	// defaultScheme is the default scheme to use.
-	defaultScheme = "passthrough"
+	defaultScheme string
 )
 
 // TODO(bar) install dns resolver in init(){}.
@@ -38,7 +38,7 @@ func Register(b Builder) {
 // Get returns the resolver builder registered with the given scheme.
 // If no builder is registered with the scheme, the default scheme will
 // be used.
-// If the default scheme is not modified, "passthrough" will be the default
+// If the default scheme is not modified, "dns" will be the default
 // scheme, and the preinstalled dns resolver will be used.
 // If the default scheme is modified, and a resolver is registered with
 // the scheme, that resolver will be returned.
@@ -55,7 +55,7 @@ func Get(scheme string) Builder {
 }
 
 // SetDefaultScheme sets the default scheme that will be used.
-// The default default scheme is "passthrough".
+// The default default scheme is "dns".
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
 }
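Register, Get and SetDefaultScheme amount to a scheme-to-builder map with a fallback. A compact sketch of that registry shape, using a hypothetical builder interface and a dns stand-in rather than the real resolver.Builder:

package main

import "fmt"

// builder is a stand-in for resolver.Builder: it is looked up by scheme.
type builder interface{ Scheme() string }

type dnsBuilder struct{}

func (dnsBuilder) Scheme() string { return "dns" }

var (
	registry      = make(map[string]builder)
	defaultScheme = "dns"
)

// register and get mirror resolver.Register/Get: builders are keyed by
// scheme, and an unknown scheme falls back to the default scheme's builder.
func register(b builder) { registry[b.Scheme()] = b }

func get(scheme string) builder {
	if b, ok := registry[scheme]; ok {
		return b
	}
	return registry[defaultScheme]
}

func main() {
	register(dnsBuilder{})
	fmt.Println(get("dns").Scheme())     // dns
	fmt.Println(get("unknown").Scheme()) // falls back to dns
}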
@@ -78,9 +78,7 @@ type Address struct {
 	// Type is the type of this address.
 	Type AddressType
 	// ServerName is the name of this address.
-	//
-	// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
-	// balancer, not the name of the backend.
+	// It's the name of the grpc load balancer, which will be used for authentication.
 	ServerName string
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
@@ -90,18 +88,10 @@ type Address struct {
 // BuildOption includes additional information for the builder to create
 // the resolver.
 type BuildOption struct {
-	// UserOptions can be used to pass configuration between DialOptions and the
-	// resolver.
-	UserOptions interface{}
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
 // to the gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
 type ClientConn interface {
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
@@ -138,10 +128,8 @@ type ResolveNowOption struct{}
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
 type Resolver interface {
-	// ResolveNow will be called by gRPC to try to resolve the target name
-	// again. It's just a hint, resolver can ignore this if it's not necessary.
-	//
-	// It could be called multiple times concurrently.
+	// ResolveNow will be called by gRPC to try to resolve the target name again.
+	// It's just a hint, resolver can ignore this if it's not necessary.
 	ResolveNow(ResolveNowOption)
 	// Close closes the resolver.
 	Close()
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index ef5d4c28..7d53964d 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"fmt"
 	"strings"
 
 	"google.golang.org/grpc/grpclog"
@@ -37,24 +36,20 @@ type ccResolverWrapper struct {
 }
 
 // split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", s, false) instead.
-func split2(s, sep string) (string, string, bool) {
+// If sep is not found, it returns "", s instead.
+func split2(s, sep string) (string, string) {
 	spl := strings.SplitN(s, sep, 2)
 	if len(spl) < 2 {
-		return "", "", false
+		return "", s
 	}
-	return spl[0], spl[1], true
+	return spl[0], spl[1]
 }
 
 // parseTarget splits target into a struct containing scheme, authority and
 // endpoint.
 func parseTarget(target string) (ret resolver.Target) {
-	var ok bool
-	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
+	ret.Scheme, ret.Endpoint = split2(target, "://")
+	ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
 	return ret
 }
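The rewritten split2/parseTarget treats a target as scheme://authority/endpoint, with empty strings filled in when a separator is missing. A standalone sketch that reproduces the split and prints the three parts for a few example targets:

package main

import (
	"fmt"
	"strings"
)

// split2 mirrors the helper above: split on the first separator, returning
// "" for the left part when the separator is absent.
func split2(s, sep string) (string, string) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", s
	}
	return spl[0], spl[1]
}

func main() {
	for _, target := range []string{
		"dns://8.8.8.8/example.com:443", // scheme, authority and endpoint
		"dns:///example.com:443",        // empty authority
		"example.com:443",               // no scheme: everything is endpoint
	} {
		scheme, rest := split2(target, "://")
		authority, endpoint := split2(rest, "/")
		fmt.Printf("scheme=%q authority=%q endpoint=%q\n", scheme, authority, endpoint)
	}
}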
 
@@ -62,17 +57,18 @@ func parseTarget(target string) (ret resolver.Target) {
 // builder for this scheme. It then builds the resolver and starts the
 // monitoring goroutine for it.
 //
-// If withResolverBuilder dial option is set, the specified resolver will be
-// used instead.
+// This function could return nil, nil, in tests for old behaviors.
+// TODO(bar) never return nil, nil when DNS becomes the default resolver.
 func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
+	target := parseTarget(cc.target)
+	grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
 
-	rb := cc.dopts.resolverBuilder
+	rb := resolver.Get(target.Scheme)
 	if rb == nil {
-		rb = resolver.Get(cc.parsedTarget.Scheme)
-		if rb == nil {
-			return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
-		}
+		// TODO(bar) return error when DNS becomes the default (implemented and
+		// registered by DNS package).
+		grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
+		return nil, nil
 	}
 
 	ccr := &ccResolverWrapper{
@@ -83,17 +79,12 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
 	}
 
 	var err error
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{
-		UserOptions: cc.dopts.resolverBuildUserOptions,
-	})
+	ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
 	if err != nil {
 		return nil, err
 	}
-	return ccr, nil
-}
-
-func (ccr *ccResolverWrapper) start() {
 	go ccr.watcher()
+	return ccr, nil
 }
 
 // watcher processes address updates and service config updates sequencially.
@@ -109,31 +100,20 @@ func (ccr *ccResolverWrapper) watcher() {
 
 		select {
 		case addrs := <-ccr.addrCh:
-			select {
-			case <-ccr.done:
-				return
-			default:
+			grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
+			// TODO(bar switching) this should never be nil. Pickfirst should be default.
+			if ccr.cc.balancerWrapper != nil {
+				// TODO(bar switching) create balancer if it's nil?
+				ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
 			}
-			grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
-			ccr.cc.handleResolvedAddrs(addrs, nil)
 		case sc := <-ccr.scCh:
-			select {
-			case <-ccr.done:
-				return
-			default:
-			}
 			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-			ccr.cc.handleServiceConfig(sc)
 		case <-ccr.done:
 			return
 		}
 	}
 }
 
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
-	ccr.resolver.ResolveNow(o)
-}
-
 func (ccr *ccResolverWrapper) close() {
 	ccr.resolver.Close()
 	close(ccr.done)
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 1a4c288b..188a75ff 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -21,17 +21,18 @@ package grpc
 import (
 	"bytes"
 	"compress/gzip"
+	stdctx "context"
 	"encoding/binary"
 	"io"
 	"io/ioutil"
 	"math"
+	"os"
 	"sync"
 	"time"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -123,7 +124,6 @@ func (d *gzipDecompressor) Type() string {
 
 // callInfo contains all related configuration and information about an RPC.
 type callInfo struct {
-	compressorType        string
 	failFast              bool
 	headerMD              metadata.MD
 	trailerMD             metadata.MD
@@ -195,15 +195,12 @@ func Peer(peer *peer.Peer) CallOption {
 }
 
 // FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers.  If failFast is true, the RPC will fail
+// connections or unreachable servers. If failfast is true, the RPC will fail
 // immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error.  gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data.  Please refer to
+// connection is available (or the call is canceled or times out) and will retry
+// the call if it fails due to a transient error. Please refer to
 // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
-//
-// By default, RPCs are "Fail Fast".
+// Note: failFast is default to true.
 func FailFast(failFast bool) CallOption {
 	return beforeCall(func(c *callInfo) error {
 		c.failFast = failFast
@@ -236,18 +233,6 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
 	})
 }
 
-// UseCompressor returns a CallOption which sets the compressor used when
-// sending the request.  If WithCompressor is also set, UseCompressor has
-// higher priority.
-//
-// This API is EXPERIMENTAL.
-func UseCompressor(name string) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.compressorType = name
-		return nil
-	})
-}
-
 // The format of the payload: compressed or not?
 type payloadFormat uint8
 
@@ -292,11 +277,8 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 	if length == 0 {
 		return pf, nil, nil
 	}
-	if int64(length) > int64(maxInt) {
-		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
-	}
-	if int(length) > maxReceiveMessageSize {
-		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+	if length > uint32(maxReceiveMessageSize) {
+		return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
 	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
 	// of making it for each message:
@@ -312,21 +294,18 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 
 // encode serializes msg and returns a buffer of message header and a buffer of msg.
 // If msg is nil, it generates the message header and an empty msg buffer.
-// TODO(ddyihai): eliminate extra Compressor parameter.
-func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
-	var (
-		b    []byte
-		cbuf *bytes.Buffer
-	)
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) {
+	var b []byte
 	const (
 		payloadLen = 1
 		sizeLen    = 4
 	)
+
 	if msg != nil {
 		var err error
 		b, err = c.Marshal(msg)
 		if err != nil {
-			return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+			return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 		}
 		if outPayload != nil {
 			outPayload.Payload = msg
@@ -334,35 +313,24 @@ func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayloa
 			outPayload.Data = b
 			outPayload.Length = len(b)
 		}
-		if compressor != nil || cp != nil {
-			cbuf = new(bytes.Buffer)
-			// Has compressor, check Compressor is set by UseCompressor first.
-			if compressor != nil {
-				z, _ := compressor.Compress(cbuf)
-				if _, err := z.Write(b); err != nil {
-					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
-				}
-				z.Close()
-			} else {
-				// If Compressor is not set by UseCompressor, use default Compressor
-				if err := cp.Do(cbuf, b); err != nil {
-					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
-				}
+		if cp != nil {
+			if err := cp.Do(cbuf, b); err != nil {
+				return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
 			}
 			b = cbuf.Bytes()
 		}
 	}
+
 	if uint(len(b)) > math.MaxUint32 {
-		return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+		return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
 	}
 
 	bufHeader := make([]byte, payloadLen+sizeLen)
-	if compressor != nil || cp != nil {
-		bufHeader[0] = byte(compressionMade)
-	} else {
+	if cp == nil {
 		bufHeader[0] = byte(compressionNone)
+	} else {
+		bufHeader[0] = byte(compressionMade)
 	}
-
 	// Write length of b into buf
 	binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
 	if outPayload != nil {
@@ -371,26 +339,20 @@ func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayloa
 	return bufHeader, b, nil
 }
 
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
 	switch pf {
 	case compressionNone:
 	case compressionMade:
-		if recvCompress == "" || recvCompress == encoding.Identity {
-			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
-		}
-		if !haveCompressor {
-			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		if dc == nil || recvCompress != dc.Type() {
+			return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 		}
 	default:
-		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+		return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
 	}
 	return nil
 }
 
-// For the two compressor parameters, both should not be set, but if they are,
-// dc takes precedence over compressor.
-// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
 	pf, d, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return err
@@ -398,37 +360,22 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 	if inPayload != nil {
 		inPayload.WireLength = len(d)
 	}
-
-	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
-		return st.Err()
+	if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
+		return err
 	}
-
 	if pf == compressionMade {
-		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
-		// use this decompressor as the default.
-		if dc != nil {
-			d, err = dc.Do(bytes.NewReader(d))
-			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
-		} else {
-			dcReader, err := compressor.Decompress(bytes.NewReader(d))
-			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
-			d, err = ioutil.ReadAll(dcReader)
-			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
+		d, err = dc.Do(bytes.NewReader(d))
+		if err != nil {
+			return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 		}
 	}
 	if len(d) > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
 	}
 	if err := c.Unmarshal(d, m); err != nil {
-		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+		return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
 	}
 	if inPayload != nil {
 		inPayload.RecvTime = time.Now()
@@ -441,7 +388,9 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 }
 
 type rpcInfo struct {
-	failfast bool
+	failfast      bool
+	bytesSent     bool
+	bytesReceived bool
 }
 
 type rpcInfoContextKey struct{}
@@ -455,10 +404,69 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
 	return
 }
 
+func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
+	if ss, ok := rpcInfoFromContext(ctx); ok {
+		ss.bytesReceived = s.bytesReceived
+		ss.bytesSent = s.bytesSent
+	}
+	return
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.StreamError:
+		return status.Error(e.Code, e.Desc)
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded, stdctx.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled, stdctx.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		case ErrClientConnClosing:
+			return status.Error(codes.FailedPrecondition, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate the error returned by the server applications.
+func convertCode(err error) codes.Code {
+	switch err {
+	case nil:
+		return codes.OK
+	case io.EOF:
+		return codes.OutOfRange
+	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+		return codes.FailedPrecondition
+	case os.ErrInvalid:
+		return codes.InvalidArgument
+	case context.Canceled, stdctx.Canceled:
+		return codes.Canceled
+	case context.DeadlineExceeded, stdctx.DeadlineExceeded:
+		return codes.DeadlineExceeded
+	}
+	switch {
+	case os.IsExist(err):
+		return codes.AlreadyExists
+	case os.IsNotExist(err):
+		return codes.NotFound
+	case os.IsPermission(err):
+		return codes.PermissionDenied
+	}
+	return codes.Unknown
+}
+
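convertCode above maps well-known sentinel errors and os.Is* classes onto canonical codes, with Unknown as the catch-all. A self-contained sketch of the same classification shape, using plain strings in place of codes.Code:

package main

import (
	"context"
	"fmt"
	"io"
	"os"
)

// code is a minimal stand-in for codes.Code, just for illustration.
type code string

// classify mirrors the shape of convertCode: well-known sentinel errors map
// to fixed codes, os.Is* predicates catch filesystem errors, and anything
// else is Unknown.
func classify(err error) code {
	switch err {
	case nil:
		return "OK"
	case io.EOF:
		return "OutOfRange"
	case context.Canceled:
		return "Canceled"
	case context.DeadlineExceeded:
		return "DeadlineExceeded"
	}
	switch {
	case os.IsNotExist(err):
		return "NotFound"
	case os.IsPermission(err):
		return "PermissionDenied"
	}
	return "Unknown"
}

func main() {
	_, err := os.Open("/definitely/does/not/exist")
	fmt.Println(classify(err))                      // NotFound
	fmt.Println(classify(context.DeadlineExceeded)) // DeadlineExceeded
}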
 // Code returns the error code for err if it was produced by the rpc system.
 // Otherwise, it returns codes.Unknown.
 //
-// Deprecated: use status.FromError and Code method instead.
+// Deprecated; use status.FromError and Code method instead.
 func Code(err error) codes.Code {
 	if s, ok := status.FromError(err); ok {
 		return s.Code()
@@ -469,7 +477,7 @@ func Code(err error) codes.Code {
 // ErrorDesc returns the error description of err if it was produced by the rpc system.
 // Otherwise, it returns err.Error() or empty string when err is nil.
 //
-// Deprecated: use status.FromError and Message method instead.
+// Deprecated; use status.FromError and Message method instead.
 func ErrorDesc(err error) string {
 	if s, ok := status.FromError(err); ok {
 		return s.Message()
@@ -480,26 +488,85 @@ func ErrorDesc(err error) string {
 // Errorf returns an error containing an error code and a description;
 // Errorf returns nil if c is OK.
 //
-// Deprecated: use status.Errorf instead.
+// Deprecated; use status.Errorf instead.
 func Errorf(c codes.Code, format string, a ...interface{}) error {
 	return status.Errorf(c, format, a...)
 }
 
-// The SupportPackageIsVersion variables are referenced from generated protocol
-// buffer files to ensure compatibility with the gRPC version used.  The latest
-// support package version is 5.
-//
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+// This is EXPERIMENTAL and subject to change.
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+// This is EXPERIMENTAL and subject to change.
+type ServiceConfig struct {
+	// LB is the load balancer the service providers recommends. The balancer specified
+	// via grpc.WithBalancer will override this.
+	LB Balancer
+	// Methods contains a map for the methods in this service.
+	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+	// Otherwise, the method has no MethodConfig to use.
+	Methods map[string]MethodConfig
+}
+
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
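getMaxSize resolves a message-size limit from two optional sources: nil means unset, the smaller value wins when both are set, one set value wins over an unset one, and the built-in default applies when neither is set. A worked sketch with hypothetical 1 MiB and 4 MiB limits:

package main

import "fmt"

// effectiveMax mirrors getMaxSize above, returning a plain int for clarity.
func effectiveMax(mcMax, doptMax *int, defaultVal int) int {
	switch {
	case mcMax == nil && doptMax == nil:
		return defaultVal
	case mcMax != nil && doptMax != nil:
		if *mcMax < *doptMax {
			return *mcMax
		}
		return *doptMax
	case mcMax != nil:
		return *mcMax
	default:
		return *doptMax
	}
}

func main() {
	fromServiceConfig := 1 << 20 // hypothetical 1 MiB limit from MethodConfig
	fromDialOption := 4 << 20    // hypothetical 4 MiB limit from a DialOption
	fmt.Println(effectiveMax(&fromServiceConfig, &fromDialOption, 4<<20)) // 1048576
	fmt.Println(effectiveMax(nil, nil, 4<<20))                            // 4194304
}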
+
+// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
+// The latest support package version is 4.
+// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
+// next support package version update.
+const SupportPackageIsVersion3 = true
+
+// SupportPackageIsVersion4 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the grpc package.
 //
-// These constants should not be referenced from any other code.
-const (
-	SupportPackageIsVersion3 = true
-	SupportPackageIsVersion4 = true
-	SupportPackageIsVersion5 = true
-)
+// This constant may be renamed in the future if a change in the generated code
+// requires a synchronised update of grpc-go and protoc-gen-go. This constant
+// should not be referenced from any other code.
+const SupportPackageIsVersion4 = true
 
 // Version is the current grpc version.
-const Version = "1.9.2"
+const Version = "1.7.5"
 
 const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index f6516216..787665df 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -32,14 +32,11 @@ import (
 	"sync"
 	"time"
 
-	"io/ioutil"
-
 	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/keepalive"
@@ -92,15 +89,13 @@ type Server struct {
 	conns  map[io.Closer]bool
 	serve  bool
 	drain  bool
-	cv     *sync.Cond          // signaled when connections close for GracefulStop
+	ctx    context.Context
+	cancel context.CancelFunc
+	// A CondVar to let GracefulStop() block until all the pending RPCs are finished
+	// and all the transports go away.
+	cv     *sync.Cond
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
-
-	quit     chan struct{}
-	done     chan struct{}
-	quitOnce sync.Once
-	doneOnce sync.Once
-	serveWG  sync.WaitGroup // counts active Serve goroutines for GracefulStop
 }
 
 type options struct {
@@ -188,24 +183,14 @@ func CustomCodec(codec Codec) ServerOption {
 	}
 }
 
-// RPCCompressor returns a ServerOption that sets a compressor for outbound
-// messages.  For backward compatibility, all outbound messages will be sent
-// using this compressor, regardless of incoming message compression.  By
-// default, server messages will be sent using the same compressor with which
-// request messages were sent.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
+// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
 func RPCCompressor(cp Compressor) ServerOption {
 	return func(o *options) {
 		o.cp = cp
 	}
 }
 
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
-// messages.  It has higher priority than decompressors registered via
-// encoding.RegisterCompressor.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
 func RPCDecompressor(dc Decompressor) ServerOption {
 	return func(o *options) {
 		o.dc = dc
@@ -312,8 +297,6 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
 // connection establishment (up to and including HTTP/2 handshaking) for all
 // new connections.  If this is not set, the default is 120 seconds.  A zero or
 // negative value will result in an immediate timeout.
-//
-// This API is EXPERIMENTAL.
 func ConnectionTimeout(d time.Duration) ServerOption {
 	return func(o *options) {
 		o.connectionTimeout = d
@@ -336,10 +319,9 @@ func NewServer(opt ...ServerOption) *Server {
 		opts:  opts,
 		conns: make(map[io.Closer]bool),
 		m:     make(map[string]*service),
-		quit:  make(chan struct{}),
-		done:  make(chan struct{}),
 	}
 	s.cv = sync.NewCond(&s.mu)
+	s.ctx, s.cancel = context.WithCancel(context.Background())
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@@ -448,9 +430,11 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
 	return ret
 }
 
-// ErrServerStopped indicates that the operation is now illegal because of
-// the server being stopped.
-var ErrServerStopped = errors.New("grpc: the server has been stopped")
+var (
+	// ErrServerStopped indicates that the operation is now illegal because of
+	// the server being stopped.
+	ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
 
 func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
 	if s.opts.creds == nil {
@@ -464,29 +448,16 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
 // read gRPC requests and then call the registered handlers to reply to them.
 // Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
 // this method returns.
-// Serve will return a non-nil error unless Stop or GracefulStop is called.
+// Serve always returns a non-nil error.
 func (s *Server) Serve(lis net.Listener) error {
 	s.mu.Lock()
 	s.printf("serving")
 	s.serve = true
 	if s.lis == nil {
-		// Serve called after Stop or GracefulStop.
 		s.mu.Unlock()
 		lis.Close()
 		return ErrServerStopped
 	}
-
-	s.serveWG.Add(1)
-	defer func() {
-		s.serveWG.Done()
-		select {
-		// Stop or GracefulStop called; block until done and return nil.
-		case <-s.quit:
-			<-s.done
-		default:
-		}
-	}()
-
 	s.lis[lis] = true
 	s.mu.Unlock()
 	defer func() {
@@ -520,39 +491,25 @@ func (s *Server) Serve(lis net.Listener) error {
 				timer := time.NewTimer(tempDelay)
 				select {
 				case <-timer.C:
-				case <-s.quit:
-					timer.Stop()
-					return nil
+				case <-s.ctx.Done():
 				}
+				timer.Stop()
 				continue
 			}
 			s.mu.Lock()
 			s.printf("done serving; Accept = %v", err)
 			s.mu.Unlock()
-
-			select {
-			case <-s.quit:
-				return nil
-			default:
-			}
 			return err
 		}
 		tempDelay = 0
-		// Start a new goroutine to deal with rawConn so we don't stall this Accept
-		// loop goroutine.
-		//
-		// Make sure we account for the goroutine so GracefulStop doesn't nil out
-		// s.conns before this conn can be added.
-		s.serveWG.Add(1)
-		go func() {
-			s.handleRawConn(rawConn)
-			s.serveWG.Done()
-		}()
+		// Start a new goroutine to deal with rawConn
+		// so we don't stall this Accept loop goroutine.
+		go s.handleRawConn(rawConn)
 	}
 }
 
-// handleRawConn forks a goroutine to handle a just-accepted connection that
-// has not had any I/O performed on it yet.
+// handleRawConn is run in its own goroutine and handles a just-accepted
+// connection that has not had any I/O performed on it yet.
 func (s *Server) handleRawConn(rawConn net.Conn) {
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
@@ -577,28 +534,17 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
 	}
 	s.mu.Unlock()
 
-	var serve func()
-	c := conn.(io.Closer)
 	if s.opts.useHandlerImpl {
-		serve = func() { s.serveUsingHandler(conn) }
+		rawConn.SetDeadline(time.Time{})
+		s.serveUsingHandler(conn)
 	} else {
-		// Finish handshaking (HTTP2)
 		st := s.newHTTP2Transport(conn, authInfo)
 		if st == nil {
 			return
 		}
-		c = st
-		serve = func() { s.serveStreams(st) }
-	}
-
-	rawConn.SetDeadline(time.Time{})
-	if !s.addConn(c) {
-		return
+		rawConn.SetDeadline(time.Time{})
+		s.serveStreams(st)
 	}
-	go func() {
-		serve()
-		s.removeConn(c)
-	}()
 }
 
 // newHTTP2Transport sets up a http/2 transport (using the
@@ -625,10 +571,15 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
 		return nil
 	}
+	if !s.addConn(st) {
+		st.Close()
+		return nil
+	}
 	return st
 }
 
 func (s *Server) serveStreams(st transport.ServerTransport) {
+	defer s.removeConn(st)
 	defer st.Close()
 	var wg sync.WaitGroup
 	st.HandleStreams(func(stream *transport.Stream) {
@@ -662,6 +613,11 @@ var _ http.Handler = (*Server)(nil)
 //
 // conn is the *tls.Conn that's already been authenticated.
 func (s *Server) serveUsingHandler(conn net.Conn) {
+	if !s.addConn(conn) {
+		conn.Close()
+		return
+	}
+	defer s.removeConn(conn)
 	h2s := &http2.Server{
 		MaxConcurrentStreams: s.opts.maxConcurrentStreams,
 	}
@@ -701,6 +657,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	if !s.addConn(st) {
+		st.Close()
 		return
 	}
 	defer s.removeConn(st)
@@ -730,15 +687,9 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
 func (s *Server) addConn(c io.Closer) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.conns == nil {
-		c.Close()
+	if s.conns == nil || s.drain {
 		return false
 	}
-	if s.drain {
-		// Transport added after we drained our existing conns: drain it
-		// immediately.
-		c.(transport.ServerTransport).Drain()
-	}
 	s.conns[c] = true
 	return true
 }
@@ -752,14 +703,18 @@ func (s *Server) removeConn(c io.Closer) {
 	}
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
 	var (
+		cbuf       *bytes.Buffer
 		outPayload *stats.OutPayload
 	)
+	if cp != nil {
+		cbuf = new(bytes.Buffer)
+	}
 	if s.opts.statsHandler != nil {
 		outPayload = &stats.OutPayload{}
 	}
-	hdr, data, err := encode(s.opts.codec, msg, cp, outPayload, comp)
+	hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
 	if err != nil {
 		grpclog.Errorln("grpc: server failed to encode response: ", err)
 		return err
@@ -803,43 +758,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			}
 		}()
 	}
-
-	// comp and cp are used for compression.  decomp and dc are used for
-	// decompression.  If comp and decomp are both set, they are the same;
-	// however they are kept separate to ensure that at most one of the
-	// compressor/decompressor variable pairs are set for use later.
-	var comp, decomp encoding.Compressor
-	var cp Compressor
-	var dc Decompressor
-
-	// If dc is set and matches the stream's compression, use it.  Otherwise, try
-	// to find a matching registered compressor for decomp.
-	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
-		dc = s.opts.dc
-	} else if rc != "" && rc != encoding.Identity {
-		decomp = encoding.GetCompressor(rc)
-		if decomp == nil {
-			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(stream, st)
-			return st.Err()
-		}
-	}
-
-	// If cp is set, use it.  Otherwise, attempt to compress the response using
-	// the incoming message compression method.
-	//
-	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
 	if s.opts.cp != nil {
-		cp = s.opts.cp
-		stream.SetSendCompress(cp.Type())
-	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
-		// Legacy compressor not specified; attempt to respond with same encoding.
-		comp = encoding.GetCompressor(rc)
-		if comp != nil {
-			stream.SetSendCompress(rc)
-		}
+		// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+		stream.SetSendCompress(s.opts.cp.Type())
 	}
-
 	p := &parser{r: stream}
 	pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
 	if err == io.EOF {
@@ -847,7 +769,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		return err
 	}
 	if err == io.ErrUnexpectedEOF {
-		err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
 	}
 	if err != nil {
 		if st, ok := status.FromError(err); ok {
@@ -868,11 +790,19 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		return err
 	}
-	if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
-		if e := t.WriteStatus(stream, st); e != nil {
+
+	if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
+		if st, ok := status.FromError(err); ok {
+			if e := t.WriteStatus(stream, st); e != nil {
+				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+			}
+			return err
+		}
+		if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
 			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
 		}
-		return st.Err()
+
+		// TODO checkRecvPayload always returns an RPC error. Add a return here if necessary.
 	}
 	var inPayload *stats.InPayload
 	if sh != nil {
@@ -886,17 +816,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		if pf == compressionMade {
 			var err error
-			if dc != nil {
-				req, err = dc.Do(bytes.NewReader(req))
-				if err != nil {
-					return status.Errorf(codes.Internal, err.Error())
-				}
-			} else {
-				tmp, _ := decomp.Decompress(bytes.NewReader(req))
-				req, err = ioutil.ReadAll(tmp)
-				if err != nil {
-					return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-				}
+			req, err = s.opts.dc.Do(bytes.NewReader(req))
+			if err != nil {
+				return Errorf(codes.Internal, err.Error())
 			}
 		}
 		if len(req) > s.opts.maxReceiveMessageSize {
@@ -942,8 +864,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		Last:  true,
 		Delay: false,
 	}
-
-	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+	if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
@@ -992,45 +913,21 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			sh.HandleRPC(stream.Context(), end)
 		}()
 	}
+	if s.opts.cp != nil {
+		stream.SetSendCompress(s.opts.cp.Type())
+	}
 	ss := &serverStream{
 		t:     t,
 		s:     stream,
 		p:     &parser{r: stream},
 		codec: s.opts.codec,
+		cp:    s.opts.cp,
+		dc:    s.opts.dc,
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
 		trInfo:                trInfo,
 		statsHandler:          sh,
 	}
-
-	// If dc is set and matches the stream's compression, use it.  Otherwise, try
-	// to find a matching registered compressor for decomp.
-	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
-		ss.dc = s.opts.dc
-	} else if rc != "" && rc != encoding.Identity {
-		ss.decomp = encoding.GetCompressor(rc)
-		if ss.decomp == nil {
-			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(ss.s, st)
-			return st.Err()
-		}
-	}
-
-	// If cp is set, use it.  Otherwise, attempt to compress the response using
-	// the incoming message compression method.
-	//
-	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
-	if s.opts.cp != nil {
-		ss.cp = s.opts.cp
-		stream.SetSendCompress(s.opts.cp.Type())
-	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
-		// Legacy compressor not specified; attempt to respond with same encoding.
-		ss.comp = encoding.GetCompressor(rc)
-		if ss.comp != nil {
-			stream.SetSendCompress(rc)
-		}
-	}
-
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 		defer func() {
@@ -1174,17 +1071,6 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
-
-	defer func() {
-		s.serveWG.Wait()
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
-	}()
-
 	s.mu.Lock()
 	listeners := s.lis
 	s.lis = nil
@@ -1202,6 +1088,7 @@ func (s *Server) Stop() {
 	}
 
 	s.mu.Lock()
+	s.cancel()
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
@@ -1213,38 +1100,22 @@ func (s *Server) Stop() {
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
-
-	defer func() {
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
-	}()
-
 	s.mu.Lock()
+	defer s.mu.Unlock()
 	if s.conns == nil {
-		s.mu.Unlock()
 		return
 	}
 	for lis := range s.lis {
 		lis.Close()
 	}
 	s.lis = nil
+	s.cancel()
 	if !s.drain {
 		for c := range s.conns {
 			c.(transport.ServerTransport).Drain()
 		}
 		s.drain = true
 	}
-
-	// Wait for serving threads to be ready to exit.  Only then can we be sure no
-	// new conns will be created.
-	s.mu.Unlock()
-	s.serveWG.Wait()
-	s.mu.Lock()
-
 	for len(s.conns) != 0 {
 		s.cv.Wait()
 	}
@@ -1253,15 +1124,28 @@ func (s *Server) GracefulStop() {
 		s.events.Finish()
 		s.events = nil
 	}
-	s.mu.Unlock()
 }
 
 func init() {
+	internal.TestingCloseConns = func(arg interface{}) {
+		arg.(*Server).testingCloseConns()
+	}
 	internal.TestingUseHandlerImpl = func(arg interface{}) {
 		arg.(*Server).opts.useHandlerImpl = true
 	}
 }
 
+// testingCloseConns closes all existing transports but keeps s.lis
+// accepting new connections.
+func (s *Server) testingCloseConns() {
+	s.mu.Lock()
+	for c := range s.conns {
+		c.Close()
+		delete(s.conns, c)
+	}
+	s.mu.Unlock()
+}
+
 // SetHeader sets the header metadata.
 // When called multiple times, all the provided metadata will be merged.
 // All the metadata will be sent out when one of the following happens:
@@ -1274,7 +1158,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 	}
 	stream, ok := transport.StreamFromContext(ctx)
 	if !ok {
-		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	return stream.SetHeader(md)
 }
@@ -1284,7 +1168,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 func SendHeader(ctx context.Context, md metadata.MD) error {
 	stream, ok := transport.StreamFromContext(ctx)
 	if !ok {
-		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	t := stream.ServerTransport()
 	if t == nil {
@@ -1304,7 +1188,7 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
 	}
 	stream, ok := transport.StreamFromContext(ctx)
 	if !ok {
-		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	return stream.SetTrailer(md)
 }
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
deleted file mode 100644
index 53fa88f3..00000000
--- a/vendor/google.golang.org/grpc/service_config.go
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-const maxInt = int(^uint(0) >> 1)
-
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-// DEPRECATED: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
-	// WaitForReady indicates whether RPCs sent to this method should wait until
-	// the connection is ready by default (!failfast). The value specified via the
-	// gRPC client API will override the value set here.
-	WaitForReady *bool
-	// Timeout is the default timeout for RPCs sent to this method. The actual
-	// deadline used will be the minimum of the value specified here and the value
-	// set by the application via the gRPC client API.  If either one is not set,
-	// then the other will be used.  If neither is set, then the RPC has no deadline.
-	Timeout *time.Duration
-	// MaxReqSize is the maximum allowed payload size for an individual request in a
-	// stream (client->server) in bytes. The size which is measured is the serialized
-	// payload after per-message compression (but before stream compression) in bytes.
-	// The actual value used is the minimum of the value specified here and the value set
-	// by the application via the gRPC client API. If either one is not set, then the other
-	// will be used.  If neither is set, then the built-in default is used.
-	MaxReqSize *int
-	// MaxRespSize is the maximum allowed payload size for an individual response in a
-	// stream (server->client) in bytes.
-	MaxRespSize *int
-}
-
-// ServiceConfig is provided by the service provider and contains parameters for how
-// clients that connect to the service should behave.
-// DEPRECATED: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type ServiceConfig struct {
-	// LB is the load balancer the service providers recommends. The balancer specified
-	// via grpc.WithBalancer will override this.
-	LB *string
-	// Methods contains a map for the methods in this service.
-	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
-	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
-	// Otherwise, the method has no MethodConfig to use.
-	Methods map[string]MethodConfig
-}
-
-func parseDuration(s *string) (*time.Duration, error) {
-	if s == nil {
-		return nil, nil
-	}
-	if !strings.HasSuffix(*s, "s") {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
-	if len(ss) > 2 {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-	// hasDigits is set if either the whole or fractional part of the number is
-	// present, since both are optional but one is required.
-	hasDigits := false
-	var d time.Duration
-	if len(ss[0]) > 0 {
-		i, err := strconv.ParseInt(ss[0], 10, 32)
-		if err != nil {
-			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
-		}
-		d = time.Duration(i) * time.Second
-		hasDigits = true
-	}
-	if len(ss) == 2 && len(ss[1]) > 0 {
-		if len(ss[1]) > 9 {
-			return nil, fmt.Errorf("malformed duration %q", *s)
-		}
-		f, err := strconv.ParseInt(ss[1], 10, 64)
-		if err != nil {
-			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
-		}
-		for i := 9; i > len(ss[1]); i-- {
-			f *= 10
-		}
-		d += time.Duration(f)
-		hasDigits = true
-	}
-	if !hasDigits {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-
-	return &d, nil
-}
-
-type jsonName struct {
-	Service *string
-	Method  *string
-}
-
-func (j jsonName) generatePath() (string, bool) {
-	if j.Service == nil {
-		return "", false
-	}
-	res := "/" + *j.Service + "/"
-	if j.Method != nil {
-		res += *j.Method
-	}
-	return res, true
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonMC struct {
-	Name                    *[]jsonName
-	WaitForReady            *bool
-	Timeout                 *string
-	MaxRequestMessageBytes  *int64
-	MaxResponseMessageBytes *int64
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonSC struct {
-	LoadBalancingPolicy *string
-	MethodConfig        *[]jsonMC
-}
-
-func parseServiceConfig(js string) (ServiceConfig, error) {
-	var rsc jsonSC
-	err := json.Unmarshal([]byte(js), &rsc)
-	if err != nil {
-		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-		return ServiceConfig{}, err
-	}
-	sc := ServiceConfig{
-		LB:      rsc.LoadBalancingPolicy,
-		Methods: make(map[string]MethodConfig),
-	}
-	if rsc.MethodConfig == nil {
-		return sc, nil
-	}
-
-	for _, m := range *rsc.MethodConfig {
-		if m.Name == nil {
-			continue
-		}
-		d, err := parseDuration(m.Timeout)
-		if err != nil {
-			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return ServiceConfig{}, err
-		}
-
-		mc := MethodConfig{
-			WaitForReady: m.WaitForReady,
-			Timeout:      d,
-		}
-		if m.MaxRequestMessageBytes != nil {
-			if *m.MaxRequestMessageBytes > int64(maxInt) {
-				mc.MaxReqSize = newInt(maxInt)
-			} else {
-				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
-			}
-		}
-		if m.MaxResponseMessageBytes != nil {
-			if *m.MaxResponseMessageBytes > int64(maxInt) {
-				mc.MaxRespSize = newInt(maxInt)
-			} else {
-				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
-			}
-		}
-		for _, n := range *m.Name {
-			if path, valid := n.generatePath(); valid {
-				sc.Methods[path] = mc
-			}
-		}
-	}
-
-	return sc, nil
-}
-
-func min(a, b *int) *int {
-	if *a < *b {
-		return a
-	}
-	return b
-}
-
-func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
-	if mcMax == nil && doptMax == nil {
-		return &defaultVal
-	}
-	if mcMax != nil && doptMax != nil {
-		return min(mcMax, doptMax)
-	}
-	if mcMax != nil {
-		return mcMax
-	}
-	return doptMax
-}
-
-func newInt(b int) *int {
-	return &b
-}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index d9defaeb..871dc4b3 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -125,8 +125,8 @@ func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
 		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
 	}
-	if se, ok := err.(*statusError); ok {
-		return se.status(), true
+	if s, ok := err.(*statusError); ok {
+		return s.status(), true
 	}
 	return nil, false
 }
@@ -166,16 +166,3 @@ func (s *Status) Details() []interface{} {
 	}
 	return details
 }
-
-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
-func Code(err error) codes.Code {
-	// Don't use FromError to avoid allocation of OK status.
-	if err == nil {
-		return codes.OK
-	}
-	if se, ok := err.(*statusError); ok {
-		return se.status().Code()
-	}
-	return codes.Unknown
-}
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index f9138199..75eab40b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"bytes"
 	"errors"
 	"io"
 	"sync"
@@ -28,7 +29,6 @@ import (
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -94,23 +94,15 @@ type ClientStream interface {
 	Stream
 }
 
-// NewStream creates a new Stream for the client side. This is typically
-// called by generated code.
-func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
 	if cc.dopts.streamInt != nil {
 		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
 	}
 	return newClientStream(ctx, desc, cc, method, opts...)
 }
 
-// NewClientStream creates a new Stream for the client side. This is typically
-// called by generated code.
-//
-// DEPRECATED: Use ClientConn.NewStream instead.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
-	return cc.NewStream(ctx, desc, method, opts...)
-}
-
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
 	var (
 		t      transport.ClientTransport
@@ -124,7 +116,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		c.failFast = !*mc.WaitForReady
 	}
 
-	if mc.Timeout != nil && *mc.Timeout >= 0 {
+	if mc.Timeout != nil {
 		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
 		defer func() {
 			if err != nil {
@@ -151,24 +143,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		// time soon, so we ask the transport to flush the header.
 		Flush: desc.ClientStreams,
 	}
-
-	// Set our outgoing compression according to the UseCompressor CallOption, if
-	// set.  In that case, also find the compressor from the encoding package.
-	// Otherwise, use the compressor configured by the WithCompressor DialOption,
-	// if set.
-	var cp Compressor
-	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" {
-		callHdr.SendCompress = ct
-		if ct != encoding.Identity {
-			comp = encoding.GetCompressor(ct)
-			if comp == nil {
-				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
-			}
-		}
-	} else if cc.dopts.cp != nil {
+	if cc.dopts.cp != nil {
 		callHdr.SendCompress = cc.dopts.cp.Type()
-		cp = cc.dopts.cp
 	}
 	if c.creds != nil {
 		callHdr.Creds = c.creds
@@ -213,46 +189,42 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 			}
 		}()
 	}
-
 	for {
-		// Check to make sure the context has expired.  This will prevent us from
-		// looping forever if an error occurs for wait-for-ready RPCs where no data
-		// is sent on the wire.
-		select {
-		case <-ctx.Done():
-			return nil, toRPCErr(ctx.Err())
-		default:
-		}
-
 		t, done, err = cc.getTransport(ctx, c.failFast)
 		if err != nil {
-			return nil, err
+			// TODO(zhaoq): Probably revisit the error handling.
+			if _, ok := status.FromError(err); ok {
+				return nil, err
+			}
+			if err == errConnClosing || err == errConnUnavailable {
+				if c.failFast {
+					return nil, Errorf(codes.Unavailable, "%v", err)
+				}
+				continue
+			}
+			// All the other errors are treated as Internal errors.
+			return nil, Errorf(codes.Internal, "%v", err)
 		}
 
 		s, err = t.NewStream(ctx, callHdr)
 		if err != nil {
+			if _, ok := err.(transport.ConnectionError); ok && done != nil {
+				// If error is connection error, transport was sending data on wire,
+				// and we are not sure if anything has been sent on wire.
+				// If error is not connection error, we are sure nothing has been sent.
+				updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
+			}
 			if done != nil {
-				doneInfo := balancer.DoneInfo{Err: err}
-				if _, ok := err.(transport.ConnectionError); ok {
-					// If error is connection error, transport was sending data on wire,
-					// and we are not sure if anything has been sent on wire.
-					// If error is not connection error, we are sure nothing has been sent.
-					doneInfo.BytesSent = true
-				}
-				done(doneInfo)
+				done(balancer.DoneInfo{Err: err})
 				done = nil
 			}
-			// In the event of any error from NewStream, we never attempted to write
-			// anything to the wire, so we can retry indefinitely for non-fail-fast
-			// RPCs.
-			if !c.failFast {
+			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
 				continue
 			}
 			return nil, toRPCErr(err)
 		}
 		break
 	}
-
 	// Set callInfo.peer object from stream's context.
 	if peer, ok := peer.FromContext(s.Context()); ok {
 		c.peer = peer
@@ -262,9 +234,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		c:      c,
 		desc:   desc,
 		codec:  cc.dopts.codec,
-		cp:     cp,
+		cp:     cc.dopts.cp,
 		dc:     cc.dopts.dc,
-		comp:   comp,
 		cancel: cancel,
 
 		done: done,
@@ -278,8 +249,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		statsCtx:     ctx,
 		statsHandler: cc.dopts.copts.StatsHandler,
 	}
-	// Listen on s.Context().Done() to detect cancellation and s.Done() to detect
-	// normal termination when there is no pending I/O operations on this stream.
+	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
+	// when there are no pending I/O operations on this stream.
 	go func() {
 		select {
 		case <-t.Error():
@@ -306,20 +277,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 
 // clientStream implements a client side Stream.
 type clientStream struct {
-	opts []CallOption
-	c    *callInfo
-	t    transport.ClientTransport
-	s    *transport.Stream
-	p    *parser
-	desc *StreamDesc
-
-	codec     Codec
-	cp        Compressor
-	dc        Decompressor
-	comp      encoding.Compressor
-	decomp    encoding.Compressor
-	decompSet bool
-
+	opts   []CallOption
+	c      *callInfo
+	t      transport.ClientTransport
+	s      *transport.Stream
+	p      *parser
+	desc   *StreamDesc
+	codec  Codec
+	cp     Compressor
+	dc     Decompressor
 	cancel context.CancelFunc
 
 	tracing bool // set to EnableTracing when the clientStream is created.
@@ -395,15 +361,15 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
 			Client: true,
 		}
 	}
-	hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
+	hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
 	if err != nil {
 		return err
 	}
 	if cs.c.maxSendMessageSize == nil {
-		return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
 	}
 	if len(data) > *cs.c.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
 	}
 	err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
 	if err == nil && outPayload != nil {
@@ -421,25 +387,9 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
 		}
 	}
 	if cs.c.maxReceiveMessageSize == nil {
-		return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-	}
-	if !cs.decompSet {
-		// Block until we receive headers containing received message encoding.
-		if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
-			if cs.dc == nil || cs.dc.Type() != ct {
-				// No configured decompressor, or it does not match the incoming
-				// message encoding; attempt to find a registered compressor that does.
-				cs.dc = nil
-				cs.decomp = encoding.GetCompressor(ct)
-			}
-		} else {
-			// No compression is used; disable our decompressor.
-			cs.dc = nil
-		}
-		// Only initialize this state once per stream.
-		cs.decompSet = true
+		return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
 	}
-	err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
+	err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
 	defer func() {
 		// err != nil indicates the termination of the stream.
 		if err != nil {
@@ -463,9 +413,9 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
 		// Special handling for client streaming rpc.
 		// This recv expects EOF or errors, so we don't collect inPayload.
 		if cs.c.maxReceiveMessageSize == nil {
-			return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
 		}
-		err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
+		err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
 		cs.closeTransportStream(err)
 		if err == nil {
 			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
@@ -536,11 +486,11 @@ func (cs *clientStream) finish(err error) {
 		o.after(cs.c)
 	}
 	if cs.done != nil {
-		cs.done(balancer.DoneInfo{
-			Err:           err,
-			BytesSent:     true,
-			BytesReceived: cs.s.BytesReceived(),
+		updateRPCInfoInContext(cs.s.Context(), rpcInfo{
+			bytesSent:     cs.s.BytesSent(),
+			bytesReceived: cs.s.BytesReceived(),
 		})
+		cs.done(balancer.DoneInfo{Err: err})
 		cs.done = nil
 	}
 	if cs.statsHandler != nil {
@@ -590,16 +540,12 @@ type ServerStream interface {
 
 // serverStream implements a server side Stream.
 type serverStream struct {
-	t     transport.ServerTransport
-	s     *transport.Stream
-	p     *parser
-	codec Codec
-
-	cp     Compressor
-	dc     Decompressor
-	comp   encoding.Compressor
-	decomp encoding.Compressor
-
+	t                     transport.ServerTransport
+	s                     *transport.Stream
+	p                     *parser
+	codec                 Codec
+	cp                    Compressor
+	dc                    Decompressor
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo
@@ -655,12 +601,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 	if ss.statsHandler != nil {
 		outPayload = &stats.OutPayload{}
 	}
-	hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
+	hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
 	if err != nil {
 		return err
 	}
 	if len(data) > ss.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
 	}
 	if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
 		return toRPCErr(err)
@@ -695,12 +641,12 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 	if ss.statsHandler != nil {
 		inPayload = &stats.InPayload{}
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
 		if err == io.EOF {
 			return err
 		}
 		if err == io.ErrUnexpectedEOF {
-			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+			err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
 		}
 		return toRPCErr(err)
 	}
@@ -709,13 +655,3 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 	}
 	return nil
 }
-
-// MethodFromServerStream returns the method string for the input stream.
-// The returned string is in the format of "/service/method".
-func MethodFromServerStream(stream ServerStream) (string, bool) {
-	s, ok := transport.StreamFromContext(stream.Context())
-	if !ok {
-		return "", ok
-	}
-	return s.Method(), ok
-}
diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
index 63cd2627..8dd2ed42 100644
--- a/vendor/google.golang.org/grpc/transport/bdp_estimator.go
+++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
@@ -41,9 +41,12 @@ const (
 	gamma = 2
 )
 
-// Adding arbitrary data to ping so that its ack can be identified.
-// Easter-egg: what does the ping message say?
-var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+var (
+	// Adding arbitrary data to ping so that its ack can be
+	// identified.
+	// Easter-egg: what does the ping message say?
+	bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+)
 
 type bdpEstimator struct {
 	// sentAt is the time when the ping was sent.
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go
index 0474b090..dd1a8d42 100644
--- a/vendor/google.golang.org/grpc/transport/control.go
+++ b/vendor/google.golang.org/grpc/transport/control.go
@@ -20,9 +20,9 @@ package transport
 
 import (
 	"fmt"
-	"io"
 	"math"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"golang.org/x/net/http2"
@@ -49,7 +49,7 @@ const (
 	// defaultLocalSendQuota is the default value for the number of data
 	// bytes that each stream can schedule before some of it is
 	// flushed out.
-	defaultLocalSendQuota = 128 * 1024
+	defaultLocalSendQuota = 64 * 1024
 )
 
 // The following defines various control items which could flow through
@@ -89,16 +89,12 @@ type windowUpdate struct {
 func (*windowUpdate) item() {}
 
 type settings struct {
-	ss []http2.Setting
+	ack bool
+	ss  []http2.Setting
 }
 
 func (*settings) item() {}
 
-type settingsAck struct {
-}
-
-func (*settingsAck) item() {}
-
 type resetStream struct {
 	streamID uint32
 	code     http2.ErrCode
@@ -116,7 +112,6 @@ type goAway struct {
 func (*goAway) item() {}
 
 type flushIO struct {
-	closeTr bool
 }
 
 func (*flushIO) item() {}
@@ -131,8 +126,9 @@ func (*ping) item() {}
 // quotaPool is a pool which accumulates the quota and sends it to acquire()
 // when it is available.
 type quotaPool struct {
+	c chan int
+
 	mu      sync.Mutex
-	c       chan struct{}
 	version uint32
 	quota   int
 }
@@ -140,8 +136,12 @@ type quotaPool struct {
 // newQuotaPool creates a quotaPool which has quota q available to consume.
 func newQuotaPool(q int) *quotaPool {
 	qb := &quotaPool{
-		quota: q,
-		c:     make(chan struct{}, 1),
+		c: make(chan int, 1),
+	}
+	if q > 0 {
+		qb.c <- q
+	} else {
+		qb.quota = q
 	}
 	return qb
 }
@@ -155,83 +155,60 @@ func (qb *quotaPool) add(v int) {
 }
 
 func (qb *quotaPool) lockedAdd(v int) {
-	var wakeUp bool
-	if qb.quota <= 0 {
-		wakeUp = true // Wake up potential waiters.
+	select {
+	case n := <-qb.c:
+		qb.quota += n
+	default:
 	}
 	qb.quota += v
-	if wakeUp && qb.quota > 0 {
-		select {
-		case qb.c <- struct{}{}:
-		default:
-		}
+	if qb.quota <= 0 {
+		return
+	}
+	// After the pool has been created, this is the only place that sends on
+	// the channel. Since mu is held at this point and any quota that was sent
+	// on the channel has been retrieved, we know that this code will always
+	// place any positive quota value on the channel.
+	select {
+	case qb.c <- qb.quota:
+		qb.quota = 0
+	default:
 	}
 }
 
 func (qb *quotaPool) addAndUpdate(v int) {
 	qb.mu.Lock()
+	defer qb.mu.Unlock()
 	qb.lockedAdd(v)
-	qb.version++
-	qb.mu.Unlock()
+	// Update the version only after having added to the quota
+	// so that if acquireWithVersion sees the new version it is
+	// guaranteed to have seen the updated quota.
+	// Also, still keep this inside of the lock, so that when
+	// compareAndExecute is processing, this function doesn't
+	// get executed partially (quota gets updated but the version
+	// doesn't).
+	atomic.AddUint32(&(qb.version), 1)
 }
 
-func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
-	qb.mu.Lock()
-	if qb.quota > 0 {
-		if v > qb.quota {
-			v = qb.quota
-		}
-		qb.quota -= v
-		ver := qb.version
-		qb.mu.Unlock()
-		return v, ver, nil
-	}
-	qb.mu.Unlock()
-	for {
-		select {
-		case <-wc.ctx.Done():
-			return 0, 0, ContextErr(wc.ctx.Err())
-		case <-wc.tctx.Done():
-			return 0, 0, ErrConnClosing
-		case <-wc.done:
-			return 0, 0, io.EOF
-		case <-wc.goAway:
-			return 0, 0, errStreamDrain
-		case <-qb.c:
-			qb.mu.Lock()
-			if qb.quota > 0 {
-				if v > qb.quota {
-					v = qb.quota
-				}
-				qb.quota -= v
-				ver := qb.version
-				if qb.quota > 0 {
-					select {
-					case qb.c <- struct{}{}:
-					default:
-					}
-				}
-				qb.mu.Unlock()
-				return v, ver, nil
-
-			}
-			qb.mu.Unlock()
-		}
-	}
+func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
+	return qb.c, atomic.LoadUint32(&(qb.version))
 }
 
 func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
 	qb.mu.Lock()
-	if version == qb.version {
+	defer qb.mu.Unlock()
+	if version == atomic.LoadUint32(&(qb.version)) {
 		success()
-		qb.mu.Unlock()
 		return true
 	}
 	failure()
-	qb.mu.Unlock()
 	return false
 }
 
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+	return qb.c
+}
+
 // inFlow deals with inbound flow control
 type inFlow struct {
 	mu sync.Mutex
diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go
deleted file mode 100644
index 5babcf9b..00000000
--- a/vendor/google.golang.org/grpc/transport/go16.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
-	"net"
-	"net/http"
-
-	"google.golang.org/grpc/codes"
-
-	"golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a background context.
-func contextFromRequest(r *http.Request) context.Context {
-	return context.Background()
-}
diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go
deleted file mode 100644
index b7fa6bdb..00000000
--- a/vendor/google.golang.org/grpc/transport/go17.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
-	"context"
-	"net"
-	"net/http"
-
-	"google.golang.org/grpc/codes"
-
-	netctx "golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled, netctx.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a context from the HTTP Request.
-func contextFromRequest(r *http.Request) context.Context {
-	return r.Context()
-}
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go
index 27c4ebb5..7e0fdb35 100644
--- a/vendor/google.golang.org/grpc/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -284,12 +284,12 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 
-	ctx := contextFromRequest(ht.req)
+	var ctx context.Context
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
-		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
+		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
 	} else {
-		ctx, cancel = context.WithCancel(ctx)
+		ctx, cancel = context.WithCancel(context.Background())
 	}
 
 	// requestOver is closed when either the request's context is done
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
index b7f1f548..1abb62e6 100644
--- a/vendor/google.golang.org/grpc/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -20,7 +20,6 @@ package transport
 
 import (
 	"bytes"
-	"fmt"
 	"io"
 	"math"
 	"net"
@@ -45,6 +44,7 @@ import (
 type http2Client struct {
 	ctx        context.Context
 	cancel     context.CancelFunc
+	target     string // server name/addr
 	userAgent  string
 	md         interface{}
 	conn       net.Conn // underlying communication channel
@@ -69,9 +69,6 @@ type http2Client struct {
 	fc         *inFlow
 	// sendQuotaPool provides flow control to outbound message.
 	sendQuotaPool *quotaPool
-	// localSendQuota limits the amount of data that can be scheduled
-	// for writing before it is actually written out.
-	localSendQuota *quotaPool
 	// streamsQuota limits the max number of concurrent streams.
 	streamsQuota *quotaPool
 
@@ -94,11 +91,6 @@ type http2Client struct {
 	bdpEst          *bdpEstimator
 	outQuotaVersion uint32
 
-	// onSuccess is a callback that client transport calls upon
-	// receiving server preface to signal that a succefull HTTP2
-	// connection was established.
-	onSuccess func()
-
 	mu            sync.Mutex     // guard the following variables
 	state         transportState // the state of underlying connection
 	activeStreams map[uint32]*Stream
@@ -117,7 +109,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
 	if fn != nil {
 		return fn(ctx, addr)
 	}
-	return dialContext(ctx, "tcp", addr)
+	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
 }
 
 func isTemporary(err error) bool {
@@ -151,10 +143,12 @@ func isTemporary(err error) bool {
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
+func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
+	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
 	defer func() {
+		connectCancel()
 		if err != nil {
 			cancel()
 		}
@@ -179,7 +173,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 	)
 	if creds := opts.TransportCredentials; creds != nil {
 		scheme = "https"
-		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
+		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn)
 		if err != nil {
 			// Credentials handshake errors are typically considered permanent
 			// to avoid retrying on e.g. bad certificates.
@@ -214,6 +208,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 	t := &http2Client{
 		ctx:        ctx,
 		cancel:     cancel,
+		target:     addr.Addr,
 		userAgent:  opts.UserAgent,
 		md:         addr.Metadata,
 		conn:       conn,
@@ -230,7 +225,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 		controlBuf:        newControlBuffer(),
 		fc:                &inFlow{limit: uint32(icwz)},
 		sendQuotaPool:     newQuotaPool(defaultWindowSize),
-		localSendQuota:    newQuotaPool(defaultLocalSendQuota),
 		scheme:            scheme,
 		state:             reachable,
 		activeStreams:     make(map[uint32]*Stream),
@@ -242,7 +236,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 		kp:                kp,
 		statsHandler:      opts.StatsHandler,
 		initialWindowSize: initialWindowSize,
-		onSuccess:         onSuccess,
 	}
 	if opts.InitialWindowSize >= defaultWindowSize {
 		t.initialWindowSize = opts.InitialWindowSize
@@ -303,7 +296,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 	t.framer.writer.Flush()
 	go func() {
 		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.conn.Close()
+		t.Close()
 	}()
 	if t.kp.Time != infinity {
 		go t.keepalive()
@@ -314,15 +307,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
 	s := &Stream{
-		id:            t.nextID,
-		done:          make(chan struct{}),
-		goAway:        make(chan struct{}),
-		method:        callHdr.Method,
-		sendCompress:  callHdr.SendCompress,
-		buf:           newRecvBuffer(),
-		fc:            &inFlow{limit: uint32(t.initialWindowSize)},
-		sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
-		headerChan:    make(chan struct{}),
+		id:             t.nextID,
+		done:           make(chan struct{}),
+		goAway:         make(chan struct{}),
+		method:         callHdr.Method,
+		sendCompress:   callHdr.SendCompress,
+		buf:            newRecvBuffer(),
+		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
+		sendQuotaPool:  newQuotaPool(int(t.streamSendQuota)),
+		localSendQuota: newQuotaPool(defaultLocalSendQuota),
+		headerChan:     make(chan struct{}),
 	}
 	t.nextID += 2
 	s.requestRead = func(n int) {
@@ -342,12 +336,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 			t.updateWindow(s, uint32(n))
 		},
 	}
-	s.waiters = waiters{
-		ctx:    s.ctx,
-		tctx:   t.ctx,
-		done:   s.done,
-		goAway: s.goAway,
-	}
+
 	return s
 }
 
@@ -413,18 +402,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	}
 	if t.state == draining {
 		t.mu.Unlock()
-		return nil, errStreamDrain
+		return nil, ErrStreamDrain
 	}
 	if t.state != reachable {
 		t.mu.Unlock()
 		return nil, ErrConnClosing
 	}
 	t.mu.Unlock()
-	// Get a quota of 1 from streamsQuota.
-	if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
+	sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
+	if err != nil {
 		return nil, err
 	}
-	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
+	// Returns the quota balance back.
+	if sq > 1 {
+		t.streamsQuota.add(sq - 1)
+	}
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	// Make the slice of certain predictable size to reduce allocations made by append.
 	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
@@ -484,7 +477,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	if t.state == draining {
 		t.mu.Unlock()
 		t.streamsQuota.add(1)
-		return nil, errStreamDrain
+		return nil, ErrStreamDrain
 	}
 	if t.state != reachable {
 		t.mu.Unlock()
@@ -512,6 +505,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	})
 	t.mu.Unlock()
 
+	s.mu.Lock()
+	s.bytesSent = true
+	s.mu.Unlock()
+
 	if t.statsHandler != nil {
 		outHeader := &stats.OutHeader{
 			Client:      true,
@@ -585,16 +582,16 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
 // Close kicks off the shutdown process of the transport. This should be called
 // only once on a transport. Once it is called, the transport should not be
 // accessed any more.
-func (t *http2Client) Close() error {
+func (t *http2Client) Close() (err error) {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return nil
+		return
 	}
 	t.state = closing
 	t.mu.Unlock()
 	t.cancel()
-	err := t.conn.Close()
+	err = t.conn.Close()
 	t.mu.Lock()
 	streams := t.activeStreams
 	t.activeStreams = nil
@@ -645,8 +642,6 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	select {
 	case <-s.ctx.Done():
 		return ContextErr(s.ctx.Err())
-	case <-s.done:
-		return io.EOF
 	case <-t.ctx.Done():
 		return ErrConnClosing
 	default:
@@ -664,44 +659,44 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	}
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
-	var (
-		streamQuota    int
-		streamQuotaVer uint32
-		err            error
-	)
 	for idx, r := range [][]byte{hdr, data} {
 		for len(r) > 0 {
 			size := http2MaxFrameLen
-			if size > len(r) {
-				size = len(r)
-			}
-			if streamQuota == 0 { // Used up all the locally cached stream quota.
-				// Get all the stream quota there is.
-				streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
-				if err != nil {
-					return err
-				}
-			}
-			if size > streamQuota {
-				size = streamQuota
+			// Wait until the stream has some quota to send the data.
+			quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
+			sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan)
+			if err != nil {
+				return err
 			}
-
-			// Get size worth quota from transport.
-			tq, _, err := t.sendQuotaPool.get(size, s.waiters)
+			// Wait until the transport has some quota to send the data.
+			tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
 			if err != nil {
 				return err
 			}
+			if sq < size {
+				size = sq
+			}
 			if tq < size {
 				size = tq
 			}
-			ltq, _, err := t.localSendQuota.get(size, s.waiters)
+			if size > len(r) {
+				size = len(r)
+			}
+			p := r[:size]
+			ps := len(p)
+			if ps < tq {
+				// Overbooked transport quota. Return it back.
+				t.sendQuotaPool.add(tq - ps)
+			}
+			// Acquire local send quota to be able to write to the controlBuf.
+			ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
 			if err != nil {
+				if _, ok := err.(ConnectionError); !ok {
+					t.sendQuotaPool.add(ps)
+				}
 				return err
 			}
-			// even if ltq is smaller than size we don't adjust size since
-			// ltq is only a soft limit.
-			streamQuota -= size
-			p := r[:size]
+			s.localSendQuota.add(ltq - ps) // It's ok if we make it negative.
 			var endStream bool
 			// See if this is the last frame to be written.
 			if opts.Last {
@@ -716,25 +711,21 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 				}
 			}
 			success := func() {
-				ltq := ltq
-				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
-				r = r[size:]
+				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }})
+				if ps < sq {
+					s.sendQuotaPool.lockedAdd(sq - ps)
+				}
+				r = r[ps:]
 			}
-			failure := func() { // The stream quota version must have changed.
-				// Our streamQuota cache is invalidated now, so give it back.
-				s.sendQuotaPool.lockedAdd(streamQuota + size)
+			failure := func() {
+				s.sendQuotaPool.lockedAdd(sq)
 			}
-			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
-				// Couldn't send this chunk out.
-				t.sendQuotaPool.add(size)
-				t.localSendQuota.add(ltq)
-				streamQuota = 0
+			if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
+				t.sendQuotaPool.add(ps)
+				s.localSendQuota.add(ps)
 			}
 		}
 	}
-	if streamQuota > 0 { // Add the left over quota back to stream.
-		s.sendQuotaPool.add(streamQuota)
-	}
 	if !opts.Last {
 		return nil
 	}
@@ -800,6 +791,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
 	t.mu.Unlock()
 	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
 	t.controlBuf.put(&settings{
+		ack: false,
 		ss: []http2.Setting{
 			{
 				ID:  http2.SettingInitialWindowSize,
@@ -902,13 +894,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 		close(s.headerChan)
 		s.headerDone = true
 	}
-
-	code := http2.ErrCode(f.ErrCode)
-	if code == http2.ErrCodeRefusedStream {
-		// The stream was unprocessed by the server.
-		s.unprocessed = true
-	}
-	statusCode, ok := http2ErrConvTab[code]
+	statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
 	if !ok {
 		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
 		statusCode = codes.Unknown
@@ -918,48 +904,17 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 	s.write(recvMsg{err: io.EOF})
 }
 
-func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
 	if f.IsAck() {
 		return
 	}
-	var rs []http2.Setting
-	var ps []http2.Setting
-	isMaxConcurrentStreamsMissing := true
+	var ss []http2.Setting
 	f.ForeachSetting(func(s http2.Setting) error {
-		if s.ID == http2.SettingMaxConcurrentStreams {
-			isMaxConcurrentStreamsMissing = false
-		}
-		if t.isRestrictive(s) {
-			rs = append(rs, s)
-		} else {
-			ps = append(ps, s)
-		}
+		ss = append(ss, s)
 		return nil
 	})
-	if isFirst && isMaxConcurrentStreamsMissing {
-		// This means server is imposing no limits on
-		// maximum number of concurrent streams initiated by client.
-		// So we must remove our self-imposed limit.
-		ps = append(ps, http2.Setting{
-			ID:  http2.SettingMaxConcurrentStreams,
-			Val: math.MaxUint32,
-		})
-	}
-	t.applySettings(rs)
-	t.controlBuf.put(&settingsAck{})
-	t.applySettings(ps)
-}
-
-func (t *http2Client) isRestrictive(s http2.Setting) bool {
-	switch s.ID {
-	case http2.SettingMaxConcurrentStreams:
-		return int(s.Val) < t.maxStreams
-	case http2.SettingInitialWindowSize:
-		// Note: we don't acquire a lock here to read streamSendQuota
-		// because the same goroutine updates it later.
-		return s.Val < t.streamSendQuota
-	}
-	return false
+	// The settings will be applied once the ack is sent.
+	t.controlBuf.put(&settings{ack: true, ss: ss})
 }
 
 func (t *http2Client) handlePing(f *http2.PingFrame) {
@@ -990,16 +945,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 		t.Close()
 		return
 	}
-	// A client can receive multiple GoAways from the server (see
-	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
-	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
-	// sent after an RTT delay with the ID of the last stream the server will
-	// process.
-	//
-	// Therefore, when we get the first GoAway we don't necessarily close any
-	// streams. While in case of second GoAway we close all streams created after
-	// the GoAwayId. This way streams that were in-flight while the GoAway from
-	// server was being sent don't get killed.
+	// A client can receive multiple GoAways from the server (see https://github.com/grpc/grpc-go/issues/1387).
+	// The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
+	// with the ID of the last stream the server will process.
+	// Therefore, when we get the first GoAway we don't necessarily close any streams, whereas on the second GoAway we
+	// close all streams created after its GoAwayId. This way streams that were in-flight while the GoAway from the server
+	// was being sent don't get killed.
 	select {
 	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
@@ -1021,11 +972,6 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	}
 	for streamID, stream := range t.activeStreams {
 		if streamID > id && streamID <= upperLimit {
-			// The stream was unprocessed by the server.
-			stream.mu.Lock()
-			stream.unprocessed = true
-			stream.finish(statusGoAway)
-			stream.mu.Unlock()
 			close(stream.goAway)
 		}
 	}
@@ -1042,11 +988,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 // It expects a lock on the transport's mutex to be held by
 // the caller.
 func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
-	t.goAwayReason = GoAwayNoReason
+	t.goAwayReason = NoReason
 	switch f.ErrCode {
 	case http2.ErrCodeEnhanceYourCalm:
 		if string(f.DebugData()) == "too_many_pings" {
-			t.goAwayReason = GoAwayTooManyPings
+			t.goAwayReason = TooManyPings
 		}
 	}
 }
@@ -1112,13 +1058,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 	}()
 
 	s.mu.Lock()
+	if !endStream {
+		s.recvCompress = state.encoding
+	}
 	if !s.headerDone {
-		// Headers frame is not actually a trailers-only frame.
-		if !endStream {
-			s.recvCompress = state.encoding
-			if len(state.mdata) > 0 {
-				s.header = state.mdata
-			}
+		if !endStream && len(state.mdata) > 0 {
+			s.header = state.mdata
 		}
 		close(s.headerChan)
 		s.headerDone = true
@@ -1128,6 +1073,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		s.mu.Unlock()
 		return
 	}
+
 	if len(state.mdata) > 0 {
 		s.trailer = state.mdata
 	}
@@ -1165,8 +1111,7 @@ func (t *http2Client) reader() {
 		t.Close()
 		return
 	}
-	t.onSuccess()
-	t.handleSettings(sf, true)
+	t.handleSettings(sf)
 
 	// loop to keep reading incoming messages on this transport.
 	for {
@@ -1199,7 +1144,7 @@ func (t *http2Client) reader() {
 		case *http2.RSTStreamFrame:
 			t.handleRSTStream(frame)
 		case *http2.SettingsFrame:
-			t.handleSettings(frame, false)
+			t.handleSettings(frame)
 		case *http2.PingFrame:
 			t.handlePing(frame)
 		case *http2.GoAwayFrame:
@@ -1222,8 +1167,10 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
 			if s.Val > math.MaxInt32 {
 				s.Val = math.MaxInt32
 			}
+			t.mu.Lock()
 			ms := t.maxStreams
 			t.maxStreams = int(s.Val)
+			t.mu.Unlock()
 			t.streamsQuota.add(int(s.Val) - ms)
 		case http2.SettingInitialWindowSize:
 			t.mu.Lock()
@@ -1240,19 +1187,14 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
 // TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
 // is duplicated between the client and the server.
 // The transport layer needs to be refactored to take care of this.
-func (t *http2Client) itemHandler(i item) (err error) {
-	defer func() {
-		if err != nil {
-			errorf(" error in itemHandler: %v", err)
-		}
-	}()
+func (t *http2Client) itemHandler(i item) error {
+	var err error
 	switch i := i.(type) {
 	case *dataFrame:
-		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
-			return err
+		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
+		if err == nil {
+			i.f()
 		}
-		i.f()
-		return nil
 	case *headerFrame:
 		t.hBuf.Reset()
 		for _, f := range i.hf {
@@ -1286,33 +1228,34 @@ func (t *http2Client) itemHandler(i item) (err error) {
 				return err
 			}
 		}
-		return nil
 	case *windowUpdate:
-		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
 	case *settings:
-		return t.framer.fr.WriteSettings(i.ss...)
-	case *settingsAck:
-		return t.framer.fr.WriteSettingsAck()
+		if i.ack {
+			t.applySettings(i.ss)
+			err = t.framer.fr.WriteSettingsAck()
+		} else {
+			err = t.framer.fr.WriteSettings(i.ss...)
+		}
 	case *resetStream:
 		// If the server needs to be informed about stream closing,
 		// then we need to make sure the RST_STREAM frame is written to
 		// the wire before the headers of the next stream waiting on
 		// streamQuota. We ensure this by adding to the streamsQuota pool
 		// only after having acquired the writableChan to send RST_STREAM.
-		err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
+		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
 		t.streamsQuota.add(1)
-		return err
 	case *flushIO:
-		return t.framer.writer.Flush()
+		err = t.framer.writer.Flush()
 	case *ping:
 		if !i.ack {
 			t.bdpEst.timesnap(i.data)
 		}
-		return t.framer.fr.WritePing(i.ack, i.data)
+		err = t.framer.fr.WritePing(i.ack, i.data)
 	default:
-		errorf("transport: http2Client.controller got unexpected item type %v", i)
-		return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
+		errorf("transport: http2Client.controller got unexpected item type %v\n", i)
 	}
+	return err
 }
 
 // keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
index 6d252c53..00df8eed 100644
--- a/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -70,10 +70,7 @@ type http2Server struct {
 	fc         *inFlow
 	// sendQuotaPool provides flow control to outbound message.
 	sendQuotaPool *quotaPool
-	// localSendQuota limits the amount of data that can be scheduled
-	// for writing before it is actually written out.
-	localSendQuota *quotaPool
-	stats          stats.Handler
+	stats         stats.Handler
 	// Flag to keep track of reading activity on transport.
 	// 1 is true and 0 is false.
 	activity uint32 // Accessed atomically.
@@ -202,7 +199,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 		controlBuf:        newControlBuffer(),
 		fc:                &inFlow{limit: uint32(icwz)},
 		sendQuotaPool:     newQuotaPool(defaultWindowSize),
-		localSendQuota:    newQuotaPool(defaultLocalSendQuota),
 		state:             reachable,
 		activeStreams:     make(map[uint32]*Stream),
 		streamSendQuota:   defaultWindowSize,
@@ -228,12 +224,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	}
 	t.framer.writer.Flush()
 
-	defer func() {
-		if err != nil {
-			t.Close()
-		}
-	}()
-
 	// Check the validity of client preface.
 	preface := make([]byte, len(clientPreface))
 	if _, err := io.ReadFull(t.conn, preface); err != nil {
@@ -245,7 +235,8 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 
 	frame, err := t.framer.fr.ReadFrame()
 	if err == io.EOF || err == io.ErrUnexpectedEOF {
-		return nil, err
+		t.Close()
+		return
 	}
 	if err != nil {
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
@@ -259,7 +250,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 
 	go func() {
 		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.conn.Close()
+		t.Close()
 	}()
 	go t.keepalive()
 	return t, nil
@@ -350,6 +341,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 	}
 	t.maxStreamID = streamID
 	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
+	s.localSendQuota = newQuotaPool(defaultLocalSendQuota)
 	t.activeStreams[streamID] = s
 	if len(t.activeStreams) == 1 {
 		t.idle = time.Time{}
@@ -379,10 +371,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			t.updateWindow(s, uint32(n))
 		},
 	}
-	s.waiters = waiters{
-		ctx:  s.ctx,
-		tctx: t.ctx,
-	}
 	handle(s)
 	return
 }
@@ -498,6 +486,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
 	t.mu.Unlock()
 	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
 	t.controlBuf.put(&settings{
+		ack: false,
 		ss: []http2.Setting{
 			{
 				ID:  http2.SettingInitialWindowSize,
@@ -595,29 +584,12 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
 	if f.IsAck() {
 		return
 	}
-	var rs []http2.Setting
-	var ps []http2.Setting
+	var ss []http2.Setting
 	f.ForeachSetting(func(s http2.Setting) error {
-		if t.isRestrictive(s) {
-			rs = append(rs, s)
-		} else {
-			ps = append(ps, s)
-		}
+		ss = append(ss, s)
 		return nil
 	})
-	t.applySettings(rs)
-	t.controlBuf.put(&settingsAck{})
-	t.applySettings(ps)
-}
-
-func (t *http2Server) isRestrictive(s http2.Setting) bool {
-	switch s.ID {
-	case http2.SettingInitialWindowSize:
-		// Note: we don't acquire a lock here to read streamSendQuota
-		// because the same goroutine updates it later.
-		return s.Val < t.streamSendQuota
-	}
-	return false
+	t.controlBuf.put(&settings{ack: true, ss: ss})
 }
 
 func (t *http2Server) applySettings(ss []http2.Setting) {
@@ -684,7 +656,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
 
 	if t.pingStrikes > maxPingStrikes {
 		// Send goaway and close the connection.
-		errorf("transport: Got too many pings from the client, closing the connection.")
+		errorf("transport: Got to too many pings from the client, closing the connection.")
 		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
 	}
 }
@@ -726,7 +698,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	}
 	md = s.header
 	s.mu.Unlock()
-	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
@@ -787,7 +759,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 		headersSent = true
 	}
 
-	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
 	if !headersSent {
@@ -831,7 +803,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 
 // Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
 // is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) {
 	select {
 	case <-s.ctx.Done():
 		return ContextErr(s.ctx.Err())
@@ -860,69 +832,66 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	}
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
-	var (
-		streamQuota    int
-		streamQuotaVer uint32
-		err            error
-	)
 	for _, r := range [][]byte{hdr, data} {
 		for len(r) > 0 {
 			size := http2MaxFrameLen
-			if size > len(r) {
-				size = len(r)
-			}
-			if streamQuota == 0 { // Used up all the locally cached stream quota.
-				// Get all the stream quota there is.
-				streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
-				if err != nil {
-					return err
-				}
-			}
-			if size > streamQuota {
-				size = streamQuota
+			// Wait until the stream has some quota to send the data.
+			quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
+			sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan)
+			if err != nil {
+				return err
 			}
-			// Get size worth quota from transport.
-			tq, _, err := t.sendQuotaPool.get(size, s.waiters)
+			// Wait until the transport has some quota to send the data.
+			tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
 			if err != nil {
 				return err
 			}
+			if sq < size {
+				size = sq
+			}
 			if tq < size {
 				size = tq
 			}
-			ltq, _, err := t.localSendQuota.get(size, s.waiters)
+			if size > len(r) {
+				size = len(r)
+			}
+			p := r[:size]
+			ps := len(p)
+			if ps < tq {
+				// Overbooked transport quota. Return it back.
+				t.sendQuotaPool.add(tq - ps)
+			}
+			// Acquire local send quota to be able to write to the controlBuf.
+			ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
 			if err != nil {
+				if _, ok := err.(ConnectionError); !ok {
+					t.sendQuotaPool.add(ps)
+				}
 				return err
 			}
-			// even if ltq is smaller than size we don't adjust size since,
-			// ltq is only a soft limit.
-			streamQuota -= size
-			p := r[:size]
+			s.localSendQuota.add(ltq - ps) // It's ok if we make this negative.
 			// Reset ping strikes when sending data since this might cause
 			// the peer to send ping.
 			atomic.StoreUint32(&t.resetPingStrikes, 1)
 			success := func() {
-				ltq := ltq
 				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
-					t.localSendQuota.add(ltq)
+					s.localSendQuota.add(ps)
 				}})
-				r = r[size:]
+				if ps < sq {
+					// Overbooked stream quota. Return it back.
+					s.sendQuotaPool.lockedAdd(sq - ps)
+				}
+				r = r[ps:]
 			}
-			failure := func() { // The stream quota version must have changed.
-				// Our streamQuota cache is invalidated now, so give it back.
-				s.sendQuotaPool.lockedAdd(streamQuota + size)
+			failure := func() {
+				s.sendQuotaPool.lockedAdd(sq)
 			}
-			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
-				// Couldn't send this chunk out.
-				t.sendQuotaPool.add(size)
-				t.localSendQuota.add(ltq)
-				streamQuota = 0
+			if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
+				t.sendQuotaPool.add(ps)
+				s.localSendQuota.add(ps)
 			}
 		}
 	}
-	if streamQuota > 0 {
-		// ADd the left over quota back to stream.
-		s.sendQuotaPool.add(streamQuota)
-	}
 	return nil
 }
 
@@ -1058,9 +1027,11 @@ func (t *http2Server) itemHandler(i item) error {
 	case *windowUpdate:
 		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
 	case *settings:
+		if i.ack {
+			t.applySettings(i.ss)
+			return t.framer.fr.WriteSettingsAck()
+		}
 		return t.framer.fr.WriteSettings(i.ss...)
-	case *settingsAck:
-		return t.framer.fr.WriteSettingsAck()
 	case *resetStream:
 		return t.framer.fr.WriteRSTStream(i.streamID, i.code)
 	case *goAway:
@@ -1074,9 +1045,6 @@ func (t *http2Server) itemHandler(i item) error {
 		if !i.headsUp {
 			// Stop accepting more streams now.
 			t.state = draining
-			if len(t.activeStreams) == 0 {
-				i.closeConn = true
-			}
 			t.mu.Unlock()
 			if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
 				return err
@@ -1084,7 +1052,8 @@ func (t *http2Server) itemHandler(i item) error {
 			if i.closeConn {
 				// Abruptly close the connection following the GoAway (via
 				// loopywriter).  But flush out what's inside the buffer first.
-				t.controlBuf.put(&flushIO{closeTr: true})
+				t.framer.writer.Flush()
+				return fmt.Errorf("transport: Connection closing")
 			}
 			return nil
 		}
@@ -1114,13 +1083,7 @@ func (t *http2Server) itemHandler(i item) error {
 		}()
 		return nil
 	case *flushIO:
-		if err := t.framer.writer.Flush(); err != nil {
-			return err
-		}
-		if i.closeTr {
-			return ErrConnClosing
-		}
-		return nil
+		return t.framer.writer.Flush()
 	case *ping:
 		if !i.ack {
 			t.bdpEst.timesnap(i.data)
@@ -1168,7 +1131,7 @@ func (t *http2Server) closeStream(s *Stream) {
 		t.idle = time.Now()
 	}
 	if t.state == draining && len(t.activeStreams) == 0 {
-		defer t.controlBuf.put(&flushIO{closeTr: true})
+		defer t.Close()
 	}
 	t.mu.Unlock()
 	// In case stream sending and receiving are invoked in separate
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
index 2e7bcaea..bde8fa5c 100644
--- a/vendor/google.golang.org/grpc/transport/transport.go
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -17,15 +17,16 @@
  */
 
 // Package transport defines and implements message oriented communication
-// channel to complete various transactions (e.g., an RPC).  It is meant for
-// grpc-internal usage and is not intended to be imported directly by users.
+// channel to complete various transactions (e.g., an RPC).
 package transport // import "google.golang.org/grpc/transport"
 
 import (
+	stdctx "context"
 	"fmt"
 	"io"
 	"net"
 	"sync"
+	"time"
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
@@ -133,7 +134,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	case <-r.ctx.Done():
 		return 0, ContextErr(r.ctx.Err())
 	case <-r.goAway:
-		return 0, errStreamDrain
+		return 0, ErrStreamDrain
 	case m := <-r.recv.get():
 		r.recv.load()
 		if m.err != nil {
@@ -210,67 +211,66 @@ const (
 
 // Stream represents an RPC in the transport layer.
 type Stream struct {
-	id           uint32
-	st           ServerTransport    // nil for client side Stream
-	ctx          context.Context    // the associated context of the stream
-	cancel       context.CancelFunc // always nil for client side Stream
-	done         chan struct{}      // closed when the final status arrives
-	goAway       chan struct{}      // closed when a GOAWAY control message is received
-	method       string             // the associated RPC method of the stream
+	id uint32
+	// nil for client side Stream.
+	st ServerTransport
+	// ctx is the associated context of the stream.
+	ctx context.Context
+	// cancel is always nil for client side Stream.
+	cancel context.CancelFunc
+	// done is closed when the final status arrives.
+	done chan struct{}
+	// goAway is closed when the server sent a GoAway signal before this stream was initiated.
+	goAway chan struct{}
+	// method records the associated RPC method of the stream.
+	method       string
 	recvCompress string
 	sendCompress string
 	buf          *recvBuffer
 	trReader     io.Reader
 	fc           *inFlow
 	recvQuota    uint32
-	waiters      waiters
+
+	// TODO: Remove this unused variable.
+	// The accumulated inbound quota pending for window update.
+	updateQuota uint32
 
 	// Callback to state application's intentions to read data. This
-	// is used to adjust flow control, if needed.
+	// is used to adjust flow control, if need be.
 	requestRead func(int)
 
-	sendQuotaPool *quotaPool
-	headerChan    chan struct{} // closed to indicate the end of header metadata.
-	headerDone    bool          // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-	header        metadata.MD   // the received header metadata.
-	trailer       metadata.MD   // the key-value map of trailer metadata.
-
-	mu       sync.RWMutex // guard the following
-	headerOk bool         // becomes true from the first header is about to send
+	sendQuotaPool  *quotaPool
+	localSendQuota *quotaPool
+	// headerChan is closed to indicate the end of header metadata reception.
+	headerChan chan struct{}
+	// header caches the received header metadata.
+	header metadata.MD
+	// The key-value map of trailer metadata.
+	trailer metadata.MD
+
+	mu sync.RWMutex // guard the following
+	// headerOk becomes true when the first header is about to be sent.
+	headerOk bool
 	state    streamState
-
-	status *status.Status // the status error received from the server
-
-	rstStream bool          // indicates whether a RST_STREAM frame needs to be sent
-	rstError  http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
-
-	bytesReceived bool // indicates whether any bytes have been received on this stream
-	unprocessed   bool // set if the server sends a refused stream or GOAWAY including this stream
-}
-
-func (s *Stream) waitOnHeader() error {
-	if s.headerChan == nil {
-		// On the server headerChan is always nil since a stream originates
-		// only after having received headers.
-		return nil
-	}
-	wc := s.waiters
-	select {
-	case <-wc.ctx.Done():
-		return ContextErr(wc.ctx.Err())
-	case <-wc.goAway:
-		return errStreamDrain
-	case <-s.headerChan:
-		return nil
-	}
+	// true iff headerChan is closed. Used to avoid closing headerChan
+	// multiple times.
+	headerDone bool
+	// the status error received from the server.
+	status *status.Status
+	// rstStream indicates whether a RST_STREAM frame needs to be sent
+	// to the server to signify that this stream is closing.
+	rstStream bool
+	// rstError is the error that needs to be sent along with the RST_STREAM frame.
+	rstError http2.ErrCode
+	// bytesSent and bytesReceived indicate whether any bytes have been sent or
+	// received on this stream.
+	bytesSent     bool
+	bytesReceived bool
 }
 
 // RecvCompress returns the compression algorithm applied to the inbound
 // message. It is empty string if there is no compression applied.
 func (s *Stream) RecvCompress() string {
-	if err := s.waitOnHeader(); err != nil {
-		return ""
-	}
 	return s.recvCompress
 }
 
@@ -295,7 +295,15 @@ func (s *Stream) GoAway() <-chan struct{} {
 // is available. It blocks until i) the metadata is ready or ii) there is no
 // header metadata or iii) the stream is canceled/expired.
 func (s *Stream) Header() (metadata.MD, error) {
-	err := s.waitOnHeader()
+	var err error
+	select {
+	case <-s.ctx.Done():
+		err = ContextErr(s.ctx.Err())
+	case <-s.goAway:
+		err = ErrStreamDrain
+	case <-s.headerChan:
+		return s.header.Copy(), nil
+	}
 	// Even if the stream is closed, header is returned if available.
 	select {
 	case <-s.headerChan:
@@ -409,19 +417,18 @@ func (s *Stream) finish(st *status.Status) {
 	close(s.done)
 }
 
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
+// BytesSent indicates whether any bytes have been sent on this stream.
+func (s *Stream) BytesSent() bool {
 	s.mu.Lock()
-	br := s.bytesReceived
+	bs := s.bytesSent
 	s.mu.Unlock()
-	return br
+	return bs
 }
 
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
 	s.mu.Lock()
-	br := s.unprocessed
+	br := s.bytesReceived
 	s.mu.Unlock()
 	return br
 }
@@ -507,15 +514,14 @@ type ConnectOptions struct {
 
 // TargetInfo contains the information of the target such as network address and metadata.
 type TargetInfo struct {
-	Addr      string
-	Metadata  interface{}
-	Authority string
+	Addr     string
+	Metadata interface{}
 }
 
 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
-	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
+func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
+	return newHTTP2Client(ctx, target, opts, timeout)
 }
 
 // Options provides additional hints and information for message
@@ -539,6 +545,10 @@ type CallHdr struct {
 	// Method specifies the operation to perform.
 	Method string
 
+	// RecvCompress specifies the compression algorithm applied on
+	// inbound messages.
+	RecvCompress string
+
 	// SendCompress specifies the compression algorithm applied on
 	// outbound message.
 	SendCompress string
@@ -676,13 +686,9 @@ func (e ConnectionError) Origin() error {
 var (
 	// ErrConnClosing indicates that the transport is closing.
 	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
-	// errStreamDrain indicates that the stream is rejected by the server because
+	// ErrStreamDrain indicates that the stream is rejected by the server because
 	// the server stops accepting new RPCs.
-	// TODO: delete this error; it is no longer necessary.
-	errStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
-	// StatusGoAway indicates that the server sent a GOAWAY that included this
-	// stream's ID in unprocessed RPCs.
-	statusGoAway = status.New(codes.Unavailable, "the server stopped accepting new RPCs")
+	ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
 )
 
 // TODO: See if we can replace StreamError with status package errors.
@@ -697,27 +703,44 @@ func (e StreamError) Error() string {
 	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 }
 
-// waiters are passed to quotaPool get methods to
-// wait on in addition to waiting on quota.
-type waiters struct {
-	ctx    context.Context
-	tctx   context.Context
-	done   chan struct{}
-	goAway chan struct{}
+// wait blocks until it can receive from the proceed channel, or until one of the
+// provided contexts or channels signals completion, in which case it returns an error.
+func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ContextErr(ctx.Err())
+	case <-done:
+		return 0, io.EOF
+	case <-goAway:
+		return 0, ErrStreamDrain
+	case <-tctx.Done():
+		return 0, ErrConnClosing
+	case i := <-proceed:
+		return i, nil
+	}
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded, stdctx.DeadlineExceeded:
+		return streamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled, stdctx.Canceled:
+		return streamErrorf(codes.Canceled, "%v", err)
+	}
+	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
 }
 
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8
 
 const (
-	// GoAwayInvalid indicates that no GoAway frame is received.
-	GoAwayInvalid GoAwayReason = 0
-	// GoAwayNoReason is the default value when GoAway frame is received.
-	GoAwayNoReason GoAwayReason = 1
-	// GoAwayTooManyPings indicates that a GoAway frame with
-	// ErrCodeEnhanceYourCalm was received and that the debug data said
-	// "too_many_pings".
-	GoAwayTooManyPings GoAwayReason = 2
+	// Invalid indicates that no GoAway frame is received.
+	Invalid GoAwayReason = 0
+	// NoReason is the default value when GoAway frame is received.
+	NoReason GoAwayReason = 1
+	// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
+	// was received and that the debug data said "too_many_pings".
+	TooManyPings GoAwayReason = 2
 )
 
 // loopyWriter is run in a separate goroutine. It is the single code path that will
@@ -728,7 +751,6 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
 		case i := <-cbuf.get():
 			cbuf.load()
 			if err := handler(i); err != nil {
-				errorf("transport: Error while handling item. Err: %v", err)
 				return
 			}
 		case <-ctx.Done():
@@ -740,14 +762,12 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
 			case i := <-cbuf.get():
 				cbuf.load()
 				if err := handler(i); err != nil {
-					errorf("transport: Error while handling item. Err: %v", err)
 					return
 				}
 			case <-ctx.Done():
 				return
 			default:
 				if err := handler(&flushIO{}); err != nil {
-					errorf("transport: Error while flushing. Err: %v", err)
 					return
 				}
 				break hasData
diff --git a/vendor/gopkg.in/go-playground/validator.v9/LICENSE b/vendor/gopkg.in/go-playground/validator.v9/LICENSE
deleted file mode 100644
index 6a2ae9aa..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Dean Karn
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/gopkg.in/go-playground/validator.v9/baked_in.go b/vendor/gopkg.in/go-playground/validator.v9/baked_in.go
deleted file mode 100644
index 231b78ee..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/baked_in.go
+++ /dev/null
@@ -1,1579 +0,0 @@
-package validator
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-	"unicode/utf8"
-)
-
-// Func accepts a FieldLevel interface for all validation needs. The return
-// value should be true when validation succeeds.
-type Func func(fl FieldLevel) bool
-
-// FuncCtx accepts a context.Context and FieldLevel interface for all
-// validation needs. The return value should be true when validation succeeds.
-type FuncCtx func(ctx context.Context, fl FieldLevel) bool
-
-// wrapFunc wraps noramal Func makes it compatible with FuncCtx
-func wrapFunc(fn Func) FuncCtx {
-	if fn == nil {
-		return nil // be sure not to wrap a bad function.
-	}
-	return func(ctx context.Context, fl FieldLevel) bool {
-		return fn(fl)
-	}
-}
-
-var (
-	restrictedTags = map[string]struct{}{
-		diveTag:           {},
-		keysTag:           {},
-		endKeysTag:        {},
-		structOnlyTag:     {},
-		omitempty:         {},
-		skipValidationTag: {},
-		utf8HexComma:      {},
-		utf8Pipe:          {},
-		noStructLevelTag:  {},
-		requiredTag:       {},
-		isdefault:         {},
-	}
-
-	// BakedInAliasValidators is a default mapping of a single validation tag that
-	// defines a common or complex set of validation(s) to simplify
-	// adding validation to structs.
-	bakedInAliases = map[string]string{
-		"iscolor": "hexcolor|rgb|rgba|hsl|hsla",
-	}
-
-	// BakedInValidators is the default map of ValidationFunc
-	// you can add, remove or even replace items to suite your needs,
-	// or even disregard and use your own map if so desired.
-	bakedInValidators = map[string]Func{
-		"required":         hasValue,
-		"isdefault":        isDefault,
-		"len":              hasLengthOf,
-		"min":              hasMinOf,
-		"max":              hasMaxOf,
-		"eq":               isEq,
-		"ne":               isNe,
-		"lt":               isLt,
-		"lte":              isLte,
-		"gt":               isGt,
-		"gte":              isGte,
-		"eqfield":          isEqField,
-		"eqcsfield":        isEqCrossStructField,
-		"necsfield":        isNeCrossStructField,
-		"gtcsfield":        isGtCrossStructField,
-		"gtecsfield":       isGteCrossStructField,
-		"ltcsfield":        isLtCrossStructField,
-		"ltecsfield":       isLteCrossStructField,
-		"nefield":          isNeField,
-		"gtefield":         isGteField,
-		"gtfield":          isGtField,
-		"ltefield":         isLteField,
-		"ltfield":          isLtField,
-		"alpha":            isAlpha,
-		"alphanum":         isAlphanum,
-		"alphaunicode":     isAlphaUnicode,
-		"alphanumunicode":  isAlphanumUnicode,
-		"numeric":          isNumeric,
-		"number":           isNumber,
-		"hexadecimal":      isHexadecimal,
-		"hexcolor":         isHEXColor,
-		"rgb":              isRGB,
-		"rgba":             isRGBA,
-		"hsl":              isHSL,
-		"hsla":             isHSLA,
-		"email":            isEmail,
-		"url":              isURL,
-		"uri":              isURI,
-		"base64":           isBase64,
-		"contains":         contains,
-		"containsany":      containsAny,
-		"containsrune":     containsRune,
-		"excludes":         excludes,
-		"excludesall":      excludesAll,
-		"excludesrune":     excludesRune,
-		"isbn":             isISBN,
-		"isbn10":           isISBN10,
-		"isbn13":           isISBN13,
-		"uuid":             isUUID,
-		"uuid3":            isUUID3,
-		"uuid4":            isUUID4,
-		"uuid5":            isUUID5,
-		"ascii":            isASCII,
-		"printascii":       isPrintableASCII,
-		"multibyte":        hasMultiByteCharacter,
-		"datauri":          isDataURI,
-		"latitude":         isLatitude,
-		"longitude":        isLongitude,
-		"ssn":              isSSN,
-		"ipv4":             isIPv4,
-		"ipv6":             isIPv6,
-		"ip":               isIP,
-		"cidrv4":           isCIDRv4,
-		"cidrv6":           isCIDRv6,
-		"cidr":             isCIDR,
-		"tcp4_addr":        isTCP4AddrResolvable,
-		"tcp6_addr":        isTCP6AddrResolvable,
-		"tcp_addr":         isTCPAddrResolvable,
-		"udp4_addr":        isUDP4AddrResolvable,
-		"udp6_addr":        isUDP6AddrResolvable,
-		"udp_addr":         isUDPAddrResolvable,
-		"ip4_addr":         isIP4AddrResolvable,
-		"ip6_addr":         isIP6AddrResolvable,
-		"ip_addr":          isIPAddrResolvable,
-		"unix_addr":        isUnixAddrResolvable,
-		"mac":              isMAC,
-		"hostname":         isHostnameRFC952,  // RFC 952
-		"hostname_rfc1123": isHostnameRFC1123, // RFC 1123
-		"fqdn":             isFQDN,
-		"unique":           isUnique,
-		"oneof":            isOneOf,
-	}
-)
-
-var oneofValsCache = map[string][]string{}
-var oneofValsCacheRWLock = sync.RWMutex{}
-
-func parseOneOfParam2(s string) []string {
-	oneofValsCacheRWLock.RLock()
-	vals, ok := oneofValsCache[s]
-	oneofValsCacheRWLock.RUnlock()
-	if !ok {
-		oneofValsCacheRWLock.Lock()
-		vals = strings.Fields(s)
-		oneofValsCache[s] = vals
-		oneofValsCacheRWLock.Unlock()
-	}
-	return vals
-}
-
-func isOneOf(fl FieldLevel) bool {
-	vals := parseOneOfParam2(fl.Param())
-
-	field := fl.Field()
-
-	var v string
-	switch field.Kind() {
-	case reflect.String:
-		v = field.String()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		v = strconv.FormatInt(field.Int(), 10)
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		v = strconv.FormatUint(field.Uint(), 10)
-	default:
-		panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-	}
-	for i := 0; i < len(vals); i++ {
-		if vals[i] == v {
-			return true
-		}
-	}
-	return false
-}
-
-// isUnique is the validation function for validating if each array|slice element is unique
-func isUnique(fl FieldLevel) bool {
-
-	field := fl.Field()
-	v := reflect.ValueOf(struct{}{})
-
-	switch field.Kind() {
-	case reflect.Slice, reflect.Array:
-		m := reflect.MakeMap(reflect.MapOf(fl.Field().Type().Elem(), v.Type()))
-
-		for i := 0; i < field.Len(); i++ {
-			m.SetMapIndex(field.Index(i), v)
-		}
-		return field.Len() == m.Len()
-	default:
-		panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-	}
-}
-
-// IsMAC is the validation function for validating if the field's value is a valid MAC address.
-func isMAC(fl FieldLevel) bool {
-
-	_, err := net.ParseMAC(fl.Field().String())
-
-	return err == nil
-}
-
-// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
-func isCIDRv4(fl FieldLevel) bool {
-
-	ip, _, err := net.ParseCIDR(fl.Field().String())
-
-	return err == nil && ip.To4() != nil
-}
-
-// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
-func isCIDRv6(fl FieldLevel) bool {
-
-	ip, _, err := net.ParseCIDR(fl.Field().String())
-
-	return err == nil && ip.To4() == nil
-}
-
-// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
-func isCIDR(fl FieldLevel) bool {
-
-	_, _, err := net.ParseCIDR(fl.Field().String())
-
-	return err == nil
-}
-
-// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
-func isIPv4(fl FieldLevel) bool {
-
-	ip := net.ParseIP(fl.Field().String())
-
-	return ip != nil && ip.To4() != nil
-}
-
-// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
-func isIPv6(fl FieldLevel) bool {
-
-	ip := net.ParseIP(fl.Field().String())
-
-	return ip != nil && ip.To4() == nil
-}
-
-// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
-func isIP(fl FieldLevel) bool {
-
-	ip := net.ParseIP(fl.Field().String())
-
-	return ip != nil
-}
-
-// IsSSN is the validation function for validating if the field's value is a valid SSN.
-func isSSN(fl FieldLevel) bool {
-
-	field := fl.Field()
-
-	if field.Len() != 11 {
-		return false
-	}
-
-	return sSNRegex.MatchString(field.String())
-}
-
-// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
-func isLongitude(fl FieldLevel) bool {
-	return longitudeRegex.MatchString(fl.Field().String())
-}
-
-// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
-func isLatitude(fl FieldLevel) bool {
-	return latitudeRegex.MatchString(fl.Field().String())
-}
-
-// IsDataURI is the validation function for validating if the field's value is a valid data URI.
-func isDataURI(fl FieldLevel) bool {
-
-	uri := strings.SplitN(fl.Field().String(), ",", 2)
-
-	if len(uri) != 2 {
-		return false
-	}
-
-	if !dataURIRegex.MatchString(uri[0]) {
-		return false
-	}
-
-	return base64Regex.MatchString(uri[1])
-}
-
-// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
-func hasMultiByteCharacter(fl FieldLevel) bool {
-
-	field := fl.Field()
-
-	if field.Len() == 0 {
-		return true
-	}
-
-	return multibyteRegex.MatchString(field.String())
-}
-
-// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
-func isPrintableASCII(fl FieldLevel) bool {
-	return printableASCIIRegex.MatchString(fl.Field().String())
-}
-
-// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
-func isASCII(fl FieldLevel) bool {
-	return aSCIIRegex.MatchString(fl.Field().String())
-}
-
-// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
-func isUUID5(fl FieldLevel) bool {
-	return uUID5Regex.MatchString(fl.Field().String())
-}
-
-// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
-func isUUID4(fl FieldLevel) bool {
-	return uUID4Regex.MatchString(fl.Field().String())
-}
-
-// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
-func isUUID3(fl FieldLevel) bool {
-	return uUID3Regex.MatchString(fl.Field().String())
-}
-
-// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
-func isUUID(fl FieldLevel) bool {
-	return uUIDRegex.MatchString(fl.Field().String())
-}
-
-// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
-func isISBN(fl FieldLevel) bool {
-	return isISBN10(fl) || isISBN13(fl)
-}
-
-// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
-func isISBN13(fl FieldLevel) bool {
-
-	s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)
-
-	if !iSBN13Regex.MatchString(s) {
-		return false
-	}
-
-	var checksum int32
-	var i int32
-
-	factor := []int32{1, 3}
-
-	for i = 0; i < 12; i++ {
-		checksum += factor[i%2] * int32(s[i]-'0')
-	}
-
-	return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
-}
-
-// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
-func isISBN10(fl FieldLevel) bool {
-
-	s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)
-
-	if !iSBN10Regex.MatchString(s) {
-		return false
-	}
-
-	var checksum int32
-	var i int32
-
-	for i = 0; i < 9; i++ {
-		checksum += (i + 1) * int32(s[i]-'0')
-	}
-
-	if s[9] == 'X' {
-		checksum += 10 * 10
-	} else {
-		checksum += 10 * int32(s[9]-'0')
-	}
-
-	return checksum%11 == 0
-}
-
-// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
-func excludesRune(fl FieldLevel) bool {
-	return !containsRune(fl)
-}
-
-// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
-func excludesAll(fl FieldLevel) bool {
-	return !containsAny(fl)
-}
-
-// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
-func excludes(fl FieldLevel) bool {
-	return !contains(fl)
-}
-
-// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
-func containsRune(fl FieldLevel) bool {
-
-	r, _ := utf8.DecodeRuneInString(fl.Param())
-
-	return strings.ContainsRune(fl.Field().String(), r)
-}
-
-// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
-func containsAny(fl FieldLevel) bool {
-	return strings.ContainsAny(fl.Field().String(), fl.Param())
-}
-
-// Contains is the validation function for validating that the field's value contains the text specified within the param.
-func contains(fl FieldLevel) bool {
-	return strings.Contains(fl.Field().String(), fl.Param())
-}
-
-// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
-func isNeField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-
-	if !ok || currentKind != kind {
-		return true
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() != currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() != currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() != currentField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) != int64(currentField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return true
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return !fieldTime.Equal(t)
-		}
-
-	}
-
-	// default reflect.String:
-	return field.String() != currentField.String()
-}
-
-// IsNe is the validation function for validating that the field's value does not equal the provided param value.
-func isNe(fl FieldLevel) bool {
-	return !isEq(fl)
-}
-
-// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
-func isLteCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, topKind, ok := fl.GetStructFieldOK()
-	if !ok || topKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() <= topField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() <= topField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() <= topField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) <= int64(topField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			fieldTime := field.Interface().(time.Time)
-			topTime := topField.Interface().(time.Time)
-
-			return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
-		}
-	}
-
-	// default reflect.String:
-	return field.String() <= topField.String()
-}
-
-// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
-// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
-func isLtCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, topKind, ok := fl.GetStructFieldOK()
-	if !ok || topKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() < topField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() < topField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() < topField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) < int64(topField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			fieldTime := field.Interface().(time.Time)
-			topTime := topField.Interface().(time.Time)
-
-			return fieldTime.Before(topTime)
-		}
-	}
-
-	// default reflect.String:
-	return field.String() < topField.String()
-}
-
-// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
-func isGteCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, topKind, ok := fl.GetStructFieldOK()
-	if !ok || topKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() >= topField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() >= topField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() >= topField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) >= int64(topField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			fieldTime := field.Interface().(time.Time)
-			topTime := topField.Interface().(time.Time)
-
-			return fieldTime.After(topTime) || fieldTime.Equal(topTime)
-		}
-	}
-
-	// default reflect.String:
-	return field.String() >= topField.String()
-}
-
-// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
-func isGtCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, topKind, ok := fl.GetStructFieldOK()
-	if !ok || topKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() > topField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() > topField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() > topField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) > int64(topField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			fieldTime := field.Interface().(time.Time)
-			topTime := topField.Interface().(time.Time)
-
-			return fieldTime.After(topTime)
-		}
-	}
-
-	// default reflect.String:
-	return field.String() > topField.String()
-}
-
-// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
-func isNeCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return true
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return topField.Int() != field.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return topField.Uint() != field.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return topField.Float() != field.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(topField.Len()) != int64(field.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return true
-		}
-
-		if fieldType == timeType {
-
-			t := field.Interface().(time.Time)
-			fieldTime := topField.Interface().(time.Time)
-
-			return !fieldTime.Equal(t)
-		}
-	}
-
-	// default reflect.String:
-	return topField.String() != field.String()
-}
-
-// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
-func isEqCrossStructField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	topField, topKind, ok := fl.GetStructFieldOK()
-	if !ok || topKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return topField.Int() == field.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return topField.Uint() == field.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return topField.Float() == field.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(topField.Len()) == int64(field.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != topField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := field.Interface().(time.Time)
-			fieldTime := topField.Interface().(time.Time)
-
-			return fieldTime.Equal(t)
-		}
-	}
-
-	// default reflect.String:
-	return topField.String() == field.String()
-}
-
-// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
-func isEqField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return field.Int() == currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return field.Uint() == currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-		return field.Float() == currentField.Float()
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		return int64(field.Len()) == int64(currentField.Len())
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return fieldTime.Equal(t)
-		}
-
-	}
-
-	// default reflect.String:
-	return field.String() == currentField.String()
-}
-
-// IsEq is the validation function for validating if the current field's value is equal to the param's value.
-func isEq(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		return field.String() == param
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) == p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() == p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() == p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() == p
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
-func isBase64(fl FieldLevel) bool {
-	return base64Regex.MatchString(fl.Field().String())
-}
-
-// IsURI is the validation function for validating if the current field's value is a valid URI.
-func isURI(fl FieldLevel) bool {
-
-	field := fl.Field()
-
-	switch field.Kind() {
-
-	case reflect.String:
-
-		s := field.String()
-
-		// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
-		// emulate browser and strip the '#' suffix prior to validation. see issue-#237
-		if i := strings.Index(s, "#"); i > -1 {
-			s = s[:i]
-		}
-
-		if len(s) == 0 {
-			return false
-		}
-
-		_, err := url.ParseRequestURI(s)
-
-		return err == nil
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// IsURL is the validation function for validating if the current field's value is a valid URL.
-func isURL(fl FieldLevel) bool {
-
-	field := fl.Field()
-
-	switch field.Kind() {
-
-	case reflect.String:
-
-		s := field.String()
-
-		// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
-		// emulate browser and strip the '#' suffix prior to validation. see issue-#237
-		if i := strings.Index(s, "#"); i > -1 {
-			s = s[:i]
-		}
-
-		if len(s) == 0 {
-			return false
-		}
-
-		u, err := url.ParseRequestURI(s)
-
-		if err != nil || u.Scheme == "" {
-			return false
-		}
-
-		return true
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// IsEmail is the validation function for validating if the current field's value is a valid email address.
-func isEmail(fl FieldLevel) bool {
-	return emailRegex.MatchString(fl.Field().String())
-}
-
-// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
-func isHSLA(fl FieldLevel) bool {
-	return hslaRegex.MatchString(fl.Field().String())
-}
-
-// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
-func isHSL(fl FieldLevel) bool {
-	return hslRegex.MatchString(fl.Field().String())
-}
-
-// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
-func isRGBA(fl FieldLevel) bool {
-	return rgbaRegex.MatchString(fl.Field().String())
-}
-
-// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
-func isRGB(fl FieldLevel) bool {
-	return rgbRegex.MatchString(fl.Field().String())
-}
-
-// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
-func isHEXColor(fl FieldLevel) bool {
-	return hexcolorRegex.MatchString(fl.Field().String())
-}
-
-// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
-func isHexadecimal(fl FieldLevel) bool {
-	return hexadecimalRegex.MatchString(fl.Field().String())
-}
-
-// IsNumber is the validation function for validating if the current field's value is a valid number.
-func isNumber(fl FieldLevel) bool {
-	return numberRegex.MatchString(fl.Field().String())
-}
-
-// IsNumeric is the validation function for validating if the current field's value is a valid numeric value.
-func isNumeric(fl FieldLevel) bool {
-	return numericRegex.MatchString(fl.Field().String())
-}
-
-// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
-func isAlphanum(fl FieldLevel) bool {
-	return alphaNumericRegex.MatchString(fl.Field().String())
-}
-
-// IsAlpha is the validation function for validating if the current field's value is a valid alpha value.
-func isAlpha(fl FieldLevel) bool {
-	return alphaRegex.MatchString(fl.Field().String())
-}
-
-// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
-func isAlphanumUnicode(fl FieldLevel) bool {
-	return alphaUnicodeNumericRegex.MatchString(fl.Field().String())
-}
-
-// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
-func isAlphaUnicode(fl FieldLevel) bool {
-	return alphaUnicodeRegex.MatchString(fl.Field().String())
-}
-
-// isDefault is the opposite of required aka hasValue
-func isDefault(fl FieldLevel) bool {
-	return !hasValue(fl)
-}
-
-// HasValue is the validation function for validating if the current field's value is not the default static value.
-func hasValue(fl FieldLevel) bool {
-
-	field := fl.Field()
-
-	switch field.Kind() {
-	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
-		return !field.IsNil()
-	default:
-
-		if fl.(*validate).fldIsPointer && field.Interface() != nil {
-			return true
-		}
-
-		return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
-	}
-}
-
-// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
-func isGteField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
-		return field.Int() >= currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
-		return field.Uint() >= currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-
-		return field.Float() >= currentField.Float()
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return fieldTime.After(t) || fieldTime.Equal(t)
-		}
-	}
-
-	// default reflect.String
-	return len(field.String()) >= len(currentField.String())
-}
-
-// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
-func isGtField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
-		return field.Int() > currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
-		return field.Uint() > currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-
-		return field.Float() > currentField.Float()
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return fieldTime.After(t)
-		}
-	}
-
-	// default reflect.String
-	return len(field.String()) > len(currentField.String())
-}
-
-// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
-func isGte(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		p := asInt(param)
-
-		return int64(utf8.RuneCountInString(field.String())) >= p
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) >= p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() >= p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() >= p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() >= p
-
-	case reflect.Struct:
-
-		if field.Type() == timeType {
-
-			now := time.Now().UTC()
-			t := field.Interface().(time.Time)
-
-			return t.After(now) || t.Equal(now)
-		}
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// IsGt is the validation function for validating if the current field's value is greater than the param's value.
-func isGt(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		p := asInt(param)
-
-		return int64(utf8.RuneCountInString(field.String())) > p
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) > p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() > p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() > p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() > p
-	case reflect.Struct:
-
-		if field.Type() == timeType {
-
-			return field.Interface().(time.Time).After(time.Now().UTC())
-		}
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
-func hasLengthOf(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		p := asInt(param)
-
-		return int64(utf8.RuneCountInString(field.String())) == p
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) == p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() == p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() == p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() == p
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
-func hasMinOf(fl FieldLevel) bool {
-	return isGte(fl)
-}
-
-// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
-func isLteField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
-		return field.Int() <= currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
-		return field.Uint() <= currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-
-		return field.Float() <= currentField.Float()
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return fieldTime.Before(t) || fieldTime.Equal(t)
-		}
-	}
-
-	// default reflect.String
-	return len(field.String()) <= len(currentField.String())
-}
-
-// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
-func isLtField(fl FieldLevel) bool {
-
-	field := fl.Field()
-	kind := field.Kind()
-
-	currentField, currentKind, ok := fl.GetStructFieldOK()
-	if !ok || currentKind != kind {
-		return false
-	}
-
-	switch kind {
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
-		return field.Int() < currentField.Int()
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
-		return field.Uint() < currentField.Uint()
-
-	case reflect.Float32, reflect.Float64:
-
-		return field.Float() < currentField.Float()
-
-	case reflect.Struct:
-
-		fieldType := field.Type()
-
-		// Not Same underlying type i.e. struct and time
-		if fieldType != currentField.Type() {
-			return false
-		}
-
-		if fieldType == timeType {
-
-			t := currentField.Interface().(time.Time)
-			fieldTime := field.Interface().(time.Time)
-
-			return fieldTime.Before(t)
-		}
-	}
-
-	// default reflect.String
-	return len(field.String()) < len(currentField.String())
-}
-
-// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
-func isLte(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		p := asInt(param)
-
-		return int64(utf8.RuneCountInString(field.String())) <= p
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) <= p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() <= p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() <= p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() <= p
-
-	case reflect.Struct:
-
-		if field.Type() == timeType {
-
-			now := time.Now().UTC()
-			t := field.Interface().(time.Time)
-
-			return t.Before(now) || t.Equal(now)
-		}
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// IsLt is the validation function for validating if the current field's value is less than the param's value.
-func isLt(fl FieldLevel) bool {
-
-	field := fl.Field()
-	param := fl.Param()
-
-	switch field.Kind() {
-
-	case reflect.String:
-		p := asInt(param)
-
-		return int64(utf8.RuneCountInString(field.String())) < p
-
-	case reflect.Slice, reflect.Map, reflect.Array:
-		p := asInt(param)
-
-		return int64(field.Len()) < p
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p := asInt(param)
-
-		return field.Int() < p
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p := asUint(param)
-
-		return field.Uint() < p
-
-	case reflect.Float32, reflect.Float64:
-		p := asFloat(param)
-
-		return field.Float() < p
-
-	case reflect.Struct:
-
-		if field.Type() == timeType {
-
-			return field.Interface().(time.Time).Before(time.Now().UTC())
-		}
-	}
-
-	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
-func hasMaxOf(fl FieldLevel) bool {
-	return isLte(fl)
-}
-
-// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
-func isTCP4AddrResolvable(fl FieldLevel) bool {
-
-	if !isIP4Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveTCPAddr("tcp4", fl.Field().String())
-	return err == nil
-}
-
-// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
-func isTCP6AddrResolvable(fl FieldLevel) bool {
-
-	if !isIP6Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveTCPAddr("tcp6", fl.Field().String())
-
-	return err == nil
-}
-
-// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
-func isTCPAddrResolvable(fl FieldLevel) bool {
-
-	if !isIP4Addr(fl) && !isIP6Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveTCPAddr("tcp", fl.Field().String())
-
-	return err == nil
-}
-
-// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
-func isUDP4AddrResolvable(fl FieldLevel) bool {
-
-	if !isIP4Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveUDPAddr("udp4", fl.Field().String())
-
-	return err == nil
-}
-
-// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
-func isUDP6AddrResolvable(fl FieldLevel) bool {
-
-	if !isIP6Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveUDPAddr("udp6", fl.Field().String())
-
-	return err == nil
-}
-
-// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
-func isUDPAddrResolvable(fl FieldLevel) bool {
-
-	if !isIP4Addr(fl) && !isIP6Addr(fl) {
-		return false
-	}
-
-	_, err := net.ResolveUDPAddr("udp", fl.Field().String())
-
-	return err == nil
-}
-
-// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
-func isIP4AddrResolvable(fl FieldLevel) bool {
-
-	if !isIPv4(fl) {
-		return false
-	}
-
-	_, err := net.ResolveIPAddr("ip4", fl.Field().String())
-
-	return err == nil
-}
-
-// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
-func isIP6AddrResolvable(fl FieldLevel) bool {
-
-	if !isIPv6(fl) {
-		return false
-	}
-
-	_, err := net.ResolveIPAddr("ip6", fl.Field().String())
-
-	return err == nil
-}
-
-// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
-func isIPAddrResolvable(fl FieldLevel) bool {
-
-	if !isIP(fl) {
-		return false
-	}
-
-	_, err := net.ResolveIPAddr("ip", fl.Field().String())
-
-	return err == nil
-}
-
-// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
-func isUnixAddrResolvable(fl FieldLevel) bool {
-
-	_, err := net.ResolveUnixAddr("unix", fl.Field().String())
-
-	return err == nil
-}
-
-func isIP4Addr(fl FieldLevel) bool {
-
-	val := fl.Field().String()
-
-	if idx := strings.LastIndex(val, ":"); idx != -1 {
-		val = val[0:idx]
-	}
-
-	ip := net.ParseIP(val)
-
-	return ip != nil && ip.To4() != nil
-}
-
-func isIP6Addr(fl FieldLevel) bool {
-
-	val := fl.Field().String()
-
-	if idx := strings.LastIndex(val, ":"); idx != -1 {
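-		// strip the surrounding brackets and trailing port from literals like "[::1]:80"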
-		if idx != 0 && val[idx-1:idx] == "]" {
-			val = val[1 : idx-1]
-		}
-	}
-
-	ip := net.ParseIP(val)
-
-	return ip != nil && ip.To4() == nil
-}
-
-func isHostnameRFC952(fl FieldLevel) bool {
-	return hostnameRegexRFC952.MatchString(fl.Field().String())
-}
-
-func isHostnameRFC1123(fl FieldLevel) bool {
-	return hostnameRegexRFC1123.MatchString(fl.Field().String())
-}
-
-func isFQDN(fl FieldLevel) bool {
-	val := fl.Field().String()
-
-	if val == "" {
-		return false
-	}
-
-	if val[len(val)-1] == '.' {
-		val = val[0 : len(val)-1]
-	}
-
-	return strings.ContainsAny(val, ".") &&
-		hostnameRegexRFC952.MatchString(val)
-}
diff --git a/vendor/gopkg.in/go-playground/validator.v9/cache.go b/vendor/gopkg.in/go-playground/validator.v9/cache.go
deleted file mode 100644
index c7fb0fb1..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/cache.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package validator
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
-)
-
-type tagType uint8
-
-const (
-	typeDefault tagType = iota
-	typeOmitEmpty
-	typeIsDefault
-	typeNoStructLevel
-	typeStructOnly
-	typeDive
-	typeOr
-	typeKeys
-	typeEndKeys
-)
-
-const (
-	invalidValidation   = "Invalid validation tag on field '%s'"
-	undefinedValidation = "Undefined validation function '%s' on field '%s'"
-	keysTagNotDefined   = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
-)
-
-type structCache struct {
-	lock sync.Mutex
-	m    atomic.Value // map[reflect.Type]*cStruct
-}
-
-func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
-	c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
-	return
-}
-
-func (sc *structCache) Set(key reflect.Type, value *cStruct) {
-
-	m := sc.m.Load().(map[reflect.Type]*cStruct)
-
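-	// copy-on-write: build a fresh map and atomically swap it in below, so readers
-	// loading the map via atomic.Value never observe a partially updated map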
-	nm := make(map[reflect.Type]*cStruct, len(m)+1)
-	for k, v := range m {
-		nm[k] = v
-	}
-	nm[key] = value
-	sc.m.Store(nm)
-}
-
-type tagCache struct {
-	lock sync.Mutex
-	m    atomic.Value // map[string]*cTag
-}
-
-func (tc *tagCache) Get(key string) (c *cTag, found bool) {
-	c, found = tc.m.Load().(map[string]*cTag)[key]
-	return
-}
-
-func (tc *tagCache) Set(key string, value *cTag) {
-
-	m := tc.m.Load().(map[string]*cTag)
-
-	nm := make(map[string]*cTag, len(m)+1)
-	for k, v := range m {
-		nm[k] = v
-	}
-	nm[key] = value
-	tc.m.Store(nm)
-}
-
-type cStruct struct {
-	name   string
-	fields []*cField
-	fn     StructLevelFuncCtx
-}
-
-type cField struct {
-	idx        int
-	name       string
-	altName    string
-	namesEqual bool
-	cTags      *cTag
-}
-
-type cTag struct {
-	tag            string
-	aliasTag       string
-	actualAliasTag string
-	param          string
-	keys           *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
-	next           *cTag
-	fn             FuncCtx
-	typeof         tagType
-	hasTag         bool
-	hasAlias       bool
-	hasParam       bool // true if parameter used eg. eq= where the equal sign has been set
-	isBlockEnd     bool // indicates the current tag represents the last validation in the block
-}
-
-func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
-
-	v.structCache.lock.Lock()
-	defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
-
-	typ := current.Type()
-
-	// multiple goroutines could have been waiting to parse this struct, but once the
-	// first one is done this check ensures the struct isn't parsed again.
-	cs, ok := v.structCache.Get(typ)
-	if ok {
-		return cs
-	}
-
-	cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
-
-	numFields := current.NumField()
-
-	var ctag *cTag
-	var fld reflect.StructField
-	var tag string
-	var customName string
-
-	for i := 0; i < numFields; i++ {
-
-		fld = typ.Field(i)
-
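-		// PkgPath is non-empty only for unexported fields; skip them unless the field is embedded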
-		if !fld.Anonymous && len(fld.PkgPath) > 0 {
-			continue
-		}
-
-		tag = fld.Tag.Get(v.tagName)
-
-		if tag == skipValidationTag {
-			continue
-		}
-
-		customName = fld.Name
-
-		if v.hasTagNameFunc {
-
-			name := v.tagNameFunc(fld)
-
-			if len(name) > 0 {
-				customName = name
-			}
-		}
-
-		// NOTE: the shared tag cache cannot be used here, because tags may be equal while things like the alias differ,
-		// and so only struct level caching can be used instead of combining it with field tag caching
-
-		if len(tag) > 0 {
-			ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
-		} else {
-			// even if the field doesn't have validations, a cTag is needed for traversing to potential inner/nested
-			// elements of the field.
-			ctag = new(cTag)
-		}
-
-		cs.fields = append(cs.fields, &cField{
-			idx:        i,
-			name:       fld.Name,
-			altName:    customName,
-			cTags:      ctag,
-			namesEqual: fld.Name == customName,
-		})
-	}
-
-	v.structCache.Set(typ, cs)
-
-	return cs
-}
-
-func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
-
-	var t string
-	var ok bool
-	noAlias := len(alias) == 0
-	tags := strings.Split(tag, tagSeparator)
-
-	for i := 0; i < len(tags); i++ {
-
-		t = tags[i]
-
-		if noAlias {
-			alias = t
-		}
-
-		// check map for alias and process new tags, otherwise process as usual
-		if tagsVal, found := v.aliases[t]; found {
-			if i == 0 {
-				firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
-			} else {
-				next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
-				current.next, current = next, curr
-
-			}
-
-			continue
-		}
-
-		var prevTag tagType
-
-		if i == 0 {
-			current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
-			firstCtag = current
-		} else {
-			prevTag = current.typeof
-			current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
-			current = current.next
-		}
-
-		switch t {
-
-		case diveTag:
-			current.typeof = typeDive
-			continue
-
-		case keysTag:
-			current.typeof = typeKeys
-
-			if i == 0 || prevTag != typeDive {
-				panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
-			}
-
-			current.typeof = typeKeys
-
-			// need to pass along only keys tag
-			// need to increment i to skip over the keys tags
-			b := make([]byte, 0, 64)
-
-			i++
-
-			for ; i < len(tags); i++ {
-
-				b = append(b, tags[i]...)
-				b = append(b, ',')
-
-				if tags[i] == endKeysTag {
-					break
-				}
-			}
-
-			current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
-			continue
-
-		case endKeysTag:
-			current.typeof = typeEndKeys
-
-			// if there are more tags after this one then there was no matching keysTag defined
-			// and an error should be thrown
-			if i != len(tags)-1 {
-				panic(keysTagNotDefined)
-			}
-			return
-
-		case omitempty:
-			current.typeof = typeOmitEmpty
-			continue
-
-		case structOnlyTag:
-			current.typeof = typeStructOnly
-			continue
-
-		case noStructLevelTag:
-			current.typeof = typeNoStructLevel
-			continue
-
-		default:
-
-			if t == isdefault {
-				current.typeof = typeIsDefault
-			}
-
-			// if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
-			orVals := strings.Split(t, orSeparator)
-
-			for j := 0; j < len(orVals); j++ {
-
-				vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
-
-				if noAlias {
-					alias = vals[0]
-					current.aliasTag = alias
-				} else {
-					current.actualAliasTag = t
-				}
-
-				if j > 0 {
-					current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
-					current = current.next
-				}
-				current.hasParam = len(vals) > 1
-
-				current.tag = vals[0]
-				if len(current.tag) == 0 {
-					panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
-				}
-
-				if current.fn, ok = v.validations[current.tag]; !ok {
-					panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
-				}
-
-				if len(orVals) > 1 {
-					current.typeof = typeOr
-				}
-
-				if len(vals) > 1 {
-					current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
-				}
-			}
-			current.isBlockEnd = true
-		}
-	}
-	return
-}
-
-func (v *Validate) fetchCacheTag(tag string) *cTag {
-	// find cached tag
-	ctag, found := v.tagCache.Get(tag)
-	if !found {
-		v.tagCache.lock.Lock()
-		defer v.tagCache.lock.Unlock()
-
-		// multiple goroutines could have been waiting to parse this tag, but once the
-		// first one is done this check ensures the tag isn't parsed again.
-		ctag, found = v.tagCache.Get(tag)
-		if !found {
-			ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
-			v.tagCache.Set(tag, ctag)
-		}
-	}
-	return ctag
-}
diff --git a/vendor/gopkg.in/go-playground/validator.v9/doc.go b/vendor/gopkg.in/go-playground/validator.v9/doc.go
deleted file mode 100644
index f7efe234..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/doc.go
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
-Package validator implements value validations for structs and individual fields
-based on tags.
-
-It can also handle Cross-Field and Cross-Struct validation for nested structs
-and has the ability to dive into arrays and maps of any type.
-
-see more examples https://github.com/go-playground/validator/tree/v9/_examples
-
-Validation Functions Return Type error
-
-Doing things this way is actually how the standard library does it as well; see
-the os.Open function here:
-
-	https://golang.org/pkg/os/#Open.
-
-The authors return type "error" to avoid the issue discussed in the following,
-where err is always != nil:
-
-	http://stackoverflow.com/a/29138676/3158232
-	https://github.com/go-playground/validator/issues/134
-
-Validator returns only InvalidValidationError for bad validation input, nil or
-ValidationErrors as type error; so, in your code all you need to do is check
-if the error returned is not nil, and if it's not, check whether the error is
-InvalidValidationError (if necessary, most of the time it isn't), then type cast
-it to type ValidationErrors like so: err.(validator.ValidationErrors).
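-
-For example, a minimal sketch of the check described above (here "validate" is an
-initialized *Validate and "user" is whatever struct you are validating):
-
-	err := validate.Struct(user)
-	if err != nil {
-
-		// InvalidValidationError is only returned for bad input to Struct,
-		// e.g. a nil pointer or a non-struct value
-		if _, ok := err.(*validator.InvalidValidationError); ok {
-			fmt.Println(err)
-			return
-		}
-
-		for _, fieldErr := range err.(validator.ValidationErrors) {
-			fmt.Println(fieldErr.Namespace(), fieldErr.Tag())
-		}
-	}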
-
-Custom Validation Functions
-
-Custom Validation functions can be added. Example:
-
-	// Structure
-	func customFunc(fl FieldLevel) bool {
-
-		if fl.Field().String() == "invalid" {
-			return false
-		}
-
-		return true
-	}
-
-	validate.RegisterValidation("custom tag name", customFunc)
-	// NOTES: using the same tag name as an existing function
-	//        will overwrite the existing one
-
-Cross-Field Validation
-
-Cross-Field Validation can be done via the following tags:
-	- eqfield
-	- nefield
-	- gtfield
-	- gtefield
-	- ltfield
-	- ltefield
-	- eqcsfield
-	- necsfield
-	- gtcsfield
-	- gtecsfield
-	- ltcsfield
-	- ltecsfield
-
-If, however, some custom cross-field validation is required, it can be done
-using a custom validation.
-
-Why not just have cross-fields validation tags (i.e. only eqcsfield and not
-eqfield)?
-
-The reason is efficiency. If you want to check a field within the same struct
-"eqfield" only has to find the field on the same struct (1 level). But, if we
-used "eqcsfield" it could be multiple levels down. Example:
-
-	type Inner struct {
-		StartDate time.Time
-	}
-
-	type Outer struct {
-		InnerStructField *Inner
-		CreatedAt time.Time      `validate:"ltecsfield=InnerStructField.StartDate"`
-	}
-
-	now := time.Now()
-
-	inner := &Inner{
-		StartDate: now,
-	}
-
-	outer := &Outer{
-		InnerStructField: inner,
-		CreatedAt: now,
-	}
-
-	errs := validate.Struct(outer)
-
-	// NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
-	//       into the function
-	//       when calling validate.VarWithValue(val, field, tag) val will be
-	//       whatever you pass, struct, field...
-	//       when calling validate.Field(field, tag) val will be nil
-
-Multiple Validators
-
-Multiple validators on a field will process in the order defined. Example:
-
-	type Test struct {
-		Field `validate:"max=10,min=1"`
-	}
-
-	// max will be checked then min
-
-Bad Validator definitions are not handled by the library. Example:
-
-	type Test struct {
-		Field `validate:"min=10,max=0"`
-	}
-
-	// this definition of min max will never succeed
-
-Using Validator Tags
-
-Baked In Cross-Field validation only compares fields on the same struct.
-If Cross-Field + Cross-Struct validation is needed you should implement your
-own custom validator.
-
-Comma (",") is the default separator of validation tags. If you wish to
-have a comma included within the parameter (i.e. excludesall=,) you will need to
-use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
-so the above will become excludesall=0x2C.
-
-	type Test struct {
-		Field `validate:"excludesall=,"`    // BAD! Do not include a comma.
-		Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
-	}
-
-Pipe ("|") is the 'or' validation tags separator. If you wish to
-have a pipe included within the parameter (i.e. excludesall=|) you will need to
-use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
-so the above will become excludesall=0x7C.
-
-	type Test struct {
-		Field `validate:"excludesall=|"`    // BAD! Do not include a pipe!
-		Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
-	}
-
-
-Baked In Validators and Tags
-
-Here is a list of the current built in validators:
-
-
-Skip Field
-
-Tells the validation to skip this struct field; this is particularly
-handy in ignoring embedded structs from being validated. (Usage: -)
-	Usage: -
-
-
-Or Operator
-
-This is the 'or' operator allowing multiple validators to be used and
-accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
-colors to be accepted. This can also be combined with 'and' for example
-(Usage: omitempty,rgb|rgba)
-
-	Usage: |
-
-StructOnly
-
-When a field that is a nested struct is encountered and contains this flag,
-any validation on the nested struct will be run, but none of the nested
-struct fields will be validated. This is useful if inside of your program
-you know the struct will be valid, but need to verify it has been assigned.
-NOTE: only "required" and "omitempty" can be used on a struct itself.
-
-	Usage: structonly
-
-NoStructLevel
-
-Same as structonly tag except that any struct level validations will not run.
-
-	Usage: nostructlevel
-
-Omit Empty
-
-Allows conditional validation, for example if a field is not set with
-a value (Determined by the "required" validator) then other validation
-such as min or max won't run, but if a value is set validation will run.
-
-	Usage: omitempty
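-
-For example, an illustrative sketch (the field name is hypothetical):
-
-	// min=5 is only checked when Nickname has been set
-	Nickname string `validate:"omitempty,min=5"`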
-
-Dive
-
-This tells the validator to dive into a slice, array or map and validate that
-level of the slice, array or map with the validation tags that follow.
-Multidimensional nesting is also supported, each level you wish to dive will
-require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see
-the Keys & EndKeys section just below.
-
-	Usage: dive
-
-Example #1
-
-	[][]string with validation tag "gt=0,dive,len=1,dive,required"
-	// gt=0 will be applied to []
-	// len=1 will be applied to []string
-	// required will be applied to string
-
-Example #2
-
-	[][]string with validation tag "gt=0,dive,dive,required"
-	// gt=0 will be applied to []
-	// []string will be spared validation
-	// required will be applied to string
-
-Keys & EndKeys
-
-These are to be used together directly after the dive tag and tell the validator
-that anything between 'keys' and 'endkeys' applies to the keys of a map and not the
-values; think of it like the 'dive' tag, but for map keys instead of values.
-Multidimensional nesting is also supported, each level you wish to validate will
-require another 'keys' and 'endkeys' tag. These tags are only valid for maps.
-
-	Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags
-
-Example #1
-
-	map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required"
-	// gt=0 will be applied to the map itself
-	// eq=1|eq=2 will be applied to the map keys
-	// required will be applied to map values
-
-Example #2
-
-	map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
-	// gt=0 will be applied to the map itself
-	// eq=1|eq=2 will be applied to each array element in the map keys
-	// required will be applied to map values
-
-Required
-
-This validates that the value is not the data type's default zero value.
-For numbers ensures value is not zero. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
-	Usage: required
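-
-For example, an illustrative sketch (the field names are hypothetical):
-
-	Name string   `validate:"required"` // fails when Name is ""
-	Tags []string `validate:"required"` // fails when Tags is nil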
-
-Is Default
-
-This validates that the value is the default value and is almost the
-opposite of required.
-
-	Usage: isdefault
-
-Length
-
-For numbers, length will ensure that the value is
-equal to the parameter given. For strings, it checks that
-the string length is exactly that number of characters. For slices,
-arrays, and maps, validates the number of items.
-
-	Usage: len=10
-
-Maximum
-
-For numbers, max will ensure that the value is
-less than or equal to the parameter given. For strings, it checks
-that the string length is at most that number of characters. For
-slices, arrays, and maps, validates the number of items.
-
-	Usage: max=10
-
-Minimum
-
-For numbers, min will ensure that the value is
-greater than or equal to the parameter given. For strings, it checks that
-the string length is at least that number of characters. For slices,
-arrays, and maps, validates the number of items.
-
-	Usage: min=10
-
-Equals
-
-For strings & numbers, eq will ensure that the value is
-equal to the parameter given. For slices, arrays, and maps,
-validates the number of items.
-
-	Usage: eq=10
-
-Not Equal
-
-For strings & numbers, ne will ensure that the value is not
-equal to the parameter given. For slices, arrays, and maps,
-validates the number of items.
-
-	Usage: ne=10
-
-One Of
-
-For strings, ints, and uints, oneof will ensure that the value
-is one of the values in the parameter.  The parameter should be
-a list of values separated by whitespace.  Values may be
-strings or numbers.
-
-    Usage: oneof=red green
-           oneof=5 7 9
-
-Greater Than
-
-For numbers, this will ensure that the value is greater than the
-parameter given. For strings, it checks that the string length
-is greater than that number of characters. For slices, arrays
-and maps it validates the number of items.
-
-Example #1
-
-	Usage: gt=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is greater than time.Now().UTC().
-
-	Usage: gt
-
-Greater Than or Equal
-
-Same as 'min' above. Kept both to make terminology with 'len' easier.
-
-
-Example #1
-
-	Usage: gte=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is greater than or equal to time.Now().UTC().
-
-	Usage: gte
-
-Less Than
-
-For numbers, this will ensure that the value is less than the parameter given.
-For strings, it checks that the string length is less than that number of
-characters. For slices, arrays, and maps it validates the number of items.
-
-Example #1
-
-	Usage: lt=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is less than time.Now().UTC().
-
-	Usage: lt
-
-Less Than or Equal
-
-Same as 'max' above. Kept both to make terminology with 'len' easier.
-
-Example #1
-
-	Usage: lte=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is less than or equal to time.Now().UTC().
-
-	Usage: lte
-
-Field Equals Another Field
-
-This will validate the field value against another field's value, either within
-a struct or a passed-in field.
-
-Example #1:
-
-	// Validation on Password field using:
-	Usage: eqfield=ConfirmPassword
-
-Example #2:
-
-	// Validating by field:
-	validate.VarWithValue(password, confirmpassword, "eqfield")
-
-Field Equals Another Field (relative)
-
-This does the same as eqfield except that it validates the field provided relative
-to the top level struct.
-
-	Usage: eqcsfield=InnerStructField.Field
-
-Field Does Not Equal Another Field
-
-This will validate the field value against another field's value, either within
-a struct or a passed-in field.
-
-Examples:
-
-	// Confirm two colors are not the same:
-	//
-	// Validation on Color field:
-	Usage: nefield=Color2
-
-	// Validating by field:
-	validate.VarWithValue(color1, color2, "nefield")
-
-Field Does Not Equal Another Field (relative)
-
-This does the same as nefield except that it validates the field provided
-relative to the top level struct.
-
-	Usage: necsfield=InnerStructField.Field
-
-Field Greater Than Another Field
-
-Only valid for Numbers and time.Time types, this will validate the field value
-against another field's value, either within a struct or a passed-in field.
-Usage examples are for validation of a Start and End date:
-
-Example #1:
-
-	// Validation on End field using:
-	Usage: gtfield=Start
-
-Example #2:
-
-	// Validating by field:
-	validate.VarWithValue(start, end, "gtfield")
-
-
-Field Greater Than Another Relative Field
-
-This does the same as gtfield except that it validates the field provided
-relative to the top level struct.
-
-	Usage: gtcsfield=InnerStructField.Field
-
-Field Greater Than or Equal To Another Field
-
-Only valid for Numbers and time.Time types, this will validate the field value
-against another field's value, either within a struct or a passed-in field.
-Usage examples are for validation of a Start and End date:
-
-Example #1:
-
-	// Validation on End field using:
-	Usage: gtefield=Start
-
-Example #2:
-
-	// Validating by field:
-	validate.VarWithValue(start, end, "gtefield")
-
-Field Greater Than or Equal To Another Relative Field
-
-This does the same as gtefield except that it validates the field provided relative
-to the top level struct.
-
-	Usage: gtecsfield=InnerStructField.Field
-
-Less Than Another Field
-
-Only valid for Numbers and time.Time types, this will validate the field value
-against another field's value, either within a struct or a passed-in field.
-Usage examples are for validation of a Start and End date:
-
-Example #1:
-
-	// Validation on End field using:
-	Usage: ltfield=Start
-
-Example #2:
-
-	// Validating by field:
-	validate.VarWithValue(start, end, "ltfield")
-
-Less Than Another Relative Field
-
-This does the same as ltfield except that it validates the field provided relative
-to the top level struct.
-
-	Usage: ltcsfield=InnerStructField.Field
-
-Less Than or Equal To Another Field
-
-Only valid for Numbers and time.Time types, this will validate the field value
-against another field's value, either within a struct or a passed-in field.
-Usage examples are for validation of a Start and End date:
-
-Example #1:
-
-	// Validation on End field using:
-	Usage: ltefield=Start
-
-Example #2:
-
-	// Validating by field:
-	validate.VarWithValue(start, end, "ltefield")
-
-Less Than or Equal To Another Relative Field
-
-This does the same as ltefield except that it validates the field provided relative
-to the top level struct.
-
-	Usage: ltecsfield=InnerStructField.Field
-
-Unique
-
-For arrays & slices, unique will ensure that there are no duplicates.
-
-	Usage: unique
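-
-For example, an illustrative sketch (the field name is hypothetical):
-
-	// fails when Tags contains the same value more than once
-	Tags []string `validate:"unique"`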
-
-Alpha Only
-
-This validates that a string value contains ASCII alpha characters only
-
-	Usage: alpha
-
-Alphanumeric
-
-This validates that a string value contains ASCII alphanumeric characters only
-
-	Usage: alphanum
-
-Alpha Unicode
-
-This validates that a string value contains unicode alpha characters only
-
-	Usage: alphaunicode
-
-Alphanumeric Unicode
-
-This validates that a string value contains unicode alphanumeric characters only
-
-	Usage: alphanumunicode
-
-Numeric
-
-This validates that a string value contains a basic numeric value.
-basic excludes exponents etc...
-
-	Usage: numeric
-
-Hexadecimal String
-
-This validates that a string value contains a valid hexadecimal.
-
-	Usage: hexadecimal
-
-Hexcolor String
-
-This validates that a string value contains a valid hex color including
-the hashtag (#).
-
-	Usage: hexcolor
-
-RGB String
-
-This validates that a string value contains a valid rgb color
-
-	Usage: rgb
-
-RGBA String
-
-This validates that a string value contains a valid rgba color
-
-	Usage: rgba
-
-HSL String
-
-This validates that a string value contains a valid hsl color
-
-	Usage: hsl
-
-HSLA String
-
-This validates that a string value contains a valid hsla color
-
-	Usage: hsla
-
-E-mail String
-
-This validates that a string value contains a valid email
-This may not conform to all possibilities of any RFC standard, but neither
-does any email provider accept all possibilities.
-
-	Usage: email
-
-URL String
-
-This validates that a string value contains a valid url.
-This will accept any url the golang request uri accepts, but it must contain
-a scheme, for example http:// or rtmp://.
-
-	Usage: url
-
-URI String
-
-This validates that a string value contains a valid uri.
-This will accept any uri the golang request uri accepts.
-
-	Usage: uri
-
-Base64 String
-
-This validates that a string value contains a valid base64 value.
-Although an empty string is valid base64, this will report an empty string
-as an error; if you wish to accept an empty string as valid you can use
-this with the omitempty tag.
-
-	Usage: base64
-
-Contains
-
-This validates that a string value contains the substring value.
-
-	Usage: contains=@
-
-Contains Any
-
-This validates that a string value contains any Unicode code points
-in the substring value.
-
-	Usage: containsany=!@#?
-
-Contains Rune
-
-This validates that a string value contains the supplied rune value.
-
-	Usage: containsrune=@
-
-Excludes
-
-This validates that a string value does not contain the substring value.
-
-	Usage: excludes=@
-
-Excludes All
-
-This validates that a string value does not contain any Unicode code
-points in the substring value.
-
-	Usage: excludesall=!@#?
-
-Excludes Rune
-
-This validates that a string value does not contain the supplied rune value.
-
-	Usage: excludesrune=@
-
-International Standard Book Number
-
-This validates that a string value contains a valid isbn10 or isbn13 value.
-
-	Usage: isbn
-
-International Standard Book Number 10
-
-This validates that a string value contains a valid isbn10 value.
-
-	Usage: isbn10
-
-International Standard Book Number 13
-
-This validates that a string value contains a valid isbn13 value.
-
-	Usage: isbn13
-
-
-Universally Unique Identifier UUID
-
-This validates that a string value contains a valid UUID.
-
-	Usage: uuid
-
-Universally Unique Identifier UUID v3
-
-This validates that a string value contains a valid version 3 UUID.
-
-	Usage: uuid3
-
-Universally Unique Identifier UUID v4
-
-This validates that a string value contains a valid version 4 UUID.
-
-	Usage: uuid4
-
-Universally Unique Identifier UUID v5
-
-This validates that a string value contains a valid version 5 UUID.
-
-	Usage: uuid5
-
-ASCII
-
-This validates that a string value contains only ASCII characters.
-NOTE: if the string is blank, this validates as true.
-
-	Usage: ascii
-
-Printable ASCII
-
-This validates that a string value contains only printable ASCII characters.
-NOTE: if the string is blank, this validates as true.
-
-	Usage: printascii
-
-Multi-Byte Characters
-
-This validates that a string value contains one or more multibyte characters.
-NOTE: if the string is blank, this validates as true.
-
-	Usage: multibyte
-
-Data URL
-
-This validates that a string value contains a valid DataURI.
-NOTE: this will also validate that the data portion is valid base64
-
-	Usage: datauri
-
-Latitude
-
-This validates that a string value contains a valid latitude.
-
-	Usage: latitude
-
-Longitude
-
-This validates that a string value contains a valid longitude.
-
-	Usage: longitude
-
-Social Security Number SSN
-
-This validates that a string value contains a valid U.S. Social Security Number.
-
-	Usage: ssn
-
-Internet Protocol Address IP
-
-This validates that a string value contains a valid IP Address.
-
-	Usage: ip
-
-Internet Protocol Address IPv4
-
-This validates that a string value contains a valid v4 IP Address.
-
-	Usage: ipv4
-
-Internet Protocol Address IPv6
-
-This validates that a string value contains a valid v6 IP Address.
-
-	Usage: ipv6
-
-Classless Inter-Domain Routing CIDR
-
-This validates that a string value contains a valid CIDR Address.
-
-	Usage: cidr
-
-Classless Inter-Domain Routing CIDRv4
-
-This validates that a string value contains a valid v4 CIDR Address.
-
-	Usage: cidrv4
-
-Classless Inter-Domain Routing CIDRv6
-
-This validates that a string value contains a valid v6 CIDR Address.
-
-	Usage: cidrv6
-
-Transmission Control Protocol Address TCP
-
-This validates that a string value contains a valid resolvable TCP Address.
-
-	Usage: tcp_addr
-
-Transmission Control Protocol Address TCPv4
-
-This validates that a string value contains a valid resolvable v4 TCP Address.
-
-	Usage: tcp4_addr
-
-Transmission Control Protocol Address TCPv6
-
-This validates that a string value contains a valid resolvable v6 TCP Address.
-
-	Usage: tcp6_addr
-
-User Datagram Protocol Address UDP
-
-This validates that a string value contains a valid resolvable UDP Address.
-
-	Usage: udp_addr
-
-User Datagram Protocol Address UDPv4
-
-This validates that a string value contains a valid resolvable v4 UDP Address.
-
-	Usage: udp4_addr
-
-User Datagram Protocol Address UDPv6
-
-This validates that a string value contains a valid resolvable v6 UDP Address.
-
-	Usage: udp6_addr
-
-Internet Protocol Address IP
-
-This validates that a string value contains a valid resolvable IP Address.
-
-	Usage: ip_addr
-
-Internet Protocol Address IPv4
-
-This validates that a string value contains a valid resolvable v4 IP Address.
-
-	Usage: ip4_addr
-
-Internet Protocol Address IPv6
-
-This validates that a string value contains a valid resolvable v6 IP Address.
-
-	Usage: ip6_addr
-
-Unix domain socket end point Address
-
-This validates that a string value contains a valid Unix Address.
-
-	Usage: unix_addr
-
-Media Access Control Address MAC
-
-This validates that a string value contains a valid MAC Address.
-
-	Usage: mac
-
-Note: See Go's ParseMAC for accepted formats and types:
-
-	http://golang.org/src/net/mac.go?s=866:918#L29
-
-Hostname RFC 952
-
-This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952
-
-	Usage: hostname
-
-Hostname RFC 1123
-
-This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123
-
-	Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias.
-
-Fully Qualified Domain Name (FQDN)
-
-This validates that a string value contains a valid FQDN.
-
-	Usage: fqdn
-
-Alias Validators and Tags
-
-NOTE: When returning an error, the tag returned in "FieldError" will be
-the alias tag unless the dive tag is part of the alias. Everything after the
-dive tag is not reported as the alias tag. Also, the "ActualTag" in the former
-case will be the actual tag within the alias that failed.
-
-Here is a list of the current built in alias tags:
-
-	"iscolor"
-		alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
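-
-You can also register your own alias; a minimal sketch, assuming an initialized
-*Validate instance named "validate":
-
-	// lets 'hostname' tags validate against RFC 1123, as suggested above
-	validate.RegisterAlias("hostname", "hostname_rfc1123")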
-
-Validator notes:
-
-	regex
-		a regex validator won't be added because commas and = signs can be part
-		of a regex, which conflicts with the validation definitions. Although
-		workarounds can be made, they take away from using pure regexes.
-		Furthermore, while quick and dirty, the regexes become harder to
-		maintain and are not reusable, so it's as much a programming philosophy
-		as anything.
-
-		In place of this, new validator functions should be created; a regex can
-		be used within the validator function and even be precompiled for better
-		efficiency within regexes.go.
-
-		And the best reason, you can submit a pull request and we can keep on
-		adding to the validation library of this package!
-
-Panics
-
-This package panics when bad input is provided; this is by design, as bad code like
-that should not make it to production.
-
-	type Test struct {
-		TestField string `validate:"nonexistentfunction=1"`
-	}
-
-	t := &Test{
-		TestField: "Test",
-	}
-
-	validate.Struct(t) // this will panic
-*/
-package validator
diff --git a/vendor/gopkg.in/go-playground/validator.v9/errors.go b/vendor/gopkg.in/go-playground/validator.v9/errors.go
deleted file mode 100644
index 85f65eff..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/errors.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package validator
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"strings"
-
-	ut "github.com/go-playground/universal-translator"
-)
-
-const (
-	fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
-)
-
-// ValidationErrorsTranslations is the translation return type
-type ValidationErrorsTranslations map[string]string
-
-// InvalidValidationError describes an invalid argument passed to
-// `Struct`, `StructExcept`, `StructPartial` or `Field`
-type InvalidValidationError struct {
-	Type reflect.Type
-}
-
-// Error returns InvalidValidationError message
-func (e *InvalidValidationError) Error() string {
-
-	if e.Type == nil {
-		return "validator: (nil)"
-	}
-
-	return "validator: (nil " + e.Type.String() + ")"
-}
-
-// ValidationErrors is an array of FieldError's
-// for use in custom error messages post validation.
-type ValidationErrors []FieldError
-
-// Error is intended for use in development + debugging and not intended to be a production error message.
-// It allows ValidationErrors to satisfy the error interface.
-// All information to create an error message specific to your application is contained within
-// the FieldError found within the ValidationErrors array
-func (ve ValidationErrors) Error() string {
-
-	buff := bytes.NewBufferString("")
-
-	var fe *fieldError
-
-	for i := 0; i < len(ve); i++ {
-
-		fe = ve[i].(*fieldError)
-		buff.WriteString(fe.Error())
-		buff.WriteString("\n")
-	}
-
-	return strings.TrimSpace(buff.String())
-}
-
-// Translate translates all of the ValidationErrors
-func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
-
-	trans := make(ValidationErrorsTranslations)
-
-	var fe *fieldError
-
-	for i := 0; i < len(ve); i++ {
-		fe = ve[i].(*fieldError)
-
-		// // in case an Anonymous struct was used, ensure that the key
-		// // would be 'Username' instead of ".Username"
-		// if len(fe.ns) > 0 && fe.ns[:1] == "." {
-		// 	trans[fe.ns[1:]] = fe.Translate(ut)
-		// 	continue
-		// }
-
-		trans[fe.ns] = fe.Translate(ut)
-	}
-
-	return trans
-}
-
-// FieldError contains all functions to get error details
-type FieldError interface {
-
-	// returns the validation tag that failed. if the
-	// validation was an alias, this will return the
-	// alias name and not the underlying tag that failed.
-	//
-	// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
-	// will return "iscolor"
-	Tag() string
-
-	// returns the validation tag that failed; even if an
-	// alias was used, the actual tag within the alias will be returned.
-	// If an 'or' validation fails the entire or will be returned.
-	//
-	// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
-	// will return "hexcolor|rgb|rgba|hsl|hsla"
-	ActualTag() string
-
-	// returns the namespace for the field error, with the tag
-	// name taking precedence over the fields actual name.
-	//
-	// eg. JSON name "User.fname"
-	//
-	// See StructNamespace() for a version that returns actual names.
-	//
-	// NOTE: this field can be blank when validating a single primitive field
-	// using validate.Field(...) as there is no way to extract its name
-	Namespace() string
-
-	// returns the namespace for the field error, with the fields
-	// actual name.
-	//
-	// eg. "User.FirstName" see Namespace for comparison
-	//
-	// NOTE: this field can be blank when validating a single primitive field
-	// using validate.Field(...) as there is no way to extract its name
-	StructNamespace() string
-
-	// returns the fields name with the tag name taking precedence over the
-	// fields actual name.
-	//
-	// eg. JSON name "fname"
-	// see ActualField for comparison
-	Field() string
-
-	// returns the fields actual name from the struct, when able to determine.
-	//
-	// eg.  "FirstName"
-	// see Field for comparison
-	StructField() string
-
-	// returns the actual fields value in case needed for creating the error
-	// message
-	Value() interface{}
-
-	// returns the param value, in string form for comparison; this will also
-	// help with generating an error message
-	Param() string
-
-	// Kind returns the Field's reflect Kind
-	//
-	// eg. time.Time's kind is a struct
-	Kind() reflect.Kind
-
-	// Type returns the Field's reflect Type
-	//
-	// eg. time.Time's type is time.Time
-	Type() reflect.Type
-
-	// returns the FieldError's translated error
-	// from the provided 'ut.Translator' and registered 'TranslationFunc'
-	//
-	// NOTE: if no registered translation can be found, it returns the same
-	// as calling fe.Error()
-	Translate(ut ut.Translator) string
-}
-
-// compile time interface checks
-var _ FieldError = new(fieldError)
-var _ error = new(fieldError)
-
-// fieldError contains a single field's validation error along
-// with other properties that may be needed for error message creation
-// it complies with the FieldError interface
-type fieldError struct {
-	v              *Validate
-	tag            string
-	actualTag      string
-	ns             string
-	structNs       string
-	fieldLen       uint8
-	structfieldLen uint8
-	value          interface{}
-	param          string
-	kind           reflect.Kind
-	typ            reflect.Type
-}
-
-// Tag returns the validation tag that failed.
-func (fe *fieldError) Tag() string {
-	return fe.tag
-}
-
-// ActualTag returns the validation tag that failed, even if an
-// alias was used; the actual tag within the alias will be returned.
-func (fe *fieldError) ActualTag() string {
-	return fe.actualTag
-}
-
-// Namespace returns the namespace for the field error, with the tag
-// name taking precedence over the fields actual name.
-func (fe *fieldError) Namespace() string {
-	return fe.ns
-}
-
-// StructNamespace returns the namespace for the field error, with the fields
-// actual name.
-func (fe *fieldError) StructNamespace() string {
-	return fe.structNs
-}
-
-// Field returns the fields name with the tag name taking precedence over the
-// fields actual name.
-func (fe *fieldError) Field() string {
-
-	return fe.ns[len(fe.ns)-int(fe.fieldLen):]
-	// // return fe.field
-	// fld := fe.ns[len(fe.ns)-int(fe.fieldLen):]
-
-	// log.Println("FLD:", fld)
-
-	// if len(fld) > 0 && fld[:1] == "." {
-	// 	return fld[1:]
-	// }
-
-	// return fld
-}
-
-// returns the fields actual name from the struct, when able to determine.
-func (fe *fieldError) StructField() string {
-	// return fe.structField
-	return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
-}
-
-// Value returns the actual fields value in case needed for creating the error
-// message
-func (fe *fieldError) Value() interface{} {
-	return fe.value
-}
-
-// Param returns the param value, in string form for comparison; this will
-// also help with generating an error message
-func (fe *fieldError) Param() string {
-	return fe.param
-}
-
-// Kind returns the Field's reflect Kind
-func (fe *fieldError) Kind() reflect.Kind {
-	return fe.kind
-}
-
-// Type returns the Field's reflect Type
-func (fe *fieldError) Type() reflect.Type {
-	return fe.typ
-}
-
-// Error returns the fieldError's error message
-func (fe *fieldError) Error() string {
-	return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
-}
-
-// Translate returns the FieldError's translated error
-// from the provided 'ut.Translator' and registered 'TranslationFunc'
-//
-// NOTE: if no registered translation can be found, it returns the same
-// as calling fe.Error()
-func (fe *fieldError) Translate(ut ut.Translator) string {
-
-	m, ok := fe.v.transTagFunc[ut]
-	if !ok {
-		return fe.Error()
-	}
-
-	fn, ok := m[fe.tag]
-	if !ok {
-		return fe.Error()
-	}
-
-	return fn(ut, fe)
-}
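
The FieldError and ValidationErrors API removed above is what callers type-assert validation errors into. A minimal sketch of typical consumption, assuming the package is imported from gopkg.in/go-playground/validator.v9; the User struct, its tags and the failing values are illustrative only, not taken from this patch:

    package main

    import (
    	"fmt"

    	validator "gopkg.in/go-playground/validator.v9"
    )

    type User struct {
    	Email string `validate:"required,email"`
    	Age   uint8  `validate:"gte=18"`
    }

    func main() {
    	v := validator.New()
    	if err := v.Struct(User{Email: "not-an-email", Age: 12}); err != nil {
    		// assert to ValidationErrors to reach the FieldError methods above
    		for _, fe := range err.(validator.ValidationErrors) {
    			fmt.Println(fe.Namespace(), fe.Tag(), fe.Param())
    		}
    	}
    }
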
diff --git a/vendor/gopkg.in/go-playground/validator.v9/field_level.go b/vendor/gopkg.in/go-playground/validator.v9/field_level.go
deleted file mode 100644
index 6d731925..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/field_level.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package validator
-
-import "reflect"
-
-// FieldLevel contains all the information and helper functions
-// to validate a field
-type FieldLevel interface {
-
-	// returns the top level struct, if any
-	Top() reflect.Value
-
-	// returns the current fields parent struct, if any or
-	// the comparison value if called 'VarWithValue'
-	Parent() reflect.Value
-
-	// returns current field for validation
-	Field() reflect.Value
-
-	// returns the field's name with the tag
-	// name taking precedence over the field's actual name.
-	FieldName() string
-
-	// returns the struct field's name
-	StructFieldName() string
-
-	// returns param for validation against current field
-	Param() string
-
-	// ExtractType gets the actual underlying type of field value.
-	// It will dive into pointers, customTypes and return you the
-	// underlying value and its kind.
-	ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
-
-	// traverses the parent struct to retrieve a specific field denoted by the provided namespace
-	// in the param and returns the field, field kind and whether it was successful in retrieving
-	// the field at all.
-	//
-	// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
-	// could not be retrieved because it didn't exist.
-	GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
-}
-
-var _ FieldLevel = new(validate)
-
-// Field returns current field for validation
-func (v *validate) Field() reflect.Value {
-	return v.flField
-}
-
-// FieldName returns the field's name with the tag
-// name taking precedence over the field's actual name.
-func (v *validate) FieldName() string {
-	return v.cf.altName
-}
-
-// StructFieldName returns the struct field's name
-func (v *validate) StructFieldName() string {
-	return v.cf.name
-}
-
-// Param returns param for validation against current field
-func (v *validate) Param() string {
-	return v.ct.param
-}
-
-// GetStructFieldOK traverses the parent struct to retrieve the field denoted by the param and reports whether it was found
-func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
-	return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
-}
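
The FieldLevel interface removed above is the value handed to custom validation functions. A minimal sketch of registering one, assuming (as in upstream validator.v9) that the function passed to RegisterValidation has the form func(fl FieldLevel) bool; the "is-awesome" tag name is made up for illustration:

    package main

    import validator "gopkg.in/go-playground/validator.v9"

    func main() {
    	v := validator.New()
    	// the closure receives the FieldLevel value and reports whether the field passes
    	if err := v.RegisterValidation("is-awesome", func(fl validator.FieldLevel) bool {
    		return fl.Field().String() == "awesome"
    	}); err != nil {
    		panic(err)
    	}
    }
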
diff --git a/vendor/gopkg.in/go-playground/validator.v9/regexes.go b/vendor/gopkg.in/go-playground/validator.v9/regexes.go
deleted file mode 100644
index 78f3ea0a..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/regexes.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package validator
-
-import "regexp"
-
-const (
-	alphaRegexString               = "^[a-zA-Z]+$"
-	alphaNumericRegexString        = "^[a-zA-Z0-9]+$"
-	alphaUnicodeRegexString        = "^[\\p{L}]+$"
-	alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
-	numericRegexString             = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
-	numberRegexString              = "^[0-9]+$"
-	hexadecimalRegexString         = "^[0-9a-fA-F]+$"
-	hexcolorRegexString            = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
-	rgbRegexString                 = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
-	rgbaRegexString                = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
-	hslRegexString                 = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
-	hslaRegexString                = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
-	emailRegexString               = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
-	base64RegexString              = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
-	iSBN10RegexString              = "^(?:[0-9]{9}X|[0-9]{10})$"
-	iSBN13RegexString              = "^(?:(?:97(?:8|9))[0-9]{10})$"
-	uUID3RegexString               = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
-	uUID4RegexString               = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
-	uUID5RegexString               = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
-	uUIDRegexString                = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
-	aSCIIRegexString               = "^[\x00-\x7F]*$"
-	printableASCIIRegexString      = "^[\x20-\x7E]*$"
-	multibyteRegexString           = "[^\x00-\x7F]"
-	dataURIRegexString             = "^data:.+\\/(.+);base64$"
-	latitudeRegexString            = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
-	longitudeRegexString           = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
-	sSNRegexString                 = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
-	hostnameRegexStringRFC952      = `^[a-zA-Z][a-zA-Z0-9\-\.]+[a-z-Az0-9]$`    // https://tools.ietf.org/html/rfc952
-	hostnameRegexStringRFC1123     = `^[a-zA-Z0-9][a-zA-Z0-9\-\.]+[a-z-Az0-9]$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
-)
-
-var (
-	alphaRegex               = regexp.MustCompile(alphaRegexString)
-	alphaNumericRegex        = regexp.MustCompile(alphaNumericRegexString)
-	alphaUnicodeRegex        = regexp.MustCompile(alphaUnicodeRegexString)
-	alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
-	numericRegex             = regexp.MustCompile(numericRegexString)
-	numberRegex              = regexp.MustCompile(numberRegexString)
-	hexadecimalRegex         = regexp.MustCompile(hexadecimalRegexString)
-	hexcolorRegex            = regexp.MustCompile(hexcolorRegexString)
-	rgbRegex                 = regexp.MustCompile(rgbRegexString)
-	rgbaRegex                = regexp.MustCompile(rgbaRegexString)
-	hslRegex                 = regexp.MustCompile(hslRegexString)
-	hslaRegex                = regexp.MustCompile(hslaRegexString)
-	emailRegex               = regexp.MustCompile(emailRegexString)
-	base64Regex              = regexp.MustCompile(base64RegexString)
-	iSBN10Regex              = regexp.MustCompile(iSBN10RegexString)
-	iSBN13Regex              = regexp.MustCompile(iSBN13RegexString)
-	uUID3Regex               = regexp.MustCompile(uUID3RegexString)
-	uUID4Regex               = regexp.MustCompile(uUID4RegexString)
-	uUID5Regex               = regexp.MustCompile(uUID5RegexString)
-	uUIDRegex                = regexp.MustCompile(uUIDRegexString)
-	aSCIIRegex               = regexp.MustCompile(aSCIIRegexString)
-	printableASCIIRegex      = regexp.MustCompile(printableASCIIRegexString)
-	multibyteRegex           = regexp.MustCompile(multibyteRegexString)
-	dataURIRegex             = regexp.MustCompile(dataURIRegexString)
-	latitudeRegex            = regexp.MustCompile(latitudeRegexString)
-	longitudeRegex           = regexp.MustCompile(longitudeRegexString)
-	sSNRegex                 = regexp.MustCompile(sSNRegexString)
-	hostnameRegexRFC952      = regexp.MustCompile(hostnameRegexStringRFC952)
-	hostnameRegexRFC1123     = regexp.MustCompile(hostnameRegexStringRFC1123)
-)
diff --git a/vendor/gopkg.in/go-playground/validator.v9/struct_level.go b/vendor/gopkg.in/go-playground/validator.v9/struct_level.go
deleted file mode 100644
index 16c620d3..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/struct_level.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package validator
-
-import (
-	"context"
-	"reflect"
-)
-
-// StructLevelFunc accepts all values needed for struct level validation
-type StructLevelFunc func(sl StructLevel)
-
-// StructLevelFuncCtx accepts all values needed for struct level validation
-// but also allows passing of contextual validation information via context.Context.
-type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
-
-// wrapStructLevelFunc wraps a normal StructLevelFunc and makes it compatible with StructLevelFuncCtx
-func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
-	return func(ctx context.Context, sl StructLevel) {
-		fn(sl)
-	}
-}
-
-// StructLevel contains all the information and helper functions
-// to validate a struct
-type StructLevel interface {
-
-	// returns the main validation object, in case one wants to call validations internally.
-	// this is so you don't have to use anonymous functions to get access to the validate
-	// instance.
-	Validator() *Validate
-
-	// returns the top level struct, if any
-	Top() reflect.Value
-
-	// returns the current fields parent struct, if any
-	Parent() reflect.Value
-
-	// returns the current struct.
-	Current() reflect.Value
-
-	// ExtractType gets the actual underlying type of field value.
-	// It will dive into pointers, customTypes and return you the
-	// underlying value and its kind.
-	ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
-
-	// reports an error just by passing the field and tag information
-	//
-	// NOTES:
-	//
-	// fieldName and altName get appended to the existing namespace that
-	// validator is on. eg. pass 'FirstName' or 'Names[0]' depending
-	// on the nesting
-	//
-	// tag can be an existing validation tag or just something you make up
-	// and process on the flip side it's up to you.
-	ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
-
-	// reports an error just by passing ValidationErrors
-	//
-	// NOTES:
-	//
-	// relativeNamespace and relativeActualNamespace get appended to the
-	// existing namespace that validator is on.
-	// eg. pass 'User.FirstName' or 'Users[0].FirstName' depending
-	// on the nesting. most of the time they will be blank, unless you validate
-	// at a level lower than the current field depth
-	ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
-}
-
-var _ StructLevel = new(validate)
-
-// Top returns the top level struct
-//
-// NOTE: this can be the same as the current struct being validated
-// if it is not a nested struct.
-//
-// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an accurate value otherwise.
-func (v *validate) Top() reflect.Value {
-	return v.top
-}
-
-// Parent returns the current structs parent
-//
-// NOTE: this can be the same as the current struct being validated
-// if it is not a nested struct.
-//
-// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an accurate value otherwise.
-func (v *validate) Parent() reflect.Value {
-	return v.slflParent
-}
-
-// Current returns the current struct.
-func (v *validate) Current() reflect.Value {
-	return v.slCurrent
-}
-
-// Validator returns the main validation object, in case one wants to call validations internally.
-func (v *validate) Validator() *Validate {
-	return v.v
-}
-
-// ExtractType gets the actual underlying type of field value.
-func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
-	return v.extractTypeInternal(field, false)
-}
-
-// ReportError reports an error just by passing the field and tag information
-func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
-
-	fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
-
-	if len(structFieldName) == 0 {
-		structFieldName = fieldName
-	}
-
-	v.str1 = string(append(v.ns, fieldName...))
-
-	if v.v.hasTagNameFunc || fieldName != structFieldName {
-		v.str2 = string(append(v.actualNs, structFieldName...))
-	} else {
-		v.str2 = v.str1
-	}
-
-	if kind == reflect.Invalid {
-
-		v.errs = append(v.errs,
-			&fieldError{
-				v:              v.v,
-				tag:            tag,
-				actualTag:      tag,
-				ns:             v.str1,
-				structNs:       v.str2,
-				fieldLen:       uint8(len(fieldName)),
-				structfieldLen: uint8(len(structFieldName)),
-				param:          param,
-				kind:           kind,
-			},
-		)
-		return
-	}
-
-	v.errs = append(v.errs,
-		&fieldError{
-			v:              v.v,
-			tag:            tag,
-			actualTag:      tag,
-			ns:             v.str1,
-			structNs:       v.str2,
-			fieldLen:       uint8(len(fieldName)),
-			structfieldLen: uint8(len(structFieldName)),
-			value:          fv.Interface(),
-			param:          param,
-			kind:           kind,
-			typ:            fv.Type(),
-		},
-	)
-}
-
-// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
-//
-// NOTE: this function prepends the current namespace to the relative ones.
-func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
-
-	var err *fieldError
-
-	for i := 0; i < len(errs); i++ {
-
-		err = errs[i].(*fieldError)
-		err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
-		err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
-
-		v.errs = append(v.errs, err)
-	}
-}
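
The StructLevel interface removed above backs RegisterStructValidation. A minimal sketch of a cross-field rule using Current() and ReportError(); the Account type and the "required_without" tag are illustrative only:

    package main

    import validator "gopkg.in/go-playground/validator.v9"

    type Account struct {
    	Email string
    	Phone string
    }

    func main() {
    	v := validator.New()
    	v.RegisterStructValidation(func(sl validator.StructLevel) {
    		acc := sl.Current().Interface().(Account)
    		if acc.Email == "" && acc.Phone == "" {
    			// tag and param are free-form here, per the ReportError notes above
    			sl.ReportError(acc.Email, "Email", "Email", "required_without", "Phone")
    		}
    	}, Account{})

    	_ = v.Struct(Account{}) // triggers the struct-level rule
    }
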
diff --git a/vendor/gopkg.in/go-playground/validator.v9/translations.go b/vendor/gopkg.in/go-playground/validator.v9/translations.go
deleted file mode 100644
index 4465abbb..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/translations.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package validator
-
-import ut "github.com/go-playground/universal-translator"
-
-// TranslationFunc is the function type used to register or override
-// custom translations
-type TranslationFunc func(ut ut.Translator, fe FieldError) string
-
-// RegisterTranslationsFunc allows for registering of translations
-// for a 'ut.Translator' for use withing the 'TranslationFunc'
-type RegisterTranslationsFunc func(ut ut.Translator) error
diff --git a/vendor/gopkg.in/go-playground/validator.v9/util.go b/vendor/gopkg.in/go-playground/validator.v9/util.go
deleted file mode 100644
index 16a5517c..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/util.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package validator
-
-import (
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// extractTypeInternal gets the actual underlying type of field value.
-// It will dive into pointers, customTypes and return you the
-// underlying value and its kind.
-func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
-
-BEGIN:
-	switch current.Kind() {
-	case reflect.Ptr:
-
-		nullable = true
-
-		if current.IsNil() {
-			return current, reflect.Ptr, nullable
-		}
-
-		current = current.Elem()
-		goto BEGIN
-
-	case reflect.Interface:
-
-		nullable = true
-
-		if current.IsNil() {
-			return current, reflect.Interface, nullable
-		}
-
-		current = current.Elem()
-		goto BEGIN
-
-	case reflect.Invalid:
-		return current, reflect.Invalid, nullable
-
-	default:
-
-		if v.v.hasCustomFuncs {
-
-			if fn, ok := v.v.customFuncs[current.Type()]; ok {
-				current = reflect.ValueOf(fn(current))
-				goto BEGIN
-			}
-		}
-
-		return current, current.Kind(), nullable
-	}
-}
-
-// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
-// returns the field, field kind and whether it was successful in retrieving the field at all.
-//
-// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
-// could not be retrieved because it didn't exist.
-func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, found bool) {
-
-BEGIN:
-	current, kind, _ = v.ExtractType(val)
-
-	if kind == reflect.Invalid {
-		return
-	}
-
-	if namespace == "" {
-		found = true
-		return
-	}
-
-	switch kind {
-
-	case reflect.Ptr, reflect.Interface:
-		return
-
-	case reflect.Struct:
-
-		typ := current.Type()
-		fld := namespace
-		var ns string
-
-		if typ != timeType {
-
-			idx := strings.Index(namespace, namespaceSeparator)
-
-			if idx != -1 {
-				fld = namespace[:idx]
-				ns = namespace[idx+1:]
-			} else {
-				ns = ""
-			}
-
-			bracketIdx := strings.Index(fld, leftBracket)
-			if bracketIdx != -1 {
-				fld = fld[:bracketIdx]
-
-				ns = namespace[bracketIdx:]
-			}
-
-			val = current.FieldByName(fld)
-			namespace = ns
-			goto BEGIN
-		}
-
-	case reflect.Array, reflect.Slice:
-		idx := strings.Index(namespace, leftBracket)
-		idx2 := strings.Index(namespace, rightBracket)
-
-		arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
-
-		if arrIdx >= current.Len() {
-			return current, kind, false
-		}
-
-		startIdx := idx2 + 1
-
-		if startIdx < len(namespace) {
-			if namespace[startIdx:startIdx+1] == namespaceSeparator {
-				startIdx++
-			}
-		}
-
-		val = current.Index(arrIdx)
-		namespace = namespace[startIdx:]
-		goto BEGIN
-
-	case reflect.Map:
-		idx := strings.Index(namespace, leftBracket) + 1
-		idx2 := strings.Index(namespace, rightBracket)
-
-		endIdx := idx2
-
-		if endIdx+1 < len(namespace) {
-			if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
-				endIdx++
-			}
-		}
-
-		key := namespace[idx:idx2]
-
-		switch current.Type().Key().Kind() {
-		case reflect.Int:
-			i, _ := strconv.Atoi(key)
-			val = current.MapIndex(reflect.ValueOf(i))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Int8:
-			i, _ := strconv.ParseInt(key, 10, 8)
-			val = current.MapIndex(reflect.ValueOf(int8(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Int16:
-			i, _ := strconv.ParseInt(key, 10, 16)
-			val = current.MapIndex(reflect.ValueOf(int16(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Int32:
-			i, _ := strconv.ParseInt(key, 10, 32)
-			val = current.MapIndex(reflect.ValueOf(int32(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Int64:
-			i, _ := strconv.ParseInt(key, 10, 64)
-			val = current.MapIndex(reflect.ValueOf(i))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Uint:
-			i, _ := strconv.ParseUint(key, 10, 0)
-			val = current.MapIndex(reflect.ValueOf(uint(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Uint8:
-			i, _ := strconv.ParseUint(key, 10, 8)
-			val = current.MapIndex(reflect.ValueOf(uint8(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Uint16:
-			i, _ := strconv.ParseUint(key, 10, 16)
-			val = current.MapIndex(reflect.ValueOf(uint16(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Uint32:
-			i, _ := strconv.ParseUint(key, 10, 32)
-			val = current.MapIndex(reflect.ValueOf(uint32(i)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Uint64:
-			i, _ := strconv.ParseUint(key, 10, 64)
-			val = current.MapIndex(reflect.ValueOf(i))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Float32:
-			f, _ := strconv.ParseFloat(key, 32)
-			val = current.MapIndex(reflect.ValueOf(float32(f)))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Float64:
-			f, _ := strconv.ParseFloat(key, 64)
-			val = current.MapIndex(reflect.ValueOf(f))
-			namespace = namespace[endIdx+1:]
-
-		case reflect.Bool:
-			b, _ := strconv.ParseBool(key)
-			val = current.MapIndex(reflect.ValueOf(b))
-			namespace = namespace[endIdx+1:]
-
-		// reflect.Type = string
-		default:
-			val = current.MapIndex(reflect.ValueOf(key))
-			namespace = namespace[endIdx+1:]
-		}
-
-		goto BEGIN
-	}
-
-	// if we got here there was more namespace, but we cannot go any deeper
-	panic("Invalid field namespace")
-}
-
-// asInt returns the parameter as an int64
-// or panics if it can't convert
-func asInt(param string) int64 {
-
-	i, err := strconv.ParseInt(param, 0, 64)
-	panicIf(err)
-
-	return i
-}
-
-// asUint returns the parameter as a uint64
-// or panics if it can't convert
-func asUint(param string) uint64 {
-
-	i, err := strconv.ParseUint(param, 0, 64)
-	panicIf(err)
-
-	return i
-}
-
-// asFloat returns the parameter as a float64
-// or panics if it can't convert
-func asFloat(param string) float64 {
-
-	i, err := strconv.ParseFloat(param, 64)
-	panicIf(err)
-
-	return i
-}
-
-func panicIf(err error) {
-	if err != nil {
-		panic(err.Error())
-	}
-}
diff --git a/vendor/gopkg.in/go-playground/validator.v9/validator.go b/vendor/gopkg.in/go-playground/validator.v9/validator.go
deleted file mode 100644
index 483e0a2b..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/validator.go
+++ /dev/null
@@ -1,475 +0,0 @@
-package validator
-
-import (
-	"context"
-	"fmt"
-	"reflect"
-	"strconv"
-)
-
-// per validate construct
-type validate struct {
-	v              *Validate
-	top            reflect.Value
-	ns             []byte
-	actualNs       []byte
-	errs           ValidationErrors
-	includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
-	ffn            FilterFunc
-	slflParent     reflect.Value // StructLevel & FieldLevel
-	slCurrent      reflect.Value // StructLevel & FieldLevel
-	flField        reflect.Value // StructLevel & FieldLevel
-	cf             *cField       // StructLevel & FieldLevel
-	ct             *cTag         // StructLevel & FieldLevel
-	misc           []byte        // misc reusable
-	str1           string        // misc reusable
-	str2           string        // misc reusable
-	fldIsPointer   bool          // StructLevel & FieldLevel
-	isPartial      bool
-	hasExcludes    bool
-}
-
-// parent and current will be the same on the first run of validateStruct
-func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
-
-	cs, ok := v.v.structCache.Get(typ)
-	if !ok {
-		cs = v.v.extractStructCache(current, typ.Name())
-	}
-
-	if len(ns) == 0 && len(cs.name) != 0 {
-
-		ns = append(ns, cs.name...)
-		ns = append(ns, '.')
-
-		structNs = append(structNs, cs.name...)
-		structNs = append(structNs, '.')
-	}
-
-	// ct is nil on top level struct, and structs as fields that have no tag info
-	// so if nil or if not nil and the structonly tag isn't present
-	if ct == nil || ct.typeof != typeStructOnly {
-
-		var f *cField
-
-		for i := 0; i < len(cs.fields); i++ {
-
-			f = cs.fields[i]
-
-			if v.isPartial {
-
-				if v.ffn != nil {
-					// used with StructFiltered
-					if v.ffn(append(structNs, f.name...)) {
-						continue
-					}
-
-				} else {
-					// used with StructPartial & StructExcept
-					_, ok = v.includeExclude[string(append(structNs, f.name...))]
-
-					if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
-						continue
-					}
-				}
-			}
-
-			v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags)
-		}
-	}
-
-	// check if any struct level validations, after all field validations already checked.
-	// first iteration will have no info about nostructlevel tag, and is checked prior to
-	// calling the next iteration of validateStruct called from traverseField.
-	if cs.fn != nil {
-
-		v.slflParent = parent
-		v.slCurrent = current
-		v.ns = ns
-		v.actualNs = structNs
-
-		cs.fn(ctx, v)
-	}
-}
-
-// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
-func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
-
-	var typ reflect.Type
-	var kind reflect.Kind
-
-	current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
-
-	switch kind {
-	case reflect.Ptr, reflect.Interface, reflect.Invalid:
-
-		if ct == nil {
-			return
-		}
-
-		if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
-			return
-		}
-
-		if ct.hasTag {
-
-			v.str1 = string(append(ns, cf.altName...))
-
-			if v.v.hasTagNameFunc {
-				v.str2 = string(append(structNs, cf.name...))
-			} else {
-				v.str2 = v.str1
-			}
-
-			if kind == reflect.Invalid {
-				v.errs = append(v.errs,
-					&fieldError{
-						v:              v.v,
-						tag:            ct.aliasTag,
-						actualTag:      ct.tag,
-						ns:             v.str1,
-						structNs:       v.str2,
-						fieldLen:       uint8(len(cf.altName)),
-						structfieldLen: uint8(len(cf.name)),
-						param:          ct.param,
-						kind:           kind,
-					},
-				)
-
-				return
-			}
-
-			v.errs = append(v.errs,
-				&fieldError{
-					v:              v.v,
-					tag:            ct.aliasTag,
-					actualTag:      ct.tag,
-					ns:             v.str1,
-					structNs:       v.str2,
-					fieldLen:       uint8(len(cf.altName)),
-					structfieldLen: uint8(len(cf.name)),
-					value:          current.Interface(),
-					param:          ct.param,
-					kind:           kind,
-					typ:            current.Type(),
-				},
-			)
-
-			return
-		}
-
-	case reflect.Struct:
-
-		typ = current.Type()
-
-		if typ != timeType {
-
-			if ct != nil {
-
-				if ct.typeof == typeStructOnly {
-					goto CONTINUE
-				} else if ct.typeof == typeIsDefault {
-					// set Field Level fields
-					v.slflParent = parent
-					v.flField = current
-					v.cf = cf
-					v.ct = ct
-
-					if !ct.fn(ctx, v) {
-						v.str1 = string(append(ns, cf.altName...))
-
-						if v.v.hasTagNameFunc {
-							v.str2 = string(append(structNs, cf.name...))
-						} else {
-							v.str2 = v.str1
-						}
-
-						v.errs = append(v.errs,
-							&fieldError{
-								v:              v.v,
-								tag:            ct.aliasTag,
-								actualTag:      ct.tag,
-								ns:             v.str1,
-								structNs:       v.str2,
-								fieldLen:       uint8(len(cf.altName)),
-								structfieldLen: uint8(len(cf.name)),
-								value:          current.Interface(),
-								param:          ct.param,
-								kind:           kind,
-								typ:            typ,
-							},
-						)
-						return
-					}
-				}
-
-				ct = ct.next
-			}
-
-			if ct != nil && ct.typeof == typeNoStructLevel {
-				return
-			}
-
-		CONTINUE:
-			// if len == 0 then validating using 'Var' or 'VarWithValue'
-			// Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
-			// VarWithField - this allows for validating against each field within the struct against a specific value
-			//                pretty handy in certain situations
-			if len(cf.name) > 0 {
-				ns = append(append(ns, cf.altName...), '.')
-				structNs = append(append(structNs, cf.name...), '.')
-			}
-
-			v.validateStruct(ctx, current, current, typ, ns, structNs, ct)
-			return
-		}
-	}
-
-	if !ct.hasTag {
-		return
-	}
-
-	typ = current.Type()
-
-OUTER:
-	for {
-		if ct == nil {
-			return
-		}
-
-		switch ct.typeof {
-
-		case typeOmitEmpty:
-
-			// set Field Level fields
-			v.slflParent = parent
-			v.flField = current
-			v.cf = cf
-			v.ct = ct
-
-			if !v.fldIsPointer && !hasValue(v) {
-				return
-			}
-
-			ct = ct.next
-			continue
-
-		case typeEndKeys:
-			return
-
-		case typeDive:
-
-			ct = ct.next
-
-			// traverse slice or map here
-			// or panic ;)
-			switch kind {
-			case reflect.Slice, reflect.Array:
-
-				var i64 int64
-				reusableCF := &cField{}
-
-				for i := 0; i < current.Len(); i++ {
-
-					i64 = int64(i)
-
-					v.misc = append(v.misc[0:0], cf.name...)
-					v.misc = append(v.misc, '[')
-					v.misc = strconv.AppendInt(v.misc, i64, 10)
-					v.misc = append(v.misc, ']')
-
-					reusableCF.name = string(v.misc)
-
-					if cf.namesEqual {
-						reusableCF.altName = reusableCF.name
-					} else {
-
-						v.misc = append(v.misc[0:0], cf.altName...)
-						v.misc = append(v.misc, '[')
-						v.misc = strconv.AppendInt(v.misc, i64, 10)
-						v.misc = append(v.misc, ']')
-
-						reusableCF.altName = string(v.misc)
-					}
-					v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
-				}
-
-			case reflect.Map:
-
-				var pv string
-				reusableCF := &cField{}
-
-				for _, key := range current.MapKeys() {
-
-					pv = fmt.Sprintf("%v", key.Interface())
-
-					v.misc = append(v.misc[0:0], cf.name...)
-					v.misc = append(v.misc, '[')
-					v.misc = append(v.misc, pv...)
-					v.misc = append(v.misc, ']')
-
-					reusableCF.name = string(v.misc)
-
-					if cf.namesEqual {
-						reusableCF.altName = reusableCF.name
-					} else {
-						v.misc = append(v.misc[0:0], cf.altName...)
-						v.misc = append(v.misc, '[')
-						v.misc = append(v.misc, pv...)
-						v.misc = append(v.misc, ']')
-
-						reusableCF.altName = string(v.misc)
-					}
-
-					if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
-						v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
-						// can be nil when just keys being validated
-						if ct.next != nil {
-							v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
-						}
-					} else {
-						v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
-					}
-				}
-
-			default:
-				// throw error, if not a slice or map then should not have gotten here
-				// bad dive tag
-				panic("dive error! can't dive on a non slice or map")
-			}
-
-			return
-
-		case typeOr:
-
-			v.misc = v.misc[0:0]
-
-			for {
-
-				// set Field Level fields
-				v.slflParent = parent
-				v.flField = current
-				v.cf = cf
-				v.ct = ct
-
-				if ct.fn(ctx, v) {
-
-					// drain rest of the 'or' values, then continue or leave
-					for {
-
-						ct = ct.next
-
-						if ct == nil {
-							return
-						}
-
-						if ct.typeof != typeOr {
-							continue OUTER
-						}
-					}
-				}
-
-				v.misc = append(v.misc, '|')
-				v.misc = append(v.misc, ct.tag...)
-
-				if ct.hasParam {
-					v.misc = append(v.misc, '=')
-					v.misc = append(v.misc, ct.param...)
-				}
-
-				if ct.isBlockEnd || ct.next == nil {
-					// if we get here, no valid 'or' value and no more tags
-					v.str1 = string(append(ns, cf.altName...))
-
-					if v.v.hasTagNameFunc {
-						v.str2 = string(append(structNs, cf.name...))
-					} else {
-						v.str2 = v.str1
-					}
-
-					if ct.hasAlias {
-
-						v.errs = append(v.errs,
-							&fieldError{
-								v:              v.v,
-								tag:            ct.aliasTag,
-								actualTag:      ct.actualAliasTag,
-								ns:             v.str1,
-								structNs:       v.str2,
-								fieldLen:       uint8(len(cf.altName)),
-								structfieldLen: uint8(len(cf.name)),
-								value:          current.Interface(),
-								param:          ct.param,
-								kind:           kind,
-								typ:            typ,
-							},
-						)
-
-					} else {
-
-						tVal := string(v.misc)[1:]
-
-						v.errs = append(v.errs,
-							&fieldError{
-								v:              v.v,
-								tag:            tVal,
-								actualTag:      tVal,
-								ns:             v.str1,
-								structNs:       v.str2,
-								fieldLen:       uint8(len(cf.altName)),
-								structfieldLen: uint8(len(cf.name)),
-								value:          current.Interface(),
-								param:          ct.param,
-								kind:           kind,
-								typ:            typ,
-							},
-						)
-					}
-
-					return
-				}
-
-				ct = ct.next
-			}
-
-		default:
-
-			// set Field Level fields
-			v.slflParent = parent
-			v.flField = current
-			v.cf = cf
-			v.ct = ct
-
-			if !ct.fn(ctx, v) {
-
-				v.str1 = string(append(ns, cf.altName...))
-
-				if v.v.hasTagNameFunc {
-					v.str2 = string(append(structNs, cf.name...))
-				} else {
-					v.str2 = v.str1
-				}
-
-				v.errs = append(v.errs,
-					&fieldError{
-						v:              v.v,
-						tag:            ct.aliasTag,
-						actualTag:      ct.tag,
-						ns:             v.str1,
-						structNs:       v.str2,
-						fieldLen:       uint8(len(cf.altName)),
-						structfieldLen: uint8(len(cf.name)),
-						value:          current.Interface(),
-						param:          ct.param,
-						kind:           kind,
-						typ:            typ,
-					},
-				)
-
-				return
-			}
-			ct = ct.next
-		}
-	}
-
-}
diff --git a/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go b/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go
deleted file mode 100644
index e84b452d..00000000
--- a/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go
+++ /dev/null
@@ -1,586 +0,0 @@
-package validator
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"time"
-
-	ut "github.com/go-playground/universal-translator"
-)
-
-const (
-	defaultTagName     = "validate"
-	utf8HexComma       = "0x2C"
-	utf8Pipe           = "0x7C"
-	tagSeparator       = ","
-	orSeparator        = "|"
-	tagKeySeparator    = "="
-	structOnlyTag      = "structonly"
-	noStructLevelTag   = "nostructlevel"
-	omitempty          = "omitempty"
-	isdefault          = "isdefault"
-	skipValidationTag  = "-"
-	diveTag            = "dive"
-	keysTag            = "keys"
-	endKeysTag         = "endkeys"
-	requiredTag        = "required"
-	namespaceSeparator = "."
-	leftBracket        = "["
-	rightBracket       = "]"
-	restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
-	restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
-	restrictedTagErr   = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
-)
-
-var (
-	timeType      = reflect.TypeOf(time.Time{})
-	defaultCField = &cField{namesEqual: true}
-)
-
-// FilterFunc is the type used to filter fields using
-// StructFiltered(...) function.
-// returning true results in the field being filtered/skipped from
-// validation
-type FilterFunc func(ns []byte) bool
-
-// CustomTypeFunc allows for overriding or adding custom field type handler functions
-// field = field value of the type to return a value to be validated
-// example Valuer from sql driver see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
-type CustomTypeFunc func(field reflect.Value) interface{}
-
-// TagNameFunc allows for adding of a custom tag name parser
-type TagNameFunc func(field reflect.StructField) string
-
-// Validate contains the validator settings and cache
-type Validate struct {
-	tagName          string
-	pool             *sync.Pool
-	hasCustomFuncs   bool
-	hasTagNameFunc   bool
-	tagNameFunc      TagNameFunc
-	structLevelFuncs map[reflect.Type]StructLevelFuncCtx
-	customFuncs      map[reflect.Type]CustomTypeFunc
-	aliases          map[string]string
-	validations      map[string]FuncCtx
-	transTagFunc     map[ut.Translator]map[string]TranslationFunc // map[<locale>]map[<tag>]TranslationFunc
-	tagCache         *tagCache
-	structCache      *structCache
-}
-
-// New returns a new instance of 'validate' with sane defaults.
-func New() *Validate {
-
-	tc := new(tagCache)
-	tc.m.Store(make(map[string]*cTag))
-
-	sc := new(structCache)
-	sc.m.Store(make(map[reflect.Type]*cStruct))
-
-	v := &Validate{
-		tagName:     defaultTagName,
-		aliases:     make(map[string]string, len(bakedInAliases)),
-		validations: make(map[string]FuncCtx, len(bakedInValidators)),
-		tagCache:    tc,
-		structCache: sc,
-	}
-
-	// must copy alias validators for separate validations to be used in each validator instance
-	for k, val := range bakedInAliases {
-		v.RegisterAlias(k, val)
-	}
-
-	// must copy validators for separate validations to be used in each instance
-	for k, val := range bakedInValidators {
-
-		// no need to error check here, baked in will always be valid
-		v.registerValidation(k, wrapFunc(val), true)
-	}
-
-	v.pool = &sync.Pool{
-		New: func() interface{} {
-			return &validate{
-				v:        v,
-				ns:       make([]byte, 0, 64),
-				actualNs: make([]byte, 0, 64),
-				misc:     make([]byte, 32),
-			}
-		},
-	}
-
-	return v
-}
-
-// SetTagName allows for changing of the default tag name of 'validate'
-func (v *Validate) SetTagName(name string) {
-	v.tagName = name
-}
-
-// RegisterTagNameFunc registers a function to get another name from the
-// StructField eg. the JSON name
-func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
-	v.tagNameFunc = fn
-	v.hasTagNameFunc = true
-}
-
-// RegisterValidation adds a validation with the given tag
-//
-// NOTES:
-// - if the key already exists, the previous validation function will be replaced.
-// - this method is not thread-safe; it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterValidation(tag string, fn Func) error {
-	return v.RegisterValidationCtx(tag, wrapFunc(fn))
-}
-
-// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation
-// allowing context.Context validation support.
-func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx) error {
-	return v.registerValidation(tag, fn, false)
-}
-
-func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool) error {
-
-	if len(tag) == 0 {
-		return errors.New("Function Key cannot be empty")
-	}
-
-	if fn == nil {
-		return errors.New("Function cannot be empty")
-	}
-
-	_, ok := restrictedTags[tag]
-
-	if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
-		panic(fmt.Sprintf(restrictedTagErr, tag))
-	}
-
-	v.validations[tag] = fn
-
-	return nil
-}
-
-// RegisterAlias registers a mapping of a single validation tag that
-// defines a common or complex set of validation(s) to simplify adding validation
-// to structs.
-//
-// NOTE: this function is not thread-safe; it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterAlias(alias, tags string) {
-
-	_, ok := restrictedTags[alias]
-
-	if ok || strings.ContainsAny(alias, restrictedTagChars) {
-		panic(fmt.Sprintf(restrictedAliasErr, alias))
-	}
-
-	v.aliases[alias] = tags
-}
-
-// RegisterStructValidation registers a StructLevelFunc against a number of types.
-//
-// NOTE:
-// - this method is not thread-safe; it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
-	v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
-}
-
-// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
-// of contextual validation information via context.Context.
-//
-// NOTE:
-// - this method is not thread-safe; it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
-
-	if v.structLevelFuncs == nil {
-		v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
-	}
-
-	for _, t := range types {
-		v.structLevelFuncs[reflect.TypeOf(t)] = fn
-	}
-}
-
-// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
-//
-// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
-
-	if v.customFuncs == nil {
-		v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
-	}
-
-	for _, t := range types {
-		v.customFuncs[reflect.TypeOf(t)] = fn
-	}
-
-	v.hasCustomFuncs = true
-}
-
-// RegisterTranslation registers translations against the provided tag.
-func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
-
-	if v.transTagFunc == nil {
-		v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
-	}
-
-	if err = registerFn(trans); err != nil {
-		return
-	}
-
-	m, ok := v.transTagFunc[trans]
-	if !ok {
-		m = make(map[string]TranslationFunc)
-		v.transTagFunc[trans] = m
-	}
-
-	m[tag] = translationFn
-
-	return
-}
-
-// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) Struct(s interface{}) error {
-	return v.StructCtx(context.Background(), s)
-}
-
-// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified
-// and also allows passing of context.Context for contextual validation information.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
-
-	val := reflect.ValueOf(s)
-	top := val
-
-	if val.Kind() == reflect.Ptr && !val.IsNil() {
-		val = val.Elem()
-	}
-
-	if val.Kind() != reflect.Struct || val.Type() == timeType {
-		return &InvalidValidationError{Type: reflect.TypeOf(s)}
-	}
-
-	// good to validate
-	vd := v.pool.Get().(*validate)
-	vd.top = top
-	vd.isPartial = false
-	// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
-
-	vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-
-	v.pool.Put(vd)
-
-	return
-}
-
-// StructFiltered validates a struct's exposed fields that pass the FilterFunc check and automatically validates
-// nested structs, unless otherwise specified.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
-	return v.StructFilteredCtx(context.Background(), s, fn)
-}
-
-// StructFilteredCtx validates a struct's exposed fields that pass the FilterFunc check and automatically validates
-// nested structs, unless otherwise specified and also allows passing of contextual validation information via
-// context.Context
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
-	val := reflect.ValueOf(s)
-	top := val
-
-	if val.Kind() == reflect.Ptr && !val.IsNil() {
-		val = val.Elem()
-	}
-
-	if val.Kind() != reflect.Struct || val.Type() == timeType {
-		return &InvalidValidationError{Type: reflect.TypeOf(s)}
-	}
-
-	// good to validate
-	vd := v.pool.Get().(*validate)
-	vd.top = top
-	vd.isPartial = true
-	vd.ffn = fn
-	// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
-
-	vd.validateStruct(context.Background(), top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-
-	v.pool.Put(vd)
-
-	return
-}
-
-// StructPartial validates the fields passed in only, ignoring all others.
-// Fields may be provided in a namespaced fashion relative to the  struct provided
-// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructPartial(s interface{}, fields ...string) error {
-	return v.StructPartialCtx(context.Background(), s, fields...)
-}
-
-// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
-// validation information via context.Context
-// Fields may be provided in a namespaced fashion relative to the  struct provided
-// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
-	val := reflect.ValueOf(s)
-	top := val
-
-	if val.Kind() == reflect.Ptr && !val.IsNil() {
-		val = val.Elem()
-	}
-
-	if val.Kind() != reflect.Struct || val.Type() == timeType {
-		return &InvalidValidationError{Type: reflect.TypeOf(s)}
-	}
-
-	// good to validate
-	vd := v.pool.Get().(*validate)
-	vd.top = top
-	vd.isPartial = true
-	vd.ffn = nil
-	vd.hasExcludes = false
-	vd.includeExclude = make(map[string]struct{})
-
-	typ := val.Type()
-	name := typ.Name()
-
-	for _, k := range fields {
-
-		flds := strings.Split(k, namespaceSeparator)
-		if len(flds) > 0 {
-
-			vd.misc = append(vd.misc[0:0], name...)
-			vd.misc = append(vd.misc, '.')
-
-			for _, s := range flds {
-
-				idx := strings.Index(s, leftBracket)
-
-				if idx != -1 {
-					for idx != -1 {
-						vd.misc = append(vd.misc, s[:idx]...)
-						vd.includeExclude[string(vd.misc)] = struct{}{}
-
-						idx2 := strings.Index(s, rightBracket)
-						idx2++
-						vd.misc = append(vd.misc, s[idx:idx2]...)
-						vd.includeExclude[string(vd.misc)] = struct{}{}
-						s = s[idx2:]
-						idx = strings.Index(s, leftBracket)
-					}
-				} else {
-
-					vd.misc = append(vd.misc, s...)
-					vd.includeExclude[string(vd.misc)] = struct{}{}
-				}
-
-				vd.misc = append(vd.misc, '.')
-			}
-		}
-	}
-
-	vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-
-	v.pool.Put(vd)
-
-	return
-}
-
-// StructExcept validates all fields except the ones passed in.
-// Fields may be provided in a namespaced fashion relative to the  struct provided
-// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructExcept(s interface{}, fields ...string) error {
-	return v.StructExceptCtx(context.Background(), s, fields...)
-}
-
-// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
-// validation information via context.Context
-// Fields may be provided in a namespaced fashion relative to the  struct provided
-// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
-	val := reflect.ValueOf(s)
-	top := val
-
-	if val.Kind() == reflect.Ptr && !val.IsNil() {
-		val = val.Elem()
-	}
-
-	if val.Kind() != reflect.Struct || val.Type() == timeType {
-		return &InvalidValidationError{Type: reflect.TypeOf(s)}
-	}
-
-	// good to validate
-	vd := v.pool.Get().(*validate)
-	vd.top = top
-	vd.isPartial = true
-	vd.ffn = nil
-	vd.hasExcludes = true
-	vd.includeExclude = make(map[string]struct{})
-
-	typ := val.Type()
-	name := typ.Name()
-
-	for _, key := range fields {
-
-		vd.misc = vd.misc[0:0]
-
-		if len(name) > 0 {
-			vd.misc = append(vd.misc, name...)
-			vd.misc = append(vd.misc, '.')
-		}
-
-		vd.misc = append(vd.misc, key...)
-		vd.includeExclude[string(vd.misc)] = struct{}{}
-	}
-
-	vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-
-	v.pool.Put(vd)
-
-	return
-}
-
-// Var validates a single variable using tag style validation.
-// eg.
-// var i int
-// validate.Var(i, "gt=1,lt=10")
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) Var(field interface{}, tag string) error {
-	return v.VarCtx(context.Background(), field, tag)
-}
-
-// VarCtx validates a single variable using tag style validation and allows passing of contextual
-// validation information via context.Context.
-// eg.
-// var i int
-// validate.Var(i, "gt=1,lt=10")
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
-	if len(tag) == 0 || tag == skipValidationTag {
-		return nil
-	}
-
-	ctag := v.fetchCacheTag(tag)
-	val := reflect.ValueOf(field)
-	vd := v.pool.Get().(*validate)
-	vd.top = val
-	vd.isPartial = false
-	vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-	v.pool.Put(vd)
-	return
-}
-
-// VarWithValue validates a single variable, against another variable/field's value using tag style validation
-// eg.
-// s1 := "abcd"
-// s2 := "abcd"
-// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
-	return v.VarWithValueCtx(context.Background(), field, other, tag)
-}
-
-// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
-// allows passing of contextual validation information via context.Context.
-// eg.
-// s1 := "abcd"
-// s2 := "abcd"
-// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
-	if len(tag) == 0 || tag == skipValidationTag {
-		return nil
-	}
-	ctag := v.fetchCacheTag(tag)
-	otherVal := reflect.ValueOf(other)
-	vd := v.pool.Get().(*validate)
-	vd.top = otherVal
-	vd.isPartial = false
-	vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
-
-	if len(vd.errs) > 0 {
-		err = vd.errs
-		vd.errs = nil
-	}
-	v.pool.Put(vd)
-	return
-}
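
For reference, a minimal sketch of how the Var and VarWithValue entry points documented in the removed file above are typically invoked (assuming the upstream gopkg.in/go-playground/validator.v9 import path; the values and tags are hypothetical):

    package main

    import (
        "fmt"

        validator "gopkg.in/go-playground/validator.v9"
    )

    func main() {
        validate := validator.New()

        // A non-nil error can be asserted to validator.ValidationErrors.
        if err := validate.Var("not-an-email", "required,email"); err != nil {
            fmt.Println(err)
        }

        // Compare one value against another, as the VarWithValue docs describe.
        s1, s2 := "abcd", "abcd"
        fmt.Println(validate.VarWithValue(s1, s2, "eqcsfield") == nil) // true
    }
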
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 00000000..866d74a7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 95ec014e..1f7e87e6 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -2,7 +2,6 @@ package yaml
 
 import (
 	"io"
-	"os"
 )
 
 func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
 	return n, nil
 }
 
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
-	return parser.input_file.Read(buffer)
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
 }
 
 // Set a string input.
@@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
 }
 
 // Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
 	if parser.read_handler != nil {
 		panic("must set the input source only once")
 	}
-	parser.read_handler = yaml_file_read_handler
-	parser.input_file = file
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
 }
 
 // Set the source encoding.
@@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
 }
 
 // Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
 	*emitter = yaml_emitter_t{
 		buffer:     make([]byte, output_buffer_size),
 		raw_buffer: make([]byte, 0, output_raw_buffer_size),
 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
 		events:     make([]yaml_event_t, 0, initial_queue_size),
 	}
-	return true
 }
 
 // Destroy an emitter object.
@@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
 	return nil
 }
 
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	_, err := emitter.output_file.Write(buffer)
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
 	return err
 }
 
@@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
 }
 
 // Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
 	if emitter.write_handler != nil {
 		panic("must set the output target only once")
 	}
-	emitter.write_handler = yaml_file_write_handler
-	emitter.output_file = file
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
 }
 
 // Set the output encoding.
@@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
 //
 
 // Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
 	*event = yaml_event_t{
 		typ:      yaml_STREAM_START_EVENT,
 		encoding: encoding,
 	}
-	return true
 }
 
 // Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_STREAM_END_EVENT,
 	}
-	return true
 }
 
 // Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
-	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
 	*event = yaml_event_t{
 		typ:               yaml_DOCUMENT_START_EVENT,
 		version_directive: version_directive,
 		tag_directives:    tag_directives,
 		implicit:          implicit,
 	}
-	return true
 }
 
 // Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
 	*event = yaml_event_t{
 		typ:      yaml_DOCUMENT_END_EVENT,
 		implicit: implicit,
 	}
-	return true
 }
 
 ///*
@@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
 }
 
 // Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
 	*event = yaml_event_t{
 		typ:      yaml_MAPPING_START_EVENT,
 		anchor:   anchor,
@@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
 		implicit: implicit,
 		style:    yaml_style_t(style),
 	}
-	return true
 }
 
 // Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_MAPPING_END_EVENT,
 	}
-	return true
 }
 
 // Destroy an event object.
@@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) {
 //    } context
 //    tag_directive *yaml_tag_directive_t
 //
-//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
 //
 //    assert(document) // Non-NULL document object is expected.
 //
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index e85eb2e3..e4e56e28 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -4,6 +4,7 @@ import (
 	"encoding"
 	"encoding/base64"
 	"fmt"
+	"io"
 	"math"
 	"reflect"
 	"strconv"
@@ -22,19 +23,22 @@ type node struct {
 	kind         int
 	line, column int
 	tag          string
-	value        string
-	implicit     bool
-	children     []*node
-	anchors      map[string]*node
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
 }
 
 // ----------------------------------------------------------------------------
 // Parser, produces a node tree out of a libyaml event stream.
 
 type parser struct {
-	parser yaml_parser_t
-	event  yaml_event_t
-	doc    *node
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
 }
 
 func newParser(b []byte) *parser {
@@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
 	if !yaml_parser_initialize(&p.parser) {
 		panic("failed to initialize YAML emitter")
 	}
-
 	if len(b) == 0 {
 		b = []byte{'\n'}
 	}
-
 	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
 
-	p.skip()
-	if p.event.typ != yaml_STREAM_START_EVENT {
-		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
 	}
-	p.skip()
+	yaml_parser_set_input_reader(&p.parser, r)
 	return &p
 }
 
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
 func (p *parser) destroy() {
 	if p.event.typ != yaml_NO_EVENT {
 		yaml_event_delete(&p.event)
@@ -64,16 +77,35 @@ func (p *parser) destroy() {
 	yaml_parser_delete(&p.parser)
 }
 
-func (p *parser) skip() {
-	if p.event.typ != yaml_NO_EVENT {
-		if p.event.typ == yaml_STREAM_END_EVENT {
-			failf("attempted to go past the end of stream; corrupted value?")
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
 		}
-		yaml_event_delete(&p.event)
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
 	}
 	if !yaml_parser_parse(&p.parser, &p.event) {
 		p.fail()
 	}
+	return p.event.typ
 }
 
 func (p *parser) fail() {
@@ -81,6 +113,10 @@ func (p *parser) fail() {
 	var line int
 	if p.parser.problem_mark.line != 0 {
 		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
 	} else if p.parser.context_mark.line != 0 {
 		line = p.parser.context_mark.line
 	}
@@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
 }
 
 func (p *parser) parse() *node {
-	switch p.event.typ {
+	p.init()
+	switch p.peek() {
 	case yaml_SCALAR_EVENT:
 		return p.scalar()
 	case yaml_ALIAS_EVENT:
@@ -118,7 +155,7 @@ func (p *parser) parse() *node {
 		// Happens when attempting to decode an empty buffer.
 		return nil
 	default:
-		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+		panic("attempted to parse unknown event: " + p.event.typ.String())
 	}
 }
 
@@ -134,19 +171,20 @@ func (p *parser) document() *node {
 	n := p.node(documentNode)
 	n.anchors = make(map[string]*node)
 	p.doc = n
-	p.skip()
+	p.expect(yaml_DOCUMENT_START_EVENT)
 	n.children = append(n.children, p.parse())
-	if p.event.typ != yaml_DOCUMENT_END_EVENT {
-		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
-	}
-	p.skip()
+	p.expect(yaml_DOCUMENT_END_EVENT)
 	return n
 }
 
 func (p *parser) alias() *node {
 	n := p.node(aliasNode)
 	n.value = string(p.event.anchor)
-	p.skip()
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
 	return n
 }
 
@@ -156,29 +194,29 @@ func (p *parser) scalar() *node {
 	n.tag = string(p.event.tag)
 	n.implicit = p.event.implicit
 	p.anchor(n, p.event.anchor)
-	p.skip()
+	p.expect(yaml_SCALAR_EVENT)
 	return n
 }
 
 func (p *parser) sequence() *node {
 	n := p.node(sequenceNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
 		n.children = append(n.children, p.parse())
 	}
-	p.skip()
+	p.expect(yaml_SEQUENCE_END_EVENT)
 	return n
 }
 
 func (p *parser) mapping() *node {
 	n := p.node(mappingNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_MAPPING_END_EVENT {
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
 		n.children = append(n.children, p.parse(), p.parse())
 	}
-	p.skip()
+	p.expect(yaml_MAPPING_END_EVENT)
 	return n
 }
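
The expect/peek pair introduced above replaces the old skip(): peek loads the next event without consuming it, and expect consumes it while asserting its type, so sequence() and mapping() can loop until they see their end event. A self-contained sketch of that pull-parser discipline (not the library's code; the event names here are invented):

    package main

    import "fmt"

    type eventType int

    const (
        streamStart eventType = iota
        scalarEvent
        streamEnd
    )

    type stream struct{ events []eventType }

    // peek looks at the next event without consuming it.
    func (s *stream) peek() eventType { return s.events[0] }

    // expect consumes the next event and asserts its type.
    func (s *stream) expect(e eventType) {
        if got := s.events[0]; got != e {
            panic(fmt.Sprintf("expected event %d but got %d", e, got))
        }
        s.events = s.events[1:]
    }

    func main() {
        s := &stream{events: []eventType{streamStart, scalarEvent, scalarEvent, streamEnd}}
        s.expect(streamStart)
        for s.peek() != streamEnd {
            s.expect(scalarEvent)
        }
        s.expect(streamEnd)
        fmt.Println("stream consumed in order")
    }
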
 
@@ -187,7 +225,7 @@ func (p *parser) mapping() *node {
 
 type decoder struct {
 	doc     *node
-	aliases map[string]bool
+	aliases map[*node]bool
 	mapType reflect.Type
 	terrors []string
 	strict  bool
@@ -198,11 +236,13 @@ var (
 	durationType   = reflect.TypeOf(time.Duration(0))
 	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
 	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
 )
 
 func newDecoder(strict bool) *decoder {
 	d := &decoder{mapType: defaultMapType, strict: strict}
-	d.aliases = make(map[string]bool)
+	d.aliases = make(map[*node]bool)
 	return d
 }
 
@@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
 }
 
 func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
-	an, ok := d.doc.anchors[n.value]
-	if !ok {
-		failf("unknown anchor '%s' referenced", n.value)
-	}
-	if d.aliases[n.value] {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
 		failf("anchor '%s' value contains itself", n.value)
 	}
-	d.aliases[n.value] = true
-	good = d.unmarshal(an, out)
-	delete(d.aliases, n.value)
+	d.aliases[n] = true
+	good = d.unmarshal(n.alias, out)
+	delete(d.aliases, n)
 	return good
 }
 
@@ -329,7 +366,7 @@ func resetMap(out reflect.Value) {
 	}
 }
 
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
 	var tag string
 	var resolved interface{}
 	if n.tag == "" && !n.implicit {
@@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		}
 		return true
 	}
-	if s, ok := resolved.(string); ok && out.CanAddr() {
-		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
-			err := u.UnmarshalText([]byte(s))
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
 			if err != nil {
 				fail(err)
 			}
@@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 	case reflect.String:
 		if tag == yaml_BINARY_TAG {
 			out.SetString(resolved.(string))
-			good = true
-		} else if resolved != nil {
+			return true
+		}
+		if resolved != nil {
 			out.SetString(n.value)
-			good = true
+			return true
 		}
 	case reflect.Interface:
 		if resolved == nil {
 			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
 		} else {
 			out.Set(reflect.ValueOf(resolved))
 		}
-		good = true
+		return true
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		switch resolved := resolved.(type) {
 		case int:
 			if !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if !out.OverflowInt(resolved) {
 				out.SetInt(resolved)
-				good = true
+				return true
 			}
 		case uint64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case string:
 			if out.Type() == durationType {
 				d, err := time.ParseDuration(resolved)
 				if err == nil {
 					out.SetInt(int64(d))
-					good = true
+					return true
 				}
 			}
 		}
@@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		case int:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case uint64:
 			if !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		}
 	case reflect.Bool:
 		switch resolved := resolved.(type) {
 		case bool:
 			out.SetBool(resolved)
-			good = true
+			return true
 		}
 	case reflect.Float32, reflect.Float64:
 		switch resolved := resolved.(type) {
 		case int:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case int64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case uint64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case float64:
 			out.SetFloat(resolved)
-			good = true
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
 		}
 	case reflect.Ptr:
 		if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 			elem := reflect.New(out.Type().Elem())
 			elem.Elem().Set(reflect.ValueOf(resolved))
 			out.Set(elem)
-			good = true
+			return true
 		}
 	}
-	if !good {
-		d.terror(n, tag, out)
-	}
-	return good
+	d.terror(n, tag, out)
+	return false
 }
 
 func settableValueOf(i interface{}) reflect.Value {
@@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 	switch out.Kind() {
 	case reflect.Slice:
 		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
 	case reflect.Interface:
 		// No type hints. Will have to use a generic sequence.
 		iface = out
@@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 			j++
 		}
 	}
-	out.Set(out.Slice(0, j))
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
 	if iface.IsValid() {
 		iface.Set(out)
 	}
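
The reflect.Array cases added above let sequences decode into fixed-size Go arrays, with a hard length check instead of the slice resizing path. A hedged usage sketch against this vendored yaml.v2:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        var coords [3]int
        if err := yaml.Unmarshal([]byte("[1, 2, 3]"), &coords); err != nil {
            panic(err)
        }
        fmt.Println(coords) // [1 2 3]

        // A length mismatch is reported as an error rather than truncated.
        var short [2]int
        fmt.Println(yaml.Unmarshal([]byte("[1, 2, 3]"), &short))
    }
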
@@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 			}
 			e := reflect.New(et).Elem()
 			if d.unmarshal(n.children[i+1], e) {
-				out.SetMapIndex(k, e)
+				d.setMapIndex(n.children[i+1], out, k, e)
 			}
 		}
 	}
@@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 	return true
 }
 
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
 func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
 	outt := out.Type()
 	if outt.Elem() != mapItemType {
@@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 		elemType = inlineMap.Type().Elem()
 	}
 
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
 	for i := 0; i < l; i += 2 {
 		ni := n.children[i]
 		if isMerge(ni) {
@@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			continue
 		}
 		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
 			var field reflect.Value
 			if info.Inline == nil {
 				field = out.Field(info.Num)
@@ -639,9 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			}
 			value := reflect.New(elemType).Elem()
 			d.unmarshal(n.children[i+1], value)
-			inlineMap.SetMapIndex(name, value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
 		} else if d.strict {
-			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", ni.line+1, name.String(), out.Type()))
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
 		}
 	}
 	return true
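
Together with setMapIndex above, the doneFields bookkeeping makes strict decoding report duplicate keys instead of letting the last occurrence win silently. A small sketch of the user-visible effect (field and key names are made up):

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type Config struct {
        Name string `yaml:"name"`
    }

    func main() {
        var c Config
        // Duplicate struct field in strict mode: reported via a TypeError.
        fmt.Println(yaml.UnmarshalStrict([]byte("name: first\nname: second\n"), &c))

        var m map[string]int
        // Duplicate map key in strict mode: also reported.
        fmt.Println(yaml.UnmarshalStrict([]byte("a: 1\na: 2\n"), &m))
    }
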
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
index dcaf502f..a1c2cc52 100644
--- a/vendor/gopkg.in/yaml.v2/emitterc.go
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -2,6 +2,7 @@ package yaml
 
 import (
 	"bytes"
+	"fmt"
 )
 
 // Flush the buffer if needed.
@@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
 		return yaml_emitter_emit_mapping_start(emitter, event)
 	default:
 		return yaml_emitter_set_emitter_error(emitter,
-			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
 	}
 }
 
@@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
 	return true
 }
 
-// Write an achor.
+// Write an anchor.
 func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
 	if emitter.anchor_data.anchor == nil {
 		return true
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
index 84f84995..a14435e8 100644
--- a/vendor/gopkg.in/yaml.v2/encode.go
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -3,12 +3,14 @@ package yaml
 import (
 	"encoding"
 	"fmt"
+	"io"
 	"reflect"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"
 )
 
 type encoder struct {
@@ -16,25 +18,39 @@ type encoder struct {
 	event   yaml_event_t
 	out     []byte
 	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
 }
 
-func newEncoder() (e *encoder) {
-	e = &encoder{}
-	e.must(yaml_emitter_initialize(&e.emitter))
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
 	yaml_emitter_set_output_string(&e.emitter, &e.out)
 	yaml_emitter_set_unicode(&e.emitter, true)
-	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
-	e.emit()
-	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
-	e.emit()
 	return e
 }
 
-func (e *encoder) finish() {
-	e.must(yaml_document_end_event_initialize(&e.event, true))
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
 	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
 	e.emitter.open_ended = false
-	e.must(yaml_stream_end_event_initialize(&e.event))
+	yaml_stream_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -44,9 +60,7 @@ func (e *encoder) destroy() {
 
 func (e *encoder) emit() {
 	// This will internally delete the e.event value.
-	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
-		e.must(false)
-	}
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
 }
 
 func (e *encoder) must(ok bool) {
@@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) {
 	}
 }
 
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
 func (e *encoder) marshal(tag string, in reflect.Value) {
-	if !in.IsValid() {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
 		e.nilv()
 		return
 	}
 	iface := in.Interface()
-	if m, ok := iface.(Marshaler); ok {
+	switch m := iface.(type) {
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
 		v, err := m.MarshalYAML()
 		if err != nil {
 			fail(err)
@@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
 			return
 		}
 		in = reflect.ValueOf(v)
-	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+	case encoding.TextMarshaler:
 		text, err := m.MarshalText()
 		if err != nil {
 			fail(err)
 		}
 		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
 	}
 	switch in.Kind() {
 	case reflect.Interface:
-		if in.IsNil() {
-			e.nilv()
-		} else {
-			e.marshal(tag, in.Elem())
-		}
+		e.marshal(tag, in.Elem())
 	case reflect.Map:
 		e.mapv(tag, in)
 	case reflect.Ptr:
-		if in.IsNil() {
-			e.nilv()
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
 		} else {
 			e.marshal(tag, in.Elem())
 		}
 	case reflect.Struct:
-		e.structv(tag, in)
-	case reflect.Slice:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
 		if in.Type().Elem() == mapItemType {
 			e.itemsv(tag, in)
 		} else {
@@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) {
 		e.flow = false
 		style = yaml_FLOW_MAPPING_STYLE
 	}
-	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
 	e.emit()
 	f()
-	e.must(yaml_mapping_end_event_initialize(&e.event))
+	yaml_mapping_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
 func (e *encoder) stringv(tag string, in reflect.Value) {
 	var style yaml_scalar_style_t
 	s := in.String()
-	rtag, rs := resolve("", s)
-	if rtag == yaml_BINARY_TAG {
-		if tag == "" || tag == yaml_STR_TAG {
-			tag = rtag
-			s = rs.(string)
-		} else if tag == yaml_BINARY_TAG {
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
 			failf("explicitly tagged !!binary data must be base64-encoded")
-		} else {
+		}
+		if tag != "" {
 			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
 		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
 	}
-	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	} else if strings.Contains(s, "\n") {
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
 		style = yaml_LITERAL_SCALAR_STYLE
-	} else {
+	case canUsePlain:
 		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
 	}
 	e.emitScalar(s, "", tag, style)
 }
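
With the rewritten stringv, invalid UTF-8 is transparently emitted as a !!binary scalar with a base64 body, while multi-line text keeps the literal block style. A small illustrative sketch (output shapes are indicative, not verbatim):

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        out, err := yaml.Marshal(map[string]string{
            "raw":  string([]byte{0xff, 0xfe}), // not valid UTF-8, becomes !!binary
            "text": "line one\nline two\n",     // keeps a literal block scalar
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
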
@@ -281,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
 	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
 }
 
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
 func (e *encoder) floatv(tag string, in reflect.Value) {
-	// FIXME: Handle 64 bits here.
-	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
 	switch s {
 	case "+Inf":
 		s = ".inf"
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
index f4507917..7c1f5fac 100644
--- a/vendor/gopkg.in/yaml.v2/readerc.go
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 		panic("read handler must be set")
 	}
 
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that this should be the case, and there are tests that rely on it.
+
 	// If the EOF flag is set and the raw buffer is empty, do nothing.
 	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
-		return true
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is either a) panicking or b) accessing invalid memory.
+		//return true
 	}
 
 	// Return if the buffer contains enough characters.
@@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 			break
 		}
 	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is either a) panicking or b) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
 	parser.buffer = parser.buffer[:buffer_len]
 	return true
 }
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
index 232313cc..6c151db6 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -6,7 +6,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-	"unicode/utf8"
+	"time"
 )
 
 type resolveMapItem struct {
@@ -75,7 +75,7 @@ func longTag(tag string) string {
 
 func resolvableTag(tag string) bool {
 	switch tag {
-	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
 		return true
 	}
 	return false
@@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 		switch tag {
 		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
 			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
 		}
 		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
 	}()
@@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 
 		case 'D', 'S':
 			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
 			plain := strings.Replace(in, "_", "", -1)
 			intv, err := strconv.ParseInt(plain, 0, 64)
 			if err == nil {
@@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 					return yaml_INT_TAG, uintv
 				}
 			} else if strings.HasPrefix(plain, "-0b") {
-				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
 				if err == nil {
-					if intv == int64(int(intv)) {
-						return yaml_INT_TAG, -int(intv)
+					if true || intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
 					} else {
-						return yaml_INT_TAG, -intv
+						return yaml_INT_TAG, intv
 					}
 				}
 			}
-			// XXX Handle timestamps here.
-
 		default:
 			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
 		}
 	}
-	if tag == yaml_BINARY_TAG {
-		return yaml_BINARY_TAG, in
-	}
-	if utf8.ValidString(in) {
-		return yaml_STR_TAG, in
-	}
-	return yaml_BINARY_TAG, encodeBase64(in)
+	return yaml_STR_TAG, in
 }
 
 // encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -206,3 +220,39 @@ func encodeBase64(s string) string {
 	}
 	return string(out[:k])
 }
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
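
parseTimestamp gives the resolver real timestamp support: unquoted timestamp-like scalars decode straight into time.Time targets, while interface{} targets still receive the original string for backward compatibility (see the decoder comment earlier in this patch). A usage sketch:

    package main

    import (
        "fmt"
        "time"

        yaml "gopkg.in/yaml.v2"
    )

    type Event struct {
        At time.Time `yaml:"at"`
    }

    func main() {
        doc := []byte("at: 2001-12-14 21:59:43.10\n")

        var e Event
        if err := yaml.Unmarshal(doc, &e); err != nil {
            panic(err)
        }
        fmt.Println(e.At.Format(time.RFC3339Nano)) // parsed as a time.Time

        var m map[string]interface{}
        _ = yaml.Unmarshal(doc, &m)
        fmt.Printf("%T\n", m["at"]) // string, for backward compatibility
    }
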
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 07448445..077fd1dd 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
 
 	required := parser.flow_level == 0 && parser.indent == parser.mark.column
 
-	// A simple key is required only when it is the first token in the current
-	// line.  Therefore it is always allowed.  But we add a check anyway.
-	if required && !parser.simple_key_allowed {
-		panic("should not happen")
-	}
-
 	//
 	// If the current position may start a simple key, save it.
 	//
@@ -2475,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 			}
 		}
 
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
 		// Check if we are at the end of the scalar.
 		if single {
 			if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2487,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 		}
 
 		// Consume blank characters.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 				// Consume a space or a tab character.
@@ -2592,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		// Consume non-blank characters.
 		for !is_blankz(parser.buffer, parser.buffer_pos) {
 
-			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
-			if parser.flow_level > 0 &&
-				parser.buffer[parser.buffer_pos] == ':' &&
-				!is_blankz(parser.buffer, parser.buffer_pos+1) {
-				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-					start_mark, "found unexpected ':'")
-				return false
-			}
-
 			// Check for indicators that may end a plain scalar.
 			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
 				(parser.flow_level > 0 &&
-					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+					(parser.buffer[parser.buffer_pos] == ',' ||
 						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
 						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
 						parser.buffer[parser.buffer_pos] == '}')) {
@@ -2656,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 
-				// Check for tab character that abuse indentation.
+				// Check for tab characters that abuse indentation.
 				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
 					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violate indentation")
+						start_mark, "found a tab character that violates indentation")
 					return false
 				}
 
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
index 5958822f..4c45e660 100644
--- a/vendor/gopkg.in/yaml.v2/sorter.go
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
 		}
 		var ai, bi int
 		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
 		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
 			an = an*10 + int64(ar[ai]-'0')
 		}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
index 190362f2..a2dde608 100644
--- a/vendor/gopkg.in/yaml.v2/writerc.go
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
 		return true
 	}
 
-	// If the output encoding is UTF-8, we don't need to recode the buffer.
-	if emitter.encoding == yaml_UTF8_ENCODING {
-		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
-			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
-		}
-		emitter.buffer_pos = 0
-		return true
-	}
-
-	// Recode the buffer into the raw buffer.
-	var low, high int
-	if emitter.encoding == yaml_UTF16LE_ENCODING {
-		low, high = 0, 1
-	} else {
-		high, low = 1, 0
-	}
-
-	pos := 0
-	for pos < emitter.buffer_pos {
-		// See the "reader.c" code for more details on UTF-8 encoding.  Note
-		// that we assume that the buffer contains a valid UTF-8 sequence.
-
-		// Read the next UTF-8 character.
-		octet := emitter.buffer[pos]
-
-		var w int
-		var value rune
-		switch {
-		case octet&0x80 == 0x00:
-			w, value = 1, rune(octet&0x7F)
-		case octet&0xE0 == 0xC0:
-			w, value = 2, rune(octet&0x1F)
-		case octet&0xF0 == 0xE0:
-			w, value = 3, rune(octet&0x0F)
-		case octet&0xF8 == 0xF0:
-			w, value = 4, rune(octet&0x07)
-		}
-		for k := 1; k < w; k++ {
-			octet = emitter.buffer[pos+k]
-			value = (value << 6) + (rune(octet) & 0x3F)
-		}
-		pos += w
-
-		// Write the character.
-		if value < 0x10000 {
-			var b [2]byte
-			b[high] = byte(value >> 8)
-			b[low] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
-		} else {
-			// Write the character using a surrogate pair (check "reader.c").
-			var b [4]byte
-			value -= 0x10000
-			b[high] = byte(0xD8 + (value >> 18))
-			b[low] = byte((value >> 10) & 0xFF)
-			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
-			b[low+2] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
-		}
-	}
-
-	// Write the raw buffer.
-	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
 		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
 	}
 	emitter.buffer_pos = 0
-	emitter.raw_buffer = emitter.raw_buffer[:0]
 	return true
 }
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index 5e3c2dae..de85aa4c 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -9,6 +9,7 @@ package yaml
 import (
 	"errors"
 	"fmt"
+	"io"
 	"reflect"
 	"strings"
 	"sync"
@@ -81,12 +82,58 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 }
 
 // UnmarshalStrict is like Unmarshal except that any fields that are found
-// in the data that do not have corresponding struct members will result in
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
 // an error.
 func UnmarshalStrict(in []byte, out interface{}) (err error) {
 	return unmarshal(in, out, true)
 }
 
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
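
A short sketch of the new streaming decoder; it reads documents one at a time and signals the end of input with io.EOF (SetStrict(true) would additionally enable the UnmarshalStrict checks):

    package main

    import (
        "fmt"
        "io"
        "strings"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        dec := yaml.NewDecoder(strings.NewReader("name: alpha\n---\nname: beta\n"))
        for {
            var v map[string]string
            err := dec.Decode(&v)
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Println(v["name"]) // alpha, then beta
        }
    }
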
 func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 	defer handleErr(&err)
 	d := newDecoder(strict)
@@ -110,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 // of the generated document will reflect the structure of the value itself.
 // Maps and pointers (to struct, string, int, etc) are accepted as the in value.
 //
-// Struct fields are only unmarshalled if they are exported (have an upper case
-// first letter), and are unmarshalled using the field name lowercased as the
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
 // default key. Custom keys may be defined via the "yaml" name in the field
 // tag: the content preceding the first comma is used as the key, and the
 // following comma-separated options are used to tweak the marshalling process.
@@ -125,7 +172,10 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 //
 //     omitempty    Only include the field if it's not set to the zero
 //                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be omitted if that method returns true.
 //
 //     flow         Marshal using a flow style (useful for structs,
 //                  sequences and maps).
@@ -150,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
 	defer handleErr(&err)
 	e := newEncoder()
 	defer e.destroy()
-	e.marshal("", reflect.ValueOf(in))
+	e.marshalDoc("", reflect.ValueOf(in))
 	e.finish()
 	out = e.out
 	return
 }
 
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
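
The matching streaming encoder; documents after the first are prefixed with a "---" separator and Close flushes the underlying emitter:

    package main

    import (
        "bytes"
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        var buf bytes.Buffer
        enc := yaml.NewEncoder(&buf)
        for _, name := range []string{"alpha", "beta"} {
            if err := enc.Encode(map[string]string{"name": name}); err != nil {
                panic(err)
            }
        }
        if err := enc.Close(); err != nil {
            panic(err)
        }
        fmt.Print(buf.String())
        // name: alpha
        // ---
        // name: beta
    }
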
 func handleErr(err *error) {
 	if v := recover(); v != nil {
 		if e, ok := v.(yamlError); ok {
@@ -211,6 +296,9 @@ type fieldInfo struct {
 	Num       int
 	OmitEmpty bool
 	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
 
 	// Inline holds the field index if the field is part of an inlined struct.
 	Inline []int
@@ -290,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 					} else {
 						finfo.Inline = append([]int{i}, finfo.Inline...)
 					}
+					finfo.Id = len(fieldsList)
 					fieldsMap[finfo.Key] = finfo
 					fieldsList = append(fieldsList, finfo)
 				}
@@ -311,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 			return nil, errors.New(msg)
 		}
 
+		info.Id = len(fieldsList)
 		fieldsList = append(fieldsList, info)
 		fieldsMap[info.Key] = info
 	}
 
-	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
 
 	fieldMapMutex.Lock()
 	structMap[st] = sinfo
@@ -323,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 	return sinfo, nil
 }
 
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
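
isZero below consults IsZeroer first, so omitempty can now drop zero values of types such as time.Time that define their own IsZero method. A hedged example (the type and field names are invented):

    package main

    import (
        "fmt"
        "time"

        yaml "gopkg.in/yaml.v2"
    )

    type Report struct {
        Title   string    `yaml:"title"`
        Created time.Time `yaml:"created,omitempty"` // time.Time implements IsZero
    }

    func main() {
        out, err := yaml.Marshal(Report{Title: "zero times are omitted"})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out)) // title: zero times are omitted
    }
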
 func isZero(v reflect.Value) bool {
-	switch v.Kind() {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
 	case reflect.String:
 		return len(v.String()) == 0
 	case reflect.Interface, reflect.Ptr:
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index 3caeca04..e25cee56 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -1,6 +1,7 @@
 package yaml
 
 import (
+	"fmt"
 	"io"
 )
 
@@ -239,6 +240,27 @@ const (
 	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
 )
 
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
 // The event structure.
 type yaml_event_t struct {
 
@@ -521,9 +543,9 @@ type yaml_parser_t struct {
 
 	read_handler yaml_read_handler_t // Read handler.
 
-	input_file io.Reader // File input data.
-	input      []byte    // String input data.
-	input_pos  int
+	input_reader io.Reader // Reader input data.
+	input        []byte    // String input data.
+	input_pos    int
 
 	eof bool // EOF flag
 
@@ -632,7 +654,7 @@ type yaml_emitter_t struct {
 	write_handler yaml_write_handler_t // Write handler.
 
 	output_buffer *[]byte   // String output data.
-	output_file   io.Writer // File output data.
+	output_writer io.Writer // Writer output data.
 
 	buffer     []byte // The working buffer.
 	buffer_pos int    // The current position of the buffer.
-- 
GitLab