diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index dcec5ffd09155b29c1fc253b793c6b40d2e280bd..4df4cf80991706277da5e5a4ff0d150dd2a0bf62 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -5,10 +5,19 @@
 		"./..."
 	],
 	"Deps": [
+		{
+			"ImportPath": "github.com/codegangsta/negroni",
+			"Comment": "v0.1-70-gc7477ad",
+			"Rev": "c7477ad8e330bef55bf1ebe300cf8aa67c492d1b"
+		},
 		{
 			"ImportPath": "github.com/davecgh/go-spew/spew",
 			"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
 		},
+		{
+			"ImportPath": "github.com/dustin/go-broadcast",
+			"Rev": "3bdf6d4a7164a50bc19d5f230e2981d87d2584f1"
+		},
 		{
 			"ImportPath": "github.com/gin-gonic/gin",
 			"Comment": "v1.0rc1-219-g3d002e3",
@@ -27,12 +36,7 @@
 			"Rev": "234959944d9cf05229b02e8b386e5cffe1e4e04a"
 		},
 		{
-			"ImportPath": "github.com/inconshreveable/log15/stack",
-			"Comment": "v2.3-48-g210d6fd",
-			"Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
-		},
-		{
-			"ImportPath": "github.com/inconshreveable/log15/term",
+			"ImportPath": "github.com/inconshreveable/log15",
 			"Comment": "v2.3-48-g210d6fd",
 			"Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
 		},
@@ -40,6 +44,10 @@
 			"ImportPath": "github.com/manucorporat/sse",
 			"Rev": "ee05b128a739a0fb76c7ebd3ae4810c1de808d6d"
 		},
+		{
+			"ImportPath": "github.com/manucorporat/stats",
+			"Rev": "8f2d6ace262eba462e9beb552382c98be51d807b"
+		},
 		{
 			"ImportPath": "github.com/mattn/go-colorable",
 			"Rev": "4af63d73f5bea08b682ad2cd198b1e607afd6be0"
@@ -56,6 +64,16 @@
 			"ImportPath": "github.com/naoina/toml",
 			"Rev": "751171607256bb66e64c9f0220c00662420c38e9"
 		},
+		{
+			"ImportPath": "github.com/onsi/ginkgo",
+			"Comment": "v1.2.0-42-g07d85e6",
+			"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
+		},
+		{
+			"ImportPath": "github.com/onsi/gomega",
+			"Comment": "v1.0-81-gad93e46",
+			"Rev": "ad93e463829d54602c66e94813bc1eb9b10d454c"
+		},
 		{
 			"ImportPath": "github.com/pmezard/go-difflib/difflib",
 			"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
@@ -147,79 +165,84 @@
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/blockchain",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/config/tendermint",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/config/tendermint_test",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/consensus",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/mempool",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/node",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/proxy",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/rpc/core",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/state",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/types",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tendermint/version",
-			"Comment": "0.2-130-g6a209ba",
-			"Rev": "6a209ba3a8c283fb2ffe78182675ad00be22c878"
+			"Comment": "0.2-134-gd31d3c5",
+			"Rev": "d31d3c58ad69474c455a6fddbf217cd766da61a2"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tmsp/client",
-			"Rev": "089489cbf5541391c4244b96a0174ed36216fe95"
+			"Rev": "90e38f08f4d0edd307b23d463ace8299d01c8fad"
 		},
 		{
-			"ImportPath": "github.com/tendermint/tmsp/example/golang",
-			"Rev": "089489cbf5541391c4244b96a0174ed36216fe95"
+			"ImportPath": "github.com/tendermint/tmsp/example/dummy",
+			"Rev": "90e38f08f4d0edd307b23d463ace8299d01c8fad"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tmsp/server",
-			"Rev": "089489cbf5541391c4244b96a0174ed36216fe95"
+			"Rev": "90e38f08f4d0edd307b23d463ace8299d01c8fad"
 		},
 		{
 			"ImportPath": "github.com/tendermint/tmsp/types",
-			"Rev": "089489cbf5541391c4244b96a0174ed36216fe95"
+			"Rev": "90e38f08f4d0edd307b23d463ace8299d01c8fad"
 		},
 		{
 			"ImportPath": "github.com/tommy351/gin-cors",
 			"Rev": "dc91dec6313ae4db53481bf3b29cf6b94bf80357"
 		},
+		{
+			"ImportPath": "github.com/tylerb/graceful",
+			"Comment": "v1.2.4-2-g7116c7a",
+			"Rev": "7116c7a8115899e80197cd9e0b97998c0f97ed8e"
+		},
 		{
 			"ImportPath": "golang.org/x/crypto/curve25519",
 			"Rev": "1f22c0103821b9390939b6776727195525381532"
diff --git a/tmsp/erisdb.go b/tmsp/erisdb.go
index bd0163f415a652e991f0def82eaa817f85372f0f..849b1d92a98b4d4c34139414c71ebffda064d446 100644
--- a/tmsp/erisdb.go
+++ b/tmsp/erisdb.go
@@ -130,8 +130,8 @@ func (app ErisDBApp) CheckTx(txBytes []byte) (code tmsp.CodeType, result []byte,
 		return tmsp.CodeType_EncodingError, nil, fmt.Sprintf("Encoding error: %v", err)
 	}
 
-	// we need the lock because CheckTx can run concurrently with GetHash,
-	// and GetHash refreshes the checkCache
+	// we need the lock because CheckTx can run concurrently with Commit,
+	// and Commit refreshes the checkCache
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
 	err = sm.ExecTx(app.checkCache, *tx, false, nil)
@@ -143,8 +143,8 @@ func (app ErisDBApp) CheckTx(txBytes []byte) (code tmsp.CodeType, result []byte,
 }
 
 // Implements tmsp.Application
-// GetHash should commit the state (called at end of block)
-func (app *ErisDBApp) GetHash() (hash []byte, log string) {
+// Commit the state (called at end of block)
+func (app *ErisDBApp) Commit() (hash []byte, log string) {
 	// sync the AppendTx cache
 	app.cache.Sync()
 
diff --git a/vendor/github.com/codegangsta/negroni/LICENSE b/vendor/github.com/codegangsta/negroni/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..08b5e20ac47dd34cc4cbe2e9111cd4464540c568
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Jeremy Saenz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/codegangsta/negroni/README.md b/vendor/github.com/codegangsta/negroni/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9294d705519a6e5b96c1e3f63d7b5d11fcc8121e
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/README.md
@@ -0,0 +1,181 @@
+# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61)
+
+Negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of `net/http` Handlers.
+
+If you like the idea of [Martini](http://github.com/go-martini/martini), but you think it contains too much magic, then Negroni is a great fit.
+
+
+Language Translations:
+* [Português Brasileiro (pt_BR)](translations/README_pt_br.md)
+
+## Getting Started
+
+After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`.
+
+~~~ go
+package main
+
+import (
+  "github.com/codegangsta/negroni"
+  "net/http"
+  "fmt"
+)
+
+func main() {
+  mux := http.NewServeMux()
+  mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+    fmt.Fprintf(w, "Welcome to the home page!")
+  })
+
+  n := negroni.Classic()
+  n.UseHandler(mux)
+  n.Run(":3000")
+}
+~~~
+
+Then install the Negroni package (**go 1.1** and greater is required):
+~~~
+go get github.com/codegangsta/negroni
+~~~
+
+Then run your server:
+~~~
+go run server.go
+~~~
+
+You will now have a Go net/http webserver running on `localhost:3000`.
+
+## Need Help?
+If you have a question or feature request, [go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). The GitHub issues for Negroni will be used exclusively for bug reports and pull requests.
+
+## Is Negroni a Framework?
+Negroni is **not** a framework. It is a library that is designed to work directly with net/http.
+
+## Routing?
+Negroni is BYOR (Bring your own Router). The Go community already has a number of great http routers available, Negroni tries to play well with all of them by fully supporting `net/http`. For instance, integrating with [Gorilla Mux](http://github.com/gorilla/mux) looks like so:
+
+~~~ go
+router := mux.NewRouter()
+router.HandleFunc("/", HomeHandler)
+
+n := negroni.New(Middleware1, Middleware2)
+// Or use a middleware with the Use() function
+n.Use(Middleware3)
+// router goes last
+n.UseHandler(router)
+
+n.Run(":3000")
+~~~
+
+## `negroni.Classic()`
+`negroni.Classic()` provides some default middleware that is useful for most applications:
+
+* `negroni.Recovery` - Panic Recovery Middleware.
+* `negroni.Logger` - Request/Response Logging Middleware.
+* `negroni.Static` - Static File serving under the "public" directory.
+
+This makes it really easy to get started with some useful features from Negroni.
+
+## Handlers
+Negroni provides a bidirectional middleware flow. This is done through the `negroni.Handler` interface:
+
+~~~ go
+type Handler interface {
+  ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
+}
+~~~
+
+If a middleware hasn't already written to the ResponseWriter, it should call the next `http.HandlerFunc` in the chain to yield to the next middleware handler. This can be used for great good:
+
+~~~ go
+func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+  // do some stuff before
+  next(rw, r)
+  // do some stuff after
+}
+~~~
+
+And you can map it to the handler chain with the `Use` function:
+
+~~~ go
+n := negroni.New()
+n.Use(negroni.HandlerFunc(MyMiddleware))
+~~~
+
+You can also map plain old `http.Handler`s:
+
+~~~ go
+n := negroni.New()
+
+mux := http.NewServeMux()
+// map your routes
+
+n.UseHandler(mux)
+
+n.Run(":3000")
+~~~
+
+## `Run()`
+Negroni has a convenience function called `Run`. `Run` takes an addr string identical to [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe).
+
+~~~ go
+n := negroni.Classic()
+// ...
+log.Fatal(http.ListenAndServe(":8080", n))
+~~~
+
+## Route Specific Middleware
+If you have a route group of routes that need specific middleware to be executed, you can simply create a new Negroni instance and use it as your route handler.
+
+~~~ go
+router := mux.NewRouter()
+adminRoutes := mux.NewRouter()
+// add admin routes here
+
+// Create a new negroni for the admin middleware
+router.Handle("/admin", negroni.New(
+  Middleware1,
+  Middleware2,
+  negroni.Wrap(adminRoutes),
+))
+~~~
+
+## Third Party Middleware
+
+Here is a current list of Negroni compatible middleware. Feel free to put up a PR linking your middleware if you have built one:
+
+
+| Middleware | Author | Description |
+| -----------|--------|-------------|
+| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | Secure authentication for REST API endpoints |
+| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown |
+| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins |
+| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it|
+| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data binding from HTTP requests into structs |
+| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger |
+| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Render JSON, XML and HTML templates |
+| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime |
+| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP response compression |
+| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 middleware |
+| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management |
+| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, users and permissions |
+| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generate TinySVG, HTML and CSS on the fly |
+| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
+| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | Middleware that assigns a random X-Request-Id header to each request |
+| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC authentication middleware |
+| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Store information about your web application (response time, etc.) |
+
+## Examples
+[Alexander Rødseth](https://github.com/xyproto) created [mooseware](https://github.com/xyproto/mooseware), a skeleton for writing a Negroni middleware handler.
+
+## Live code reload?
+[gin](https://github.com/codegangsta/gin) and [fresh](https://github.com/pilu/fresh) both live reload negroni apps.
+
+## Essential Reading for Beginners of Go & Negroni
+
+* [Using a Context to pass information from middleware to end handler](http://elithrar.github.io/article/map-string-interface/)
+* [Understanding middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters)
+
+## About
+
+Negroni is obsessively designed by none other than the [Code Gangsta](http://codegangsta.io/)
diff --git a/vendor/github.com/codegangsta/negroni/doc.go b/vendor/github.com/codegangsta/negroni/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..24d6572c2fea573011996fc5390d0cb3a26b6ca8
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/doc.go
@@ -0,0 +1,25 @@
+// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers.
+//
+// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit.
+//
+// For a full guide visit http://github.com/codegangsta/negroni
+//
+//  package main
+//
+//  import (
+//    "github.com/codegangsta/negroni"
+//    "net/http"
+//    "fmt"
+//  )
+//
+//  func main() {
+//    mux := http.NewServeMux()
+//    mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+//      fmt.Fprintf(w, "Welcome to the home page!")
+//    })
+//
+//    n := negroni.Classic()
+//    n.UseHandler(mux)
+//    n.Run(":3000")
+//  }
+package negroni
diff --git a/vendor/github.com/codegangsta/negroni/logger.go b/vendor/github.com/codegangsta/negroni/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..e3828ef319f5c266bfcd2c4eb795631c33963992
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/logger.go
@@ -0,0 +1,29 @@
+package negroni
+
+import (
+	"log"
+	"net/http"
+	"os"
+	"time"
+)
+
+// Logger is a middleware handler that logs the request as it goes in and the response as it goes out.
+type Logger struct {
+	// Logger inherits from log.Logger used to log messages with the Logger middleware
+	*log.Logger
+}
+
+// NewLogger returns a new Logger instance
+func NewLogger() *Logger {
+	return &Logger{log.New(os.Stdout, "[negroni] ", 0)}
+}
+
+func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+	start := time.Now()
+	l.Printf("Started %s %s", r.Method, r.URL.Path)
+
+	next(rw, r)
+
+	res := rw.(ResponseWriter)
+	l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start))
+}
diff --git a/vendor/github.com/codegangsta/negroni/negroni.go b/vendor/github.com/codegangsta/negroni/negroni.go
new file mode 100644
index 0000000000000000000000000000000000000000..57d15eb7974f5dcb32f7bf9ebc4bdbdacb06449f
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/negroni.go
@@ -0,0 +1,129 @@
+package negroni
+
+import (
+	"log"
+	"net/http"
+	"os"
+)
+
+// Handler handler is an interface that objects can implement to be registered to serve as middleware
+// in the Negroni middleware stack.
+// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc
+// passed in.
+//
+// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked.
+type Handler interface {
+	ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
+}
+
+// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers.
+// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f.
+type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
+
+func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+	h(rw, r, next)
+}
+
+type middleware struct {
+	handler Handler
+	next    *middleware
+}
+
+func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+	m.handler.ServeHTTP(rw, r, m.next.ServeHTTP)
+}
+
+// Wrap converts a http.Handler into a negroni.Handler so it can be used as a Negroni
+// middleware. The next http.HandlerFunc is automatically called after the Handler
+// is executed.
+func Wrap(handler http.Handler) Handler {
+	return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+		handler.ServeHTTP(rw, r)
+		next(rw, r)
+	})
+}
+
+// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler.
+// Negroni middleware is evaluated in the order that they are added to the stack using
+// the Use and UseHandler methods.
+type Negroni struct {
+	middleware middleware
+	handlers   []Handler
+}
+
+// New returns a new Negroni instance with no middleware preconfigured.
+func New(handlers ...Handler) *Negroni {
+	return &Negroni{
+		handlers:   handlers,
+		middleware: build(handlers),
+	}
+}
+
+// Classic returns a new Negroni instance with the default middleware already
+// in the stack.
+//
+// Recovery - Panic Recovery Middleware
+// Logger - Request/Response Logging
+// Static - Static File Serving
+func Classic() *Negroni {
+	return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public")))
+}
+
+func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+	n.middleware.ServeHTTP(NewResponseWriter(rw), r)
+}
+
+// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni.
+func (n *Negroni) Use(handler Handler) {
+	n.handlers = append(n.handlers, handler)
+	n.middleware = build(n.handlers)
+}
+
+// UseFunc adds a Negroni-style handler function onto the middleware stack.
+func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {
+	n.Use(HandlerFunc(handlerFunc))
+}
+
+// UseHandler adds a http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni.
+func (n *Negroni) UseHandler(handler http.Handler) {
+	n.Use(Wrap(handler))
+}
+
+// UseHandler adds a http.HandlerFunc-style handler function onto the middleware stack.
+func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) {
+	n.UseHandler(http.HandlerFunc(handlerFunc))
+}
+
+// Run is a convenience function that runs the negroni stack as an HTTP
+// server. The addr string takes the same format as http.ListenAndServe.
+func (n *Negroni) Run(addr string) {
+	l := log.New(os.Stdout, "[negroni] ", 0)
+	l.Printf("listening on %s", addr)
+	l.Fatal(http.ListenAndServe(addr, n))
+}
+
+// Returns a list of all the handlers in the current Negroni middleware chain.
+func (n *Negroni) Handlers() []Handler {
+	return n.handlers
+}
+
+func build(handlers []Handler) middleware {
+	var next middleware
+
+	if len(handlers) == 0 {
+		return voidMiddleware()
+	} else if len(handlers) > 1 {
+		next = build(handlers[1:])
+	} else {
+		next = voidMiddleware()
+	}
+
+	return middleware{handlers[0], &next}
+}
+
+func voidMiddleware() middleware {
+	return middleware{
+		HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}),
+		&middleware{},
+	}
+}
diff --git a/vendor/github.com/codegangsta/negroni/recovery.go b/vendor/github.com/codegangsta/negroni/recovery.go
new file mode 100644
index 0000000000000000000000000000000000000000..d790cade652ac21d6f69a20b95fb4087f4aed6ba
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/recovery.go
@@ -0,0 +1,46 @@
+package negroni
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"runtime"
+)
+
+// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
+type Recovery struct {
+	Logger     *log.Logger
+	PrintStack bool
+	StackAll   bool
+	StackSize  int
+}
+
+// NewRecovery returns a new instance of Recovery
+func NewRecovery() *Recovery {
+	return &Recovery{
+		Logger:     log.New(os.Stdout, "[negroni] ", 0),
+		PrintStack: true,
+		StackAll:   false,
+		StackSize:  1024 * 8,
+	}
+}
+
+func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+	defer func() {
+		if err := recover(); err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			stack := make([]byte, rec.StackSize)
+			stack = stack[:runtime.Stack(stack, rec.StackAll)]
+
+			f := "PANIC: %s\n%s"
+			rec.Logger.Printf(f, err, stack)
+
+			if rec.PrintStack {
+				fmt.Fprintf(rw, f, err, stack)
+			}
+		}
+	}()
+
+	next(rw, r)
+}
diff --git a/vendor/github.com/codegangsta/negroni/response_writer.go b/vendor/github.com/codegangsta/negroni/response_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea86a2657d074022314a0254e0705815c349b603
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/response_writer.go
@@ -0,0 +1,96 @@
+package negroni
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about
+// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter
+// if the functionality calls for it.
+type ResponseWriter interface {
+	http.ResponseWriter
+	http.Flusher
+	// Status returns the status code of the response or 0 if the response has not been written.
+	Status() int
+	// Written returns whether or not the ResponseWriter has been written.
+	Written() bool
+	// Size returns the size of the response body.
+	Size() int
+	// Before allows for a function to be called before the ResponseWriter has been written to. This is
+	// useful for setting headers or any other operations that must happen before a response has been written.
+	Before(func(ResponseWriter))
+}
+
+type beforeFunc func(ResponseWriter)
+
+// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
+func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
+	return &responseWriter{rw, 0, 0, nil}
+}
+
+type responseWriter struct {
+	http.ResponseWriter
+	status      int
+	size        int
+	beforeFuncs []beforeFunc
+}
+
+func (rw *responseWriter) WriteHeader(s int) {
+	rw.status = s
+	rw.callBefore()
+	rw.ResponseWriter.WriteHeader(s)
+}
+
+func (rw *responseWriter) Write(b []byte) (int, error) {
+	if !rw.Written() {
+		// The status will be StatusOK if WriteHeader has not been called yet
+		rw.WriteHeader(http.StatusOK)
+	}
+	size, err := rw.ResponseWriter.Write(b)
+	rw.size += size
+	return size, err
+}
+
+func (rw *responseWriter) Status() int {
+	return rw.status
+}
+
+func (rw *responseWriter) Size() int {
+	return rw.size
+}
+
+func (rw *responseWriter) Written() bool {
+	return rw.status != 0
+}
+
+func (rw *responseWriter) Before(before func(ResponseWriter)) {
+	rw.beforeFuncs = append(rw.beforeFuncs, before)
+}
+
+func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	hijacker, ok := rw.ResponseWriter.(http.Hijacker)
+	if !ok {
+		return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
+	}
+	return hijacker.Hijack()
+}
+
+func (rw *responseWriter) CloseNotify() <-chan bool {
+	return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (rw *responseWriter) callBefore() {
+	for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
+		rw.beforeFuncs[i](rw)
+	}
+}
+
+func (rw *responseWriter) Flush() {
+	flusher, ok := rw.ResponseWriter.(http.Flusher)
+	if ok {
+		flusher.Flush()
+	}
+}
diff --git a/vendor/github.com/codegangsta/negroni/static.go b/vendor/github.com/codegangsta/negroni/static.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5af4e6882f245eeb083993c185f0863fb21102c
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/static.go
@@ -0,0 +1,84 @@
+package negroni
+
+import (
+	"net/http"
+	"path"
+	"strings"
+)
+
+// Static is a middleware handler that serves static files in the given directory/filesystem.
+type Static struct {
+	// Dir is the directory to serve static files from
+	Dir http.FileSystem
+	// Prefix is the optional prefix used to serve the static directory content
+	Prefix string
+	// IndexFile defines which file to serve as index if it exists.
+	IndexFile string
+}
+
+// NewStatic returns a new instance of Static
+func NewStatic(directory http.FileSystem) *Static {
+	return &Static{
+		Dir:       directory,
+		Prefix:    "",
+		IndexFile: "index.html",
+	}
+}
+
+func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+	if r.Method != "GET" && r.Method != "HEAD" {
+		next(rw, r)
+		return
+	}
+	file := r.URL.Path
+	// if we have a prefix, filter requests by stripping the prefix
+	if s.Prefix != "" {
+		if !strings.HasPrefix(file, s.Prefix) {
+			next(rw, r)
+			return
+		}
+		file = file[len(s.Prefix):]
+		if file != "" && file[0] != '/' {
+			next(rw, r)
+			return
+		}
+	}
+	f, err := s.Dir.Open(file)
+	if err != nil {
+		// discard the error?
+		next(rw, r)
+		return
+	}
+	defer f.Close()
+
+	fi, err := f.Stat()
+	if err != nil {
+		next(rw, r)
+		return
+	}
+
+	// try to serve index file
+	if fi.IsDir() {
+		// redirect if missing trailing slash
+		if !strings.HasSuffix(r.URL.Path, "/") {
+			http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound)
+			return
+		}
+
+		file = path.Join(file, s.IndexFile)
+		f, err = s.Dir.Open(file)
+		if err != nil {
+			next(rw, r)
+			return
+		}
+		defer f.Close()
+
+		fi, err = f.Stat()
+		if err != nil || fi.IsDir() {
+			next(rw, r)
+			return
+		}
+	}
+
+	http.ServeContent(rw, r, file, fi.ModTime(), f)
+}
diff --git a/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md b/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5b02fa3061e17a16bf81699c3b4feb5c68bcc1c
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md
@@ -0,0 +1,170 @@
+# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61)
+
+Negroni é uma abordagem idiomática para middleware web em Go. É pequeno, não intrusivo, e incentiva uso da biblioteca `net/http`.
+
+Se gosta da ideia do [Martini](http://github.com/go-martini/martini), mas acha que contém muita mágica, então Negroni é ideal.
+
+## Começando
+
+Depois de instalar Go e definir seu [GOPATH](http://golang.org/doc/code.html#GOPATH), crie seu primeiro arquivo `.go`. Iremos chamá-lo `server.go`.
+
+~~~ go
+package main
+
+import (
+  "github.com/codegangsta/negroni"
+  "net/http"
+  "fmt"
+)
+
+func main() {
+  mux := http.NewServeMux()
+  mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+    fmt.Fprintf(w, "Welcome to the home page!")
+  })
+
+  n := negroni.Classic()
+  n.UseHandler(mux)
+  n.Run(":3000")
+}
+~~~
+
+Depois instale o pacote Negroni (**go 1.1** ou superior)
+~~~
+go get github.com/codegangsta/negroni
+~~~
+
+Depois execute seu servidor:
+~~~
+go run server.go
+~~~
+
+Agora terá um servidor web Go net/http rodando em `localhost:3000`.
+
+## Precisa de Ajuda?
+Se você tem uma pergunta ou pedido de recurso, [pergunte na lista de discussão](https://groups.google.com/forum/#!forum/negroni-users). O GitHub issues para o Negroni será usado exclusivamente para reportar bugs e pull requests.
+
+## Negroni é um Framework?
+Negroni **não** é um framework. É uma biblioteca que é desenhada para trabalhar diretamente com net/http.
+
+## Roteamento?
+Negroni é TSPR (Traga seu Próprio Roteamento). A comunidade Go já tem um grande número de roteadores http disponíveis, e o Negroni tenta funcionar bem com todos eles pelo suporte total a `net/http`. Por exemplo, a integração com [Gorilla Mux](http://github.com/gorilla/mux) se parece com isso:
+
+~~~ go
+router := mux.NewRouter()
+router.HandleFunc("/", HomeHandler)
+
+n := negroni.New(Middleware1, Middleware2)
+// Or use a middleware with the Use() function
+n.Use(Middleware3)
+// router goes last
+n.UseHandler(router)
+
+n.Run(":3000")
+~~~
+
+## `negroni.Classic()`
+`negroni.Classic()` fornece alguns middlewares padrão que são úteis para maioria das aplicações:
+
+* `negroni.Recovery` - Panic Recovery Middleware.
+* `negroni.Logging` - Request/Response Logging Middleware.
+* `negroni.Static` - Static File serving under the "public" directory.
+
+Isso torna muito fácil começar com alguns recursos úteis do Negroni.
+
+## Handlers
+Negroni fornece um middleware de fluxo bidirecional. Isso é feito através da interface `negroni.Handler`:
+
+~~~ go
+type Handler interface {
+  ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
+}
+~~~
+
+Se um middleware ainda não tiver escrito no ResponseWriter, ele deve chamar a próxima `http.HandlerFunc` na cadeia para produzir o próximo handler middleware. Isso pode ser usado muito bem:
+
+~~~ go
+func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+  // do some stuff before
+  next(rw, r)
+  // do some stuff after
+}
+~~~
+
+E pode mapear isso para a cadeia de handler com a função `Use`:
+
+~~~ go
+n := negroni.New()
+n.Use(negroni.HandlerFunc(MyMiddleware))
+~~~
+
+Você também pode mapear `http.Handler` antigos:
+
+~~~ go
+n := negroni.New()
+
+mux := http.NewServeMux()
+// map your routes
+
+n.UseHandler(mux)
+
+n.Run(":3000")
+~~~
+
+## `Run()`
+Negroni tem uma função de conveniência chamada `Run`. `Run` pega um endereço de string idêntico para [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe).
+
+~~~ go
+n := negroni.Classic()
+// ...
+log.Fatal(http.ListenAndServe(":8080", n))
+~~~
+
+## Middleware para Rotas Específicas
+Se você tem um grupo de rota com rotas que precisam ser executadas por um middleware específico, pode simplesmente criar uma nova instância de Negroni e usar no seu Manipulador de rota.
+
+~~~ go
+router := mux.NewRouter()
+adminRoutes := mux.NewRouter()
+// add admin routes here
+
+// Criar um middleware negroni para admin
+router.Handle("/admin", negroni.New(
+  Middleware1,
+  Middleware2,
+  negroni.Wrap(adminRoutes),
+))
+~~~
+
+## Middleware de Terceiros
+
+Aqui está uma lista atual de Middleware Compatíveis com Negroni. Sinta-se livre para mandar um PR vinculando seu middleware, se construiu um:
+
+
+| Middleware | Autor | Descrição |
+| -----------|--------|-------------|
+| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown |
+| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) |  Implementa rapidamente itens de segurança.|
+| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Handler para mapeamento/validação de um request a estrutura. |
+| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger |
+| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Pacote para renderizar JSON, XML, e templates HTML. |
+| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime |
+| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | Handler para adicionar compressão gzip para as requisições |
+| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | Handler que prove sistema de login OAuth 2.0 para aplicações Martini. Google Sign-in, Facebook Connect e Github login são suportados. |
+| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Handler que provê o serviço de sessão. |
+| [permissions](https://github.com/xyproto/permissions) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, usuários e permissões. |
+| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Pacote para gerar TinySVG, HTML e CSS em tempo real. |
+
+## Exemplos
+[Alexander Rødseth](https://github.com/xyproto) criou [mooseware](https://github.com/xyproto/mooseware), uma estrutura para escrever um handler middleware Negroni.
+
+## Servidor com autoreload?
+[gin](https://github.com/codegangsta/gin) e [fresh](https://github.com/pilu/fresh) são aplicativos para autoreload do Negroni.
+
+## Leitura Essencial para Iniciantes em Go & Negroni
+* [Usando um contexto para passar informação de um middleware para o manipulador final](http://elithrar.github.io/article/map-string-interface/)
+* [Entendendo middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters)
+
+
+## Sobre
+Negroni é obsessivamente desenhado por ninguém menos que  [Code Gangsta](http://codegangsta.io/)
diff --git a/vendor/github.com/dustin/go-broadcast/.gitignore b/vendor/github.com/dustin/go-broadcast/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..4e8e42fe0725dbb42a8b9f49b68f1a881f62d814
--- /dev/null
+++ b/vendor/github.com/dustin/go-broadcast/.gitignore
@@ -0,0 +1,2 @@
+#*
+*~
diff --git a/vendor/github.com/dustin/go-broadcast/LICENSE b/vendor/github.com/dustin/go-broadcast/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..b01ef80261ec9c64376a392ce27635ee230715bb
--- /dev/null
+++ b/vendor/github.com/dustin/go-broadcast/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dustin/go-broadcast/README.markdown b/vendor/github.com/dustin/go-broadcast/README.markdown
new file mode 100644
index 0000000000000000000000000000000000000000..863ae2714d992aebb48402f343d5145834695e40
--- /dev/null
+++ b/vendor/github.com/dustin/go-broadcast/README.markdown
@@ -0,0 +1,5 @@
+pubsubbing channels.
+
+This project primarily exists because I've been copying and pasting
+the exact same two files into numerous projects.  It does work well,
+though.
diff --git a/vendor/github.com/dustin/go-broadcast/broadcaster.go b/vendor/github.com/dustin/go-broadcast/broadcaster.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c113f59688522738c0ac8974dbd47c0e00b530b
--- /dev/null
+++ b/vendor/github.com/dustin/go-broadcast/broadcaster.go
@@ -0,0 +1,86 @@
+/*
+Package broadcast provides pubsub of messages over channels.
+
+A provider has a Broadcaster into which it Submits messages and into
+which subscribers Register to pick up those messages.
+
+*/
+package broadcast
+
+type broadcaster struct {
+	input chan interface{}
+	reg   chan chan<- interface{}
+	unreg chan chan<- interface{}
+
+	outputs map[chan<- interface{}]bool
+}
+
+// The Broadcaster interface describes the main entry points to
+// broadcasters.
+type Broadcaster interface {
+	// Register a new channel to receive broadcasts
+	Register(chan<- interface{})
+	// Unregister a channel so that it no longer receives broadcasts.
+	Unregister(chan<- interface{})
+	// Shut this broadcaster down.
+	Close() error
+	// Submit a new object to all subscribers
+	Submit(interface{})
+}
+
+func (b *broadcaster) broadcast(m interface{}) {
+	for ch := range b.outputs {
+		ch <- m
+	}
+}
+
+func (b *broadcaster) run() {
+	for {
+		select {
+		case m := <-b.input:
+			b.broadcast(m)
+		case ch, ok := <-b.reg:
+			if ok {
+				b.outputs[ch] = true
+			} else {
+				return
+			}
+		case ch := <-b.unreg:
+			delete(b.outputs, ch)
+		}
+	}
+}
+
+// NewBroadcaster creates a new broadcaster with the given input
+// channel buffer length.
+func NewBroadcaster(buflen int) Broadcaster {
+	b := &broadcaster{
+		input:   make(chan interface{}, buflen),
+		reg:     make(chan chan<- interface{}),
+		unreg:   make(chan chan<- interface{}),
+		outputs: make(map[chan<- interface{}]bool),
+	}
+
+	go b.run()
+
+	return b
+}
+
+func (b *broadcaster) Register(newch chan<- interface{}) {
+	b.reg <- newch
+}
+
+func (b *broadcaster) Unregister(newch chan<- interface{}) {
+	b.unreg <- newch
+}
+
+func (b *broadcaster) Close() error {
+	close(b.reg)
+	return nil
+}
+
+func (b *broadcaster) Submit(m interface{}) {
+	if b != nil {
+		b.input <- m
+	}
+}
diff --git a/vendor/github.com/dustin/go-broadcast/mux_observer.go b/vendor/github.com/dustin/go-broadcast/mux_observer.go
new file mode 100644
index 0000000000000000000000000000000000000000..38d0dcb187495afb3694ba2345a7bfd1e656ccb6
--- /dev/null
+++ b/vendor/github.com/dustin/go-broadcast/mux_observer.go
@@ -0,0 +1,133 @@
+package broadcast
+
+type taggedObservation struct {
+	sub *subObserver
+	ob  interface{}
+}
+
+const (
+	register = iota
+	unregister
+	purge
+)
+
+type taggedRegReq struct {
+	sub     *subObserver
+	ch      chan<- interface{}
+	regType int
+}
+
+// A MuxObserver multiplexes several streams of observations onto a
+// single delivery goroutine.
+type MuxObserver struct {
+	subs  map[*subObserver]map[chan<- interface{}]bool
+	reg   chan taggedRegReq
+	input chan taggedObservation
+}
+
+// NewMuxObserver constructs a new MuxObserver.
+//
+// qlen is the size of the channel buffer for observations sent into
+// the mux observer and reglen is the size of the channel buffer for
+// registration/unregistration events.
+func NewMuxObserver(qlen, reglen int) *MuxObserver {
+	rv := &MuxObserver{
+		subs:  map[*subObserver]map[chan<- interface{}]bool{},
+		reg:   make(chan taggedRegReq, reglen),
+		input: make(chan taggedObservation, qlen),
+	}
+	go rv.run()
+	return rv
+}
+
+// Close shuts down this mux observer.
+func (m *MuxObserver) Close() error {
+	close(m.reg)
+	return nil
+}
+
+func (m *MuxObserver) broadcast(to taggedObservation) {
+	for ch := range m.subs[to.sub] {
+		ch <- to.ob
+	}
+}
+
+func (m *MuxObserver) doReg(tr taggedRegReq) {
+	mm, exists := m.subs[tr.sub]
+	if !exists {
+		mm = map[chan<- interface{}]bool{}
+		m.subs[tr.sub] = mm
+	}
+	mm[tr.ch] = true
+}
+
+func (m *MuxObserver) doUnreg(tr taggedRegReq) {
+	mm, exists := m.subs[tr.sub]
+	if exists {
+		delete(mm, tr.ch)
+		if len(mm) == 0 {
+			delete(m.subs, tr.sub)
+		}
+	}
+}
+
+func (m *MuxObserver) handleReg(tr taggedRegReq) {
+	switch tr.regType {
+	case register:
+		m.doReg(tr)
+	case unregister:
+		m.doUnreg(tr)
+	case purge:
+		delete(m.subs, tr.sub)
+	}
+}
+
+func (m *MuxObserver) run() {
+	for {
+		select {
+		case tr, ok := <-m.reg:
+			if ok {
+				m.handleReg(tr)
+			} else {
+				return
+			}
+		default:
+			select {
+			case to := <-m.input:
+				m.broadcast(to)
+			case tr, ok := <-m.reg:
+				if ok {
+					m.handleReg(tr)
+				} else {
+					return
+				}
+			}
+		}
+	}
+}
+
+// Sub creates a new sub-broadcaster from this MuxObserver.
+func (m *MuxObserver) Sub() Broadcaster {
+	return &subObserver{m}
+}
+
+type subObserver struct {
+	mo *MuxObserver
+}
+
+func (s *subObserver) Register(ch chan<- interface{}) {
+	s.mo.reg <- taggedRegReq{s, ch, register}
+}
+
+func (s *subObserver) Unregister(ch chan<- interface{}) {
+	s.mo.reg <- taggedRegReq{s, ch, unregister}
+}
+
+func (s *subObserver) Close() error {
+	s.mo.reg <- taggedRegReq{s, nil, purge}
+	return nil
+}
+
+func (s *subObserver) Submit(ob interface{}) {
+	s.mo.input <- taggedObservation{s, ob}
+}
diff --git a/vendor/github.com/inconshreveable/log15/.travis.yml b/vendor/github.com/inconshreveable/log15/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..56678f3b51b8e135524abeae961f8a3324ad1dfc
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.0
+  - 1.1
+  - 1.2
+  - 1.3
+  - release
+  - tip
diff --git a/vendor/github.com/inconshreveable/log15/CONTRIBUTORS b/vendor/github.com/inconshreveable/log15/CONTRIBUTORS
new file mode 100644
index 0000000000000000000000000000000000000000..a0866713be09a01e96c2597c6158f96135000fbb
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/CONTRIBUTORS
@@ -0,0 +1,11 @@
+Contributors to log15:
+
+- Aaron L 
+- Alan Shreve 
+- Chris Hines 
+- Ciaran Downey 
+- Dmitry Chestnykh 
+- Evan Shaw 
+- Péter Szilágyi 
+- Trevor Gattis 
+- Vincent Vanackere 
diff --git a/vendor/github.com/inconshreveable/log15/README.md b/vendor/github.com/inconshreveable/log15/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3079139af4d2d2285b3a0e882c5eb48b25b7c892
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/README.md
@@ -0,0 +1,60 @@
+![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png)
+
+# log15 [![godoc reference](https://godoc.org/gopkg.in/inconshreveable/log15.v2?status.png)](https://godoc.org/gopkg.in/inconshreveable/log15.v2)
+
+Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. 
+
+## Features
+- A simple, easy-to-understand API
+- Promotes structured logging by encouraging use of key/value pairs
+- Child loggers which inherit and add their own private context
+- Lazy evaluation of expensive operations
+- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
+- Color terminal support
+- Built-in support for logging to files, streams, syslog, and the network
+- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
+
+## Versioning
+The API of the master branch of log15 should always be considered unstable. Using a stable version
+of the log15 package is supported by gopkg.in. Include your dependency like so:
+
+```go
+import log "gopkg.in/inconshreveable/log15.v2"
+```
+
+## Examples
+
+```go
+// all loggers can have key/value context
+srvlog := log.New("module", "app/server")
+
+// all log messages can have key/value context 
+srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
+
+// child loggers with inherited context
+connlog := srvlog.New("raddr", c.RemoteAddr())
+connlog.Info("connection open")
+
+// lazy evaluation
+connlog.Debug("ping remote", "latency", log.Lazy(pingRemote))
+
+// flexible configuration
+srvlog.SetHandler(log.MultiHandler(
+    log.StreamHandler(os.Stderr, log.LogfmtFormat()),
+    log.LvlFilterHandler(
+        log.LvlError,
+        log.Must.FileHandler("errors.json", log.JsonFormat())))
+```
+
+## FAQ
+
+### The varargs style is brittle and error prone! Can I have type safety please?
+Yes. Use `log.Ctx`:
+
+```go
+srvlog := log.New(log.Ctx{"module": "app/server"})
+srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
+```
+
+## License
+Apache
diff --git a/vendor/github.com/inconshreveable/log15/RELEASING.md b/vendor/github.com/inconshreveable/log15/RELEASING.md
new file mode 100644
index 0000000000000000000000000000000000000000..589a4dcc618100363edc8f528d8d4a19b3d62f13
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/RELEASING.md
@@ -0,0 +1,19 @@
+# log15's release strategy
+
+log15 uses gopkg.in to manage versioning releases so that consumers who don't vendor dependencies can rely upon a stable API.
+
+## Master
+
+Master is considered to have no API stability guarantee, so merging new code that passes tests into master is always okay.
+
+## Releasing a new API-compatible version
+
+The process to release a new API-compatible version is described below. For the purposes of this example, we'll assume you're trying to release a new version of v2
+
+1. `git checkout v2`
+1. `git merge master`
+1. Audit the code for any imports of sub-packages. Modify any import references from `github.com/inconshreveable/log15/<pkg>` -> `gopkg.in/inconshreveable/log15.v2/<pkg>`
+1. `git commit`
+1. `git tag`, find the latest tag of the style v2.X.
+1. `git tag v2.X+1` If the last version was v2.6, you would run `git tag v2.7`
+1. `git push --tags git@github.com:inconshreveable/log15.git v2`
diff --git a/vendor/github.com/inconshreveable/log15/doc.go b/vendor/github.com/inconshreveable/log15/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..64826d7c2c05823f25a2ac56eeafbd54cf363a5f
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/doc.go
@@ -0,0 +1,333 @@
+/*
+Package log15 provides an opinionated, simple toolkit for best-practice logging that is
+both human and machine readable. It is modeled after the standard library's io and net/http
+packages.
+
+This package enforces you to only log key/value pairs. Keys must be strings. Values may be
+any type that you like. The default output format is logfmt, but you may also choose to use
+JSON instead if that suits you. Here's how you log:
+
+    log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
+
+This will output a line that looks like:
+
+     lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
+
+Getting Started
+
+To get started, you'll want to import the library:
+
+    import log "gopkg.in/inconshreveable/log15.v2"
+
+
+Now you're ready to start logging:
+
+    func main() {
+        log.Info("Program starting", "args", os.Args())
+    }
+
+
+Convention
+
+Because recording a human-meaningful message is common and good practice, the first argument to every
+logging method is the value to the *implicit* key 'msg'.
+
+Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
+will the current timestamp with key 't'.
+
+You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
+you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
+logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
+in the variadic argument list:
+
+    log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
+
+If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
+
+    log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
+
+
+Context loggers
+
+Frequently, you want to add context to a logger so that you can track actions associated with it. An http
+request is a good example. You can easily create new loggers that have context that is automatically included
+with each log line:
+
+    requestlogger := log.New("path", r.URL.Path)
+
+    // later
+    requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
+
+This will output a log line that includes the path context that is attached to the logger:
+
+    lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
+
+
+Handlers
+
+The Handler interface defines where log lines are printed to and how they are formated. Handler is a
+single interface that is inspired by net/http's handler interface:
+
+    type Handler interface {
+        Log(r *Record)
+    }
+
+
+Handlers can filter records, format them, or dispatch to multiple other Handlers.
+This package implements a number of Handlers for common logging patterns that are
+easily composed to create flexible, custom logging structures.
+
+Here's an example handler that prints logfmt output to Stdout:
+
+    handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
+
+Here's an example handler that defers to two other handlers. One handler only prints records
+from the rpc package in logfmt to standard out. The other prints records at Error level
+or above in JSON formatted output to the file /var/log/service.json
+
+    handler := log.MultiHandler(
+        log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
+        log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler())
+    )
+
+Logging File Names and Line Numbers
+
+This package implements three Handlers that add debugging information to the
+context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
+an example that adds the source file and line number of each logging call to
+the context.
+
+    h := log.CallerFileHandler(log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
+
+Here's an example that logs the call stack rather than just the call site.
+
+    h := log.CallerStackHandler("%+v", log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
+
+The "%+v" format instructs the handler to include the path of the source file
+relative to the compile time GOPATH. The log15/stack package documents the
+full list of formatting verbs and modifiers available.
+
+Custom Handlers
+
+The Handler interface is so simple that it's also trivial to write your own. Let's create an
+example handler which tries to write to one handler, but if that fails it falls back to
+writing to another handler and includes the error that it encountered when trying to write
+to the primary. This might be useful when trying to log over a network socket, but if that
+fails you want to log those records to a file on disk.
+
+    type BackupHandler struct {
+        Primary Handler
+        Secondary Handler
+    }
+
+    func (h *BackupHandler) Log (r *Record) error {
+        err := h.Primary.Log(r)
+        if err != nil {
+            r.Ctx = append(ctx, "primary_err", err)
+            return h.Secondary.Log(r)
+        }
+        return nil
+    }
+
+This pattern is so useful that a generic version that handles an arbitrary number of Handlers
+is included as part of this library called FailoverHandler.
+
+Logging Expensive Operations
+
+Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
+the price of computing them if you haven't turned up your logging level to a high level of detail.
+
+This package provides a simple type to annotate a logging operation that you want to be evaluated
+lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
+filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
+
+    func factorRSAKey() (factors []int) {
+        // return the factors of a very large number
+    }
+
+    log.Debug("factors", log.Lazy{factorRSAKey})
+
+If this message is not logged for any reason (like logging at the Error level), then
+factorRSAKey is never evaluated.
+
+Dynamic context values
+
+The same log.Lazy mechanism can be used to attach context to a logger which you want to be
+evaluated when the message is logged, but not when the logger is created. For example, let's imagine
+a game where you have Player objects:
+
+    type Player struct {
+        name string
+        alive bool
+        log.Logger
+    }
+
+You always want to log a player's name and whether they're alive or dead, so when you create the player
+object, you might do:
+
+    p := &Player{name: name, alive: true}
+    p.Logger = log.New("name", p.name, "alive", p.alive)
+
+Only now, even after a player has died, the logger will still report they are alive because the logging
+context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
+of whether the player is alive or not to each log message, so that the log records will reflect the player's
+current state no matter when the log message is written:
+
+    p := &Player{name: name, alive: true}
+    isAlive := func() bool { return p.alive }
+    player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
+
+Terminal Format
+
+If log15 detects that stdout is a terminal, it will configure the default
+handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
+logs records nicely for your terminal, including color-coded output based
+on log level.
+
+Error Handling
+
+Because log15 allows you to step around the type system, there are a few ways you can specify
+invalid arguments to the logging functions. You could, for example, wrap something that is not
+a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
+are typically the mechanism by which errors are reported, it would be onerous for the logging functions
+to return errors. Instead, log15 handles errors by making these guarantees to you:
+
+- Any log record containing an error will still be printed with the error explained to you as part of the log record.
+
+- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
+(and if you like, automatically) detect if any of your logging calls are passing bad values.
+
+Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
+are encouraged to return errors only if they fail to write their log records out to an external source like if the
+syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
+like the FailoverHandler.
+
+Library Use
+
+log15 is intended to be useful for library authors as a way to provide configurable logging to
+users of their library. Best practice for use in a library is to always disable all output for your logger
+by default and to provide a public Logger instance that consumers of your library can configure. Like so:
+
+    package yourlib
+
+    import "gopkg.in/inconshreveable/log15.v2"
+
+    var Log = log.New()
+
+    func init() {
+        Log.SetHandler(log.DiscardHandler())
+    }
+
+Users of your library may then enable it if they like:
+
+    import "gopkg.in/inconshreveable/log15.v2"
+    import "example.com/yourlib"
+
+    func main() {
+        handler := // custom handler setup
+        yourlib.Log.SetHandler(handler)
+    }
+
+Best practices attaching logger context
+
+The ability to attach context to a logger is a powerful one. Where should you do it and why?
+I favor embedding a Logger directly into any persistent object in my application and adding
+unique, tracing context keys to it. For instance, imagine I am writing a web browser:
+
+    type Tab struct {
+        url string
+        render *RenderingContext
+        // ...
+
+        Logger
+    }
+
+    func NewTab(url string) *Tab {
+        return &Tab {
+            // ...
+            url: url,
+
+            Logger: log.New("url", url),
+        }
+    }
+
+When a new tab is created, I assign a logger to it with the url of
+the tab as context so it can easily be traced through the logs.
+Now, whenever we perform any operation with the tab, we'll log with its
+embedded logger and it will include the tab title automatically:
+
+    tab.Debug("moved position", "idx", tab.idx)
+
+There's only one problem. What if the tab url changes? We could
+use log.Lazy to make sure the current url is always written, but that
+would mean that we couldn't trace a tab's full lifetime through our
+logs after the user navigates to a new URL.
+
+Instead, think about what values to attach to your loggers the
+same way you think about what to use as a key in a SQL database schema.
+If it's possible to use a natural key that is unique for the lifetime of the
+object, do so. But otherwise, log15's ext package has a handy RandId
+function to let you generate what you might call "surrogate keys"
+They're just random hex identifiers to use for tracing. Back to our
+Tab example, we would prefer to set up our Logger like so:
+
+        import logext "gopkg.in/inconshreveable/log15.v2/ext"
+
+        t := &Tab {
+            // ...
+            url: url,
+        }
+
+        t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
+        return t
+
+Now we'll have a unique traceable identifier even across loading new urls, but
+we'll still be able to see the tab's current url in the log messages.
+
+Must
+
+For all Handler functions which can return an error, there is a version of that
+function which will return no error but panics on failure. They are all available
+on the Must object. For example:
+
+    log.Must.FileHandler("/path", log.JsonFormat)
+    log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
+
+Inspiration and Credit
+
+All of the following excellent projects inspired the design of this library:
+
+code.google.com/p/log4go
+
+github.com/op/go-logging
+
+github.com/technoweenie/grohl
+
+github.com/Sirupsen/logrus
+
+github.com/kr/logfmt
+
+github.com/spacemonkeygo/spacelog
+
+golang's stdlib, notably io and net/http
+
+The Name
+
+https://xkcd.com/927/
+
+*/
+package log15
diff --git a/vendor/github.com/inconshreveable/log15/ext/handler.go b/vendor/github.com/inconshreveable/log15/ext/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..438a7d71b567ba17dea9ebee964a292fbb798b5b
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/ext/handler.go
@@ -0,0 +1,130 @@
+package ext
+
+import (
+	"os"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+
+	log "github.com/inconshreveable/log15"
+)
+
+// EscalateErrHandler wraps another handler and passes all records through
+// unchanged except if the logged context contains a non-nil error
+// value in its context. In that case, the record's level is raised
+// to LvlError unless it was already more serious (LvlCrit).
+//
+// This allows you to log the result of all functions for debugging
+// and still capture error conditions when in production with a single
+// log line. As an example, the following log record will be written
+// out only if there was an error writing a value to redis:
+//
+//     logger := logext.EscalateErrHandler(
+//         log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler))
+//
+//     reply, err := redisConn.Do("SET", "foo", "bar")
+//     logger.Debug("Wrote value to redis", "reply", reply, "err", err)
+//     if err != nil {
+//         return err
+//     }
+//
+func EscalateErrHandler(h log.Handler) log.Handler {
+	return log.FuncHandler(func(r *log.Record) error {
+		if r.Lvl > log.LvlError {
+			for i := 1; i < len(r.Ctx); i++ {
+				if v, ok := r.Ctx[i].(error); ok && v != nil {
+					r.Lvl = log.LvlError
+					break
+				}
+			}
+		}
+		return h.Log(r)
+	})
+}
+
+// SpeculativeHandler is a handler for speculative logging. It
+// keeps a ring buffer of the given size full of the last events
+// logged into it. When Flush is called, all buffered log records
+// are written to the wrapped handler. This is extremely useful for
+// continuously capturing debug level output, but only flushing those
+// log records if an exceptional condition is encountered.
+func SpeculativeHandler(size int, h log.Handler) *Speculative {
+	return &Speculative{
+		handler: h,
+		recs:    make([]*log.Record, size),
+	}
+}
+
+type Speculative struct {
+	mu      sync.Mutex
+	idx     int
+	recs    []*log.Record
+	handler log.Handler
+	full    bool
+}
+
+func (h *Speculative) Log(r *log.Record) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.recs[h.idx] = r
+	h.idx = (h.idx + 1) % len(h.recs)
+	h.full = h.full || h.idx == 0
+	return nil
+}
+
+func (h *Speculative) Flush() {
+	recs := make([]*log.Record, 0)
+	func() {
+		h.mu.Lock()
+		defer h.mu.Unlock()
+		if h.full {
+			recs = append(recs, h.recs[h.idx:]...)
+		}
+		recs = append(recs, h.recs[:h.idx]...)
+
+		// reset state
+		h.full = false
+		h.idx = 0
+	}()
+
+	// don't hold the lock while we flush to the wrapped handler
+	for _, r := range recs {
+		h.handler.Log(r)
+	}
+}
+
+// HotSwapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+// HotSwapHandler is the same functionality
+// used to implement the SetHandler method for the default
+// implementation of Logger.
+func HotSwapHandler(h log.Handler) *HotSwap {
+	hs := new(HotSwap)
+	hs.Swap(h)
+	return hs
+}
+
+type HotSwap struct {
+	handler unsafe.Pointer
+}
+
+func (h *HotSwap) Log(r *log.Record) error {
+	return (*(*log.Handler)(atomic.LoadPointer(&h.handler))).Log(r)
+}
+
+func (h *HotSwap) Swap(newHandler log.Handler) {
+	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
+}
+
+// FatalHandler makes critical errors exit the program
+// immediately, much like the log.Fatal* methods from the
+// standard log package
+func FatalHandler(h log.Handler) log.Handler {
+	return log.FuncHandler(func(r *log.Record) error {
+		err := h.Log(r)
+		if r.Lvl == log.LvlCrit {
+			os.Exit(1)
+		}
+		return err
+	})
+}
diff --git a/vendor/github.com/inconshreveable/log15/ext/id.go b/vendor/github.com/inconshreveable/log15/ext/id.go
new file mode 100644
index 0000000000000000000000000000000000000000..0bfb1551f3a2d9d1c7d6d007f3a6ef637a22be3d
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/ext/id.go
@@ -0,0 +1,47 @@
+package ext
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var r = rand.New(&lockedSource{src: rand.NewSource(time.Now().Unix())})
+
+// RandId creates a random identifier of the requested length.
+// Useful for assigning mostly-unique identifiers for logging
+// and identification that are unlikely to collide because of
+// short lifespan or low set cardinality
+func RandId(idlen int) string {
+	b := make([]byte, idlen)
+	var randVal uint32
+	for i := 0; i < idlen; i++ {
+		byteIdx := i % 4
+		if byteIdx == 0 {
+			randVal = r.Uint32()
+		}
+		b[i] = byte((randVal >> (8 * uint(byteIdx))) & 0xFF)
+	}
+	return fmt.Sprintf("%x", b)
+}
+
+// lockedSource is a wrapper to allow a rand.Source to be used
+// concurrently (same type as the one used internally in math/rand).
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
diff --git a/vendor/github.com/inconshreveable/log15/format.go b/vendor/github.com/inconshreveable/log15/format.go
new file mode 100644
index 0000000000000000000000000000000000000000..3468f3048f3f9f91d91602dd3a33ae394d03b350
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/format.go
@@ -0,0 +1,257 @@
+package log15
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	timeFormat     = "2006-01-02T15:04:05-0700"
+	termTimeFormat = "01-02|15:04:05"
+	floatFormat    = 'f'
+	termMsgJust    = 40
+)
+
+type Format interface {
+	Format(r *Record) []byte
+}
+
+// FormatFunc returns a new Format object which uses
+// the given function to perform record formatting.
+func FormatFunc(f func(*Record) []byte) Format {
+	return formatFunc(f)
+}
+
+type formatFunc func(*Record) []byte
+
+func (f formatFunc) Format(r *Record) []byte {
+	return f(r)
+}
+
+// TerminalFormat formats log records optimized for human readability on
+// a terminal with color-coded level output and terser human friendly timestamp.
+// This format should only be used for interactive programs or while developing.
+//
+//     [TIME] [LEVEL] MESSAGE key=value key=value ...
+//
+// Example:
+//
+//     [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
+//
+func TerminalFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		var color = 0
+		switch r.Lvl {
+		case LvlCrit:
+			color = 35
+		case LvlError:
+			color = 31
+		case LvlWarn:
+			color = 33
+		case LvlInfo:
+			color = 32
+		case LvlDebug:
+			color = 36
+		}
+
+		b := &bytes.Buffer{}
+		lvl := strings.ToUpper(r.Lvl.String())
+		if color > 0 {
+			fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
+		} else {
+			fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
+		}
+
+		// try to justify the log output for short messages
+		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
+			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+		}
+
+		// print the keys logfmt style
+		logfmt(b, r.Ctx, color)
+		return b.Bytes()
+	})
+}
+
+// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
+// format for key/value pairs.
+//
+// For more details see: http://godoc.org/github.com/kr/logfmt
+//
+func LogfmtFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
+		buf := &bytes.Buffer{}
+		logfmt(buf, append(common, r.Ctx...), 0)
+		return buf.Bytes()
+	})
+}
+
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+	for i := 0; i < len(ctx); i += 2 {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+
+		k, ok := ctx[i].(string)
+		v := formatLogfmtValue(ctx[i+1])
+		if !ok {
+			k, v = errorKey, formatLogfmtValue(k)
+		}
+
+		// XXX: we should probably check that all of your key bytes aren't invalid
+		if color > 0 {
+			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
+		} else {
+			fmt.Fprintf(buf, "%s=%s", k, v)
+		}
+	}
+
+	buf.WriteByte('\n')
+}
+
+// JsonFormat formats log records as JSON objects separated by newlines.
+// It is the equivalent of JsonFormatEx(false, true).
+func JsonFormat() Format {
+	return JsonFormatEx(false, true)
+}
+
+// JsonFormatEx formats log records as JSON objects. If pretty is true,
+// records will be pretty-printed. If lineSeparated is true, records
+// will be logged with a new line between each record.
+func JsonFormatEx(pretty, lineSeparated bool) Format {
+	jsonMarshal := json.Marshal
+	if pretty {
+		jsonMarshal = func(v interface{}) ([]byte, error) {
+			return json.MarshalIndent(v, "", "    ")
+		}
+	}
+
+	return FormatFunc(func(r *Record) []byte {
+		props := make(map[string]interface{})
+
+		props[r.KeyNames.Time] = r.Time
+		props[r.KeyNames.Lvl] = r.Lvl.String()
+		props[r.KeyNames.Msg] = r.Msg
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			k, ok := r.Ctx[i].(string)
+			if !ok {
+				props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
+			}
+			props[k] = formatJsonValue(r.Ctx[i+1])
+		}
+
+		b, err := jsonMarshal(props)
+		if err != nil {
+			b, _ = jsonMarshal(map[string]string{
+				errorKey: err.Error(),
+			})
+			return b
+		}
+
+		if lineSeparated {
+			b = append(b, '\n')
+		}
+
+		return b
+	})
+}
+
+func formatShared(value interface{}) (result interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+				result = "nil"
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	switch v := value.(type) {
+	case time.Time:
+		return v.Format(timeFormat)
+
+	case error:
+		return v.Error()
+
+	case fmt.Stringer:
+		return v.String()
+
+	default:
+		return v
+	}
+}
+
+func formatJsonValue(value interface{}) interface{} {
+	value = formatShared(value)
+	switch value.(type) {
+	case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
+		return value
+	default:
+		return fmt.Sprintf("%+v", value)
+	}
+}
+
+// formatValue formats a value for serialization
+func formatLogfmtValue(value interface{}) string {
+	if value == nil {
+		return "nil"
+	}
+
+	value = formatShared(value)
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case float32:
+		return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
+	case float64:
+		return strconv.FormatFloat(v, floatFormat, 3, 64)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		return fmt.Sprintf("%d", value)
+	case string:
+		return escapeString(v)
+	default:
+		return escapeString(fmt.Sprintf("%+v", value))
+	}
+}
+
+func escapeString(s string) string {
+	needQuotes := false
+	e := bytes.Buffer{}
+	e.WriteByte('"')
+	for _, r := range s {
+		if r <= ' ' || r == '=' || r == '"' {
+			needQuotes = true
+		}
+
+		switch r {
+		case '\\', '"':
+			e.WriteByte('\\')
+			e.WriteByte(byte(r))
+		case '\n':
+			e.WriteByte('\\')
+			e.WriteByte('n')
+		case '\r':
+			e.WriteByte('\\')
+			e.WriteByte('r')
+		case '\t':
+			e.WriteByte('\\')
+			e.WriteByte('t')
+		default:
+			e.WriteRune(r)
+		}
+	}
+	e.WriteByte('"')
+	start, stop := 0, e.Len()
+	if !needQuotes {
+		start, stop = 1, stop-1
+	}
+	return string(e.Bytes()[start:stop])
+}
diff --git a/vendor/github.com/inconshreveable/log15/handler.go b/vendor/github.com/inconshreveable/log15/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c4c2202c22caadbd22be4d0098640667edd2419
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/handler.go
@@ -0,0 +1,371 @@
+package log15
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"sync"
+
+	"github.com/inconshreveable/log15/stack"
+)
+
+// A Logger prints its log records by writing to a Handler.
+// The Handler interface defines where and how log records are written.
+// Handlers are composable, providing you great flexibility in combining
+// them to achieve the logging structure that suits your applications.
+type Handler interface {
+	Log(r *Record) error
+}
+
+// FuncHandler returns a Handler that logs records with the given
+// function.
+func FuncHandler(fn func(r *Record) error) Handler {
+	return funcHandler(fn)
+}
+
+type funcHandler func(r *Record) error
+
+func (h funcHandler) Log(r *Record) error {
+	return h(r)
+}
+
+// StreamHandler writes log records to an io.Writer
+// with the given format. StreamHandler can be used
+// to easily begin writing log records to other
+// outputs.
+//
+// StreamHandler wraps itself with LazyHandler and SyncHandler
+// to evaluate Lazy objects and perform safe concurrent writes.
+func StreamHandler(wr io.Writer, fmtr Format) Handler {
+	h := FuncHandler(func(r *Record) error {
+		_, err := wr.Write(fmtr.Format(r))
+		return err
+	})
+	return LazyHandler(SyncHandler(h))
+}
+
+// SyncHandler can be wrapped around a handler to guarantee that
+// only a single Log operation can proceed at a time. It's necessary
+// for thread-safe concurrent writes.
+func SyncHandler(h Handler) Handler {
+	var mu sync.Mutex
+	return FuncHandler(func(r *Record) error {
+		defer mu.Unlock()
+		mu.Lock()
+		return h.Log(r)
+	})
+}
+
+// FileHandler returns a handler which writes log records to the given file
+// using the given format. If the path
+// already exists, FileHandler will append to the given file. If it does not,
+// FileHandler will create the file with mode 0644.
+func FileHandler(path string, fmtr Format) (Handler, error) {
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		return nil, err
+	}
+	return closingHandler{f, StreamHandler(f, fmtr)}, nil
+}
+
+// NetHandler opens a socket to the given address and writes records
+// over the connection.
+func NetHandler(network, addr string, fmtr Format) (Handler, error) {
+	conn, err := net.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
+}
+
+// XXX: closingHandler is essentially unused at the moment
+// it's meant for a future time when the Handler interface supports
+// a possible Close() operation
+type closingHandler struct {
+	io.WriteCloser
+	Handler
+}
+
+func (h *closingHandler) Close() error {
+	return h.WriteCloser.Close()
+}
+
+// CallerFileHandler returns a Handler that adds the line number and file of
+// the calling function to the context with key "caller".
+func CallerFileHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		call := stack.Call(r.CallPC[0])
+		r.Ctx = append(r.Ctx, "caller", fmt.Sprint(call))
+		return h.Log(r)
+	})
+}
+
+// CallerFuncHandler returns a Handler that adds the calling function name to
+// the context with key "fn".
+func CallerFuncHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		call := stack.Call(r.CallPC[0])
+		r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", call))
+		return h.Log(r)
+	})
+}
+
+// CallerStackHandler returns a Handler that adds a stack trace to the context
+// with key "stack". The stack trace is formatted as a space separated list of
+// call sites inside matching []'s. The most recent call site is listed first.
+// Each call site is formatted according to format. See the documentation of
+// log15/stack.Call.Format for the list of supported formats.
+func CallerStackHandler(format string, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		s := stack.Callers().
+			TrimBelow(stack.Call(r.CallPC[0])).
+			TrimRuntime()
+		if len(s) > 0 {
+			buf := &bytes.Buffer{}
+			buf.WriteByte('[')
+			for i, pc := range s {
+				if i > 0 {
+					buf.WriteByte(' ')
+				}
+				fmt.Fprintf(buf, format, pc)
+			}
+			buf.WriteByte(']')
+			r.Ctx = append(r.Ctx, "stack", buf.String())
+		}
+		return h.Log(r)
+	})
+}
+
+// FilterHandler returns a Handler that only writes records to the
+// wrapped Handler if the given function evaluates true. For example,
+// to only log records where the 'err' key is not nil:
+//
+//    logger.SetHandler(FilterHandler(func(r *Record) bool {
+//        for i := 0; i < len(r.Ctx); i += 2 {
+//            if r.Ctx[i] == "err" {
+//                return r.Ctx[i+1] != nil
+//            }
+//        }
+//        return false
+//    }, h))
+//
+func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		if fn(r) {
+			return h.Log(r)
+		}
+		return nil
+	})
+}
+
+// MatchFilterHandler returns a Handler that only writes records
+// to the wrapped Handler if the given key in the logged
+// context matches the value. For example, to only log records
+// from your ui package:
+//
+//    log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
+//
+func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		switch key {
+		case r.KeyNames.Lvl:
+			return r.Lvl == value
+		case r.KeyNames.Time:
+			return r.Time == value
+		case r.KeyNames.Msg:
+			return r.Msg == value
+		}
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			if r.Ctx[i] == key {
+				return r.Ctx[i+1] == value
+			}
+		}
+		return false
+	}, h)
+}
+
+// LvlFilterHandler returns a Handler that only writes
+// records which are less than the given verbosity
+// level to the wrapped Handler. For example, to only
+// log Error/Crit records:
+//
+//     log.LvlFilterHandler(log.Error, log.StdoutHandler)
+//
+func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		return r.Lvl <= maxLvl
+	}, h)
+}
+
+// A MultiHandler dispatches any write to each of its handlers.
+// This is useful for writing different types of log information
+// to different locations. For example, to log to a file and
+// standard error:
+//
+//     log.MultiHandler(
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StderrHandler)
+//
+func MultiHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		for _, h := range hs {
+			// what to do about failures?
+			h.Log(r)
+		}
+		return nil
+	})
+}
+
+// A FailoverHandler writes all log records to the first handler
+// specified, but will failover and write to the second handler if
+// the first handler has failed, and so on for all handlers specified.
+// For example you might want to log to a network socket, but failover
+// to writing to a file if the network fails, and then to
+// standard out if the file write fails:
+//
+//     log.FailoverHandler(
+//         log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StdoutHandler)
+//
+// All writes that do not go to the first handler will add context with keys of
+// the form "failover_err_{idx}" which explain the error encountered while
+// trying to write to the handlers before them in the list.
+func FailoverHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		var err error
+		for i, h := range hs {
+			err = h.Log(r)
+			if err == nil {
+				return nil
+			} else {
+				r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
+			}
+		}
+
+		return err
+	})
+}
+
+// ChannelHandler writes all records to the given channel.
+// It blocks if the channel is full. Useful for async processing
+// of log messages, it's used by BufferedHandler.
+func ChannelHandler(recs chan<- *Record) Handler {
+	return FuncHandler(func(r *Record) error {
+		recs <- r
+		return nil
+	})
+}
+
+// BufferedHandler writes all records to a buffered
+// channel of the given size which flushes into the wrapped
+// handler whenever it is available for writing. Since these
+// writes happen asynchronously, all writes to a BufferedHandler
+// never return an error and any errors from the wrapped handler are ignored.
+func BufferedHandler(bufSize int, h Handler) Handler {
+	recs := make(chan *Record, bufSize)
+	go func() {
+		for m := range recs {
+			_ = h.Log(m)
+		}
+	}()
+	return ChannelHandler(recs)
+}
+
+// LazyHandler writes all values to the wrapped handler after evaluating
+// any lazy functions in the record's context. It is already wrapped
+// around StreamHandler and SyslogHandler in this library, you'll only need
+// it if you write your own Handler.
+func LazyHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		// go through the values (odd indices) and reassign
+		// the values of any lazy fn to the result of its execution
+		hadErr := false
+		for i := 1; i < len(r.Ctx); i += 2 {
+			lz, ok := r.Ctx[i].(Lazy)
+			if ok {
+				v, err := evaluateLazy(lz)
+				if err != nil {
+					hadErr = true
+					r.Ctx[i] = err
+				} else {
+					if cs, ok := v.(stack.Trace); ok {
+						v = cs.TrimBelow(stack.Call(r.CallPC[0])).
+							TrimRuntime()
+					}
+					r.Ctx[i] = v
+				}
+			}
+		}
+
+		if hadErr {
+			r.Ctx = append(r.Ctx, errorKey, "bad lazy")
+		}
+
+		return h.Log(r)
+	})
+}
+
+func evaluateLazy(lz Lazy) (interface{}, error) {
+	t := reflect.TypeOf(lz.Fn)
+
+	if t.Kind() != reflect.Func {
+		return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
+	}
+
+	if t.NumIn() > 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
+	}
+
+	if t.NumOut() == 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
+	}
+
+	value := reflect.ValueOf(lz.Fn)
+	results := value.Call([]reflect.Value{})
+	if len(results) == 1 {
+		return results[0].Interface(), nil
+	} else {
+		values := make([]interface{}, len(results))
+		for i, v := range results {
+			values[i] = v.Interface()
+		}
+		return values, nil
+	}
+}
+
+// DiscardHandler reports success for all writes but does nothing.
+// It is useful for dynamically disabling logging at runtime via
+// a Logger's SetHandler method.
+func DiscardHandler() Handler {
+	return FuncHandler(func(r *Record) error {
+		return nil
+	})
+}
+
+// The Must object provides the following Handler creation functions
+// which instead of returning an error parameter only return a Handler
+// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
+var Must muster
+
+func must(h Handler, err error) Handler {
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+type muster struct{}
+
+func (m muster) FileHandler(path string, fmtr Format) Handler {
+	return must(FileHandler(path, fmtr))
+}
+
+func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
+	return must(NetHandler(network, addr, fmtr))
+}
diff --git a/vendor/github.com/inconshreveable/log15/handler_appengine.go b/vendor/github.com/inconshreveable/log15/handler_appengine.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5e34d2542d0924f1666f871181ad5e9098729f9
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/handler_appengine.go
@@ -0,0 +1,26 @@
+// +build appengine
+
+package log15
+
+import "sync"
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler interface{}
+	lock    sync.RWMutex
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+
+	return h.handler.(Handler).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+
+	h.handler = newHandler
+}
diff --git a/vendor/github.com/inconshreveable/log15/handler_other.go b/vendor/github.com/inconshreveable/log15/handler_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..4da96745bb1e4b2b1853dbcdd30ca2754da86f68
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/handler_other.go
@@ -0,0 +1,22 @@
+// +build !appengine
+
+package log15
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler unsafe.Pointer
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	return (*(*Handler)(atomic.LoadPointer(&h.handler))).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
+}
diff --git a/vendor/github.com/inconshreveable/log15/logger.go b/vendor/github.com/inconshreveable/log15/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..dcd7cf8dba28bbf7a251f4a045fed323ab0eec8b
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/logger.go
@@ -0,0 +1,201 @@
+package log15
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+const timeKey = "t"
+const lvlKey = "lvl"
+const msgKey = "msg"
+const errorKey = "LOG15_ERROR"
+
+type Lvl int
+
+const (
+	LvlCrit Lvl = iota
+	LvlError
+	LvlWarn
+	LvlInfo
+	LvlDebug
+)
+
+// Returns the name of a Lvl
+func (l Lvl) String() string {
+	switch l {
+	case LvlDebug:
+		return "dbug"
+	case LvlInfo:
+		return "info"
+	case LvlWarn:
+		return "warn"
+	case LvlError:
+		return "eror"
+	case LvlCrit:
+		return "crit"
+	default:
+		panic("bad level")
+	}
+}
+
+// Returns the appropriate Lvl from a string name.
+// Useful for parsing command line args and configuration files.
+func LvlFromString(lvlString string) (Lvl, error) {
+	switch lvlString {
+	case "debug", "dbug":
+		return LvlDebug, nil
+	case "info":
+		return LvlInfo, nil
+	case "warn":
+		return LvlWarn, nil
+	case "error", "eror":
+		return LvlError, nil
+	case "crit":
+		return LvlCrit, nil
+	default:
+		return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
+	}
+}
+
+// A Record is what a Logger asks its handler to write
+type Record struct {
+	Time     time.Time
+	Lvl      Lvl
+	Msg      string
+	Ctx      []interface{}
+	CallPC   [1]uintptr
+	KeyNames RecordKeyNames
+}
+
+type RecordKeyNames struct {
+	Time string
+	Msg  string
+	Lvl  string
+}
+
+// A Logger writes key/value pairs to a Handler
+type Logger interface {
+	// New returns a new Logger that has this logger's context plus the given context
+	New(ctx ...interface{}) Logger
+
+	// SetHandler updates the logger to write records to the specified handler.
+	SetHandler(h Handler)
+
+	// Log a message at the given level with context key/value pairs
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}
+
+type logger struct {
+	ctx []interface{}
+	h   *swapHandler
+}
+
+func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
+	r := Record{
+		Time: time.Now(),
+		Lvl:  lvl,
+		Msg:  msg,
+		Ctx:  newContext(l.ctx, ctx),
+		KeyNames: RecordKeyNames{
+			Time: timeKey,
+			Msg:  msgKey,
+			Lvl:  lvlKey,
+		},
+	}
+	runtime.Callers(3, r.CallPC[:])
+	l.h.Log(&r)
+}
+
+func (l *logger) New(ctx ...interface{}) Logger {
+	child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
+	child.SetHandler(l.h)
+	return child
+}
+
+func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
+	normalizedSuffix := normalize(suffix)
+	newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
+	n := copy(newCtx, prefix)
+	copy(newCtx[n:], normalizedSuffix)
+	return newCtx
+}
+
+func (l *logger) Debug(msg string, ctx ...interface{}) {
+	l.write(msg, LvlDebug, ctx)
+}
+
+func (l *logger) Info(msg string, ctx ...interface{}) {
+	l.write(msg, LvlInfo, ctx)
+}
+
+func (l *logger) Warn(msg string, ctx ...interface{}) {
+	l.write(msg, LvlWarn, ctx)
+}
+
+func (l *logger) Error(msg string, ctx ...interface{}) {
+	l.write(msg, LvlError, ctx)
+}
+
+func (l *logger) Crit(msg string, ctx ...interface{}) {
+	l.write(msg, LvlCrit, ctx)
+}
+
+func (l *logger) SetHandler(h Handler) {
+	l.h.Swap(h)
+}
+
+func normalize(ctx []interface{}) []interface{} {
+	// if the caller passed a Ctx object, then expand it
+	if len(ctx) == 1 {
+		if ctxMap, ok := ctx[0].(Ctx); ok {
+			ctx = ctxMap.toArray()
+		}
+	}
+
+	// ctx needs to be even because it's a series of key/value pairs
+	// no one wants to check for errors on logging functions,
+	// so instead of erroring on bad input, we'll just make sure
+	// that things are the right length and users can fix bugs
+	// when they see the output looks wrong
+	if len(ctx)%2 != 0 {
+		ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
+	}
+
+	return ctx
+}
+
+// Lazy allows you to defer calculation of a logged value that is expensive
+// to compute until it is certain that it must be evaluated with the given filters.
+//
+// Lazy may also be used in conjunction with a Logger's New() function
+// to generate a child logger which always reports the current value of changing
+// state.
+//
+// You may wrap any function which takes no arguments to Lazy. It may return any
+// number of values of any type.
+type Lazy struct {
+	Fn interface{}
+}
+
+// Ctx is a map of key/value pairs to pass as context to a log function
+// Use this only if you really need greater safety around the arguments you pass
+// to the logging functions.
+type Ctx map[string]interface{}
+
+func (c Ctx) toArray() []interface{} {
+	arr := make([]interface{}, len(c)*2)
+
+	i := 0
+	for k, v := range c {
+		arr[i] = k
+		arr[i+1] = v
+		i += 2
+	}
+
+	return arr
+}
diff --git a/vendor/github.com/inconshreveable/log15/root.go b/vendor/github.com/inconshreveable/log15/root.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5118d4090f036394444439e5d7c31d02c4ff5c8
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/root.go
@@ -0,0 +1,67 @@
+package log15
+
+import (
+	"os"
+
+	"github.com/inconshreveable/log15/term"
+	"github.com/mattn/go-colorable"
+)
+
+var (
+	root          *logger
+	StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
+	StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
+)
+
+func init() {
+	if term.IsTty(os.Stdout.Fd()) {
+		StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
+	}
+
+	if term.IsTty(os.Stderr.Fd()) {
+		StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
+	}
+
+	root = &logger{[]interface{}{}, new(swapHandler)}
+	root.SetHandler(StdoutHandler)
+}
+
+// New returns a new logger with the given context.
+// New is a convenient alias for Root().New
+func New(ctx ...interface{}) Logger {
+	return root.New(ctx...)
+}
+
+// Root returns the root logger
+func Root() Logger {
+	return root
+}
+
+// The following functions bypass the exported logger methods (logger.Debug,
+// etc.) to keep the call depth the same for all paths to logger.write so
+// runtime.Caller(2) always refers to the call site in client code.
+
+// Debug is a convenient alias for Root().Debug
+func Debug(msg string, ctx ...interface{}) {
+	root.write(msg, LvlDebug, ctx)
+}
+
+// Info is a convenient alias for Root().Info
+func Info(msg string, ctx ...interface{}) {
+	root.write(msg, LvlInfo, ctx)
+}
+
+// Warn is a convenient alias for Root().Warn
+func Warn(msg string, ctx ...interface{}) {
+	root.write(msg, LvlWarn, ctx)
+}
+
+// Error is a convenient alias for Root().Error
+func Error(msg string, ctx ...interface{}) {
+	root.write(msg, LvlError, ctx)
+}
+
+// Crit is a convenient alias for Root().Crit
+func Crit(msg string, ctx ...interface{}) {
+	root.write(msg, LvlCrit, ctx)
+}
diff --git a/vendor/github.com/inconshreveable/log15/syslog.go b/vendor/github.com/inconshreveable/log15/syslog.go
new file mode 100644
index 0000000000000000000000000000000000000000..36c12b11f7a7d98d9c74377ddb08dda7379fda67
--- /dev/null
+++ b/vendor/github.com/inconshreveable/log15/syslog.go
@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+	"log/syslog"
+	"strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.New(syslog.LOG_INFO, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
+func SyslogNetHandler(net, addr string, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.Dial(net, addr, syslog.LOG_INFO, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
+	if err != nil {
+		return nil, err
+	}
+	h := FuncHandler(func(r *Record) error {
+		var syslogFn = sysWr.Info
+		switch r.Lvl {
+		case LvlCrit:
+			syslogFn = sysWr.Crit
+		case LvlError:
+			syslogFn = sysWr.Err
+		case LvlWarn:
+			syslogFn = sysWr.Warning
+		case LvlInfo:
+			syslogFn = sysWr.Info
+		case LvlDebug:
+			syslogFn = sysWr.Debug
+		}
+
+		s := strings.TrimSpace(string(fmtr.Format(r)))
+		return syslogFn(s)
+	})
+	return LazyHandler(&closingHandler{sysWr, h}), nil
+}
+
+func (m muster) SyslogHandler(tag string, fmtr Format) Handler {
+	return must(SyslogHandler(tag, fmtr))
+}
+
+func (m muster) SyslogNetHandler(net, addr string, tag string, fmtr Format) Handler {
+	return must(SyslogNetHandler(net, addr, tag, fmtr))
+}
diff --git a/vendor/github.com/manucorporat/stats/stats.go b/vendor/github.com/manucorporat/stats/stats.go
new file mode 100644
index 0000000000000000000000000000000000000000..250e5416e769db2890fa869fe698b60d3f3073d1
--- /dev/null
+++ b/vendor/github.com/manucorporat/stats/stats.go
@@ -0,0 +1,87 @@
+package stats
+
+import "sync"
+
+type ValueType float64
+type StatsType map[string]ValueType
+
+type StatsCollector struct {
+	lock  sync.RWMutex
+	stats StatsType
+}
+
+func New() *StatsCollector {
+	s := new(StatsCollector)
+	s.Reset()
+	return s
+}
+
+func (s *StatsCollector) Reset() {
+	s.lock.Lock()
+	s.stats = make(StatsType)
+	s.lock.Unlock()
+}
+
+func (s *StatsCollector) Set(key string, value ValueType) {
+	s.lock.Lock()
+	s.stats[key] = value
+	s.lock.Unlock()
+}
+
+func (s *StatsCollector) Add(key string, delta ValueType) (v ValueType) {
+	s.lock.Lock()
+	v = s.stats[key]
+	v += delta
+	s.stats[key] = v
+	s.lock.Unlock()
+	return
+}
+
+func (s *StatsCollector) Get(key string) (v ValueType) {
+	s.lock.RLock()
+	v = s.stats[key]
+	s.lock.RUnlock()
+	return
+}
+
+func (s *StatsCollector) Del(key string) {
+	s.lock.Lock()
+	delete(s.stats, key)
+	s.lock.Unlock()
+}
+
+func (s *StatsCollector) Data() StatsType {
+	cp := make(StatsType)
+	s.lock.RLock()
+	for key, value := range s.stats {
+		cp[key] = value
+	}
+	s.lock.RUnlock()
+	return cp
+}
+
+var defaultCollector = New()
+
+func Reset() {
+	defaultCollector.Reset()
+}
+
+func Set(key string, value ValueType) {
+	defaultCollector.Set(key, value)
+}
+
+func Del(key string) {
+	defaultCollector.Del(key)
+}
+
+func Add(key string, delta ValueType) ValueType {
+	return defaultCollector.Add(key, delta)
+}
+
+func Get(key string) ValueType {
+	return defaultCollector.Get(key)
+}
+
+func Data() StatsType {
+	return defaultCollector.Data()
+}
diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..922b4f7f9191509ab386055098cf09f03c6f18f5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+TODO
+tmp/**/*
+*.coverprofile
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f0e67b84abe70fed20cf8097a7899dffe9a68369
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+install:
+  - go get -v -t ./...
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/gomega
+  - go install github.com/onsi/ginkgo/ginkgo
+  - export PATH=$PATH:$HOME/gopath/bin
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..438c8c1ec46c9fc1d2e4f2943a3049afa75a8ee5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -0,0 +1,136 @@
+## HEAD
+
+Improvements:
+
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+.  `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior.  Now, this:
+
+    ```golang
+    FDescribe("Some describe", func() {
+        It("A", func() {})
+
+        FIt("B", func() {})
+    })
+    ```
+
+  will run `B` but *not* `A`.  This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`.  Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`.  This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything.  Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s.  `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+    - `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
+    - The compiled `package.test` file can be run directly.  This runs the tests in series.
+    - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`.  Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory.  This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes.  The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass.  This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings.  If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node.  This is always a mistake.  Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path.  Allows ⌘-clicking in iTerm open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone.  Use `GinkgoT()` instead
+- Modified the Reporter interface 
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports.  This explicitly imports all exported identifiers in Ginkgo and Gomega.  Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs.  Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9415ee72c17f87d26fc640a1b198ae12c675704c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b8b77b5779a9c0add797a504a054c0597ff03719
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -0,0 +1,115 @@
+![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+
+[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
+
+Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more.  To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
+
+To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
+
+## Feature List
+
+- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests.  It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
+
+- Structure your BDD-style tests expressively:
+    - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
+    - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
+    - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
+    - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
+    - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
+
+- A comprehensive test runner that lets you:
+    - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
+    - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+    - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+    - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files.  Here are a few choice examples:
+    - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
+    - `ginkgo -cover` runs your tests using Golang's code coverage tool
+    - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
+    - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
+    - `ginkgo -r` runs all tests suites under the current directory
+    - `ginkgo -v` prints out identifying information for each test just before it runs
+
+    And much more: run `ginkgo help` for details!
+
+    The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
+
+- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests.  Run tests immediately as you develop!
+
+- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+
+- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code.  Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. 
+
+- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
+
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify).  Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+
+- A modular architecture that lets you easily:
+    - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+    - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+
+## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+
+Ginkgo is best paired with Gomega.  Learn more about Gomega [here](http://onsi.github.io/gomega/)
+
+## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
+
+Agouti allows you to run WebDriver integration tests.  Learn more about Agouti [here](http://agouti.org)
+
+## Set Me Up!
+
+You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
+
+```bash
+
+go get github.com/onsi/ginkgo/ginkgo  # installs the ginkgo CLI
+go get github.com/onsi/gomega         # fetches the matcher library
+
+cd path/to/package/you/want/to/test
+
+ginkgo bootstrap # set up a new ginkgo suite
+ginkgo generate  # will create a sample test file.  edit this file and add your tests then...
+
+go test # to run your tests
+
+ginkgo  # also runs your tests
+
+```
+
+## I'm new to Go: What are my testing options?
+
+Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega).  Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
+
+With that said, it's great to know what your options are :)
+
+### What Golang gives you out of the box
+
+Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
+
+### Matcher libraries for Golang's XUnit style tests
+
+A number of matcher libraries have been written to augment Go's built-in XUnit style tests.  Here are two that have gained traction:
+
+- [testify](https://github.com/stretchr/testify)
+- [gocheck](http://labix.org/gocheck)
+
+You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
+
+### BDD style testing frameworks
+
+There are a handful of BDD-style testing frameworks written for Golang.  Here are a few:
+
+- [Ginkgo](https://github.com/onsi/ginkgo) ;)
+- [GoConvey](https://github.com/smartystreets/goconvey) 
+- [Goblin](https://github.com/franela/goblin)
+- [Mao](https://github.com/azer/mao)
+- [Zen](https://github.com/pranavraja/zen)
+
+Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
+
+Go explore!
+
+## License
+
+Ginkgo is MIT-Licensed
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b22e34ad97afbd29f70fddfdea0e336aae31cdc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -0,0 +1,170 @@
+/*
+Ginkgo accepts a number of configuration options.
+
+These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
+
+You can also learn more via
+
+	ginkgo help
+
+or (I kid you not):
+
+	go test -asdf
+*/
+package config
+
+import (
+	"flag"
+	"time"
+
+	"fmt"
+)
+
+const VERSION = "1.2.0"
+
+type GinkgoConfigType struct {
+	RandomSeed        int64
+	RandomizeAllSpecs bool
+	FocusString       string
+	SkipString        string
+	SkipMeasurements  bool
+	FailOnPending     bool
+	FailFast          bool
+	EmitSpecProgress  bool
+	DryRun            bool
+
+	ParallelNode  int
+	ParallelTotal int
+	SyncHost      string
+	StreamHost    string
+}
+
+var GinkgoConfig = GinkgoConfigType{}
+
+type DefaultReporterConfigType struct {
+	NoColor           bool
+	SlowSpecThreshold float64
+	NoisyPendings     bool
+	Succinct          bool
+	Verbose           bool
+	FullTrace         bool
+}
+
+var DefaultReporterConfig = DefaultReporterConfigType{}
+
+func processPrefix(prefix string) string {
+	if prefix != "" {
+		prefix = prefix + "."
+	}
+	return prefix
+}
+
+func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
+	prefix = processPrefix(prefix)
+	flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
+	flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together.  By default, ginkgo only randomizes the top level Describe/Context groups.")
+	flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
+	flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
+	flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
+	flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything.  Best paired with -v.")
+	flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
+	flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
+	flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
+
+	if includeParallelFlags {
+		flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number.  For running specs in parallel.")
+		flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes.  For running specs in parallel.")
+		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
+		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
+	}
+
+	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
+	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
+	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
+	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
+	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
+	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
+}
+
+func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
+	prefix = processPrefix(prefix)
+	result := make([]string, 0)
+
+	if ginkgo.RandomSeed > 0 {
+		result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
+	}
+
+	if ginkgo.RandomizeAllSpecs {
+		result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
+	}
+
+	if ginkgo.SkipMeasurements {
+		result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
+	}
+
+	if ginkgo.FailOnPending {
+		result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
+	}
+
+	if ginkgo.FailFast {
+		result = append(result, fmt.Sprintf("--%sfailFast", prefix))
+	}
+
+	if ginkgo.DryRun {
+		result = append(result, fmt.Sprintf("--%sdryRun", prefix))
+	}
+
+	if ginkgo.FocusString != "" {
+		result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
+	}
+
+	if ginkgo.SkipString != "" {
+		result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
+	}
+
+	if ginkgo.EmitSpecProgress {
+		result = append(result, fmt.Sprintf("--%sprogress", prefix))
+	}
+
+	if ginkgo.ParallelNode != 0 {
+		result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
+	}
+
+	if ginkgo.ParallelTotal != 0 {
+		result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
+	}
+
+	if ginkgo.StreamHost != "" {
+		result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
+	}
+
+	if ginkgo.SyncHost != "" {
+		result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
+	}
+
+	if reporter.NoColor {
+		result = append(result, fmt.Sprintf("--%snoColor", prefix))
+	}
+
+	if reporter.SlowSpecThreshold > 0 {
+		result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
+	}
+
+	if !reporter.NoisyPendings {
+		result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
+	}
+
+	if reporter.Verbose {
+		result = append(result, fmt.Sprintf("--%sv", prefix))
+	}
+
+	if reporter.Succinct {
+		result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
+	}
+
+	if reporter.FullTrace {
+		result = append(result, fmt.Sprintf("--%strace", prefix))
+	}
+
+	return result
+}
diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table.go b/vendor/github.com/onsi/ginkgo/extensions/table/table.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae8ab7d248f48d4b97661f99d19b498d2f94442d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/extensions/table/table.go
@@ -0,0 +1,98 @@
+/*
+
+Table provides a simple DSL for Ginkgo-native Table-Driven Tests
+
+The godoc documentation describes Table's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
+
+*/
+
+package table
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/ginkgo"
+)
+
+/*
+DescribeTable describes a table-driven test.
+
+For example:
+
+    DescribeTable("a simple table",
+        func(x int, y int, expected bool) {
+            Ω(x > y).Should(Equal(expected))
+        },
+        Entry("x > y", 1, 0, true),
+        Entry("x == y", 0, 0, false),
+        Entry("x < y", 0, 1, false),
+    )
+
+The first argument to `DescribeTable` is a string description.
+The second argument is a function that will be run for each table entry.  Your assertions go here - the function is equivalent to a Ginkgo It.
+The subsequent arguments must be of type `TableEntry`.  We recommend using the `Entry` convenience constructors.
+
+The `Entry` constructor takes a string description followed by an arbitrary set of parameters.  These parameters are passed into your function.
+
+Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`.  Each `Entry` is turned into an `It` within the `Describe`.
+
+It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
+
+Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry).  In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
+*/
+func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+	describeTable(description, itBody, entries, false, false)
+	return true
+}
+
+/*
+You can focus a table with `FDescribeTable`.  This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+	describeTable(description, itBody, entries, false, true)
+	return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`.  This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+	describeTable(description, itBody, entries, true, false)
+	return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`.  This is equivalent to `XDescribe`.
+*/
+func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+	describeTable(description, itBody, entries, true, false)
+	return true
+}
+
+func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
+	itBodyValue := reflect.ValueOf(itBody)
+	if itBodyValue.Kind() != reflect.Func {
+		panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
+	}
+
+	if pending {
+		ginkgo.PDescribe(description, func() {
+			for _, entry := range entries {
+				entry.generateIt(itBodyValue)
+			}
+		})
+	} else if focused {
+		ginkgo.FDescribe(description, func() {
+			for _, entry := range entries {
+				entry.generateIt(itBodyValue)
+			}
+		})
+	} else {
+		ginkgo.Describe(description, func() {
+			for _, entry := range entries {
+				entry.generateIt(itBodyValue)
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go
new file mode 100644
index 0000000000000000000000000000000000000000..5fa645bceee3af2cb8f610d83ea35165a710cbcf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go
@@ -0,0 +1,81 @@
+package table
+
+import (
+	"reflect"
+
+	"github.com/onsi/ginkgo"
+)
+
+/*
+TableEntry represents an entry in a table test.  You generally use the `Entry` constructor.
+*/
+type TableEntry struct {
+	Description string
+	Parameters  []interface{}
+	Pending     bool
+	Focused     bool
+}
+
+func (t TableEntry) generateIt(itBody reflect.Value) {
+	if t.Pending {
+		ginkgo.PIt(t.Description)
+		return
+	}
+
+	values := []reflect.Value{}
+	for i, param := range t.Parameters {
+		var value reflect.Value
+
+		if param == nil {
+			inType := itBody.Type().In(i)
+			value = reflect.Zero(inType)
+		} else {
+			value = reflect.ValueOf(param)
+		}
+
+		values = append(values, value)
+	}
+
+	body := func() {
+		itBody.Call(values)
+	}
+
+	if t.Focused {
+		ginkgo.FIt(t.Description, body)
+	} else {
+		ginkgo.It(t.Description, body)
+	}
+}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
+Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
+
+Each Entry ends up generating an individual Ginkgo It.
+*/
+func Entry(description string, parameters ...interface{}) TableEntry {
+	return TableEntry{description, parameters, false, false}
+}
+
+/*
+You can focus a particular entry with FEntry.  This is equivalent to FIt.
+*/
+func FEntry(description string, parameters ...interface{}) TableEntry {
+	return TableEntry{description, parameters, false, true}
+}
+
+/*
+You can mark a particular entry as pending with PEntry.  This is equivalent to PIt.
+*/
+func PEntry(description string, parameters ...interface{}) TableEntry {
+	return TableEntry{description, parameters, true, false}
+}
+
+/*
+You can mark a particular entry as pending with XEntry.  This is equivalent to XIt.
+*/
+func XEntry(description string, parameters ...interface{}) TableEntry {
+	return TableEntry{description, parameters, true, false}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..d804fe00ca2f5b5590a7950ef717aa6a144c70e3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
@@ -0,0 +1,182 @@
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"text/template"
+
+	"go/build"
+
+	"github.com/onsi/ginkgo/ginkgo/nodot"
+)
+
+func BuildBootstrapCommand() *Command {
+	var agouti, noDot bool
+	flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
+	flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
+	flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
+
+	return &Command{
+		Name:         "bootstrap",
+		FlagSet:      flagSet,
+		UsageCommand: "ginkgo bootstrap <FLAGS>",
+		Usage: []string{
+			"Bootstrap a test suite for the current package",
+			"Accepts the following flags:",
+		},
+		Command: func(args []string, additionalArgs []string) {
+			generateBootstrap(agouti, noDot)
+		},
+	}
+}
+
+var bootstrapText = `package {{.Package}}_test
+
+import (
+	{{.GinkgoImport}}
+	{{.GomegaImport}}
+
+	"testing"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}_test
+
+import (
+	{{.GinkgoImport}}
+	{{.GomegaImport}}
+	"github.com/sclevine/agouti"
+
+	"testing"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = BeforeSuite(func() {
+	// Choose a WebDriver:
+
+	agoutiDriver = agouti.PhantomJS()
+	// agoutiDriver = agouti.Selenium()
+	// agoutiDriver = agouti.ChromeDriver()
+
+	Expect(agoutiDriver.Start()).To(Succeed())
+})
+
+var _ = AfterSuite(func() {
+	Expect(agoutiDriver.Stop()).To(Succeed())
+})
+`
+
+type bootstrapData struct {
+	Package       string
+	FormattedName string
+	GinkgoImport  string
+	GomegaImport  string
+}
+
+func getPackageAndFormattedName() (string, string, string) {
+	path, err := os.Getwd()
+	if err != nil {
+		complainAndQuit("Could not get current working directory: \n" + err.Error())
+	}
+
+	dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
+	dirName = strings.Replace(dirName, " ", "_", -1)
+
+	pkg, err := build.ImportDir(path, 0)
+	packageName := pkg.Name
+	if err != nil {
+		packageName = dirName
+	}
+
+	formattedName := prettifyPackageName(filepath.Base(path))
+	return packageName, dirName, formattedName
+}
+
+func prettifyPackageName(name string) string {
+	name = strings.Replace(name, "-", " ", -1)
+	name = strings.Replace(name, "_", " ", -1)
+	name = strings.Title(name)
+	name = strings.Replace(name, " ", "", -1)
+	return name
+}
+
+func fileExists(path string) bool {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true
+	}
+	return false
+}
+
+func generateBootstrap(agouti bool, noDot bool) {
+	packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+	data := bootstrapData{
+		Package:       packageName,
+		FormattedName: formattedName,
+		GinkgoImport:  `. "github.com/onsi/ginkgo"`,
+		GomegaImport:  `. "github.com/onsi/gomega"`,
+	}
+
+	if noDot {
+		data.GinkgoImport = `"github.com/onsi/ginkgo"`
+		data.GomegaImport = `"github.com/onsi/gomega"`
+	}
+
+	targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+	if fileExists(targetFile) {
+		fmt.Printf("%s already exists.\n\n", targetFile)
+		os.Exit(1)
+	} else {
+		fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+	}
+
+	f, err := os.Create(targetFile)
+	if err != nil {
+		complainAndQuit("Could not create file: " + err.Error())
+		panic(err.Error())
+	}
+	defer f.Close()
+
+	var templateText string
+	if agouti {
+		templateText = agoutiBootstrapText
+	} else {
+		templateText = bootstrapText
+	}
+
+	bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	buf := &bytes.Buffer{}
+	bootstrapTemplate.Execute(buf, data)
+
+	if noDot {
+		contents, err := nodot.ApplyNoDot(buf.Bytes())
+		if err != nil {
+			complainAndQuit("Failed to import nodot declarations: " + err.Error())
+		}
+		fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
+		buf = bytes.NewBuffer(contents)
+	}
+
+	buf.WriteTo(f)
+
+	goFmt(targetFile)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..bbba8a1b4bdb55efdd1d389f8fe185586fea1410
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
+	"github.com/onsi/ginkgo/ginkgo/testrunner"
+)
+
+func BuildBuildCommand() *Command {
+	commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
+	interruptHandler := interrupthandler.NewInterruptHandler()
+	builder := &SpecBuilder{
+		commandFlags:     commandFlags,
+		interruptHandler: interruptHandler,
+	}
+
+	return &Command{
+		Name:         "build",
+		FlagSet:      commandFlags.FlagSet,
+		UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
+		Usage: []string{
+			"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
+			"Accepts the following flags:",
+		},
+		Command: builder.BuildSpecs,
+	}
+}
+
+type SpecBuilder struct {
+	commandFlags     *RunWatchAndBuildCommandFlags
+	interruptHandler *interrupthandler.InterruptHandler
+}
+
+func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
+	r.commandFlags.computeNodes()
+
+	suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
+
+	if len(suites) == 0 {
+		complainAndQuit("Found no test suites")
+	}
+
+	passed := true
+	for _, suite := range suites {
+		runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
+		fmt.Printf("Compiling %s...\n", suite.PackageName)
+
+		path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
+		err := runner.CompileTo(path)
+		if err != nil {
+			fmt.Println(err.Error())
+			passed = false
+		} else {
+			fmt.Printf("    compiled %s.test\n", suite.PackageName)
+		}
+
+		runner.CleanUp()
+	}
+
+	if passed {
+		os.Exit(0)
+	}
+	os.Exit(1)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
new file mode 100644
index 0000000000000000000000000000000000000000..02e2b3b328d791c03bba5c0b7c39a7db4164fb8a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
@@ -0,0 +1,123 @@
+package convert
+
+import (
+	"fmt"
+	"go/ast"
+	"strings"
+	"unicode"
+)
+
+/*
+ * Creates a func init() node
+ */
+func createVarUnderscoreBlock() *ast.ValueSpec {
+	valueSpec := &ast.ValueSpec{}
+	object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
+	ident := &ast.Ident{Name: "_", Obj: object}
+	valueSpec.Names = append(valueSpec.Names, ident)
+	return valueSpec
+}
+
+/*
+ * Creates a Describe("Testing with ginkgo", func() { }) node
+ */
+func createDescribeBlock() *ast.CallExpr {
+	blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
+
+	fieldList := &ast.FieldList{}
+	funcType := &ast.FuncType{Params: fieldList}
+	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
+	basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
+	describeIdent := &ast.Ident{Name: "Describe"}
+	return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
+}
+
+/*
+ * Convenience function to return the name of the *testing.T param
+ * for a Test function that will be rewritten. This is useful because
+ * we will want to replace the usage of this named *testing.T inside the
+ * body of the function with a GinkgoT.
+ */
+func namedTestingTArg(node *ast.FuncDecl) string {
+	return node.Type.Params.List[0].Names[0].Name // *exhale*
+}
+
+/*
+ * Convenience function to return the block statement node for a Describe statement
+ */
+func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
+	var funcLit *ast.FuncLit
+	var found = false
+
+	for _, node := range desc.Args {
+		switch node := node.(type) {
+		case *ast.FuncLit:
+			found = true
+			funcLit = node
+			break
+		}
+	}
+
+	if !found {
+		panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
+	}
+
+	return funcLit.Body
+}
+
+/* convenience function for creating an It("TestNameHere")
+ * with all the body of the test function inside the anonymous
+ * func passed to It()
+ */
+func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
+	blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
+	fieldList := &ast.FieldList{}
+	funcType := &ast.FuncType{Params: fieldList}
+	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
+
+	testName := rewriteTestName(testFunc.Name.Name)
+	basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
+	itBlockIdent := &ast.Ident{Name: "It"}
+	callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
+	return &ast.ExprStmt{X: callExpr}
+}
+
+/*
+* rewrite test names to be human readable
+* eg: rewrites "TestSomethingAmazing" as "something amazing"
+ */
+func rewriteTestName(testName string) string {
+	nameComponents := []string{}
+	currentString := ""
+	indexOfTest := strings.Index(testName, "Test")
+	if indexOfTest != 0 {
+		return testName
+	}
+
+	testName = strings.Replace(testName, "Test", "", 1)
+	first, rest := testName[0], testName[1:]
+	testName = string(unicode.ToLower(rune(first))) + rest
+
+	for _, rune := range testName {
+		if unicode.IsUpper(rune) {
+			nameComponents = append(nameComponents, currentString)
+			currentString = string(unicode.ToLower(rune))
+		} else {
+			currentString += string(rune)
+		}
+	}
+
+	return strings.Join(append(nameComponents, currentString), " ")
+}
+
+func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
+	return &ast.CallExpr{
+		Lparen: ident.NamePos + 1,
+		Rparen: ident.NamePos + 2,
+		Fun:    &ast.Ident{Name: "GinkgoT"},
+	}
+}
+
+func newGinkgoTInterface() *ast.Ident {
+	return &ast.Ident{Name: "GinkgoTInterface"}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
new file mode 100644
index 0000000000000000000000000000000000000000..e226196f72e27d2e74f716e01c220ca37d50f0b0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
@@ -0,0 +1,91 @@
+package convert
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+)
+
+/*
+ * Given the root node of an AST, returns the node containing the
+ * import statements for the file.
+ */
+func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
+	for _, declaration := range rootNode.Decls {
+		decl, ok := declaration.(*ast.GenDecl)
+		if !ok || len(decl.Specs) == 0 {
+			continue
+		}
+
+		_, ok = decl.Specs[0].(*ast.ImportSpec)
+		if ok {
+			imports = decl
+			return
+		}
+	}
+
+	err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
+	return
+}
+
+/*
+ * Removes "testing" import, if present
+ */
+func removeTestingImport(rootNode *ast.File) {
+	importDecl, err := importsForRootNode(rootNode)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	var index int
+	for i, importSpec := range importDecl.Specs {
+		importSpec := importSpec.(*ast.ImportSpec)
+		if importSpec.Path.Value == "\"testing\"" {
+			index = i
+			break
+		}
+	}
+
+	importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
+}
+
+/*
+ * Adds import statements for onsi/ginkgo, if missing
+ */
+func addGinkgoImports(rootNode *ast.File) {
+	importDecl, err := importsForRootNode(rootNode)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	if len(importDecl.Specs) == 0 {
+		// TODO: might need to create a import decl here
+		panic("unimplemented : expected to find an imports block")
+	}
+
+	needsGinkgo := true
+	for _, importSpec := range importDecl.Specs {
+		importSpec, ok := importSpec.(*ast.ImportSpec)
+		if !ok {
+			continue
+		}
+
+		if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
+			needsGinkgo = false
+		}
+	}
+
+	if needsGinkgo {
+		importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
+	}
+}
+
+/*
+ * convenience function to create an import statement
+ */
+func createImport(name, path string) *ast.ImportSpec {
+	return &ast.ImportSpec{
+		Name: &ast.Ident{Name: name},
+		Path: &ast.BasicLit{Kind: 9, Value: path},
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed09c460d4c32e14f4664b3506cde990c28ba580
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
@@ -0,0 +1,127 @@
+package convert
+
+import (
+	"fmt"
+	"go/build"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+)
+
+/*
+ * RewritePackage takes a name (eg: my-package/tools), finds its test files using
+ * Go's build package, and then rewrites them. A ginkgo test suite file will
+ * also be added for this package, and all of its child packages.
+ */
+func RewritePackage(packageName string) {
+	pkg, err := packageWithName(packageName)
+	if err != nil {
+		panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
+	}
+
+	for _, filename := range findTestsInPackage(pkg) {
+		rewriteTestsInFile(filename)
+	}
+	return
+}
+
+/*
+ * Given a package, findTestsInPackage reads the test files in the directory,
+ * and then recurses on each child package, returning a slice of all test files
+ * found in this process.
+ */
+func findTestsInPackage(pkg *build.Package) (testfiles []string) {
+	for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
+		testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
+	}
+
+	dirFiles, err := ioutil.ReadDir(pkg.Dir)
+	if err != nil {
+		panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
+	}
+
+	re := regexp.MustCompile(`^[._]`)
+
+	for _, file := range dirFiles {
+		if !file.IsDir() {
+			continue
+		}
+
+		if re.Match([]byte(file.Name())) {
+			continue
+		}
+
+		packageName := filepath.Join(pkg.ImportPath, file.Name())
+		subPackage, err := packageWithName(packageName)
+		if err != nil {
+			panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
+		}
+
+		testfiles = append(testfiles, findTestsInPackage(subPackage)...)
+	}
+
+	addGinkgoSuiteForPackage(pkg)
+	goFmtPackage(pkg)
+	return
+}
+
+/*
+ * Shells out to `ginkgo bootstrap` to create a test suite file
+ */
+func addGinkgoSuiteForPackage(pkg *build.Package) {
+	originalDir, err := os.Getwd()
+	if err != nil {
+		panic(err)
+	}
+
+	suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
+
+	_, err = os.Stat(suite_test_file)
+	if err == nil {
+		return // test file already exists, this should be a no-op
+	}
+
+	err = os.Chdir(pkg.Dir)
+	if err != nil {
+		panic(err)
+	}
+
+	output, err := exec.Command("ginkgo", "bootstrap").Output()
+
+	if err != nil {
+		panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
+	}
+
+	err = os.Chdir(originalDir)
+	if err != nil {
+		panic(err)
+	}
+}
+
+/*
+ * Shells out to `go fmt` to format the package
+ */
+func goFmtPackage(pkg *build.Package) {
+	output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
+
+	if err != nil {
+		fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
+	}
+}
+
+/*
+ * Attempts to return a package with its test files already read.
+ * The ImportMode arg to build.Import lets you specify if you want go to read the
+ * buildable go files inside the package, but it fails if the package has no go files
+ */
+func packageWithName(name string) (pkg *build.Package, err error) {
+	pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
+	if err == nil {
+		return
+	}
+
+	pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
new file mode 100644
index 0000000000000000000000000000000000000000..b33595c9ae169705f8a10c2d68cbf9c195ee83ef
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
@@ -0,0 +1,56 @@
+package convert
+
+import (
+	"go/ast"
+	"regexp"
+)
+
+/*
+ * Given a root node, walks its top level statements and returns
+ * points to function nodes to rewrite as It statements.
+ * These functions, according to Go testing convention, must be named
+ * TestWithCamelCasedName and receive a single *testing.T argument.
+ */
+func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
+	testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
+
+	ast.Inspect(rootNode, func(node ast.Node) bool {
+		if node == nil {
+			return false
+		}
+
+		switch node := node.(type) {
+		case *ast.FuncDecl:
+			matches := testNameRegexp.MatchString(node.Name.Name)
+
+			if matches && receivesTestingT(node) {
+				testsToRewrite = append(testsToRewrite, node)
+			}
+		}
+
+		return true
+	})
+
+	return
+}
+
+/*
+ * convenience function that looks at args to a function and determines if its
+ * params include an argument of type  *testing.T
+ */
+func receivesTestingT(node *ast.FuncDecl) bool {
+	if len(node.Type.Params.List) != 1 {
+		return false
+	}
+
+	base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
+	if !ok {
+		return false
+	}
+
+	intermediate := base.X.(*ast.SelectorExpr)
+	isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
+	isTestingT := intermediate.Sel.Name == "T"
+
+	return isTestingPackage && isTestingT
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b001a7dbb54a635b000d44cce76f62d84c5db63
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
@@ -0,0 +1,163 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/format"
+	"go/parser"
+	"go/token"
+	"io/ioutil"
+	"os"
+)
+
+/*
+ * Given a file path, rewrites any tests in the Ginkgo format.
+ * First, we parse the AST, and update the imports declaration.
+ * Then, we walk the first child elements in the file, returning tests to rewrite.
+ * A top level init func is declared, with a single Describe func inside.
+ * Then the test functions to rewrite are inserted as It statements inside the Describe.
+ * Finally we walk the rest of the file, replacing other usages of *testing.T
+ * Once that is complete, we write the AST back out again to its file.
+ */
+func rewriteTestsInFile(pathToFile string) {
+	fileSet := token.NewFileSet()
+	rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
+	if err != nil {
+		panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
+	}
+
+	addGinkgoImports(rootNode)
+	removeTestingImport(rootNode)
+
+	varUnderscoreBlock := createVarUnderscoreBlock()
+	describeBlock := createDescribeBlock()
+	varUnderscoreBlock.Values = []ast.Expr{describeBlock}
+
+	for _, testFunc := range findTestFuncs(rootNode) {
+		rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
+	}
+
+	underscoreDecl := &ast.GenDecl{
+		Tok:    85, // gah, magick numbers are needed to make this work
+		TokPos: 14, // this tricks Go into writing "var _ = Describe"
+		Specs:  []ast.Spec{varUnderscoreBlock},
+	}
+
+	imports := rootNode.Decls[0]
+	tail := rootNode.Decls[1:]
+	rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
+	rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
+	walkNodesInRootNodeReplacingTestingT(rootNode)
+
+	var buffer bytes.Buffer
+	if err = format.Node(&buffer, fileSet, rootNode); err != nil {
+		panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
+	}
+
+	fileInfo, err := os.Stat(pathToFile)
+	if err != nil {
+		panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
+	}
+
+	ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
+	return
+}
+
+/*
+ * Given a test func named TestDoesSomethingNeat, rewrites it as
+ * It("does something neat", func() { __test_body_here__ }) and adds it
+ * to the Describe's list of statements
+ */
+func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
+	var funcIndex int = -1
+	for index, child := range rootNode.Decls {
+		if child == testFunc {
+			funcIndex = index
+			break
+		}
+	}
+
+	if funcIndex < 0 {
+		panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
+	}
+
+	var block *ast.BlockStmt = blockStatementFromDescribe(describe)
+	block.List = append(block.List, createItStatementForTestFunc(testFunc))
+	replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
+
+	// remove the old test func from the root node's declarations
+	rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
+	return
+}
+
+/*
+ * walks nodes inside of a test func's statements and replaces the usage of
+ * its named *testing.T param with GinkgoT's
+ */
+func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
+	ast.Inspect(statementsBlock, func(node ast.Node) bool {
+		if node == nil {
+			return false
+		}
+
+		keyValueExpr, ok := node.(*ast.KeyValueExpr)
+		if ok {
+			replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
+			return true
+		}
+
+		funcLiteral, ok := node.(*ast.FuncLit)
+		if ok {
+			replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
+			return true
+		}
+
+		callExpr, ok := node.(*ast.CallExpr)
+		if !ok {
+			return true
+		}
+		replaceTestingTsInArgsLists(callExpr, testingT)
+
+		funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
+		if ok {
+			replaceTestingTsMethodCalls(funCall, testingT)
+		}
+
+		return true
+	})
+}
+
+/*
+ * rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
+ * This function receives a selector expression (eg: t.Fail()) and
+ * the name of the *testing.T param from the function declaration. Rewrites the
+ * selector expression in place if the target was a *testing.T
+ */
+func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
+	ident, ok := selectorExpr.X.(*ast.Ident)
+	if !ok {
+		return
+	}
+
+	if ident.Name == testingT {
+		selectorExpr.X = newGinkgoTFromIdent(ident)
+	}
+}
+
+/*
+ * replaces usages of a named *testing.T param inside of a call expression
+ * with a new GinkgoT object
+ */
+func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
+	for index, arg := range callExpr.Args {
+		ident, ok := arg.(*ast.Ident)
+		if !ok {
+			continue
+		}
+
+		if ident.Name == testingT {
+			callExpr.Args[index] = newGinkgoTFromIdent(ident)
+		}
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
new file mode 100644
index 0000000000000000000000000000000000000000..418cdc4e563464586ee7c6a88f6ca8d48234092a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
@@ -0,0 +1,130 @@
+package convert
+
+import (
+	"go/ast"
+)
+
+/*
+ * Rewrites any other top level funcs that receive a *testing.T param
+ */
+func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
+	for _, decl := range declarations {
+		decl, ok := decl.(*ast.FuncDecl)
+		if !ok {
+			continue
+		}
+
+		for _, param := range decl.Type.Params.List {
+			starExpr, ok := param.Type.(*ast.StarExpr)
+			if !ok {
+				continue
+			}
+
+			selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+			if !ok {
+				continue
+			}
+
+			xIdent, ok := selectorExpr.X.(*ast.Ident)
+			if !ok || xIdent.Name != "testing" {
+				continue
+			}
+
+			if selectorExpr.Sel.Name != "T" {
+				continue
+			}
+
+			param.Type = newGinkgoTInterface()
+		}
+	}
+}
+
+/*
+ * Walks all of the nodes in the file, replacing *testing.T in struct
+ * and func literal nodes. eg:
+ *   type foo struct { *testing.T }
+ *   var bar = func(t *testing.T) { }
+ */
+func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
+	ast.Inspect(rootNode, func(node ast.Node) bool {
+		if node == nil {
+			return false
+		}
+
+		switch node := node.(type) {
+		case *ast.StructType:
+			replaceTestingTsInStructType(node)
+		case *ast.FuncLit:
+			replaceTypeDeclTestingTsInFuncLiteral(node)
+		}
+
+		return true
+	})
+}
+
+/*
+ * replaces named *testing.T inside a composite literal
+ */
+func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
+	ident, ok := kve.Value.(*ast.Ident)
+	if !ok {
+		return
+	}
+
+	if ident.Name == testingT {
+		kve.Value = newGinkgoTFromIdent(ident)
+	}
+}
+
+/*
+ * replaces *testing.T params in a func literal with GinkgoT
+ */
+func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
+	for _, arg := range functionLiteral.Type.Params.List {
+		starExpr, ok := arg.Type.(*ast.StarExpr)
+		if !ok {
+			continue
+		}
+
+		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+		if !ok {
+			continue
+		}
+
+		target, ok := selectorExpr.X.(*ast.Ident)
+		if !ok {
+			continue
+		}
+
+		if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
+			arg.Type = newGinkgoTInterface()
+		}
+	}
+}
+
+/*
+ * Replaces *testing.T types inside of a struct declaration with a GinkgoT
+ * eg: type foo struct { *testing.T }
+ */
+func replaceTestingTsInStructType(structType *ast.StructType) {
+	for _, field := range structType.Fields.List {
+		starExpr, ok := field.Type.(*ast.StarExpr)
+		if !ok {
+			continue
+		}
+
+		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+		if !ok {
+			continue
+		}
+
+		xIdent, ok := selectorExpr.X.(*ast.Ident)
+		if !ok {
+			continue
+		}
+
+		if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
+			field.Type = newGinkgoTInterface()
+		}
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..89e60d393022f9c6af668044c60634bf1be8fb38
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"github.com/onsi/ginkgo/ginkgo/convert"
+	"os"
+)
+
+func BuildConvertCommand() *Command {
+	return &Command{
+		Name:         "convert",
+		FlagSet:      flag.NewFlagSet("convert", flag.ExitOnError),
+		UsageCommand: "ginkgo convert /path/to/package",
+		Usage: []string{
+			"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
+		},
+		Command: convertPackage,
+	}
+}
+
+func convertPackage(args []string, additionalArgs []string) {
+	if len(args) != 1 {
+		println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
+		os.Exit(1)
+	}
+
+	defer func() {
+		err := recover()
+		if err != nil {
+			switch err := err.(type) {
+			case error:
+				println(err.Error())
+			case string:
+				println(err)
+			default:
+				println(fmt.Sprintf("unexpected error: %#v", err))
+			}
+			os.Exit(1)
+		}
+	}()
+
+	convert.RewritePackage(args[0])
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..7dd3b4da2615912f111d8117af875dbad1abc466
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
@@ -0,0 +1,164 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"text/template"
+)
+
+func BuildGenerateCommand() *Command {
+	var agouti, noDot bool
+	flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
+	flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
+	flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
+
+	return &Command{
+		Name:         "generate",
+		FlagSet:      flagSet,
+		UsageCommand: "ginkgo generate <filename(s)>",
+		Usage: []string{
+			"Generate a test file named filename_test.go",
+			"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
+			"Accepts the following flags:",
+		},
+		Command: func(args []string, additionalArgs []string) {
+			generateSpec(args, agouti, noDot)
+		},
+	}
+}
+
+var specText = `package {{.Package}}_test
+
+import (
+	. "{{.PackageImportPath}}"
+
+	{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
+	{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
+)
+
+var _ = Describe("{{.Subject}}", func() {
+
+})
+`
+
+var agoutiSpecText = `package {{.Package}}_test
+
+import (
+	. "{{.PackageImportPath}}"
+
+	{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
+	{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
+	. "github.com/sclevine/agouti/matchers"
+	"github.com/sclevine/agouti"
+)
+
+var _ = Describe("{{.Subject}}", func() {
+	var page *agouti.Page
+
+	BeforeEach(func() {
+		var err error
+		page, err = agoutiDriver.NewPage()
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(page.Destroy()).To(Succeed())
+	})
+})
+`
+
+type specData struct {
+	Package           string
+	Subject           string
+	PackageImportPath string
+	IncludeImports    bool
+}
+
+func generateSpec(args []string, agouti, noDot bool) {
+	if len(args) == 0 {
+		err := generateSpecForSubject("", agouti, noDot)
+		if err != nil {
+			fmt.Println(err.Error())
+			fmt.Println("")
+			os.Exit(1)
+		}
+		fmt.Println("")
+		return
+	}
+
+	var failed bool
+	for _, arg := range args {
+		err := generateSpecForSubject(arg, agouti, noDot)
+		if err != nil {
+			failed = true
+			fmt.Println(err.Error())
+		}
+	}
+	fmt.Println("")
+	if failed {
+		os.Exit(1)
+	}
+}
+
+func generateSpecForSubject(subject string, agouti, noDot bool) error {
+	packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+	if subject != "" {
+		subject = strings.Split(subject, ".go")[0]
+		subject = strings.Split(subject, "_test")[0]
+		specFilePrefix = subject
+		formattedName = prettifyPackageName(subject)
+	}
+
+	data := specData{
+		Package:           packageName,
+		Subject:           formattedName,
+		PackageImportPath: getPackageImportPath(),
+		IncludeImports:    !noDot,
+	}
+
+	targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+	if fileExists(targetFile) {
+		return fmt.Errorf("%s already exists.", targetFile)
+	} else {
+		fmt.Printf("Generating ginkgo test for %s in:\n  %s\n", data.Subject, targetFile)
+	}
+
+	f, err := os.Create(targetFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	var templateText string
+	if agouti {
+		templateText = agoutiSpecText
+	} else {
+		templateText = specText
+	}
+
+	specTemplate, err := template.New("spec").Parse(templateText)
+	if err != nil {
+		return err
+	}
+
+	specTemplate.Execute(f, data)
+	goFmt(targetFile)
+	return nil
+}
+
+func getPackageImportPath() string {
+	workingDir, err := os.Getwd()
+	if err != nil {
+		panic(err.Error())
+	}
+	sep := string(filepath.Separator)
+	paths := strings.Split(workingDir, sep+"src"+sep)
+	if len(paths) == 1 {
+		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+		return "UNKNOWN_PACKAGE_PATH"
+	}
+	return filepath.ToSlash(paths[len(paths)-1])
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..a42d4f8aae70e131603c988667b1f0806a195cd5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+)
+
+func BuildHelpCommand() *Command {
+	return &Command{
+		Name:         "help",
+		FlagSet:      flag.NewFlagSet("help", flag.ExitOnError),
+		UsageCommand: "ginkgo help <COMMAND>",
+		Usage: []string{
+			"Print usage information.  If a command is passed in, print usage information just for that command.",
+		},
+		Command: printHelp,
+	}
+}
+
+func printHelp(args []string, additionalArgs []string) {
+	if len(args) == 0 {
+		usage()
+	} else {
+		command, found := commandMatching(args[0])
+		if !found {
+			complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
+		}
+
+		usageForCommand(command, true)
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..c15db0b02ad87601afb9c8a3ecdc39848886ec56
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
@@ -0,0 +1,52 @@
+package interrupthandler
+
+import (
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+)
+
+type InterruptHandler struct {
+	interruptCount int
+	lock           *sync.Mutex
+	C              chan bool
+}
+
+func NewInterruptHandler() *InterruptHandler {
+	h := &InterruptHandler{
+		lock: &sync.Mutex{},
+		C:    make(chan bool),
+	}
+
+	go h.handleInterrupt()
+	SwallowSigQuit()
+
+	return h
+}
+
+func (h *InterruptHandler) WasInterrupted() bool {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+
+	return h.interruptCount > 0
+}
+
+func (h *InterruptHandler) handleInterrupt() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+	<-c
+	signal.Stop(c)
+
+	h.lock.Lock()
+	h.interruptCount++
+	if h.interruptCount == 1 {
+		close(h.C)
+	} else if h.interruptCount > 5 {
+		os.Exit(1)
+	}
+	h.lock.Unlock()
+
+	go h.handleInterrupt()
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..14c94210ee030ebf36e6fc36a71e262575bc2148
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
@@ -0,0 +1,14 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux
+
+package interrupthandler
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+func SwallowSigQuit() {
+	c := make(chan os.Signal, 1024)
+	signal.Notify(c, syscall.SIGQUIT)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f4a50e1906b97b02a90b54086d31573dbfdeebd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package interrupthandler
+
+func SwallowSigQuit() {
+	//noop
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/ginkgo/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..b031b8082cc8787ca9aeefaf0b1b7d2baa9f5df5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/main.go
@@ -0,0 +1,291 @@
+/*
+The Ginkgo CLI
+
+The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
+
+You can also learn more by running:
+
+	ginkgo help
+
+Here are some of the more commonly used commands:
+
+To install:
+
+	go install github.com/onsi/ginkgo/ginkgo
+
+To run tests:
+
+	ginkgo
+
+To run tests in all subdirectories:
+
+	ginkgo -r
+
+To run tests in particular packages:
+
+	ginkgo <flags> /path/to/package /path/to/another/package
+
+To pass arguments/flags to your tests:
+
+	ginkgo <flags> <packages> -- <pass-throughs>
+
+To run tests in parallel
+
+	ginkgo -p
+
+this will automatically detect the optimal number of nodes to use.  Alternatively, you can specify the number of nodes with:
+
+	ginkgo -nodes=N
+
+(note that you don't need to provide -p in this case).
+
+By default the Ginkgo CLI will spin up a server that the individual test processes send test output to.  The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
+An alternative is to have the parallel nodes run and stream interleaved output back.  This is useful for debugging, particularly in contexts where tests hang/fail to start.  To get this interleaved output:
+
+	ginkgo -nodes=N -stream=true
+
+On windows, the default value for stream is true.
+
+By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails.  To have Ginkgo run subsequent test suites instead you can:
+
+	ginkgo -keepGoing
+
+To monitor packages and rerun tests when changes occur:
+
+	ginkgo watch <-r> </path/to/package>
+
+passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
+`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
+that depend on X are not rerun.
+
+[OSX & Linux only] To receive (desktop) notifications when a test run completes:
+
+	ginkgo -notify
+
+this is particularly useful with `ginkgo watch`.  Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
+
+Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails.  You can do this with:
+
+	ginkgo -untilItFails
+
+To bootstrap a test suite:
+
+	ginkgo bootstrap
+
+To generate a test file:
+
+	ginkgo generate <test_file_name>
+
+To bootstrap/generate test files without using "." imports:
+
+	ginkgo bootstrap --nodot
+	ginkgo generate --nodot
+
+this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions.  When you pull to the latest Ginkgo/Gomega you'll want to run
+
+	ginkgo nodot
+
+to refresh this list and pull in any new identifiers.  In particular, this will pull in any new Gomega matchers that get added.
+
+To convert an existing XUnit style test suite to a Ginkgo-style test suite:
+
+	ginkgo convert .
+
+To unfocus tests:
+
+	ginkgo unfocus
+
+or
+
+	ginkgo blur
+
+To compile a test suite:
+
+	ginkgo build <path-to-package>
+
+will output an executable file named `package.test`.  This can be run directly or by invoking
+
+	ginkgo <path-to-package.test>
+
+To print out Ginkgo's version:
+
+	ginkgo version
+
+To get more help:
+
+	ginkgo help
+*/
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+const greenColor = "\x1b[32m"
+const redColor = "\x1b[91m"
+const defaultStyle = "\x1b[0m"
+const lightGrayColor = "\x1b[37m"
+
+type Command struct {
+	Name                      string
+	AltName                   string
+	FlagSet                   *flag.FlagSet
+	Usage                     []string
+	UsageCommand              string
+	Command                   func(args []string, additionalArgs []string)
+	SuppressFlagDocumentation bool
+	FlagDocSubstitute         []string
+}
+
+func (c *Command) Matches(name string) bool {
+	return c.Name == name || (c.AltName != "" && c.AltName == name)
+}
+
+func (c *Command) Run(args []string, additionalArgs []string) {
+	c.FlagSet.Parse(args)
+	c.Command(c.FlagSet.Args(), additionalArgs)
+}
+
+var DefaultCommand *Command
+var Commands []*Command
+
+func init() {
+	DefaultCommand = BuildRunCommand()
+	Commands = append(Commands, BuildWatchCommand())
+	Commands = append(Commands, BuildBuildCommand())
+	Commands = append(Commands, BuildBootstrapCommand())
+	Commands = append(Commands, BuildGenerateCommand())
+	Commands = append(Commands, BuildNodotCommand())
+	Commands = append(Commands, BuildConvertCommand())
+	Commands = append(Commands, BuildUnfocusCommand())
+	Commands = append(Commands, BuildVersionCommand())
+	Commands = append(Commands, BuildHelpCommand())
+}
+
+func main() {
+	args := []string{}
+	additionalArgs := []string{}
+
+	foundDelimiter := false
+
+	for _, arg := range os.Args[1:] {
+		if !foundDelimiter {
+			if arg == "--" {
+				foundDelimiter = true
+				continue
+			}
+		}
+
+		if foundDelimiter {
+			additionalArgs = append(additionalArgs, arg)
+		} else {
+			args = append(args, arg)
+		}
+	}
+
+	if len(args) > 0 {
+		commandToRun, found := commandMatching(args[0])
+		if found {
+			commandToRun.Run(args[1:], additionalArgs)
+			return
+		}
+	}
+
+	DefaultCommand.Run(args, additionalArgs)
+}
+
+func commandMatching(name string) (*Command, bool) {
+	for _, command := range Commands {
+		if command.Matches(name) {
+			return command, true
+		}
+	}
+	return nil, false
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
+	usageForCommand(DefaultCommand, false)
+	for _, command := range Commands {
+		fmt.Fprintf(os.Stderr, "\n")
+		usageForCommand(command, false)
+	}
+}
+
+func usageForCommand(command *Command, longForm bool) {
+	fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
+	fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
+	if command.SuppressFlagDocumentation && !longForm {
+		fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n  "))
+	} else {
+		command.FlagSet.PrintDefaults()
+	}
+}
+
+func complainAndQuit(complaint string) {
+	fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
+	os.Exit(1)
+}
+
+func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
+	suites := []testsuite.TestSuite{}
+
+	if len(args) > 0 {
+		for _, arg := range args {
+			if allowPrecompiled {
+				suite, err := testsuite.PrecompiledTestSuite(arg)
+				if err == nil {
+					suites = append(suites, suite)
+					continue
+				}
+			}
+			suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
+		}
+	} else {
+		suites = testsuite.SuitesInDir(".", recurse)
+	}
+
+	skippedPackages := []string{}
+	if skipPackage != "" {
+		skipFilters := strings.Split(skipPackage, ",")
+		filteredSuites := []testsuite.TestSuite{}
+		for _, suite := range suites {
+			skip := false
+			for _, skipFilter := range skipFilters {
+				if strings.Contains(suite.Path, skipFilter) {
+					skip = true
+					break
+				}
+			}
+			if skip {
+				skippedPackages = append(skippedPackages, suite.Path)
+			} else {
+				filteredSuites = append(filteredSuites, suite)
+			}
+		}
+		suites = filteredSuites
+	}
+
+	return suites, skippedPackages
+}
+
+func goFmt(path string) {
+	err := exec.Command("go", "fmt", path).Run()
+	if err != nil {
+		complainAndQuit("Could not fmt: " + err.Error())
+	}
+}
+
+func pluralizedWord(singular, plural string, count int) string {
+	if count == 1 {
+		return singular
+	}
+	return plural
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f7237c602d7a9a8cb8738270c34745828202773
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
@@ -0,0 +1,194 @@
+package nodot
+
+import (
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"path/filepath"
+	"strings"
+)
+
+func ApplyNoDot(data []byte) ([]byte, error) {
+	sections, err := generateNodotSections()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, section := range sections {
+		data = section.createOrUpdateIn(data)
+	}
+
+	return data, nil
+}
+
+type nodotSection struct {
+	name         string
+	pkg          string
+	declarations []string
+	types        []string
+}
+
+func (s nodotSection) createOrUpdateIn(data []byte) []byte {
+	renames := map[string]string{}
+
+	contents := string(data)
+
+	lines := strings.Split(contents, "\n")
+
+	comment := "// Declarations for " + s.name
+
+	newLines := []string{}
+	for _, line := range lines {
+		if line == comment {
+			continue
+		}
+
+		words := strings.Split(line, " ")
+		lastWord := words[len(words)-1]
+
+		if s.containsDeclarationOrType(lastWord) {
+			renames[lastWord] = words[1]
+			continue
+		}
+
+		newLines = append(newLines, line)
+	}
+
+	if len(newLines[len(newLines)-1]) > 0 {
+		newLines = append(newLines, "")
+	}
+
+	newLines = append(newLines, comment)
+
+	for _, typ := range s.types {
+		name, ok := renames[s.prefix(typ)]
+		if !ok {
+			name = typ
+		}
+		newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
+	}
+
+	for _, decl := range s.declarations {
+		name, ok := renames[s.prefix(decl)]
+		if !ok {
+			name = decl
+		}
+		newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
+	}
+
+	newLines = append(newLines, "")
+
+	newContents := strings.Join(newLines, "\n")
+
+	return []byte(newContents)
+}
+
+func (s nodotSection) prefix(declOrType string) string {
+	return s.pkg + "." + declOrType
+}
+
+func (s nodotSection) containsDeclarationOrType(word string) bool {
+	for _, declaration := range s.declarations {
+		if s.prefix(declaration) == word {
+			return true
+		}
+	}
+
+	for _, typ := range s.types {
+		if s.prefix(typ) == word {
+			return true
+		}
+	}
+
+	return false
+}
+
+func generateNodotSections() ([]nodotSection, error) {
+	sections := []nodotSection{}
+
+	declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
+	if err != nil {
+		return nil, err
+	}
+	sections = append(sections, nodotSection{
+		name:         "Ginkgo DSL",
+		pkg:          "ginkgo",
+		declarations: declarations,
+		types:        []string{"Done", "Benchmarker"},
+	})
+
+	declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
+	if err != nil {
+		return nil, err
+	}
+	sections = append(sections, nodotSection{
+		name:         "Gomega DSL",
+		pkg:          "gomega",
+		declarations: declarations,
+	})
+
+	declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
+	if err != nil {
+		return nil, err
+	}
+	sections = append(sections, nodotSection{
+		name:         "Gomega Matchers",
+		pkg:          "gomega",
+		declarations: declarations,
+	})
+
+	return sections, nil
+}
+
+func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
+	pkg, err := build.Import(pkgPath, ".", 0)
+	if err != nil {
+		return []string{}, err
+	}
+
+	declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
+	if err != nil {
+		return []string{}, err
+	}
+
+	blacklistLookup := map[string]bool{}
+	for _, declaration := range blacklist {
+		blacklistLookup[declaration] = true
+	}
+
+	filteredDeclarations := []string{}
+	for _, declaration := range declarations {
+		if blacklistLookup[declaration] {
+			continue
+		}
+		filteredDeclarations = append(filteredDeclarations, declaration)
+	}
+
+	return filteredDeclarations, nil
+}
+
+func getExportedDeclarationsForFile(path string) ([]string, error) {
+	fset := token.NewFileSet()
+	tree, err := parser.ParseFile(fset, path, nil, 0)
+	if err != nil {
+		return []string{}, err
+	}
+
+	declarations := []string{}
+	ast.FileExports(tree)
+	for _, decl := range tree.Decls {
+		switch x := decl.(type) {
+		case *ast.GenDecl:
+			switch s := x.Specs[0].(type) {
+			case *ast.ValueSpec:
+				declarations = append(declarations, s.Names[0].Name)
+			}
+		case *ast.FuncDecl:
+			declarations = append(declarations, x.Name.Name)
+		}
+	}
+
+	return declarations, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..212235bae0ad3edd1a53a55d18684a6c6089eecf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+	"bufio"
+	"flag"
+	"github.com/onsi/ginkgo/ginkgo/nodot"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+)
+
+func BuildNodotCommand() *Command {
+	return &Command{
+		Name:         "nodot",
+		FlagSet:      flag.NewFlagSet("bootstrap", flag.ExitOnError),
+		UsageCommand: "ginkgo nodot",
+		Usage: []string{
+			"Update the nodot declarations in your test suite",
+			"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
+			"If you've renamed a declaration, that name will be honored and not overwritten.",
+		},
+		Command: updateNodot,
+	}
+}
+
+func updateNodot(args []string, additionalArgs []string) {
+	suiteFile, perm := findSuiteFile()
+
+	data, err := ioutil.ReadFile(suiteFile)
+	if err != nil {
+		complainAndQuit("Failed to update nodot declarations: " + err.Error())
+	}
+	content, err := nodot.ApplyNoDot(data)
+	if err != nil {
+		complainAndQuit("Failed to update nodot declarations: " + err.Error())
+	}
+	if err := ioutil.WriteFile(suiteFile, content, perm); err != nil {
+		complainAndQuit("Failed to update nodot declarations: " + err.Error())
+	}
+	goFmt(suiteFile)
+}
+
+func findSuiteFile() (string, os.FileMode) {
+	workingDir, err := os.Getwd()
+	if err != nil {
+		complainAndQuit("Could not find suite file for nodot: " + err.Error())
+	}
+
+	files, err := ioutil.ReadDir(workingDir)
+	if err != nil {
+		complainAndQuit("Could not find suite file for nodot: " + err.Error())
+	}
+
+	re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
+
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+		path := filepath.Join(workingDir, file.Name())
+		f, err := os.Open(path)
+		if err != nil {
+			complainAndQuit("Could not find suite file for nodot: " + err.Error())
+		}
+		matches := re.MatchReader(bufio.NewReader(f))
+		f.Close()
+		if matches {
+			return path, file.Mode()
+		}
+	}
+
+	complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that calls Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
+
+	return "", 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go b/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
new file mode 100644
index 0000000000000000000000000000000000000000..368d61fb31c7cd740e4840a6dd7011f65a5d8e87
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
@@ -0,0 +1,141 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type Notifier struct {
+	commandFlags *RunWatchAndBuildCommandFlags
+}
+
+func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
+	return &Notifier{
+		commandFlags: commandFlags,
+	}
+}
+
+func (n *Notifier) VerifyNotificationsAreAvailable() {
+	if n.commandFlags.Notify {
+		onLinux := (runtime.GOOS == "linux")
+		onOSX := (runtime.GOOS == "darwin")
+		if onOSX {
+
+			_, err := exec.LookPath("terminal-notifier")
+			if err != nil {
+				fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
+
+OSX:
+
+To remedy this:
+
+    brew install terminal-notifier
+
+To learn more about terminal-notifier:
+
+    https://github.com/alloy/terminal-notifier
+`)
+				os.Exit(1)
+			}
+
+		} else if onLinux {
+
+			_, err := exec.LookPath("notify-send")
+			if err != nil {
+				fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
+
+Linux:
+
+Download and install notify-send for your distribution
+`)
+				os.Exit(1)
+			}
+
+		}
+	}
+}
+
+func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
+	if suitePassed {
+		n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
+	} else {
+		n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
+	}
+}
+
+func (n *Notifier) SendNotification(title string, subtitle string) {
+
+	if n.commandFlags.Notify {
+		onLinux := (runtime.GOOS == "linux")
+		onOSX := (runtime.GOOS == "darwin")
+
+		if onOSX {
+
+			_, err := exec.LookPath("terminal-notifier")
+			if err == nil {
+				args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
+				terminal := os.Getenv("TERM_PROGRAM")
+				if terminal == "iTerm.app" {
+					args = append(args, "-activate", "com.googlecode.iterm2")
+				} else if terminal == "Apple_Terminal" {
+					args = append(args, "-activate", "com.apple.Terminal")
+				}
+
+				exec.Command("terminal-notifier", args...).Run()
+			}
+
+		} else if onLinux {
+
+			_, err := exec.LookPath("notify-send")
+			if err == nil {
+				args := []string{"-a", "ginkgo", title, subtitle}
+				exec.Command("notify-send", args...).Run()
+			}
+
+		}
+	}
+}
+
+func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
+
+	command := n.commandFlags.AfterSuiteHook
+	if command != "" {
+
+		// Allow for string replacement to pass input to the command
+		passed := "[FAIL]"
+		if suitePassed {
+			passed = "[PASS]"
+		}
+		command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
+		command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
+
+		// Must break command into parts
+		splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+		parts := splitArgs.FindAllString(command, -1)
+
+		output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+		if err != nil {
+			fmt.Println("Post-suite command failed:")
+			if config.DefaultReporterConfig.NoColor {
+				fmt.Printf("\t%s\n", output)
+			} else {
+				fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
+			}
+			n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
+		} else {
+			fmt.Println("Post-suite command succeeded:")
+			if config.DefaultReporterConfig.NoColor {
+				fmt.Printf("\t%s\n", output)
+			} else {
+				fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5cf2775f75cb0d2b85b590eed0149c63a7cfd83
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
@@ -0,0 +1,192 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
+	"github.com/onsi/ginkgo/ginkgo/testrunner"
+	"github.com/onsi/ginkgo/types"
+)
+
+func BuildRunCommand() *Command {
+	commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
+	notifier := NewNotifier(commandFlags)
+	interruptHandler := interrupthandler.NewInterruptHandler()
+	runner := &SpecRunner{
+		commandFlags:     commandFlags,
+		notifier:         notifier,
+		interruptHandler: interruptHandler,
+		suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
+	}
+
+	return &Command{
+		Name:         "",
+		FlagSet:      commandFlags.FlagSet,
+		UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+		Usage: []string{
+			"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
+			"Any arguments after -- will be passed to the test.",
+			"Accepts the following flags:",
+		},
+		Command: runner.RunSpecs,
+	}
+}
+
+type SpecRunner struct {
+	commandFlags     *RunWatchAndBuildCommandFlags
+	notifier         *Notifier
+	interruptHandler *interrupthandler.InterruptHandler
+	suiteRunner      *SuiteRunner
+}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+	r.commandFlags.computeNodes()
+	r.notifier.VerifyNotificationsAreAvailable()
+
+	suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
+	if len(skippedPackages) > 0 {
+		fmt.Println("Will skip:")
+		for _, skippedPackage := range skippedPackages {
+			fmt.Println("  " + skippedPackage)
+		}
+	}
+
+	if len(skippedPackages) > 0 && len(suites) == 0 {
+		fmt.Println("All tests skipped!  Exiting...")
+		os.Exit(0)
+	}
+
+	if len(suites) == 0 {
+		complainAndQuit("Found no test suites")
+	}
+
+	r.ComputeSuccinctMode(len(suites))
+
+	t := time.Now()
+
+	runners := []*testrunner.TestRunner{}
+	for _, suite := range suites {
+		runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
+	}
+
+	numSuites := 0
+	runResult := testrunner.PassingRunResult()
+	if r.commandFlags.UntilItFails {
+		iteration := 0
+		for {
+			r.UpdateSeed()
+			randomizedRunners := r.randomizeOrder(runners)
+			runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
+			iteration++
+
+			if r.interruptHandler.WasInterrupted() {
+				break
+			}
+
+			if runResult.Passed {
+				fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
+			} else {
+				fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
+				break
+			}
+		}
+	} else {
+		randomizedRunners := r.randomizeOrder(runners)
+		runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
+	}
+
+	for _, runner := range runners {
+		runner.CleanUp()
+	}
+
+	fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
+
+	if runResult.Passed {
+		if runResult.HasProgrammaticFocus {
+			fmt.Printf("Test Suite Passed\n")
+			fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+			os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+		} else {
+			fmt.Printf("Test Suite Passed\n")
+			os.Exit(0)
+		}
+	} else {
+		fmt.Printf("Test Suite Failed\n")
+		os.Exit(1)
+	}
+}
+
+func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
+	if config.DefaultReporterConfig.Verbose {
+		config.DefaultReporterConfig.Succinct = false
+		return
+	}
+
+	if numSuites == 1 {
+		return
+	}
+
+	if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
+		config.DefaultReporterConfig.Succinct = true
+	}
+}
+
+func (r *SpecRunner) UpdateSeed() {
+	if !r.commandFlags.wasSet("seed") {
+		config.GinkgoConfig.RandomSeed = time.Now().Unix()
+	}
+}
+
+func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
+	if !r.commandFlags.RandomizeSuites {
+		return runners
+	}
+
+	if len(runners) <= 1 {
+		return runners
+	}
+
+	randomizedRunners := make([]*testrunner.TestRunner, len(runners))
+	randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
+	permutation := randomizer.Perm(len(runners))
+	for i, j := range permutation {
+		randomizedRunners[i] = runners[j]
+	}
+	return randomizedRunners
+}
+
+func orcMessage(iteration int) string {
+	if iteration < 10 {
+		return ""
+	} else if iteration < 30 {
+		return []string{
+			"If at first you succeed...",
+			"...try, try again.",
+			"Looking good!",
+			"Still good...",
+			"I think your tests are fine....",
+			"Yep, still passing",
+			"Here we go again...",
+			"Even the gophers are getting bored",
+			"Did you try -race?",
+			"Maybe you should stop now?",
+			"I'm getting tired...",
+			"What if I just made you a sandwich?",
+			"Hit ^C, hit ^C, please hit ^C",
+			"Make it stop. Please!",
+			"Come on!  Enough is enough!",
+			"Dave, this conversation can serve no purpose anymore. Goodbye.",
+			"Just what do you think you're doing, Dave? ",
+			"I, Sisyphus",
+			"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
+			"I guess Einstein never tried to churn butter",
+		}[iteration-10] + "\n"
+	} else {
+		return "No, seriously... you can probably stop now.\n"
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6bcf367eb8dea2e2f3e3cf909d8affc48aab188
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+	"flag"
+	"runtime"
+
+	"github.com/onsi/ginkgo/config"
+)
+
+type RunWatchAndBuildCommandFlags struct {
+	Recurse     bool
+	Race        bool
+	Cover       bool
+	CoverPkg    string
+	SkipPackage string
+	Tags        string
+
+	//for run and watch commands
+	NumCPU         int
+	NumCompilers   int
+	ParallelStream bool
+	Notify         bool
+	AfterSuiteHook string
+	AutoNodes      bool
+
+	//only for run command
+	KeepGoing       bool
+	UntilItFails    bool
+	RandomizeSuites bool
+
+	//only for watch command
+	Depth int
+
+	FlagSet *flag.FlagSet
+}
+
+const runMode = 1
+const watchMode = 2
+const buildMode = 3
+
+func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+	c := &RunWatchAndBuildCommandFlags{
+		FlagSet: flagSet,
+	}
+	c.flags(runMode)
+	return c
+}
+
+func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+	c := &RunWatchAndBuildCommandFlags{
+		FlagSet: flagSet,
+	}
+	c.flags(watchMode)
+	return c
+}
+
+func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+	c := &RunWatchAndBuildCommandFlags{
+		FlagSet: flagSet,
+	}
+	c.flags(buildMode)
+	return c
+}
+
+func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
+	wasSet := false
+	c.FlagSet.Visit(func(f *flag.Flag) {
+		if f.Name == flagName {
+			wasSet = true
+		}
+	})
+
+	return wasSet
+}
+
+func (c *RunWatchAndBuildCommandFlags) computeNodes() {
+	if c.wasSet("nodes") {
+		return
+	}
+	if c.AutoNodes {
+		switch n := runtime.NumCPU(); {
+		case n <= 4:
+			c.NumCPU = n
+		default:
+			c.NumCPU = n - 1
+		}
+	}
+}
+
+func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
+	onWindows := (runtime.GOOS == "windows")
+
+	c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
+	c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
+	c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
+	c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
+	c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped.  If any part of the package's path matches, that package is ignored.")
+	c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
+
+	if mode == runMode || mode == watchMode {
+		config.Flags(c.FlagSet, "", false)
+		c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
+		c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
+		c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
+		c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
+		if !onWindows {
+			c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
+		}
+		c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
+	}
+
+	if mode == runMode {
+		c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
+		c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
+		c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
+	}
+
+	if mode == watchMode {
+		c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d56e5f78a52627b3462fabc6b0d2abe112b56e9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
@@ -0,0 +1,172 @@
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
+	"github.com/onsi/ginkgo/ginkgo/testrunner"
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type compilationInput struct {
+	runner *testrunner.TestRunner
+	result chan compilationOutput
+}
+
+type compilationOutput struct {
+	runner *testrunner.TestRunner
+	err    error
+}
+
+type SuiteRunner struct {
+	notifier         *Notifier
+	interruptHandler *interrupthandler.InterruptHandler
+}
+
+func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
+	return &SuiteRunner{
+		notifier:         notifier,
+		interruptHandler: interruptHandler,
+	}
+}
+
+func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
+	//we return this to the consumer, it will return each runner in order as it compiles
+	compilationOutputs := make(chan compilationOutput, len(runners))
+
+	//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
+	//we read from these channels in order to ensure we run the suites in order
+	orderedCompilationOutputs := []chan compilationOutput{}
+	for _ = range runners {
+		orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
+	}
+
+	//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
+	//we prefill the channel then close it, this ensures we compile things in the correct order
+	workPool := make(chan compilationInput, len(runners))
+	for i, runner := range runners {
+		workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
+	}
+	close(workPool)
+
+	//pick a reasonable numCompilers
+	if numCompilers == 0 {
+		numCompilers = runtime.NumCPU()
+	}
+
+	//a WaitGroup to help us wait for all compilers to shut down
+	wg := &sync.WaitGroup{}
+	wg.Add(numCompilers)
+
+	//spin up the concurrent compilers
+	for i := 0; i < numCompilers; i++ {
+		go func() {
+			defer wg.Done()
+			for input := range workPool {
+				if r.interruptHandler.WasInterrupted() {
+					return
+				}
+
+				if willCompile != nil {
+					willCompile(input.runner.Suite)
+				}
+
+				//We retry because Go sometimes steps on itself when multiple compiles happen in parallel.  This is ugly, but should help resolve flakiness...
+				var err error
+				retries := 0
+				for retries <= 5 {
+					if r.interruptHandler.WasInterrupted() {
+						return
+					}
+					if err = input.runner.Compile(); err == nil {
+						break
+					}
+					retries++
+				}
+
+				input.result <- compilationOutput{input.runner, err}
+			}
+		}()
+	}
+
+	//read from the compilation output channels *in order* and send them to the caller
+	//close the compilationOutputs channel to tell the caller we're done
+	go func() {
+		defer close(compilationOutputs)
+		for _, orderedCompilationOutput := range orderedCompilationOutputs {
+			select {
+			case compilationOutput := <-orderedCompilationOutput:
+				compilationOutputs <- compilationOutput
+			case <-r.interruptHandler.C:
+				//interrupt detected, wait for the compilers to shut down then bail
+				//this ensures we clean up after ourselves so we don't leave any compilation processes running
+				wg.Wait()
+				return
+			}
+		}
+	}()
+
+	return compilationOutputs
+}
+
+func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
+	runResult := testrunner.PassingRunResult()
+
+	compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
+
+	numSuitesThatRan := 0
+	suitesThatFailed := []testsuite.TestSuite{}
+	for compilationOutput := range compilationOutputs {
+		if compilationOutput.err != nil {
+			fmt.Print(compilationOutput.err.Error())
+		}
+		numSuitesThatRan++
+		suiteRunResult := testrunner.FailingRunResult()
+		if compilationOutput.err == nil {
+			suiteRunResult = compilationOutput.runner.Run()
+		}
+		r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
+		r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
+		runResult = runResult.Merge(suiteRunResult)
+		if !suiteRunResult.Passed {
+			suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
+			if !keepGoing {
+				break
+			}
+		}
+		if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
+			fmt.Println("")
+		}
+	}
+
+	if keepGoing && !runResult.Passed {
+		r.listFailedSuites(suitesThatFailed)
+	}
+
+	return runResult, numSuitesThatRan
+}
+
+func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
+	fmt.Println("")
+	fmt.Println("There were failures detected in the following suites:")
+
+	maxPackageNameLength := 0
+	for _, suite := range suitesThatFailed {
+		if len(suite.PackageName) > maxPackageNameLength {
+			maxPackageNameLength = len(suite.PackageName)
+		}
+	}
+
+	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+
+	for _, suite := range suitesThatFailed {
+		if config.DefaultReporterConfig.NoColor {
+			fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
+		} else {
+			fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
+		}
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..a73a6e379197296dc394b43dec881fbac78198a5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
@@ -0,0 +1,52 @@
+package testrunner
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"strings"
+	"sync"
+)
+
+type logWriter struct {
+	buffer *bytes.Buffer
+	lock   *sync.Mutex
+	log    *log.Logger
+}
+
+func newLogWriter(target io.Writer, node int) *logWriter {
+	return &logWriter{
+		buffer: &bytes.Buffer{},
+		lock:   &sync.Mutex{},
+		log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
+	}
+}
+
+func (w *logWriter) Write(data []byte) (n int, err error) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	w.buffer.Write(data)
+	contents := w.buffer.String()
+
+	lines := strings.Split(contents, "\n")
+	for _, line := range lines[0 : len(lines)-1] {
+		w.log.Println(line)
+	}
+
+	w.buffer.Reset()
+	w.buffer.Write([]byte(lines[len(lines)-1]))
+	return len(data), nil
+}
+
+func (w *logWriter) Close() error {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	if w.buffer.Len() > 0 {
+		w.log.Println(w.buffer.String())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d472acb8d2fedbfe074551eb6b89e664f864644
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
@@ -0,0 +1,27 @@
+package testrunner
+
+type RunResult struct {
+	Passed               bool
+	HasProgrammaticFocus bool
+}
+
+func PassingRunResult() RunResult {
+	return RunResult{
+		Passed:               true,
+		HasProgrammaticFocus: false,
+	}
+}
+
+func FailingRunResult() RunResult {
+	return RunResult{
+		Passed:               false,
+		HasProgrammaticFocus: false,
+	}
+}
+
+func (r RunResult) Merge(o RunResult) RunResult {
+	return RunResult{
+		Passed:               r.Passed && o.Passed,
+		HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
new file mode 100644
index 0000000000000000000000000000000000000000..a1e47ba18b3cad15a32fc86c0b4d9f18e832df1e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
@@ -0,0 +1,460 @@
+package testrunner
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+	"github.com/onsi/ginkgo/internal/remote"
+	"github.com/onsi/ginkgo/reporters/stenographer"
+	"github.com/onsi/ginkgo/types"
+)
+
+type TestRunner struct {
+	Suite testsuite.TestSuite
+
+	compiled              bool
+	compilationTargetPath string
+
+	numCPU         int
+	parallelStream bool
+	race           bool
+	cover          bool
+	coverPkg       string
+	tags           string
+	additionalArgs []string
+}
+
+func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
+	runner := &TestRunner{
+		Suite:          suite,
+		numCPU:         numCPU,
+		parallelStream: parallelStream,
+		race:           race,
+		cover:          cover,
+		coverPkg:       coverPkg,
+		tags:           tags,
+		additionalArgs: additionalArgs,
+	}
+
+	if !suite.Precompiled {
+		dir, err := ioutil.TempDir("", "ginkgo")
+		if err != nil {
+			panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
+		}
+		runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+	}
+
+	return runner
+}
+
+func (t *TestRunner) Compile() error {
+	return t.CompileTo(t.compilationTargetPath)
+}
+
+func (t *TestRunner) CompileTo(path string) error {
+	if t.compiled {
+		return nil
+	}
+
+	if t.Suite.Precompiled {
+		return nil
+	}
+
+	args := []string{"test", "-c", "-i", "-o", path}
+	if t.race {
+		args = append(args, "-race")
+	}
+	if t.cover || t.coverPkg != "" {
+		args = append(args, "-cover", "-covermode=atomic")
+	}
+	if t.coverPkg != "" {
+		args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
+	}
+	if t.tags != "" {
+		args = append(args, fmt.Sprintf("-tags=%s", t.tags))
+	}
+
+	cmd := exec.Command("go", args...)
+
+	cmd.Dir = t.Suite.Path
+
+	output, err := cmd.CombinedOutput()
+
+	if err != nil {
+		fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
+		if len(output) > 0 {
+			return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
+		}
+		return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
+	}
+
+	if fileExists(path) == false {
+		compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
+		if fileExists(compiledFile) {
+			// seems like we are on an old go version that does not support the -o flag on go test
+			// move the compiled test file to the desired location by hand
+			err = os.Rename(compiledFile, path)
+			if err != nil {
+				// We cannot move the file, perhaps because the source and destination
+				// are on different partitions. We can copy the file, however.
+				err = copyFile(compiledFile, path)
+				if err != nil {
+					return fmt.Errorf("Failed to copy compiled file: %s", err)
+				}
+			}
+		} else {
+			return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
+		}
+	}
+
+	t.compiled = true
+
+	return nil
+}
+
+func fileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil || os.IsNotExist(err) == false
+}
+
+// copyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file.
+func copyFile(src, dst string) error {
+	srcInfo, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	mode := srcInfo.Mode()
+
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+
+	defer in.Close()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		closeErr := out.Close()
+		if err == nil {
+			err = closeErr
+		}
+	}()
+
+	_, err = io.Copy(out, in)
+	if err != nil {
+		return err
+	}
+
+	err = out.Sync()
+	if err != nil {
+		return err
+	}
+
+	return out.Chmod(mode)
+}
+
+/*
+go test -c -i spits package.test out into the cwd. there's no way to change this.
+
+to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.
+
+unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.
+
+this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working.
+
+fixCompilationOutput..... rewrites the output to fix the paths.
+
+yeah......
+*/
+func fixCompilationOutput(output string, relToPath string) string {
+	re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
+	lines := strings.Split(output, "\n")
+	for i, line := range lines {
+		indices := re.FindStringSubmatchIndex(line)
+		if len(indices) == 0 {
+			continue
+		}
+
+		path := line[indices[2]:indices[3]]
+		path = filepath.Join(relToPath, path)
+		lines[i] = path + line[indices[3]:]
+	}
+	return strings.Join(lines, "\n")
+}
+
+func (t *TestRunner) Run() RunResult {
+	if t.Suite.IsGinkgo {
+		if t.numCPU > 1 {
+			if t.parallelStream {
+				return t.runAndStreamParallelGinkgoSuite()
+			} else {
+				return t.runParallelGinkgoSuite()
+			}
+		} else {
+			return t.runSerialGinkgoSuite()
+		}
+	} else {
+		return t.runGoTestSuite()
+	}
+}
+
+func (t *TestRunner) CleanUp() {
+	if t.Suite.Precompiled {
+		return
+	}
+	os.RemoveAll(filepath.Dir(t.compilationTargetPath))
+}
+
+func (t *TestRunner) runSerialGinkgoSuite() RunResult {
+	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+	return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
+}
+
+func (t *TestRunner) runGoTestSuite() RunResult {
+	return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
+}
+
+func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
+	completions := make(chan RunResult)
+	writers := make([]*logWriter, t.numCPU)
+
+	server, err := remote.NewServer(t.numCPU)
+	if err != nil {
+		panic("Failed to start parallel spec server")
+	}
+
+	server.Start()
+	defer server.Close()
+
+	for cpu := 0; cpu < t.numCPU; cpu++ {
+		config.GinkgoConfig.ParallelNode = cpu + 1
+		config.GinkgoConfig.ParallelTotal = t.numCPU
+		config.GinkgoConfig.SyncHost = server.Address()
+
+		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+
+		writers[cpu] = newLogWriter(os.Stdout, cpu+1)
+
+		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
+
+		server.RegisterAlive(cpu+1, func() bool {
+			if cmd.ProcessState == nil {
+				return true
+			}
+			return !cmd.ProcessState.Exited()
+		})
+
+		go t.run(cmd, completions)
+	}
+
+	res := PassingRunResult()
+
+	for cpu := 0; cpu < t.numCPU; cpu++ {
+		res = res.Merge(<-completions)
+	}
+
+	for _, writer := range writers {
+		writer.Close()
+	}
+
+	os.Stdout.Sync()
+
+	if t.cover || t.coverPkg != "" {
+		t.combineCoverprofiles()
+	}
+
+	return res
+}
+
+func (t *TestRunner) runParallelGinkgoSuite() RunResult {
+	result := make(chan bool)
+	completions := make(chan RunResult)
+	writers := make([]*logWriter, t.numCPU)
+	reports := make([]*bytes.Buffer, t.numCPU)
+
+	stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
+	aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
+
+	server, err := remote.NewServer(t.numCPU)
+	if err != nil {
+		panic("Failed to start parallel spec server")
+	}
+	server.RegisterReporters(aggregator)
+	server.Start()
+	defer server.Close()
+
+	for cpu := 0; cpu < t.numCPU; cpu++ {
+		config.GinkgoConfig.ParallelNode = cpu + 1
+		config.GinkgoConfig.ParallelTotal = t.numCPU
+		config.GinkgoConfig.SyncHost = server.Address()
+		config.GinkgoConfig.StreamHost = server.Address()
+
+		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+
+		reports[cpu] = &bytes.Buffer{}
+		writers[cpu] = newLogWriter(reports[cpu], cpu+1)
+
+		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
+
+		server.RegisterAlive(cpu+1, func() bool {
+			if cmd.ProcessState == nil {
+				return true
+			}
+			return !cmd.ProcessState.Exited()
+		})
+
+		go t.run(cmd, completions)
+	}
+
+	res := PassingRunResult()
+
+	for cpu := 0; cpu < t.numCPU; cpu++ {
+		res = res.Merge(<-completions)
+	}
+
+	//all test processes are done, at this point
+	//we should be able to wait for the aggregator to tell us that it's done
+
+	select {
+	case <-result:
+		fmt.Println("")
+	case <-time.After(time.Second):
+		//the aggregator never got back to us!  something must have gone wrong
+		fmt.Println(`
+	 -------------------------------------------------------------------
+	|                                                                   |
+	|  Ginkgo timed out waiting for all parallel nodes to report back!  |
+	|                                                                   |
+	 -------------------------------------------------------------------
+`)
+
+		os.Stdout.Sync()
+
+		for _, writer := range writers {
+			writer.Close()
+		}
+
+		for _, report := range reports {
+			fmt.Print(report.String())
+		}
+
+		os.Stdout.Sync()
+	}
+
+	if t.cover || t.coverPkg != "" {
+		t.combineCoverprofiles()
+	}
+
+	return res
+}
+
+func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
+	args := []string{"--test.timeout=24h"}
+	if t.cover || t.coverPkg != "" {
+		coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
+		if t.numCPU > 1 {
+			coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
+		}
+		args = append(args, coverprofile)
+	}
+
+	args = append(args, ginkgoArgs...)
+	args = append(args, t.additionalArgs...)
+
+	path := t.compilationTargetPath
+	if t.Suite.Precompiled {
+		path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
+	}
+
+	cmd := exec.Command(path, args...)
+
+	cmd.Dir = t.Suite.Path
+	cmd.Stderr = stream
+	cmd.Stdout = stream
+
+	return cmd
+}
+
+func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
+	var res RunResult
+
+	defer func() {
+		if completions != nil {
+			completions <- res
+		}
+	}()
+
+	err := cmd.Start()
+	if err != nil {
+		fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
+		return res
+	}
+
+	cmd.Wait()
+	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+
+	return res
+}
+
+func (t *TestRunner) combineCoverprofiles() {
+	profiles := []string{}
+	for cpu := 1; cpu <= t.numCPU; cpu++ {
+		coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
+		coverFile = filepath.Join(t.Suite.Path, coverFile)
+		coverProfile, err := ioutil.ReadFile(coverFile)
+		os.Remove(coverFile)
+
+		if err == nil {
+			profiles = append(profiles, string(coverProfile))
+		}
+	}
+
+	if len(profiles) != t.numCPU {
+		return
+	}
+
+	lines := map[string]int{}
+	lineOrder := []string{}
+	for i, coverProfile := range profiles {
+		for _, line := range strings.Split(string(coverProfile), "\n")[1:] {
+			if len(line) == 0 {
+				continue
+			}
+			components := strings.Split(line, " ")
+			count, _ := strconv.Atoi(components[len(components)-1])
+			prefix := strings.Join(components[0:len(components)-1], " ")
+			lines[prefix] += count
+			if i == 0 {
+				lineOrder = append(lineOrder, prefix)
+			}
+		}
+	}
+
+	output := []string{"mode: atomic"}
+	for _, line := range lineOrder {
+		output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
+	}
+	finalOutput := strings.Join(output, "\n")
+	ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ef3bc44ff145a5904c097a5c2bcf1221e913f64
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
@@ -0,0 +1,116 @@
+package testsuite
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+type TestSuite struct {
+	Path        string
+	PackageName string
+	IsGinkgo    bool
+	Precompiled bool
+}
+
+func PrecompiledTestSuite(path string) (TestSuite, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return TestSuite{}, err
+	}
+
+	if info.IsDir() {
+		return TestSuite{}, errors.New("this is a directory, not a file")
+	}
+
+	if filepath.Ext(path) != ".test" {
+		return TestSuite{}, errors.New("this is not a .test binary")
+	}
+
+	if info.Mode()&0111 == 0 {
+		return TestSuite{}, errors.New("this is not executable")
+	}
+
+	dir := relPath(filepath.Dir(path))
+	packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
+
+	return TestSuite{
+		Path:        dir,
+		PackageName: packageName,
+		IsGinkgo:    true,
+		Precompiled: true,
+	}, nil
+}
+
+func SuitesInDir(dir string, recurse bool) []TestSuite {
+	suites := []TestSuite{}
+
+	// "This change will only be enabled if the go command is run with
+	// GO15VENDOREXPERIMENT=1 in its environment."
+	// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
+	vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
+	if (vendorExperiment == "1") && path.Base(dir) == "vendor" {
+		return suites
+	}
+
+	files, _ := ioutil.ReadDir(dir)
+	re := regexp.MustCompile(`_test\.go$`)
+	for _, file := range files {
+		if !file.IsDir() && re.Match([]byte(file.Name())) {
+			suites = append(suites, New(dir, files))
+			break
+		}
+	}
+
+	if recurse {
+		re = regexp.MustCompile(`^[._]`)
+		for _, file := range files {
+			if file.IsDir() && !re.Match([]byte(file.Name())) {
+				suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
+			}
+		}
+	}
+
+	return suites
+}
+
+func relPath(dir string) string {
+	dir, _ = filepath.Abs(dir)
+	cwd, _ := os.Getwd()
+	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+	dir = "." + string(filepath.Separator) + dir
+	return dir
+}
+
+func New(dir string, files []os.FileInfo) TestSuite {
+	return TestSuite{
+		Path:        relPath(dir),
+		PackageName: packageNameForSuite(dir),
+		IsGinkgo:    filesHaveGinkgoSuite(dir, files),
+	}
+}
+
+func packageNameForSuite(dir string) string {
+	path, _ := filepath.Abs(dir)
+	return filepath.Base(path)
+}
+
+func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
+	reTestFile := regexp.MustCompile(`_test\.go$`)
+	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
+
+	for _, file := range files {
+		if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
+			contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
+			if reGinkgo.Match(contents) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..683c3a9982d32523f59d238c4641c78b4a040b75
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os/exec"
+)
+
+func BuildUnfocusCommand() *Command {
+	return &Command{
+		Name:         "unfocus",
+		AltName:      "blur",
+		FlagSet:      flag.NewFlagSet("unfocus", flag.ExitOnError),
+		UsageCommand: "ginkgo unfocus (or ginkgo blur)",
+		Usage: []string{
+			"Recursively unfocuses any focused tests under the current directory",
+		},
+		Command: unfocusSpecs,
+	}
+}
+
+func unfocusSpecs([]string, []string) {
+	unfocus("Describe")
+	unfocus("Context")
+	unfocus("It")
+	unfocus("Measure")
+	unfocus("DescribeTable")
+	unfocus("Entry")
+}
+
+func unfocus(component string) {
+	fmt.Printf("Removing F%s...\n", component)
+	cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
+	out, _ := cmd.CombinedOutput()
+	if string(out) != "" {
+		println(string(out))
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..cdca3a348b6bd80b1c1a966c661e9cda522a50e5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"github.com/onsi/ginkgo/config"
+)
+
+func BuildVersionCommand() *Command {
+	return &Command{
+		Name:         "version",
+		FlagSet:      flag.NewFlagSet("version", flag.ExitOnError),
+		UsageCommand: "ginkgo version",
+		Usage: []string{
+			"Print Ginkgo's version",
+		},
+		Command: printVersion,
+	}
+}
+
+func printVersion([]string, []string) {
+	fmt.Printf("Ginkgo Version %s\n", config.VERSION)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go
new file mode 100644
index 0000000000000000000000000000000000000000..6c485c5b1af813c0d7242e926c12d0488fa23d17
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go
@@ -0,0 +1,22 @@
+package watch
+
+import "sort"
+
+type Delta struct {
+	ModifiedPackages []string
+
+	NewSuites      []*Suite
+	RemovedSuites  []*Suite
+	modifiedSuites []*Suite
+}
+
+type DescendingByDelta []*Suite
+
+func (a DescendingByDelta) Len() int           { return len(a) }
+func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
+
+func (d Delta) ModifiedSuites() []*Suite {
+	sort.Sort(DescendingByDelta(d.modifiedSuites))
+	return d.modifiedSuites
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
new file mode 100644
index 0000000000000000000000000000000000000000..452c07e4d69e87574e8a62bfb2ba45eaa29881ff
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
@@ -0,0 +1,71 @@
+package watch
+
+import (
+	"fmt"
+
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type SuiteErrors map[testsuite.TestSuite]error
+
+type DeltaTracker struct {
+	maxDepth      int
+	suites        map[string]*Suite
+	packageHashes *PackageHashes
+}
+
+func NewDeltaTracker(maxDepth int) *DeltaTracker {
+	return &DeltaTracker{
+		maxDepth:      maxDepth,
+		packageHashes: NewPackageHashes(),
+		suites:        map[string]*Suite{},
+	}
+}
+
+func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
+	errors = SuiteErrors{}
+	delta.ModifiedPackages = d.packageHashes.CheckForChanges()
+
+	providedSuitePaths := map[string]bool{}
+	for _, suite := range suites {
+		providedSuitePaths[suite.Path] = true
+	}
+
+	d.packageHashes.StartTrackingUsage()
+
+	for _, suite := range d.suites {
+		if providedSuitePaths[suite.Suite.Path] {
+			if suite.Delta() > 0 {
+				delta.modifiedSuites = append(delta.modifiedSuites, suite)
+			}
+		} else {
+			delta.RemovedSuites = append(delta.RemovedSuites, suite)
+		}
+	}
+
+	d.packageHashes.StopTrackingUsageAndPrune()
+
+	for _, suite := range suites {
+		_, ok := d.suites[suite.Path]
+		if !ok {
+			s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
+			if err != nil {
+				errors[suite] = err
+				continue
+			}
+			d.suites[suite.Path] = s
+			delta.NewSuites = append(delta.NewSuites, s)
+		}
+	}
+
+	return delta, errors
+}
+
+func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
+	s, ok := d.suites[suite.Path]
+	if !ok {
+		return fmt.Errorf("unknown suite %s", suite.Path)
+	}
+
+	return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
new file mode 100644
index 0000000000000000000000000000000000000000..82c25face30f4c7095baac9b89008d0956bfa6f5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
@@ -0,0 +1,91 @@
+package watch
+
+import (
+	"go/build"
+	"regexp"
+)
+
+var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
+
+type Dependencies struct {
+	deps map[string]int
+}
+
+func NewDependencies(path string, maxDepth int) (Dependencies, error) {
+	d := Dependencies{
+		deps: map[string]int{},
+	}
+
+	if maxDepth == 0 {
+		return d, nil
+	}
+
+	err := d.seedWithDepsForPackageAtPath(path)
+	if err != nil {
+		return d, err
+	}
+
+	for depth := 1; depth < maxDepth; depth++ {
+		n := len(d.deps)
+		d.addDepsForDepth(depth)
+		if n == len(d.deps) {
+			break
+		}
+	}
+
+	return d, nil
+}
+
+func (d Dependencies) Dependencies() map[string]int {
+	return d.deps
+}
+
+func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
+	pkg, err := build.ImportDir(path, 0)
+	if err != nil {
+		return err
+	}
+
+	d.resolveAndAdd(pkg.Imports, 1)
+	d.resolveAndAdd(pkg.TestImports, 1)
+	d.resolveAndAdd(pkg.XTestImports, 1)
+
+	delete(d.deps, pkg.Dir)
+	return nil
+}
+
+func (d Dependencies) addDepsForDepth(depth int) {
+	for dep, depDepth := range d.deps {
+		if depDepth == depth {
+			d.addDepsForDep(dep, depth+1)
+		}
+	}
+}
+
+func (d Dependencies) addDepsForDep(dep string, depth int) {
+	pkg, err := build.ImportDir(dep, 0)
+	if err != nil {
+		println(err.Error())
+		return
+	}
+	d.resolveAndAdd(pkg.Imports, depth)
+}
+
+func (d Dependencies) resolveAndAdd(deps []string, depth int) {
+	for _, dep := range deps {
+		pkg, err := build.Import(dep, ".", 0)
+		if err != nil {
+			continue
+		}
+		if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) {
+			d.addDepIfNotPresent(pkg.Dir, depth)
+		}
+	}
+}
+
+func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
+	_, ok := d.deps[dep]
+	if !ok {
+		d.deps[dep] = depth
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
new file mode 100644
index 0000000000000000000000000000000000000000..eaf357c249c17ff24b9ecc3eb0555914eb73d1d6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
@@ -0,0 +1,103 @@
+package watch
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"regexp"
+	"time"
+)
+
+var goRegExp = regexp.MustCompile(`\.go$`)
+var goTestRegExp = regexp.MustCompile(`_test\.go$`)
+
+type PackageHash struct {
+	CodeModifiedTime time.Time
+	TestModifiedTime time.Time
+	Deleted          bool
+
+	path     string
+	codeHash string
+	testHash string
+}
+
+func NewPackageHash(path string) *PackageHash {
+	p := &PackageHash{
+		path: path,
+	}
+
+	p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
+
+	return p
+}
+
+func (p *PackageHash) CheckForChanges() bool {
+	codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
+
+	if deleted {
+		if p.Deleted == false {
+			t := time.Now()
+			p.CodeModifiedTime = t
+			p.TestModifiedTime = t
+		}
+		p.Deleted = true
+		return true
+	}
+
+	modified := false
+	p.Deleted = false
+
+	if p.codeHash != codeHash {
+		p.CodeModifiedTime = codeModifiedTime
+		modified = true
+	}
+	if p.testHash != testHash {
+		p.TestModifiedTime = testModifiedTime
+		modified = true
+	}
+
+	p.codeHash = codeHash
+	p.testHash = testHash
+	return modified
+}
+
+func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
+	infos, err := ioutil.ReadDir(p.path)
+
+	if err != nil {
+		deleted = true
+		return
+	}
+
+	for _, info := range infos {
+		if info.IsDir() {
+			continue
+		}
+
+		if goTestRegExp.Match([]byte(info.Name())) {
+			testHash += p.hashForFileInfo(info)
+			if info.ModTime().After(testModifiedTime) {
+				testModifiedTime = info.ModTime()
+			}
+			continue
+		}
+
+		if goRegExp.Match([]byte(info.Name())) {
+			codeHash += p.hashForFileInfo(info)
+			if info.ModTime().After(codeModifiedTime) {
+				codeModifiedTime = info.ModTime()
+			}
+		}
+	}
+
+	testHash += codeHash
+	if codeModifiedTime.After(testModifiedTime) {
+		testModifiedTime = codeModifiedTime
+	}
+
+	return
+}
+
+func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
+	return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
new file mode 100644
index 0000000000000000000000000000000000000000..262eaa847ea70e94ccbbfb6c056623191f98769f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
@@ -0,0 +1,82 @@
+package watch
+
+import (
+	"path/filepath"
+	"sync"
+)
+
+type PackageHashes struct {
+	PackageHashes map[string]*PackageHash
+	usedPaths     map[string]bool
+	lock          *sync.Mutex
+}
+
+func NewPackageHashes() *PackageHashes {
+	return &PackageHashes{
+		PackageHashes: map[string]*PackageHash{},
+		usedPaths:     nil,
+		lock:          &sync.Mutex{},
+	}
+}
+
+func (p *PackageHashes) CheckForChanges() []string {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	modified := []string{}
+
+	for _, packageHash := range p.PackageHashes {
+		if packageHash.CheckForChanges() {
+			modified = append(modified, packageHash.path)
+		}
+	}
+
+	return modified
+}
+
+func (p *PackageHashes) Add(path string) *PackageHash {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	path, _ = filepath.Abs(path)
+	_, ok := p.PackageHashes[path]
+	if !ok {
+		p.PackageHashes[path] = NewPackageHash(path)
+	}
+
+	if p.usedPaths != nil {
+		p.usedPaths[path] = true
+	}
+	return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) Get(path string) *PackageHash {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	path, _ = filepath.Abs(path)
+	if p.usedPaths != nil {
+		p.usedPaths[path] = true
+	}
+	return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) StartTrackingUsage() {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.usedPaths = map[string]bool{}
+}
+
+func (p *PackageHashes) StopTrackingUsageAndPrune() {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	for path := range p.PackageHashes {
+		if !p.usedPaths[path] {
+			delete(p.PackageHashes, path)
+		}
+	}
+
+	p.usedPaths = nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
new file mode 100644
index 0000000000000000000000000000000000000000..5deaba7cb6dbe8212f726f2d326b2988b5c5b578
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
@@ -0,0 +1,87 @@
+package watch
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type Suite struct {
+	Suite        testsuite.TestSuite
+	RunTime      time.Time
+	Dependencies Dependencies
+
+	sharedPackageHashes *PackageHashes
+}
+
+func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+	deps, err := NewDependencies(suite.Path, maxDepth)
+	if err != nil {
+		return nil, err
+	}
+
+	sharedPackageHashes.Add(suite.Path)
+	for dep := range deps.Dependencies() {
+		sharedPackageHashes.Add(dep)
+	}
+
+	return &Suite{
+		Suite:        suite,
+		Dependencies: deps,
+
+		sharedPackageHashes: sharedPackageHashes,
+	}, nil
+}
+
+func (s *Suite) Delta() float64 {
+	delta := s.delta(s.Suite.Path, true, 0) * 1000
+	for dep, depth := range s.Dependencies.Dependencies() {
+		delta += s.delta(dep, false, depth)
+	}
+	return delta
+}
+
+func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
+	s.RunTime = time.Now()
+
+	deps, err := NewDependencies(s.Suite.Path, maxDepth)
+	if err != nil {
+		return err
+	}
+
+	s.sharedPackageHashes.Add(s.Suite.Path)
+	for dep := range deps.Dependencies() {
+		s.sharedPackageHashes.Add(dep)
+	}
+
+	s.Dependencies = deps
+
+	return nil
+}
+
+func (s *Suite) Description() string {
+	numDeps := len(s.Dependencies.Dependencies())
+	pluralizer := "ies"
+	if numDeps == 1 {
+		pluralizer = "y"
+	}
+	return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
+}
+
+func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
+	return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
+}
+
+func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
+	packageHash := s.sharedPackageHashes.Get(packagePath)
+	var modifiedTime time.Time
+	if includeTests {
+		modifiedTime = packageHash.TestModifiedTime
+	} else {
+		modifiedTime = packageHash.CodeModifiedTime
+	}
+
+	return modifiedTime.Sub(s.RunTime)
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
new file mode 100644
index 0000000000000000000000000000000000000000..03ea012587737cd0fa42f82eeefab7e8f9d8f49b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
@@ -0,0 +1,172 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
+	"github.com/onsi/ginkgo/ginkgo/testrunner"
+	"github.com/onsi/ginkgo/ginkgo/testsuite"
+	"github.com/onsi/ginkgo/ginkgo/watch"
+)
+
+func BuildWatchCommand() *Command {
+	commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
+	interruptHandler := interrupthandler.NewInterruptHandler()
+	notifier := NewNotifier(commandFlags)
+	watcher := &SpecWatcher{
+		commandFlags:     commandFlags,
+		notifier:         notifier,
+		interruptHandler: interruptHandler,
+		suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
+	}
+
+	return &Command{
+		Name:         "watch",
+		FlagSet:      commandFlags.FlagSet,
+		UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+		Usage: []string{
+			"Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
+			"Any arguments after -- will be passed to the test.",
+		},
+		Command:                   watcher.WatchSpecs,
+		SuppressFlagDocumentation: true,
+		FlagDocSubstitute: []string{
+			"Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
+		},
+	}
+}
+
+type SpecWatcher struct {
+	commandFlags     *RunWatchAndBuildCommandFlags
+	notifier         *Notifier
+	interruptHandler *interrupthandler.InterruptHandler
+	suiteRunner      *SuiteRunner
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+	w.commandFlags.computeNodes()
+	w.notifier.VerifyNotificationsAreAvailable()
+
+	w.WatchSuites(args, additionalArgs)
+}
+
+func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
+	runners := []*testrunner.TestRunner{}
+
+	for _, suite := range suites {
+		runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
+	}
+
+	return runners
+}
+
+func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
+	suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
+
+	if len(suites) == 0 {
+		complainAndQuit("Found no test suites")
+	}
+
+	fmt.Printf("Identified %d test %s.  Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
+	deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
+	delta, errors := deltaTracker.Delta(suites)
+
+	fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
+	for _, suite := range delta.NewSuites {
+		fmt.Println("  " + suite.Description())
+	}
+
+	for suite, err := range errors {
+		fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+	}
+
+	if len(suites) == 1 {
+		runners := w.runnersForSuites(suites, additionalArgs)
+		w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
+		runners[0].CleanUp()
+	}
+
+	ticker := time.NewTicker(time.Second)
+
+	for {
+		select {
+		case <-ticker.C:
+			suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
+			delta, _ := deltaTracker.Delta(suites)
+
+			suitesToRun := []testsuite.TestSuite{}
+
+			if len(delta.NewSuites) > 0 {
+				fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
+				for _, suite := range delta.NewSuites {
+					suitesToRun = append(suitesToRun, suite.Suite)
+					fmt.Println("  " + suite.Description())
+				}
+			}
+
+			modifiedSuites := delta.ModifiedSuites()
+			if len(modifiedSuites) > 0 {
+				fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
+				for _, pkg := range delta.ModifiedPackages {
+					fmt.Println("  " + pkg)
+				}
+				fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
+				for _, suite := range modifiedSuites {
+					suitesToRun = append(suitesToRun, suite.Suite)
+					fmt.Println("  " + suite.Description())
+				}
+				fmt.Println("")
+			}
+
+			if len(suitesToRun) > 0 {
+				w.UpdateSeed()
+				w.ComputeSuccinctMode(len(suitesToRun))
+				runners := w.runnersForSuites(suitesToRun, additionalArgs)
+				result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
+					deltaTracker.WillRun(suite)
+				})
+				for _, runner := range runners {
+					runner.CleanUp()
+				}
+				if !w.interruptHandler.WasInterrupted() {
+					color := redColor
+					if result.Passed {
+						color = greenColor
+					}
+					fmt.Println(color + "\nDone.  Resuming watch..." + defaultStyle)
+				}
+			}
+
+		case <-w.interruptHandler.C:
+			return
+		}
+	}
+}
+
+func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
+	if config.DefaultReporterConfig.Verbose {
+		config.DefaultReporterConfig.Succinct = false
+		return
+	}
+
+	if w.commandFlags.wasSet("succinct") {
+		return
+	}
+
+	if numSuites == 1 {
+		config.DefaultReporterConfig.Succinct = false
+	}
+
+	if numSuites > 1 {
+		config.DefaultReporterConfig.Succinct = true
+	}
+}
+
+func (w *SpecWatcher) UpdateSeed() {
+	if !w.commandFlags.wasSet("seed") {
+		config.GinkgoConfig.RandomSeed = time.Now().Unix()
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d8e593052a369b64680ea9a1e3285eacbb12ecd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -0,0 +1,558 @@
+/*
+Ginkgo is a BDD-style testing framework for Golang
+
+The godoc documentation describes Ginkgo's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
+
+Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Ginkgo is MIT-Licensed
+*/
+package ginkgo
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/internal/codelocation"
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/internal/remote"
+	"github.com/onsi/ginkgo/internal/suite"
+	"github.com/onsi/ginkgo/internal/testingtproxy"
+	"github.com/onsi/ginkgo/internal/writer"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/reporters/stenographer"
+	"github.com/onsi/ginkgo/types"
+)
+
+const GINKGO_VERSION = config.VERSION
+const GINKGO_PANIC = `
+Your test failed.
+Ginkgo panics to prevent subsequent assertions from running.
+Normally Ginkgo rescues this panic so you shouldn't see it.
+
+But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+
+	defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic.
+`
+const defaultTimeout = 1
+
+var globalSuite *suite.Suite
+var globalFailer *failer.Failer
+
+func init() {
+	config.Flags(flag.CommandLine, "ginkgo", true)
+	GinkgoWriter = writer.New(os.Stdout)
+	globalFailer = failer.New()
+	globalSuite = suite.New(globalFailer)
+}
+
+//GinkgoWriter implements an io.Writer
+//When running in verbose mode any writes to GinkgoWriter will be immediately printed
+//to stdout.  Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+//only if the current test fails.
+var GinkgoWriter io.Writer
+
+//The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+	Fail()
+}
+
+//GinkgoParallelNode returns the parallel node number for the current ginkgo process
+//The node number is 1-indexed
+func GinkgoParallelNode() int {
+	return config.GinkgoConfig.ParallelNode
+}
+
+//Some matcher libraries or legacy codebases require a *testing.T
+//GinkgoT implements an interface analogous to *testing.T and can be used if
+//the library in question accepts *testing.T through an interface
+//
+// For example, with testify:
+// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
+//
+// Or with gomock:
+// gomock.NewController(GinkgoT())
+//
+// GinkgoT() takes an optional offset argument that can be used to get the
+// correct line number associated with the failure.
+func GinkgoT(optionalOffset ...int) GinkgoTInterface {
+	offset := 3
+	if len(optionalOffset) > 0 {
+		offset = optionalOffset[0]
+	}
+	return testingtproxy.New(GinkgoWriter, Fail, offset)
+}
+
+//The interface returned by GinkgoT().  This covers most of the methods
+//in the testing package's T.
+type GinkgoTInterface interface {
+	Fail()
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+	FailNow()
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Log(args ...interface{})
+	Logf(format string, args ...interface{})
+	Failed() bool
+	Parallel()
+	Skip(args ...interface{})
+	Skipf(format string, args ...interface{})
+	SkipNow()
+	Skipped() bool
+}
+
+//Custom Ginkgo test reporters must implement the Reporter interface.
+//
+//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
+//and a SpecSummary just before a spec begins and just after a spec ends
+type Reporter reporters.Reporter
+
+//Asynchronous specs are given a channel of the Done type.  You must close or write to the channel
+//to tell Ginkgo that your async test is done.
+type Done chan<- interface{}
+
+//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
+//	FullTestText: a concatenation of ComponentTexts and the TestText
+//	ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
+//	TestText: the text in the actual It or Measure node
+//	IsMeasurement: true if the current test is a measurement
+//	FileName: the name of the file containing the current test
+//	LineNumber: the line number for the current test
+//	Failed: if the current test has failed, this will be true (useful in an AfterEach)
+type GinkgoTestDescription struct {
+	FullTestText   string
+	ComponentTexts []string
+	TestText       string
+
+	IsMeasurement bool
+
+	FileName   string
+	LineNumber int
+
+	Failed bool
+}
+
+//CurrentGinkgoTestDescription returns information about the current running test.
+func CurrentGinkgoTestDescription() GinkgoTestDescription {
+	summary, ok := globalSuite.CurrentRunningSpecSummary()
+	if !ok {
+		return GinkgoTestDescription{}
+	}
+
+	subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
+
+	return GinkgoTestDescription{
+		ComponentTexts: summary.ComponentTexts[1:],
+		FullTestText:   strings.Join(summary.ComponentTexts[1:], " "),
+		TestText:       summary.ComponentTexts[len(summary.ComponentTexts)-1],
+		IsMeasurement:  summary.IsMeasurement,
+		FileName:       subjectCodeLocation.FileName,
+		LineNumber:     subjectCodeLocation.LineNumber,
+		Failed:         summary.HasFailureState(),
+	}
+}
+
+//Measurement tests receive a Benchmarker.
+//
+//You use the Time() function to time how long the passed in body function takes to run
+//You use the RecordValue() function to track arbitrary numerical measurements.
+//The optional info argument is passed to the test reporter and can be used to
+// provide the measurement data to a custom reporter with context.
+//
+//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
+type Benchmarker interface {
+	Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+	RecordValue(name string, value float64, info ...interface{})
+}
+
+//RunSpecs is the entry point for the Ginkgo test runner.
+//You must call this within a Golang testing TestX(t *testing.T) function.
+//
+//To bootstrap a test suite you can use the Ginkgo CLI:
+//
+//	ginkgo bootstrap
+func RunSpecs(t GinkgoTestingT, description string) bool {
+	specReporters := []Reporter{buildDefaultReporter()}
+	return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
+//RunSpecs() with this method.
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+	specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
+	return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
+//RunSpecs() with this method.  Note that parallel tests will not work correctly without the default reporter
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+	writer := GinkgoWriter.(*writer.Writer)
+	writer.SetStream(config.DefaultReporterConfig.Verbose)
+	reporters := make([]reporters.Reporter, len(specReporters))
+	for i, reporter := range specReporters {
+		reporters[i] = reporter
+	}
+	passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
+	if passed && hasFocusedTests {
+		fmt.Println("PASS | FOCUSED")
+		os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+	}
+	return passed
+}
+
+func buildDefaultReporter() Reporter {
+	remoteReportingServer := config.GinkgoConfig.StreamHost
+	if remoteReportingServer == "" {
+		stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
+		return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
+	} else {
+		return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
+	}
+}
+
+//Skip notifies Ginkgo that the current spec should be skipped.
+func Skip(message string, callerSkip ...int) {
+	skip := 0
+	if len(callerSkip) > 0 {
+		skip = callerSkip[0]
+	}
+
+	globalFailer.Skip(message, codelocation.New(skip+1))
+	panic(GINKGO_PANIC)
+}
+
+//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+func Fail(message string, callerSkip ...int) {
+	skip := 0
+	if len(callerSkip) > 0 {
+		skip = callerSkip[0]
+	}
+
+	globalFailer.Fail(message, codelocation.New(skip+1))
+	panic(GINKGO_PANIC)
+}
+
+//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+//calls out to Gomega
+//
+//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+//further assertions from running.  This panic must be recovered.  Ginkgo does this for you
+//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
+//
+//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
+//way for Ginkgo to rescue the panic.  To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+func GinkgoRecover() {
+	e := recover()
+	if e != nil {
+		globalFailer.Panic(codelocation.New(1), e)
+	}
+}
+
+//Describe blocks allow you to organize your specs.  A Describe block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks.  Describe and Context blocks are functionally
+//equivalent.  The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Describe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FDescribe
+func FDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PDescribe
+func PDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XDescribe
+func XDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//Context blocks allow you to organize your specs.  A Context block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks.  Describe and Context blocks are functionally
+//equivalent.  The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Context(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FContext
+func FContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PContext
+func PContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XContext
+func XContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//It blocks contain your test code and assertions.  You cannot nest any other Ginkgo blocks
+//within an It block.
+//
+//Ginkgo will normally run It blocks synchronously.  To perform asynchronous tests, pass a
+//function that accepts a Done channel.  When you do this, you can also provide an optional timeout.
+func It(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can focus individual Its using FIt
+func FIt(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can mark Its as pending using PIt
+func PIt(text string, _ ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Its as pending using XIt
+func XIt(text string, _ ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//Specify blocks are aliases for It blocks and allow for more natural wording in situations
+//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
+//which apply to It blocks.
+func Specify(text string, body interface{}, timeout ...float64) bool {
+	return It(text, body, timeout...)
+}
+
+//You can focus individual Specifys using FSpecify
+func FSpecify(text string, body interface{}, timeout ...float64) bool {
+	return FIt(text, body, timeout...)
+}
+
+//You can mark Specifys as pending using PSpecify
+func PSpecify(text string, is ...interface{}) bool {
+	return PIt(text, is...)
+}
+
+//You can mark Specifys as pending using XSpecify
+func XSpecify(text string, is ...interface{}) bool {
+	return XIt(text, is...)
+}
+
+//By allows you to better document large Its.
+//
+//Generally you should try to keep your Its short and to the point.  This is not always possible, however,
+//especially in the context of integration tests that capture a particular workflow.
+//
+//By allows you to document such flows.  By must be called within a runnable node (It, BeforeEach, Measure, etc...)
+//By will simply log the passed in text to the GinkgoWriter.  If By is handed a function it will immediately run the function.
+func By(text string, callbacks ...func()) {
+	preamble := "\x1b[1mSTEP\x1b[0m"
+	if config.DefaultReporterConfig.NoColor {
+		preamble = "STEP"
+	}
+	fmt.Fprintln(GinkgoWriter, preamble+": "+text)
+	if len(callbacks) == 1 {
+		callbacks[0]()
+	}
+	if len(callbacks) > 1 {
+		panic("just one callback per By, please")
+	}
+}
+
+//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
+//and accumulate metrics provided to the Benchmarker by the body function.
+//
+//The body function must have the signature:
+//	func(b Benchmarker)
+func Measure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
+	return true
+}
+
+//You can focus individual Measures using FMeasure
+func FMeasure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
+	return true
+}
+
+//You can mark Measurements as pending using PMeasure
+func PMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Measurements as pending using XMeasure
+func XMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//BeforeSuite blocks are run just once before any specs are run.  When running in parallel, each
+//parallel node process will call BeforeSuite.
+//
+//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* BeforeSuite handler per test suite.  You typically do so in your bootstrap file at the top level.
+func BeforeSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
+//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
+//
+//When running in parallel, each parallel node process will call AfterSuite.
+//
+//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* AfterSuite handler per test suite.  You typically do so in your bootstrap file at the top level.
+func AfterSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
+//nodes when running tests in parallel.  For example, say you have a shared database that you can only start one instance of that
+//must be used in your tests.  When running in parallel, only one node should set up the database and all other nodes should wait
+//until that node is done before running.
+//
+//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments.  The first is only run on parallel node #1.  The second is
+//run on all nodes, but *only* after the first function completes successfully.  Ginkgo also makes it possible to send data from the first function (on Node 1)
+//to the second function (on all the other nodes).
+//
+//The functions have the following signatures.  The first function (which only runs on node 1) has the signature:
+//
+//	func() []byte
+//
+//or, to run asynchronously:
+//
+//	func(done Done) []byte
+//
+//The byte array returned by the first function is then passed to the second function, which has the signature:
+//
+//	func(data []byte)
+//
+//or, to run asynchronously:
+//
+//	func(data []byte, done Done)
+//
+//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
+//
+//	var dbClient db.Client
+//	var dbRunner db.Runner
+//
+//	var _ = SynchronizedBeforeSuite(func() []byte {
+//		dbRunner = db.NewRunner()
+//		err := dbRunner.Start()
+//		Ω(err).ShouldNot(HaveOccurred())
+//		return []byte(dbRunner.URL)
+//	}, func(data []byte) {
+//		dbClient = db.NewClient()
+//		err := dbClient.Connect(string(data))
+//		Ω(err).ShouldNot(HaveOccurred())
+//	})
+func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
+	globalSuite.SetSynchronizedBeforeSuiteNode(
+		node1Body,
+		allNodesBody,
+		codelocation.New(1),
+		parseTimeout(timeout...),
+	)
+	return true
+}
+
+//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
+//external singleton resources shared across nodes when running tests in parallel.
+//
+//SynchronizedAfterSuite accomplishes this by taking *two* function arguments.  The first runs on all nodes.  The second runs only on parallel node #1
+//and *only* after all other nodes have finished and exited.  This ensures that node 1, and any resources it is running, remain alive until
+//all other nodes are finished.
+//
+//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
+//
+//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite.  Here, SynchronizedAfterSuite is used to tear down the shared database
+//only after all nodes have finished:
+//
+//	var _ = SynchronizedAfterSuite(func() {
+//		dbClient.Cleanup()
+//	}, func() {
+//		dbRunner.Stop()
+//	})
+func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
+	globalSuite.SetSynchronizedAfterSuiteNode(
+		allNodesBody,
+		node1Body,
+		codelocation.New(1),
+		parseTimeout(timeout...),
+	)
+	return true
+}
+
+//BeforeEach blocks are run before It blocks.  When multiple BeforeEach blocks are defined in nested
+//Describe and Context blocks the outermost BeforeEach blocks are run first.
+//
+//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func BeforeEach(body interface{}, timeout ...float64) bool {
+	globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks.  For more details,
+//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
+//
+//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func JustBeforeEach(body interface{}, timeout ...float64) bool {
+	globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//AfterEach blocks are run after It blocks.   When multiple AfterEach blocks are defined in nested
+//Describe and Context blocks the innermost AfterEach blocks are run first.
+//
+//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func AfterEach(body interface{}, timeout ...float64) bool {
+	globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+func parseTimeout(timeout ...float64) time.Duration {
+	if len(timeout) == 0 {
+		return time.Duration(defaultTimeout * int64(time.Second))
+	} else {
+		return time.Duration(timeout[0] * float64(time.Second))
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/integration/integration.go b/vendor/github.com/onsi/ginkgo/integration/integration.go
new file mode 100644
index 0000000000000000000000000000000000000000..76ab1b7282d8e05e72a611012e3dbf53ead7b701
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/integration/integration.go
@@ -0,0 +1 @@
+package integration
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
new file mode 100644
index 0000000000000000000000000000000000000000..fa2f0bf730c067440e46706baa99be0cfe52915c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
@@ -0,0 +1,32 @@
+package codelocation
+
+import (
+	"regexp"
+	"runtime"
+	"runtime/debug"
+	"strings"
+
+	"github.com/onsi/ginkgo/types"
+)
+
+func New(skip int) types.CodeLocation {
+	_, file, line, _ := runtime.Caller(skip + 1)
+	stackTrace := PruneStack(string(debug.Stack()), skip)
+	return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
+}
+
+func PruneStack(fullStackTrace string, skip int) string {
+	stack := strings.Split(fullStackTrace, "\n")
+	if len(stack) > 2*(skip+1) {
+		stack = stack[2*(skip+1):]
+	}
+	prunedStack := []string{}
+	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+	for i := 0; i < len(stack)/2; i++ {
+		if !re.Match([]byte(stack[i*2])) {
+			prunedStack = append(prunedStack, stack[i*2])
+			prunedStack = append(prunedStack, stack[i*2+1])
+		}
+	}
+	return strings.Join(prunedStack, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
new file mode 100644
index 0000000000000000000000000000000000000000..0737746dcfe0a04387b4f5bdd2c488ec2d36b502
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
@@ -0,0 +1,151 @@
+package containernode
+
+import (
+	"math/rand"
+	"sort"
+
+	"github.com/onsi/ginkgo/internal/leafnodes"
+	"github.com/onsi/ginkgo/types"
+)
+
+type subjectOrContainerNode struct {
+	containerNode *ContainerNode
+	subjectNode   leafnodes.SubjectNode
+}
+
+func (n subjectOrContainerNode) text() string {
+	if n.containerNode != nil {
+		return n.containerNode.Text()
+	} else {
+		return n.subjectNode.Text()
+	}
+}
+
+type CollatedNodes struct {
+	Containers []*ContainerNode
+	Subject    leafnodes.SubjectNode
+}
+
+type ContainerNode struct {
+	text         string
+	flag         types.FlagType
+	codeLocation types.CodeLocation
+
+	setupNodes               []leafnodes.BasicNode
+	subjectAndContainerNodes []subjectOrContainerNode
+}
+
+func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
+	return &ContainerNode{
+		text:         text,
+		flag:         flag,
+		codeLocation: codeLocation,
+	}
+}
+
+func (container *ContainerNode) Shuffle(r *rand.Rand) {
+	sort.Sort(container)
+	permutation := r.Perm(len(container.subjectAndContainerNodes))
+	shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
+	for i, j := range permutation {
+		shuffledNodes[i] = container.subjectAndContainerNodes[j]
+	}
+	container.subjectAndContainerNodes = shuffledNodes
+}
+
+func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
+	if node.flag == types.FlagTypePending {
+		return false
+	}
+
+	shouldUnfocus := false
+	for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
+		if subjectOrContainerNode.containerNode != nil {
+			shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
+		} else {
+			shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
+		}
+	}
+
+	if shouldUnfocus {
+		if node.flag == types.FlagTypeFocused {
+			node.flag = types.FlagTypeNone
+		}
+		return true
+	}
+
+	return node.flag == types.FlagTypeFocused
+}
+
+func (node *ContainerNode) Collate() []CollatedNodes {
+	return node.collate([]*ContainerNode{})
+}
+
+func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
+	collated := make([]CollatedNodes, 0)
+
+	containers := make([]*ContainerNode, len(enclosingContainers))
+	copy(containers, enclosingContainers)
+	containers = append(containers, node)
+
+	for _, subjectOrContainer := range node.subjectAndContainerNodes {
+		if subjectOrContainer.containerNode != nil {
+			collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
+		} else {
+			collated = append(collated, CollatedNodes{
+				Containers: containers,
+				Subject:    subjectOrContainer.subjectNode,
+			})
+		}
+	}
+
+	return collated
+}
+
+func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
+	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
+}
+
+func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
+	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
+}
+
+func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
+	node.setupNodes = append(node.setupNodes, setupNode)
+}
+
+func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
+	nodes := []leafnodes.BasicNode{}
+	for _, setupNode := range node.setupNodes {
+		if setupNode.Type() == nodeType {
+			nodes = append(nodes, setupNode)
+		}
+	}
+	return nodes
+}
+
+func (node *ContainerNode) Text() string {
+	return node.text
+}
+
+func (node *ContainerNode) CodeLocation() types.CodeLocation {
+	return node.codeLocation
+}
+
+func (node *ContainerNode) Flag() types.FlagType {
+	return node.flag
+}
+
+//sort.Interface
+
+func (node *ContainerNode) Len() int {
+	return len(node.subjectAndContainerNodes)
+}
+
+func (node *ContainerNode) Less(i, j int) bool {
+	return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
+}
+
+func (node *ContainerNode) Swap(i, j int) {
+	node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
new file mode 100644
index 0000000000000000000000000000000000000000..678ea2514a6020c1cc989acf8d9ad865ea606152
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
@@ -0,0 +1,92 @@
+package failer
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/onsi/ginkgo/types"
+)
+
+type Failer struct {
+	lock    *sync.Mutex
+	failure types.SpecFailure
+	state   types.SpecState
+}
+
+func New() *Failer {
+	return &Failer{
+		lock:  &sync.Mutex{},
+		state: types.SpecStatePassed,
+	}
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.state == types.SpecStatePassed {
+		f.state = types.SpecStatePanicked
+		f.failure = types.SpecFailure{
+			Message:        "Test Panicked",
+			Location:       location,
+			ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+		}
+	}
+}
+
+func (f *Failer) Timeout(location types.CodeLocation) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.state == types.SpecStatePassed {
+		f.state = types.SpecStateTimedOut
+		f.failure = types.SpecFailure{
+			Message:  "Timed out",
+			Location: location,
+		}
+	}
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.state == types.SpecStatePassed {
+		f.state = types.SpecStateFailed
+		f.failure = types.SpecFailure{
+			Message:  message,
+			Location: location,
+		}
+	}
+}
+
+func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	failure := f.failure
+	outcome := f.state
+	if outcome != types.SpecStatePassed {
+		failure.ComponentType = componentType
+		failure.ComponentIndex = componentIndex
+		failure.ComponentCodeLocation = componentCodeLocation
+	}
+
+	f.state = types.SpecStatePassed
+	f.failure = types.SpecFailure{}
+
+	return failure, outcome
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.state == types.SpecStatePassed {
+		f.state = types.SpecStateSkipped
+		f.failure = types.SpecFailure{
+			Message:  message,
+			Location: location,
+		}
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
new file mode 100644
index 0000000000000000000000000000000000000000..bc0dd1a627acfb2e9fa817da6ccc7cd0e4dd756e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
@@ -0,0 +1,95 @@
+package leafnodes
+
+import (
+	"math"
+	"time"
+
+	"sync"
+
+	"github.com/onsi/ginkgo/types"
+)
+
+type benchmarker struct {
+	mu           sync.Mutex
+	measurements map[string]*types.SpecMeasurement
+	orderCounter int
+}
+
+func newBenchmarker() *benchmarker {
+	return &benchmarker{
+		measurements: make(map[string]*types.SpecMeasurement, 0),
+	}
+}
+
+func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
+	t := time.Now()
+	body()
+	elapsedTime = time.Since(t)
+
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
+	measurement.Results = append(measurement.Results, elapsedTime.Seconds())
+
+	return
+}
+
+func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
+	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	measurement.Results = append(measurement.Results, value)
+}
+
+func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
+	measurement, ok := b.measurements[name]
+	if !ok {
+		var computedInfo interface{}
+		computedInfo = nil
+		if len(info) > 0 {
+			computedInfo = info[0]
+		}
+		measurement = &types.SpecMeasurement{
+			Name:          name,
+			Info:          computedInfo,
+			Order:         b.orderCounter,
+			SmallestLabel: smallestLabel,
+			LargestLabel:  largestLabel,
+			AverageLabel:  averageLabel,
+			Units:         units,
+			Results:       make([]float64, 0),
+		}
+		b.measurements[name] = measurement
+		b.orderCounter++
+	}
+
+	return measurement
+}
+
+func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	for _, measurement := range b.measurements {
+		measurement.Smallest = math.MaxFloat64
+		measurement.Largest = -math.MaxFloat64
+		sum := float64(0)
+		sumOfSquares := float64(0)
+
+		for _, result := range measurement.Results {
+			if result > measurement.Largest {
+				measurement.Largest = result
+			}
+			if result < measurement.Smallest {
+				measurement.Smallest = result
+			}
+			sum += result
+			sumOfSquares += result * result
+		}
+
+		n := float64(len(measurement.Results))
+		measurement.Average = sum / n
+		measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
+	}
+
+	return b.measurements
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c3902d601c4bb1f0e4b2ba20a7dcda8759ef2c7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
@@ -0,0 +1,19 @@
+package leafnodes
+
+import (
+	"github.com/onsi/ginkgo/types"
+)
+
+type BasicNode interface {
+	Type() types.SpecComponentType
+	Run() (types.SpecState, types.SpecFailure)
+	CodeLocation() types.CodeLocation
+}
+
+type SubjectNode interface {
+	BasicNode
+
+	Text() string
+	Flag() types.FlagType
+	Samples() int
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
new file mode 100644
index 0000000000000000000000000000000000000000..c76fe3a4512db90a8f6b0caf7d401461dc00fc49
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
@@ -0,0 +1,46 @@
+package leafnodes
+
+import (
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"time"
+)
+
+type ItNode struct {
+	runner *runner
+
+	flag types.FlagType
+	text string
+}
+
+func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
+	return &ItNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
+		flag:   flag,
+		text:   text,
+	}
+}
+
+func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+	return node.runner.run()
+}
+
+func (node *ItNode) Type() types.SpecComponentType {
+	return types.SpecComponentTypeIt
+}
+
+func (node *ItNode) Text() string {
+	return node.text
+}
+
+func (node *ItNode) Flag() types.FlagType {
+	return node.flag
+}
+
+func (node *ItNode) CodeLocation() types.CodeLocation {
+	return node.runner.codeLocation
+}
+
+func (node *ItNode) Samples() int {
+	return 1
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
new file mode 100644
index 0000000000000000000000000000000000000000..efc3348c1b6438bc20d76e3a6b15aec7f23e406b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
@@ -0,0 +1,61 @@
+package leafnodes
+
+import (
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"reflect"
+)
+
+type MeasureNode struct {
+	runner *runner
+
+	text        string
+	flag        types.FlagType
+	samples     int
+	benchmarker *benchmarker
+}
+
+func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
+	benchmarker := newBenchmarker()
+
+	wrappedBody := func() {
+		reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
+	}
+
+	return &MeasureNode{
+		runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
+
+		text:        text,
+		flag:        flag,
+		samples:     samples,
+		benchmarker: benchmarker,
+	}
+}
+
+func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+	return node.runner.run()
+}
+
+func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
+	return node.benchmarker.measurementsReport()
+}
+
+func (node *MeasureNode) Type() types.SpecComponentType {
+	return types.SpecComponentTypeMeasure
+}
+
+func (node *MeasureNode) Text() string {
+	return node.text
+}
+
+func (node *MeasureNode) Flag() types.FlagType {
+	return node.flag
+}
+
+func (node *MeasureNode) CodeLocation() types.CodeLocation {
+	return node.runner.codeLocation
+}
+
+func (node *MeasureNode) Samples() int {
+	return node.samples
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
new file mode 100644
index 0000000000000000000000000000000000000000..870ad826da0e394e9d751241bfed673fbb656f1e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
@@ -0,0 +1,113 @@
+package leafnodes
+
+import (
+	"fmt"
+	"github.com/onsi/ginkgo/internal/codelocation"
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"reflect"
+	"time"
+)
+
+type runner struct {
+	isAsync          bool
+	asyncFunc        func(chan<- interface{})
+	syncFunc         func()
+	codeLocation     types.CodeLocation
+	timeoutThreshold time.Duration
+	nodeType         types.SpecComponentType
+	componentIndex   int
+	failer           *failer.Failer
+}
+
+func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
+	bodyType := reflect.TypeOf(body)
+	if bodyType.Kind() != reflect.Func {
+		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
+	}
+
+	runner := &runner{
+		codeLocation:     codeLocation,
+		timeoutThreshold: timeout,
+		failer:           failer,
+		nodeType:         nodeType,
+		componentIndex:   componentIndex,
+	}
+
+	switch bodyType.NumIn() {
+	case 0:
+		runner.syncFunc = body.(func())
+		return runner
+	case 1:
+		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
+			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
+		}
+
+		wrappedBody := func(done chan<- interface{}) {
+			bodyValue := reflect.ValueOf(body)
+			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
+		}
+
+		runner.isAsync = true
+		runner.asyncFunc = wrappedBody
+		return runner
+	}
+
+	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
+}
+
+func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
+	if r.isAsync {
+		return r.runAsync()
+	} else {
+		return r.runSync()
+	}
+}
+
+func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
+	done := make(chan interface{}, 1)
+
+	go func() {
+		finished := false
+
+		defer func() {
+			if e := recover(); e != nil || !finished {
+				r.failer.Panic(codelocation.New(2), e)
+				select {
+				case <-done:
+					break
+				default:
+					close(done)
+				}
+			}
+		}()
+
+		r.asyncFunc(done)
+		finished = true
+	}()
+
+	select {
+	case <-done:
+	case <-time.After(r.timeoutThreshold):
+		r.failer.Timeout(r.codeLocation)
+	}
+
+	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+	return
+}
+func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
+	finished := false
+
+	defer func() {
+		if e := recover(); e != nil || !finished {
+			r.failer.Panic(codelocation.New(2), e)
+		}
+
+		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+	}()
+
+	r.syncFunc()
+	finished = true
+
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b725a631536ecf40684115fc5affc06ceceb8a3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
@@ -0,0 +1,41 @@
+package leafnodes
+
+import (
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"time"
+)
+
+type SetupNode struct {
+	runner *runner
+}
+
+func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+	return node.runner.run()
+}
+
+func (node *SetupNode) Type() types.SpecComponentType {
+	return node.runner.nodeType
+}
+
+func (node *SetupNode) CodeLocation() types.CodeLocation {
+	return node.runner.codeLocation
+}
+
+func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+	return &SetupNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
+	}
+}
+
+func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+	return &SetupNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
+	}
+}
+
+func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+	return &SetupNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
new file mode 100644
index 0000000000000000000000000000000000000000..2ccc7dc0fb06540a12cc92e28bcdeac3c15e7d92
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
@@ -0,0 +1,54 @@
+package leafnodes
+
+import (
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"time"
+)
+
+type SuiteNode interface {
+	Run(parallelNode int, parallelTotal int, syncHost string) bool
+	Passed() bool
+	Summary() *types.SetupSummary
+}
+
+type simpleSuiteNode struct {
+	runner  *runner
+	outcome types.SpecState
+	failure types.SpecFailure
+	runTime time.Duration
+}
+
+func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+	t := time.Now()
+	node.outcome, node.failure = node.runner.run()
+	node.runTime = time.Since(t)
+
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Passed() bool {
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Summary() *types.SetupSummary {
+	return &types.SetupSummary{
+		ComponentType: node.runner.nodeType,
+		CodeLocation:  node.runner.codeLocation,
+		State:         node.outcome,
+		RunTime:       node.runTime,
+		Failure:       node.failure,
+	}
+}
+
+func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+	return &simpleSuiteNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
+	}
+}
+
+func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+	return &simpleSuiteNode{
+		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7030d9149a8c27334f72c80555d93327c8cdef5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
@@ -0,0 +1,89 @@
+package leafnodes
+
+import (
+	"encoding/json"
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"io/ioutil"
+	"net/http"
+	"time"
+)
+
+type synchronizedAfterSuiteNode struct {
+	runnerA *runner
+	runnerB *runner
+
+	outcome types.SpecState
+	failure types.SpecFailure
+	runTime time.Duration
+}
+
+func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+	return &synchronizedAfterSuiteNode{
+		runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+		runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+	}
+}
+
+func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+	node.outcome, node.failure = node.runnerA.run()
+
+	if parallelNode == 1 {
+		if parallelTotal > 1 {
+			node.waitUntilOtherNodesAreDone(syncHost)
+		}
+
+		outcome, failure := node.runnerB.run()
+
+		if node.outcome == types.SpecStatePassed {
+			node.outcome, node.failure = outcome, failure
+		}
+	}
+
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Passed() bool {
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
+	return &types.SetupSummary{
+		ComponentType: node.runnerA.nodeType,
+		CodeLocation:  node.runnerA.codeLocation,
+		State:         node.outcome,
+		RunTime:       node.runTime,
+		Failure:       node.failure,
+	}
+}
+
+func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
+	for {
+		if node.canRun(syncHost) {
+			return
+		}
+
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
+	resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
+	if err != nil || resp.StatusCode != http.StatusOK {
+		return false
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return false
+	}
+	resp.Body.Close()
+
+	afterSuiteData := types.RemoteAfterSuiteData{}
+	err = json.Unmarshal(body, &afterSuiteData)
+	if err != nil {
+		return false
+	}
+
+	return afterSuiteData.CanRun
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
new file mode 100644
index 0000000000000000000000000000000000000000..76a9679813f9bfe86d04559bd928f33f3a6df88d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
@@ -0,0 +1,182 @@
+package leafnodes
+
+import (
+	"bytes"
+	"encoding/json"
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/types"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"time"
+)
+
+type synchronizedBeforeSuiteNode struct {
+	runnerA *runner
+	runnerB *runner
+
+	data []byte
+
+	outcome types.SpecState
+	failure types.SpecFailure
+	runTime time.Duration
+}
+
+func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+	node := &synchronizedBeforeSuiteNode{}
+
+	node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+	node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+
+	return node
+}
+
+func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+	t := time.Now()
+	defer func() {
+		node.runTime = time.Since(t)
+	}()
+
+	if parallelNode == 1 {
+		node.outcome, node.failure = node.runA(parallelTotal, syncHost)
+	} else {
+		node.outcome, node.failure = node.waitForA(syncHost)
+	}
+
+	if node.outcome != types.SpecStatePassed {
+		return false
+	}
+	node.outcome, node.failure = node.runnerB.run()
+
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
+	outcome, failure := node.runnerA.run()
+
+	if parallelTotal > 1 {
+		state := types.RemoteBeforeSuiteStatePassed
+		if outcome != types.SpecStatePassed {
+			state = types.RemoteBeforeSuiteStateFailed
+		}
+		json := (types.RemoteBeforeSuiteData{
+			Data:  node.data,
+			State: state,
+		}).ToJSON()
+		http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
+	}
+
+	return outcome, failure
+}
+
+func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
+	failure := func(message string) types.SpecFailure {
+		return types.SpecFailure{
+			Message:               message,
+			Location:              node.runnerA.codeLocation,
+			ComponentType:         node.runnerA.nodeType,
+			ComponentIndex:        node.runnerA.componentIndex,
+			ComponentCodeLocation: node.runnerA.codeLocation,
+		}
+	}
+	for {
+		resp, err := http.Get(syncHost + "/BeforeSuiteState")
+		if err != nil || resp.StatusCode != http.StatusOK {
+			return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
+		}
+
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
+		}
+		resp.Body.Close()
+
+		beforeSuiteData := types.RemoteBeforeSuiteData{}
+		err = json.Unmarshal(body, &beforeSuiteData)
+		if err != nil {
+			return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
+		}
+
+		switch beforeSuiteData.State {
+		case types.RemoteBeforeSuiteStatePassed:
+			node.data = beforeSuiteData.Data
+			return types.SpecStatePassed, types.SpecFailure{}
+		case types.RemoteBeforeSuiteStateFailed:
+			return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
+		case types.RemoteBeforeSuiteStateDisappeared:
+			return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
+		}
+
+		time.Sleep(50 * time.Millisecond)
+	}
+
+	return types.SpecStateFailed, failure("Shouldn't get here!")
+}
+
+func (node *synchronizedBeforeSuiteNode) Passed() bool {
+	return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
+	return &types.SetupSummary{
+		ComponentType: node.runnerA.nodeType,
+		CodeLocation:  node.runnerA.codeLocation,
+		State:         node.outcome,
+		RunTime:       node.runTime,
+		Failure:       node.failure,
+	}
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
+	typeA := reflect.TypeOf(bodyA)
+	if typeA.Kind() != reflect.Func {
+		panic("SynchronizedBeforeSuite expects a function as its first argument")
+	}
+
+	takesNothing := typeA.NumIn() == 0
+	takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
+	returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
+
+	if !((takesNothing || takesADoneChannel) && returnsBytes) {
+		panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
+	}
+
+	if takesADoneChannel {
+		return func(done chan<- interface{}) {
+			out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
+			node.data = out[0].Interface().([]byte)
+		}
+	}
+
+	return func() {
+		out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
+		node.data = out[0].Interface().([]byte)
+	}
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
+	typeB := reflect.TypeOf(bodyB)
+	if typeB.Kind() != reflect.Func {
+		panic("SynchronizedBeforeSuite expects a function as its second argument")
+	}
+
+	returnsNothing := typeB.NumOut() == 0
+	takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
+	takesBytesAndDone := typeB.NumIn() == 2 &&
+		typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
+		typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
+
+	if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
+		panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
+	}
+
+	if takesBytesAndDone {
+		return func(done chan<- interface{}) {
+			reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
+		}
+	}
+
+	return func() {
+		reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e34dbf64462cacede14d288eed52949012c1996
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -0,0 +1,250 @@
+/*
+
+Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
+coherently as tests complete.  You shouldn't need to use this in your code.  To run tests in parallel:
+
+	ginkgo -nodes=N
+
+where N is the number of nodes you desire.
+*/
+package remote
+
+import (
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/reporters/stenographer"
+	"github.com/onsi/ginkgo/types"
+)
+
+type configAndSuite struct {
+	config  config.GinkgoConfigType
+	summary *types.SuiteSummary
+}
+
+type Aggregator struct {
+	nodeCount    int
+	config       config.DefaultReporterConfigType
+	stenographer stenographer.Stenographer
+	result       chan bool
+
+	suiteBeginnings           chan configAndSuite
+	aggregatedSuiteBeginnings []configAndSuite
+
+	beforeSuites           chan *types.SetupSummary
+	aggregatedBeforeSuites []*types.SetupSummary
+
+	afterSuites           chan *types.SetupSummary
+	aggregatedAfterSuites []*types.SetupSummary
+
+	specCompletions chan *types.SpecSummary
+	completedSpecs  []*types.SpecSummary
+
+	suiteEndings           chan *types.SuiteSummary
+	aggregatedSuiteEndings []*types.SuiteSummary
+	specs                  []*types.SpecSummary
+
+	startTime time.Time
+}
+
+func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
+	aggregator := &Aggregator{
+		nodeCount:    nodeCount,
+		result:       result,
+		config:       config,
+		stenographer: stenographer,
+
+		suiteBeginnings: make(chan configAndSuite, 0),
+		beforeSuites:    make(chan *types.SetupSummary, 0),
+		afterSuites:     make(chan *types.SetupSummary, 0),
+		specCompletions: make(chan *types.SpecSummary, 0),
+		suiteEndings:    make(chan *types.SuiteSummary, 0),
+	}
+
+	go aggregator.mux()
+
+	return aggregator
+}
+
+func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+	aggregator.suiteBeginnings <- configAndSuite{config, summary}
+}
+
+func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+	aggregator.beforeSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+	aggregator.afterSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
+	//noop
+}
+
+func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
+	aggregator.specCompletions <- specSummary
+}
+
+func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	aggregator.suiteEndings <- summary
+}
+
+func (aggregator *Aggregator) mux() {
+loop:
+	for {
+		select {
+		case configAndSuite := <-aggregator.suiteBeginnings:
+			aggregator.registerSuiteBeginning(configAndSuite)
+		case setupSummary := <-aggregator.beforeSuites:
+			aggregator.registerBeforeSuite(setupSummary)
+		case setupSummary := <-aggregator.afterSuites:
+			aggregator.registerAfterSuite(setupSummary)
+		case specSummary := <-aggregator.specCompletions:
+			aggregator.registerSpecCompletion(specSummary)
+		case suite := <-aggregator.suiteEndings:
+			finished, passed := aggregator.registerSuiteEnding(suite)
+			if finished {
+				aggregator.result <- passed
+				break loop
+			}
+		}
+	}
+}
+
+func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
+	aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
+
+	if len(aggregator.aggregatedSuiteBeginnings) == 1 {
+		aggregator.startTime = time.Now()
+	}
+
+	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+		return
+	}
+
+	aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
+
+	numberOfSpecsToRun := 0
+	totalNumberOfSpecs := 0
+	for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
+		numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
+		totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
+	}
+
+	aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
+	aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
+	aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
+	aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
+	aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
+	aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
+	aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
+	aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
+	aggregator.specs = append(aggregator.specs, specSummary)
+	aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) flushCompletedSpecs() {
+	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+		return
+	}
+
+	for _, setupSummary := range aggregator.aggregatedBeforeSuites {
+		aggregator.announceBeforeSuite(setupSummary)
+	}
+
+	for _, specSummary := range aggregator.completedSpecs {
+		aggregator.announceSpec(specSummary)
+	}
+
+	for _, setupSummary := range aggregator.aggregatedAfterSuites {
+		aggregator.announceAfterSuite(setupSummary)
+	}
+
+	aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
+	aggregator.completedSpecs = []*types.SpecSummary{}
+	aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
+}
+
+func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
+	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+	if setupSummary.State != types.SpecStatePassed {
+		aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	}
+}
+
+func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
+	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+	if setupSummary.State != types.SpecStatePassed {
+		aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	}
+}
+
+func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
+	if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+		aggregator.stenographer.AnnounceSpecWillRun(specSummary)
+	}
+
+	aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
+
+	switch specSummary.State {
+	case types.SpecStatePassed:
+		if specSummary.IsMeasurement {
+			aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+		} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
+			aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+		} else {
+			aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+		}
+
+	case types.SpecStatePending:
+		aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
+	case types.SpecStateSkipped:
+		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	case types.SpecStateTimedOut:
+		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	case types.SpecStatePanicked:
+		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	case types.SpecStateFailed:
+		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	}
+}
+
+func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
+	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
+	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
+		return false, false
+	}
+
+	aggregatedSuiteSummary := &types.SuiteSummary{}
+	aggregatedSuiteSummary.SuiteSucceeded = true
+
+	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
+		if suiteSummary.SuiteSucceeded == false {
+			aggregatedSuiteSummary.SuiteSucceeded = false
+		}
+
+		aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
+		aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
+		aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
+		aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
+		aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
+		aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
+	}
+
+	aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
+
+	aggregator.stenographer.SummarizeFailures(aggregator.specs)
+	aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
+
+	return true, aggregatedSuiteSummary.SuiteSucceeded
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..025eb5064483b24f40caace9c04ae9daf39d43c9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
@@ -0,0 +1,90 @@
+package remote
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"net/http"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+)
+
+//An interface to net/http's client to allow the injection of fakes under test
+type Poster interface {
+	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
+}
+
+/*
+The ForwardingReporter is a Ginkgo reporter that forwards information to
+a Ginkgo remote server.
+
+When streaming parallel test output, this reporter is automatically installed by Ginkgo.
+
+This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
+detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
+in place of Ginkgo's DefaultReporter.
+*/
+
+type ForwardingReporter struct {
+	serverHost        string
+	poster            Poster
+	outputInterceptor OutputInterceptor
+}
+
+func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
+	return &ForwardingReporter{
+		serverHost:        serverHost,
+		poster:            poster,
+		outputInterceptor: outputInterceptor,
+	}
+}
+
+func (reporter *ForwardingReporter) post(path string, data interface{}) {
+	encoded, _ := json.Marshal(data)
+	buffer := bytes.NewBuffer(encoded)
+	reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
+	data := struct {
+		Config  config.GinkgoConfigType `json:"config"`
+		Summary *types.SuiteSummary     `json:"suite-summary"`
+	}{
+		conf,
+		summary,
+	}
+
+	reporter.outputInterceptor.StartInterceptingOutput()
+	reporter.post("/SpecSuiteWillBegin", data)
+}
+
+func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+	reporter.outputInterceptor.StartInterceptingOutput()
+	setupSummary.CapturedOutput = output
+	reporter.post("/BeforeSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
+	reporter.post("/SpecWillRun", specSummary)
+}
+
+func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+	reporter.outputInterceptor.StartInterceptingOutput()
+	specSummary.CapturedOutput = output
+	reporter.post("/SpecDidComplete", specSummary)
+}
+
+func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+	reporter.outputInterceptor.StartInterceptingOutput()
+	setupSummary.CapturedOutput = output
+	reporter.post("/AfterSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+	reporter.post("/SpecSuiteDidEnd", summary)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
new file mode 100644
index 0000000000000000000000000000000000000000..093f4513c0be31d4c5caed58585bd4ece4789984
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
@@ -0,0 +1,10 @@
+package remote
+
+/*
+The OutputInterceptor is used by the ForwardingReporter to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+	StartInterceptingOutput() error
+	StopInterceptingAndReturnOutput() (string, error)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..1235ad00cbbe29ddd2fb7f65aa315e3b338352da
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
@@ -0,0 +1,55 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux
+
+package remote
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+	redirectFile *os.File
+	intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+	if interceptor.intercepting {
+		return errors.New("Already intercepting output!")
+	}
+	interceptor.intercepting = true
+
+	var err error
+
+	interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
+	if err != nil {
+		return err
+	}
+
+	// Call a function in ./syscall_dup_*.go
+	// If building for everything other than linux_arm64,
+	// use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
+	// call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
+	syscallDup(int(interceptor.redirectFile.Fd()), 1)
+	syscallDup(int(interceptor.redirectFile.Fd()), 2)
+
+	return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+	if !interceptor.intercepting {
+		return "", errors.New("Not intercepting output!")
+	}
+
+	interceptor.redirectFile.Close()
+	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
+	os.Remove(interceptor.redirectFile.Name())
+
+	interceptor.intercepting = false
+
+	return string(output), err
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
new file mode 100644
index 0000000000000000000000000000000000000000..c8f97d97f07b7c7751e0f1197d3d505c72513120
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package remote
+
+import (
+	"errors"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+	intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+	if interceptor.intercepting {
+		return errors.New("Already intercepting output!")
+	}
+	interceptor.intercepting = true
+
+	// not working on windows...
+
+	return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+	// not working on windows...
+	interceptor.intercepting = false
+
+	return "", nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..b55c681bcff30f593a8e2c31a85a62e69f987b61
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
@@ -0,0 +1,204 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package remote
+
+import (
+	"encoding/json"
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/types"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"sync"
+)
+
+/*
+Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type Server struct {
+	listener        net.Listener
+	reporters       []reporters.Reporter
+	alives          []func() bool
+	lock            *sync.Mutex
+	beforeSuiteData types.RemoteBeforeSuiteData
+	parallelTotal   int
+}
+
+//Create a new server, automatically selecting a port
+func NewServer(parallelTotal int) (*Server, error) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, err
+	}
+	return &Server{
+		listener:        listener,
+		lock:            &sync.Mutex{},
+		alives:          make([]func() bool, parallelTotal),
+		beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
+		parallelTotal:   parallelTotal,
+	}, nil
+}
+
+//Start the server.  You don't need to `go s.Start()`, just `s.Start()`
+func (server *Server) Start() {
+	httpServer := &http.Server{}
+	mux := http.NewServeMux()
+	httpServer.Handler = mux
+
+	//streaming endpoints
+	mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
+	mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
+	mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
+	mux.HandleFunc("/SpecWillRun", server.specWillRun)
+	mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
+	mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
+
+	//synchronization endpoints
+	mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
+	mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+
+	go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *Server) Close() {
+	server.listener.Close()
+}
+
+//The address the server can be reached at.  Pass this into the `ForwardingReporter`.
+func (server *Server) Address() string {
+	return "http://" + server.listener.Addr().String()
+}
+
+//
+// Streaming Endpoints
+//
+
+//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *Server) readAll(request *http.Request) []byte {
+	defer request.Body.Close()
+	body, _ := ioutil.ReadAll(request.Body)
+	return body
+}
+
+func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
+	server.reporters = reporters
+}
+
+func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+
+	var data struct {
+		Config  config.GinkgoConfigType `json:"config"`
+		Summary *types.SuiteSummary     `json:"suite-summary"`
+	}
+
+	json.Unmarshal(body, &data)
+
+	for _, reporter := range server.reporters {
+		reporter.SpecSuiteWillBegin(data.Config, data.Summary)
+	}
+}
+
+func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+	var setupSummary *types.SetupSummary
+	json.Unmarshal(body, &setupSummary)
+
+	for _, reporter := range server.reporters {
+		reporter.BeforeSuiteDidRun(setupSummary)
+	}
+}
+
+func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+	var setupSummary *types.SetupSummary
+	json.Unmarshal(body, &setupSummary)
+
+	for _, reporter := range server.reporters {
+		reporter.AfterSuiteDidRun(setupSummary)
+	}
+}
+
+func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+	var specSummary *types.SpecSummary
+	json.Unmarshal(body, &specSummary)
+
+	for _, reporter := range server.reporters {
+		reporter.SpecWillRun(specSummary)
+	}
+}
+
+func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+	var specSummary *types.SpecSummary
+	json.Unmarshal(body, &specSummary)
+
+	for _, reporter := range server.reporters {
+		reporter.SpecDidComplete(specSummary)
+	}
+}
+
+func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+	body := server.readAll(request)
+	var suiteSummary *types.SuiteSummary
+	json.Unmarshal(body, &suiteSummary)
+
+	for _, reporter := range server.reporters {
+		reporter.SpecSuiteDidEnd(suiteSummary)
+	}
+}
+
+//
+// Synchronization Endpoints
+//
+
+func (server *Server) RegisterAlive(node int, alive func() bool) {
+	server.lock.Lock()
+	defer server.lock.Unlock()
+	server.alives[node-1] = alive
+}
+
+func (server *Server) nodeIsAlive(node int) bool {
+	server.lock.Lock()
+	defer server.lock.Unlock()
+	alive := server.alives[node-1]
+	if alive == nil {
+		return true
+	}
+	return alive()
+}
+
+func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+	if request.Method == "POST" {
+		dec := json.NewDecoder(request.Body)
+		dec.Decode(&(server.beforeSuiteData))
+	} else {
+		beforeSuiteData := server.beforeSuiteData
+		if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
+			beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
+		}
+		enc := json.NewEncoder(writer)
+		enc.Encode(beforeSuiteData)
+	}
+}
+
+func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
+	afterSuiteData := types.RemoteAfterSuiteData{
+		CanRun: true,
+	}
+	for i := 2; i <= server.parallelTotal; i++ {
+		afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
+	}
+
+	enc := json.NewEncoder(writer)
+	enc.Encode(afterSuiteData)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
new file mode 100644
index 0000000000000000000000000000000000000000..9550d37b36be39437bc9c4bfe5446663006d283c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
@@ -0,0 +1,11 @@
+// +build linux,arm64
+
+package remote
+
+import "syscall"
+
+// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
+// use the nearly identical syscall.Dup3 instead
+func syscallDup(oldfd int, newfd int) (err error) {
+	return syscall.Dup3(oldfd, newfd, 0)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7fad5bbb036507b42863c632edca2403ce78938
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
@@ -0,0 +1,10 @@
+// +build !linux !arm64
+// +build !windows
+
+package remote
+
+import "syscall"
+
+func syscallDup(oldfd int, newfd int) (err error) {
+	return syscall.Dup2(oldfd, newfd)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a67fc7b7406738fdc85114bb8edb14c02bee4f1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go
@@ -0,0 +1,55 @@
+package spec
+
+func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
+	if length == 0 {
+		return 0, 0
+	}
+
+	// We have more nodes than tests. Trivial case.
+	if parallelTotal >= length {
+		if parallelNode > length {
+			return 0, 0
+		} else {
+			return parallelNode - 1, 1
+		}
+	}
+
+	// This is the minimum number of tests that a node will be required to run
+	minTestsPerNode := length / parallelTotal
+
+	// This is the maximum amount of tests that a node will be required to run
+	// The algorithm guarantees that this would be equal to at least the minimum amount
+	// and at most one more
+	maxTestsPerNode := minTestsPerNode
+	if length%parallelTotal != 0 {
+		maxTestsPerNode++
+	}
+
+	// Number of nodes that will have to run the maximum amount of tests per node
+	numMaxLoadNodes := length % parallelTotal
+
+	// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
+	var numPrecedingMaxLoadNodes int
+	if parallelNode > numMaxLoadNodes {
+		numPrecedingMaxLoadNodes = numMaxLoadNodes
+	} else {
+		numPrecedingMaxLoadNodes = parallelNode - 1
+	}
+
+	// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
+	var numPrecedingMinLoadNodes int
+	if parallelNode <= numMaxLoadNodes {
+		numPrecedingMinLoadNodes = 0
+	} else {
+		numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
+	}
+
+	// Evaluate the test start index and number of tests to run
+	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
+	if parallelNode > numMaxLoadNodes {
+		count = minTestsPerNode
+	} else {
+		count = maxTestsPerNode
+	}
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef788b76a489a6d421a9857013d7a6a6f81a5c75
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
@@ -0,0 +1,197 @@
+package spec
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/onsi/ginkgo/internal/containernode"
+	"github.com/onsi/ginkgo/internal/leafnodes"
+	"github.com/onsi/ginkgo/types"
+)
+
+type Spec struct {
+	subject          leafnodes.SubjectNode
+	focused          bool
+	announceProgress bool
+
+	containers []*containernode.ContainerNode
+
+	state   types.SpecState
+	runTime time.Duration
+	failure types.SpecFailure
+}
+
+func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
+	spec := &Spec{
+		subject:          subject,
+		containers:       containers,
+		focused:          subject.Flag() == types.FlagTypeFocused,
+		announceProgress: announceProgress,
+	}
+
+	spec.processFlag(subject.Flag())
+	for i := len(containers) - 1; i >= 0; i-- {
+		spec.processFlag(containers[i].Flag())
+	}
+
+	return spec
+}
+
+func (spec *Spec) processFlag(flag types.FlagType) {
+	if flag == types.FlagTypeFocused {
+		spec.focused = true
+	} else if flag == types.FlagTypePending {
+		spec.state = types.SpecStatePending
+	}
+}
+
+func (spec *Spec) Skip() {
+	spec.state = types.SpecStateSkipped
+}
+
+func (spec *Spec) Failed() bool {
+	return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
+}
+
+func (spec *Spec) Passed() bool {
+	return spec.state == types.SpecStatePassed
+}
+
+func (spec *Spec) Pending() bool {
+	return spec.state == types.SpecStatePending
+}
+
+func (spec *Spec) Skipped() bool {
+	return spec.state == types.SpecStateSkipped
+}
+
+func (spec *Spec) Focused() bool {
+	return spec.focused
+}
+
+func (spec *Spec) IsMeasurement() bool {
+	return spec.subject.Type() == types.SpecComponentTypeMeasure
+}
+
+func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
+	componentTexts := make([]string, len(spec.containers)+1)
+	componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
+
+	for i, container := range spec.containers {
+		componentTexts[i] = container.Text()
+		componentCodeLocations[i] = container.CodeLocation()
+	}
+
+	componentTexts[len(spec.containers)] = spec.subject.Text()
+	componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
+
+	return &types.SpecSummary{
+		IsMeasurement:          spec.IsMeasurement(),
+		NumberOfSamples:        spec.subject.Samples(),
+		ComponentTexts:         componentTexts,
+		ComponentCodeLocations: componentCodeLocations,
+		State:        spec.state,
+		RunTime:      spec.runTime,
+		Failure:      spec.failure,
+		Measurements: spec.measurementsReport(),
+		SuiteID:      suiteID,
+	}
+}
+
+func (spec *Spec) ConcatenatedString() string {
+	s := ""
+	for _, container := range spec.containers {
+		s += container.Text() + " "
+	}
+
+	return s + spec.subject.Text()
+}
+
+func (spec *Spec) Run(writer io.Writer) {
+	startTime := time.Now()
+	defer func() {
+		spec.runTime = time.Since(startTime)
+	}()
+
+	for sample := 0; sample < spec.subject.Samples(); sample++ {
+		spec.runSample(sample, writer)
+
+		if spec.state != types.SpecStatePassed {
+			return
+		}
+	}
+}
+
+func (spec *Spec) runSample(sample int, writer io.Writer) {
+	spec.state = types.SpecStatePassed
+	spec.failure = types.SpecFailure{}
+	innerMostContainerIndexToUnwind := -1
+
+	defer func() {
+		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
+			container := spec.containers[i]
+			for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
+				spec.announceSetupNode(writer, "AfterEach", container, afterEach)
+				afterEachState, afterEachFailure := afterEach.Run()
+				if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
+					spec.state = afterEachState
+					spec.failure = afterEachFailure
+				}
+			}
+		}
+	}()
+
+	for i, container := range spec.containers {
+		innerMostContainerIndexToUnwind = i
+		for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
+			spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
+			spec.state, spec.failure = beforeEach.Run()
+			if spec.state != types.SpecStatePassed {
+				return
+			}
+		}
+	}
+
+	for _, container := range spec.containers {
+		for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
+			spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
+			spec.state, spec.failure = justBeforeEach.Run()
+			if spec.state != types.SpecStatePassed {
+				return
+			}
+		}
+	}
+
+	spec.announceSubject(writer, spec.subject)
+	spec.state, spec.failure = spec.subject.Run()
+}
+
+func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
+	if spec.announceProgress {
+		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
+		writer.Write([]byte(s))
+	}
+}
+
+func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
+	if spec.announceProgress {
+		nodeType := ""
+		switch subject.Type() {
+		case types.SpecComponentTypeIt:
+			nodeType = "It"
+		case types.SpecComponentTypeMeasure:
+			nodeType = "Measure"
+		}
+		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
+		writer.Write([]byte(s))
+	}
+}
+
+func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
+	if !spec.IsMeasurement() || spec.Failed() {
+		return map[string]*types.SpecMeasurement{}
+	}
+
+	return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c671e39f85c6b638289027da8a276ab2a07c1bd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
@@ -0,0 +1,122 @@
+package spec
+
+import (
+	"math/rand"
+	"regexp"
+	"sort"
+)
+
+type Specs struct {
+	specs                 []*Spec
+	numberOfOriginalSpecs int
+	hasProgrammaticFocus  bool
+}
+
+func NewSpecs(specs []*Spec) *Specs {
+	return &Specs{
+		specs: specs,
+		numberOfOriginalSpecs: len(specs),
+	}
+}
+
+func (e *Specs) Specs() []*Spec {
+	return e.specs
+}
+
+func (e *Specs) NumberOfOriginalSpecs() int {
+	return e.numberOfOriginalSpecs
+}
+
+func (e *Specs) HasProgrammaticFocus() bool {
+	return e.hasProgrammaticFocus
+}
+
+func (e *Specs) Shuffle(r *rand.Rand) {
+	sort.Sort(e)
+	permutation := r.Perm(len(e.specs))
+	shuffledSpecs := make([]*Spec, len(e.specs))
+	for i, j := range permutation {
+		shuffledSpecs[i] = e.specs[j]
+	}
+	e.specs = shuffledSpecs
+}
+
+func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
+	if focusString == "" && skipString == "" {
+		e.applyProgrammaticFocus()
+	} else {
+		e.applyRegExpFocus(description, focusString, skipString)
+	}
+}
+
+func (e *Specs) applyProgrammaticFocus() {
+	e.hasProgrammaticFocus = false
+	for _, spec := range e.specs {
+		if spec.Focused() && !spec.Pending() {
+			e.hasProgrammaticFocus = true
+			break
+		}
+	}
+
+	if e.hasProgrammaticFocus {
+		for _, spec := range e.specs {
+			if !spec.Focused() {
+				spec.Skip()
+			}
+		}
+	}
+}
+
+func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
+	for _, spec := range e.specs {
+		matchesFocus := true
+		matchesSkip := false
+
+		toMatch := []byte(description + " " + spec.ConcatenatedString())
+
+		if focusString != "" {
+			focusFilter := regexp.MustCompile(focusString)
+			matchesFocus = focusFilter.Match([]byte(toMatch))
+		}
+
+		if skipString != "" {
+			skipFilter := regexp.MustCompile(skipString)
+			matchesSkip = skipFilter.Match([]byte(toMatch))
+		}
+
+		if !matchesFocus || matchesSkip {
+			spec.Skip()
+		}
+	}
+}
+
+func (e *Specs) SkipMeasurements() {
+	for _, spec := range e.specs {
+		if spec.IsMeasurement() {
+			spec.Skip()
+		}
+	}
+}
+
+func (e *Specs) TrimForParallelization(total int, node int) {
+	startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
+	if count == 0 {
+		e.specs = make([]*Spec, 0)
+	} else {
+		e.specs = e.specs[startIndex : startIndex+count]
+	}
+}
+
+//sort.Interface
+
+func (e *Specs) Len() int {
+	return len(e.specs)
+}
+
+func (e *Specs) Less(i, j int) bool {
+	return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
+}
+
+func (e *Specs) Swap(i, j int) {
+	e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0b8b62d52563d3f4994029ac06b25b4c2f7dd02
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
@@ -0,0 +1,15 @@
+package specrunner
+
+import (
+	"crypto/rand"
+	"fmt"
+)
+
+func randomID() string {
+	b := make([]byte, 8)
+	_, err := rand.Read(b)
+	if err != nil {
+		return ""
+	}
+	return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ca7740ba9f449fbf75997cbb82bc38f0ed92064
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
@@ -0,0 +1,324 @@
+package specrunner
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/internal/leafnodes"
+	"github.com/onsi/ginkgo/internal/spec"
+	Writer "github.com/onsi/ginkgo/internal/writer"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/types"
+
+	"time"
+)
+
+type SpecRunner struct {
+	description     string
+	beforeSuiteNode leafnodes.SuiteNode
+	specs           *spec.Specs
+	afterSuiteNode  leafnodes.SuiteNode
+	reporters       []reporters.Reporter
+	startTime       time.Time
+	suiteID         string
+	runningSpec     *spec.Spec
+	writer          Writer.WriterInterface
+	config          config.GinkgoConfigType
+	interrupted     bool
+	lock            *sync.Mutex
+}
+
+func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
+	return &SpecRunner{
+		description:     description,
+		beforeSuiteNode: beforeSuiteNode,
+		specs:           specs,
+		afterSuiteNode:  afterSuiteNode,
+		reporters:       reporters,
+		writer:          writer,
+		config:          config,
+		suiteID:         randomID(),
+		lock:            &sync.Mutex{},
+	}
+}
+
+func (runner *SpecRunner) Run() bool {
+	if runner.config.DryRun {
+		runner.performDryRun()
+		return true
+	}
+
+	runner.reportSuiteWillBegin()
+	go runner.registerForInterrupts()
+
+	suitePassed := runner.runBeforeSuite()
+
+	if suitePassed {
+		suitePassed = runner.runSpecs()
+	}
+
+	runner.blockForeverIfInterrupted()
+
+	suitePassed = runner.runAfterSuite() && suitePassed
+
+	runner.reportSuiteDidEnd(suitePassed)
+
+	return suitePassed
+}
+
+func (runner *SpecRunner) performDryRun() {
+	runner.reportSuiteWillBegin()
+
+	if runner.beforeSuiteNode != nil {
+		summary := runner.beforeSuiteNode.Summary()
+		summary.State = types.SpecStatePassed
+		runner.reportBeforeSuite(summary)
+	}
+
+	for _, spec := range runner.specs.Specs() {
+		summary := spec.Summary(runner.suiteID)
+		runner.reportSpecWillRun(summary)
+		if summary.State == types.SpecStateInvalid {
+			summary.State = types.SpecStatePassed
+		}
+		runner.reportSpecDidComplete(summary, false)
+	}
+
+	if runner.afterSuiteNode != nil {
+		summary := runner.afterSuiteNode.Summary()
+		summary.State = types.SpecStatePassed
+		runner.reportAfterSuite(summary)
+	}
+
+	runner.reportSuiteDidEnd(true)
+}
+
+func (runner *SpecRunner) runBeforeSuite() bool {
+	if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
+		return true
+	}
+
+	runner.writer.Truncate()
+	conf := runner.config
+	passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+	if !passed {
+		runner.writer.DumpOut()
+	}
+	runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
+	return passed
+}
+
+func (runner *SpecRunner) runAfterSuite() bool {
+	if runner.afterSuiteNode == nil {
+		return true
+	}
+
+	runner.writer.Truncate()
+	conf := runner.config
+	passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+	if !passed {
+		runner.writer.DumpOut()
+	}
+	runner.reportAfterSuite(runner.afterSuiteNode.Summary())
+	return passed
+}
+
+func (runner *SpecRunner) runSpecs() bool {
+	suiteFailed := false
+	skipRemainingSpecs := false
+	for _, spec := range runner.specs.Specs() {
+		if runner.wasInterrupted() {
+			return suiteFailed
+		}
+		if skipRemainingSpecs {
+			spec.Skip()
+		}
+		runner.reportSpecWillRun(spec.Summary(runner.suiteID))
+
+		if !spec.Skipped() && !spec.Pending() {
+			runner.runningSpec = spec
+			spec.Run(runner.writer)
+			runner.runningSpec = nil
+			if spec.Failed() {
+				suiteFailed = true
+			}
+		} else if spec.Pending() && runner.config.FailOnPending {
+			suiteFailed = true
+		}
+
+		runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
+
+		if spec.Failed() && runner.config.FailFast {
+			skipRemainingSpecs = true
+		}
+	}
+
+	return !suiteFailed
+}
+
+func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
+	if runner.runningSpec == nil {
+		return nil, false
+	}
+
+	return runner.runningSpec.Summary(runner.suiteID), true
+}
+
+func (runner *SpecRunner) registerForInterrupts() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+	<-c
+	signal.Stop(c)
+	runner.markInterrupted()
+	go runner.registerForHardInterrupts()
+	runner.writer.DumpOutWithHeader(`
+Received interrupt.  Emitting contents of GinkgoWriter...
+---------------------------------------------------------
+`)
+	if runner.afterSuiteNode != nil {
+		fmt.Fprint(os.Stderr, `
+---------------------------------------------------------
+Received interrupt.  Running AfterSuite...
+^C again to terminate immediately
+`)
+		runner.runAfterSuite()
+	}
+	runner.reportSuiteDidEnd(false)
+	os.Exit(1)
+}
+
+func (runner *SpecRunner) registerForHardInterrupts() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+	<-c
+	fmt.Fprintln(os.Stderr, "\nReceived second interrupt.  Shutting down.")
+	os.Exit(1)
+}
+
+func (runner *SpecRunner) blockForeverIfInterrupted() {
+	runner.lock.Lock()
+	interrupted := runner.interrupted
+	runner.lock.Unlock()
+
+	if interrupted {
+		select {}
+	}
+}
+
+func (runner *SpecRunner) markInterrupted() {
+	runner.lock.Lock()
+	defer runner.lock.Unlock()
+	runner.interrupted = true
+}
+
+func (runner *SpecRunner) wasInterrupted() bool {
+	runner.lock.Lock()
+	defer runner.lock.Unlock()
+	return runner.interrupted
+}
+
+func (runner *SpecRunner) reportSuiteWillBegin() {
+	runner.startTime = time.Now()
+	summary := runner.summary(true)
+	for _, reporter := range runner.reporters {
+		reporter.SpecSuiteWillBegin(runner.config, summary)
+	}
+}
+
+func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
+	for _, reporter := range runner.reporters {
+		reporter.BeforeSuiteDidRun(summary)
+	}
+}
+
+func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
+	for _, reporter := range runner.reporters {
+		reporter.AfterSuiteDidRun(summary)
+	}
+}
+
+func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
+	runner.writer.Truncate()
+
+	for _, reporter := range runner.reporters {
+		reporter.SpecWillRun(summary)
+	}
+}
+
+func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
+	for i := len(runner.reporters) - 1; i >= 1; i-- {
+		runner.reporters[i].SpecDidComplete(summary)
+	}
+
+	if failed {
+		runner.writer.DumpOut()
+	}
+
+	runner.reporters[0].SpecDidComplete(summary)
+}
+
+func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
+	summary := runner.summary(success)
+	summary.RunTime = time.Since(runner.startTime)
+	for _, reporter := range runner.reporters {
+		reporter.SpecSuiteDidEnd(summary)
+	}
+}
+
+func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
+	count = 0
+
+	for _, spec := range runner.specs.Specs() {
+		if filter(spec) {
+			count++
+		}
+	}
+
+	return count
+}
+
+func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
+	numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+		return !ex.Skipped() && !ex.Pending()
+	})
+
+	numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+		return ex.Pending()
+	})
+
+	numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+		return ex.Skipped()
+	})
+
+	numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+		return ex.Passed()
+	})
+
+	numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+		return ex.Failed()
+	})
+
+	if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
+		numberOfFailedSpecs = numberOfSpecsThatWillBeRun
+	}
+
+	return &types.SuiteSummary{
+		SuiteDescription: runner.description,
+		SuiteSucceeded:   success,
+		SuiteID:          runner.suiteID,
+
+		NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
+		NumberOfTotalSpecs:                 len(runner.specs.Specs()),
+		NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
+		NumberOfPendingSpecs:               numberOfPendingSpecs,
+		NumberOfSkippedSpecs:               numberOfSkippedSpecs,
+		NumberOfPassedSpecs:                numberOfPassedSpecs,
+		NumberOfFailedSpecs:                numberOfFailedSpecs,
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
new file mode 100644
index 0000000000000000000000000000000000000000..a054602f78b33e4566d3411e0a75a498e14f11b5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
@@ -0,0 +1,171 @@
+package suite
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/internal/containernode"
+	"github.com/onsi/ginkgo/internal/failer"
+	"github.com/onsi/ginkgo/internal/leafnodes"
+	"github.com/onsi/ginkgo/internal/spec"
+	"github.com/onsi/ginkgo/internal/specrunner"
+	"github.com/onsi/ginkgo/internal/writer"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/types"
+)
+
+type ginkgoTestingT interface {
+	Fail()
+}
+
+type Suite struct {
+	topLevelContainer *containernode.ContainerNode
+	currentContainer  *containernode.ContainerNode
+	containerIndex    int
+	beforeSuiteNode   leafnodes.SuiteNode
+	afterSuiteNode    leafnodes.SuiteNode
+	runner            *specrunner.SpecRunner
+	failer            *failer.Failer
+	running           bool
+}
+
+func New(failer *failer.Failer) *Suite {
+	topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
+
+	return &Suite{
+		topLevelContainer: topLevelContainer,
+		currentContainer:  topLevelContainer,
+		failer:            failer,
+		containerIndex:    1,
+	}
+}
+
+func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
+	if config.ParallelTotal < 1 {
+		panic("ginkgo.parallel.total must be >= 1")
+	}
+
+	if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
+		panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
+	}
+
+	r := rand.New(rand.NewSource(config.RandomSeed))
+	suite.topLevelContainer.Shuffle(r)
+	specs := suite.generateSpecs(description, config)
+	suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
+
+	suite.running = true
+	success := suite.runner.Run()
+	if !success {
+		t.Fail()
+	}
+	return success, specs.HasProgrammaticFocus()
+}
+
+func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
+	specsSlice := []*spec.Spec{}
+	suite.topLevelContainer.BackPropagateProgrammaticFocus()
+	for _, collatedNodes := range suite.topLevelContainer.Collate() {
+		specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
+	}
+
+	specs := spec.NewSpecs(specsSlice)
+
+	if config.RandomizeAllSpecs {
+		specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
+	}
+
+	specs.ApplyFocus(description, config.FocusString, config.SkipString)
+
+	if config.SkipMeasurements {
+		specs.SkipMeasurements()
+	}
+
+	if config.ParallelTotal > 1 {
+		specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
+	}
+
+	return specs
+}
+
+func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
+	return suite.runner.CurrentSpecSummary()
+}
+
+func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.beforeSuiteNode != nil {
+		panic("You may only call BeforeSuite once!")
+	}
+	suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.afterSuiteNode != nil {
+		panic("You may only call AfterSuite once!")
+	}
+	suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.beforeSuiteNode != nil {
+		panic("You may only call BeforeSuite once!")
+	}
+	suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.afterSuiteNode != nil {
+		panic("You may only call AfterSuite once!")
+	}
+	suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
+	container := containernode.New(text, flag, codeLocation)
+	suite.currentContainer.PushContainerNode(container)
+
+	previousContainer := suite.currentContainer
+	suite.currentContainer = container
+	suite.containerIndex++
+
+	body()
+
+	suite.containerIndex--
+	suite.currentContainer = previousContainer
+}
+
+func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.running {
+		suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
+	}
+	suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
+	if suite.running {
+		suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
+	}
+	suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.running {
+		suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
+	}
+	suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.running {
+		suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
+	}
+	suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+	if suite.running {
+		suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
+	}
+	suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2b9af80629e71c1d4c38568a370e1c0ac308b59
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,76 @@
+package testingtproxy
+
+import (
+	"fmt"
+	"io"
+)
+
+type failFunc func(message string, callerSkip ...int)
+
+func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
+	return &ginkgoTestingTProxy{
+		fail:   fail,
+		offset: offset,
+		writer: writer,
+	}
+}
+
+type ginkgoTestingTProxy struct {
+	fail   failFunc
+	offset int
+	writer io.Writer
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+	t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+	t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+	t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+	t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+	t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+	t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+	fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+	fmt.Fprintf(t.writer, format, args...)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+	return false
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+	fmt.Println(args...)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+	fmt.Printf(format, args...)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+	return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..ac6540f0c1d385c08c81d5157b7505bfec659a99
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
@@ -0,0 +1,31 @@
+package writer
+
+type FakeGinkgoWriter struct {
+	EventStream []string
+}
+
+func NewFake() *FakeGinkgoWriter {
+	return &FakeGinkgoWriter{
+		EventStream: []string{},
+	}
+}
+
+func (writer *FakeGinkgoWriter) AddEvent(event string) {
+	writer.EventStream = append(writer.EventStream, event)
+}
+
+func (writer *FakeGinkgoWriter) Truncate() {
+	writer.EventStream = append(writer.EventStream, "TRUNCATE")
+}
+
+func (writer *FakeGinkgoWriter) DumpOut() {
+	writer.EventStream = append(writer.EventStream, "DUMP")
+}
+
+func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
+	writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
+}
+
+func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
+	return 0, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..7678fc1d9cb765ca8f65dbc3a16f79cac0d82586
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
@@ -0,0 +1,71 @@
+package writer
+
+import (
+	"bytes"
+	"io"
+	"sync"
+)
+
+type WriterInterface interface {
+	io.Writer
+
+	Truncate()
+	DumpOut()
+	DumpOutWithHeader(header string)
+}
+
+type Writer struct {
+	buffer    *bytes.Buffer
+	outWriter io.Writer
+	lock      *sync.Mutex
+	stream    bool
+}
+
+func New(outWriter io.Writer) *Writer {
+	return &Writer{
+		buffer:    &bytes.Buffer{},
+		lock:      &sync.Mutex{},
+		outWriter: outWriter,
+		stream:    true,
+	}
+}
+
+func (w *Writer) SetStream(stream bool) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	w.stream = stream
+}
+
+func (w *Writer) Write(b []byte) (n int, err error) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	if w.stream {
+		return w.outWriter.Write(b)
+	} else {
+		return w.buffer.Write(b)
+	}
+}
+
+func (w *Writer) Truncate() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	w.buffer.Reset()
+}
+
+func (w *Writer) DumpOut() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	if !w.stream {
+		w.buffer.WriteTo(w.outWriter)
+	}
+}
+
+func (w *Writer) DumpOutWithHeader(header string) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	if !w.stream && w.buffer.Len() > 0 {
+		w.outWriter.Write([]byte(header))
+		w.buffer.WriteTo(w.outWriter)
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..044d2dfd2d793add08da96f09546f180c87eca26
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
@@ -0,0 +1,83 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/reporters/stenographer"
+	"github.com/onsi/ginkgo/types"
+)
+
+type DefaultReporter struct {
+	config        config.DefaultReporterConfigType
+	stenographer  stenographer.Stenographer
+	specSummaries []*types.SpecSummary
+}
+
+func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
+	return &DefaultReporter{
+		config:       config,
+		stenographer: stenographer,
+	}
+}
+
+func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+	reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
+	if config.ParallelTotal > 1 {
+		reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
+	}
+	reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
+}
+
+func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+	if setupSummary.State != types.SpecStatePassed {
+		reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	}
+}
+
+func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+	if setupSummary.State != types.SpecStatePassed {
+		reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	}
+}
+
+func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
+	if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+		reporter.stenographer.AnnounceSpecWillRun(specSummary)
+	}
+}
+
+func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+	switch specSummary.State {
+	case types.SpecStatePassed:
+		if specSummary.IsMeasurement {
+			reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
+		} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
+			reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
+		} else {
+			reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
+		}
+	case types.SpecStatePending:
+		reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
+	case types.SpecStateSkipped:
+		reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	case types.SpecStateTimedOut:
+		reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	case types.SpecStatePanicked:
+		reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	case types.SpecStateFailed:
+		reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+	}
+
+	reporter.specSummaries = append(reporter.specSummaries, specSummary)
+}
+
+func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	reporter.stenographer.SummarizeFailures(reporter.specSummaries)
+	reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..27db47949081ed317aa74de907cce0d08130111d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
@@ -0,0 +1,59 @@
+package reporters
+
+import (
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+)
+
+//FakeReporter is useful for testing purposes
+type FakeReporter struct {
+	Config config.GinkgoConfigType
+
+	BeginSummary         *types.SuiteSummary
+	BeforeSuiteSummary   *types.SetupSummary
+	SpecWillRunSummaries []*types.SpecSummary
+	SpecSummaries        []*types.SpecSummary
+	AfterSuiteSummary    *types.SetupSummary
+	EndSummary           *types.SuiteSummary
+
+	SpecWillRunStub     func(specSummary *types.SpecSummary)
+	SpecDidCompleteStub func(specSummary *types.SpecSummary)
+}
+
+func NewFakeReporter() *FakeReporter {
+	return &FakeReporter{
+		SpecWillRunSummaries: make([]*types.SpecSummary, 0),
+		SpecSummaries:        make([]*types.SpecSummary, 0),
+	}
+}
+
+func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+	fakeR.Config = config
+	fakeR.BeginSummary = summary
+}
+
+func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+	fakeR.BeforeSuiteSummary = setupSummary
+}
+
+func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
+	if fakeR.SpecWillRunStub != nil {
+		fakeR.SpecWillRunStub(specSummary)
+	}
+	fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
+}
+
+func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+	if fakeR.SpecDidCompleteStub != nil {
+		fakeR.SpecDidCompleteStub(specSummary)
+	}
+	fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
+}
+
+func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+	fakeR.AfterSuiteSummary = setupSummary
+}
+
+func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	fakeR.EndSummary = summary
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..278a88ed7353d4ee358b58b754a2ab89c93432af
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
@@ -0,0 +1,139 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+*/
+
+package reporters
+
+import (
+	"encoding/xml"
+	"fmt"
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+	"os"
+	"strings"
+)
+
// JUnitTestSuite is the XML root element (<testsuite>) of the generated
// JUnit report.
type JUnitTestSuite struct {
	XMLName   xml.Name        `xml:"testsuite"`
	TestCases []JUnitTestCase `xml:"testcase"`
	Tests     int             `xml:"tests,attr"`
	Failures  int             `xml:"failures,attr"`
	Time      float64         `xml:"time,attr"`
}

// JUnitTestCase is a single <testcase> entry; FailureMessage and Skipped
// are pointers so they are omitted entirely for passing specs.
type JUnitTestCase struct {
	Name           string               `xml:"name,attr"`
	ClassName      string               `xml:"classname,attr"`
	FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
	Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
	Time           float64              `xml:"time,attr"`
}

// JUnitFailureMessage is the <failure> child element of a failed test case.
type JUnitFailureMessage struct {
	Type    string `xml:"type,attr"`
	Message string `xml:",chardata"`
}

// JUnitSkipped is the empty <skipped/> marker element.
type JUnitSkipped struct {
	XMLName xml.Name `xml:"skipped"`
}

// JUnitReporter accumulates spec results into a JUnitTestSuite and writes
// the XML to filename when the suite ends.
type JUnitReporter struct {
	suite         JUnitTestSuite
	filename      string
	testSuiteName string
}
+
+//NewJUnitReporter creates a new JUnit XML reporter.  The XML will be stored in the passed in filename.
+func NewJUnitReporter(filename string) *JUnitReporter {
+	return &JUnitReporter{
+		filename: filename,
+	}
+}
+
+func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+	reporter.suite = JUnitTestSuite{
+		Tests:     summary.NumberOfSpecsThatWillBeRun,
+		TestCases: []JUnitTestCase{},
+	}
+	reporter.testSuiteName = summary.SuiteDescription
+}
+
// SpecWillRun is a no-op: JUnit output is produced only from completed
// results.
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
}

// BeforeSuiteDidRun records a BeforeSuite failure (if any) as a test case.
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

// AfterSuiteDidRun records an AfterSuite failure (if any) as a test case.
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}
+
+func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
+	if setupSummary.State != types.SpecStatePassed {
+		testCase := JUnitTestCase{
+			Name:      name,
+			ClassName: reporter.testSuiteName,
+		}
+
+		testCase.FailureMessage = &JUnitFailureMessage{
+			Type:    reporter.failureTypeForState(setupSummary.State),
+			Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
+		}
+		testCase.Time = setupSummary.RunTime.Seconds()
+		reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
+	}
+}
+
+func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+	testCase := JUnitTestCase{
+		Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
+		ClassName: reporter.testSuiteName,
+	}
+	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
+		testCase.FailureMessage = &JUnitFailureMessage{
+			Type:    reporter.failureTypeForState(specSummary.State),
+			Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
+		}
+	}
+	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
+		testCase.Skipped = &JUnitSkipped{}
+	}
+	testCase.Time = specSummary.RunTime.Seconds()
+	reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
+}
+
+func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	reporter.suite.Time = summary.RunTime.Seconds()
+	reporter.suite.Failures = summary.NumberOfFailedSpecs
+	file, err := os.Create(reporter.filename)
+	if err != nil {
+		fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
+	}
+	defer file.Close()
+	file.WriteString(xml.Header)
+	encoder := xml.NewEncoder(file)
+	encoder.Indent("  ", "    ")
+	err = encoder.Encode(reporter.suite)
+	if err != nil {
+		fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
+	}
+}
+
+func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
+	switch state {
+	case types.SpecStateFailed:
+		return "Failure"
+	case types.SpecStateTimedOut:
+		return "Timeout"
+	case types.SpecStatePanicked:
+		return "Panic"
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..348b9dfce1f67aa6decacc744aec26126c9f476f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go
@@ -0,0 +1,15 @@
+package reporters
+
+import (
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+)
+
// Reporter is the interface Ginkgo drives with suite lifecycle events.
// The methods are invoked in the declared order: suite begin, BeforeSuite,
// per-spec will-run/did-complete pairs, AfterSuite, then suite end.
type Reporter interface {
	SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
	BeforeSuiteDidRun(setupSummary *types.SetupSummary)
	SpecWillRun(specSummary *types.SpecSummary)
	SpecDidComplete(specSummary *types.SpecSummary)
	AfterSuiteDidRun(setupSummary *types.SetupSummary)
	SpecSuiteDidEnd(summary *types.SuiteSummary)
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce5433af6a8e376b55ccf5a10123ed0b88b59589
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
@@ -0,0 +1,64 @@
+package stenographer
+
+import (
+	"fmt"
+	"strings"
+)
+
+func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
+	var out string
+
+	if len(args) > 0 {
+		out = fmt.Sprintf(format, args...)
+	} else {
+		out = format
+	}
+
+	if s.color {
+		return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
+	} else {
+		return out
+	}
+}
+
// printBanner prints text underlined by a rule of bannerCharacter of the
// same length.
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
	fmt.Println(text)
	fmt.Println(strings.Repeat(bannerCharacter, len(text)))
}

// printNewLine emits a single blank line.
func (s *consoleStenographer) printNewLine() {
	fmt.Println("")
}

// printDelimiter prints the gray dashed separator between output blocks.
func (s *consoleStenographer) printDelimiter() {
	fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}

// print writes the indented message without a trailing newline.
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
	fmt.Print(s.indent(indentation, format, args...))
}

// println writes the indented message followed by a newline.
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
	fmt.Println(s.indent(indentation, format, args...))
}
+
+func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
+	var text string
+
+	if len(args) > 0 {
+		text = fmt.Sprintf(format, args...)
+	} else {
+		text = format
+	}
+
+	stringArray := strings.Split(text, "\n")
+	padding := ""
+	if indentation >= 0 {
+		padding = strings.Repeat("  ", indentation)
+	}
+	for i, s := range stringArray {
+		stringArray[i] = fmt.Sprintf("%s%s", padding, s)
+	}
+
+	return strings.Join(stringArray, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ff6104c86f2dd49e7f6c8a4a4234289815925d2
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
@@ -0,0 +1,138 @@
+package stenographer
+
+import (
+	"sync"
+
+	"github.com/onsi/ginkgo/types"
+)
+
+func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
+	return FakeStenographerCall{
+		Method: method,
+		Args:   args,
+	}
+}
+
// FakeStenographer records every Stenographer method invocation so tests
// can assert on the sequence of announcements.
type FakeStenographer struct {
	calls []FakeStenographerCall
	lock  *sync.Mutex // guards calls; methods may run concurrently
}

// FakeStenographerCall captures one recorded method invocation.
type FakeStenographerCall struct {
	Method string
	Args   []interface{}
}
+
+func NewFakeStenographer() *FakeStenographer {
+	stenographer := &FakeStenographer{
+		lock: &sync.Mutex{},
+	}
+	stenographer.Reset()
+	return stenographer
+}
+
// Calls returns the invocations recorded so far.
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()

	return stenographer.calls
}

// Reset discards all recorded invocations.
func (stenographer *FakeStenographer) Reset() {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()

	stenographer.calls = make([]FakeStenographerCall, 0)
}
+
+func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
+	stenographer.lock.Lock()
+	defer stenographer.lock.Unlock()
+
+	results := make([]FakeStenographerCall, 0)
+	for _, call := range stenographer.calls {
+		if call.Method == method {
+			results = append(results, call)
+		}
+	}
+
+	return results
+}
+
// registerCall appends one recorded invocation under the lock.
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()

	stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
}
+
// The methods below mechanically satisfy the Stenographer interface by
// recording the method name and arguments of each call.  The "Succesful"
// misspellings mirror the interface's exported method names and must be
// kept for compatibility.

func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
}

func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}

func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
	stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
}

func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSpecWillRun", spec)
}

func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
	stenographer.registerCall("AnnounceCapturedOutput", output)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSuccesfulSpec", spec)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
}

func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
}

func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	stenographer.registerCall("SummarizeFailures", summaries)
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b5d905da160b166dc279f00206b87a613e5e51a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
@@ -0,0 +1,549 @@
+/*
+The stenographer is used by Ginkgo's reporters to generate output.
+
+Move along, nothing to see here.
+*/
+
+package stenographer
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+
+	"github.com/onsi/ginkgo/types"
+)
+
// ANSI escape sequences used to style console output.
const defaultStyle = "\x1b[0m"
const boldStyle = "\x1b[1m"
const redColor = "\x1b[91m"
const greenColor = "\x1b[32m"
const yellowColor = "\x1b[33m"
const cyanColor = "\x1b[36m"
const grayColor = "\x1b[90m"
const lightGrayColor = "\x1b[37m"

// cursorStateType tracks the console cursor position relative to streamed
// progress characters and delimited blocks, so each announcement knows
// whether it must first emit a newline and/or delimiter.
type cursorStateType int

const (
	cursorStateTop cursorStateType = iota
	cursorStateStreaming
	cursorStateMidBlock
	cursorStateEndBlock
)
+
// Stenographer is the interface Ginkgo's reporters use to produce console
// output.  NOTE(review): the "Succesful" misspellings are part of the
// exported API and are kept for compatibility.
type Stenographer interface {
	AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
	AnnounceAggregatedParallelRun(nodes int, succinct bool)
	AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
	AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
	AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)

	AnnounceSpecWillRun(spec *types.SpecSummary)
	AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
	AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)

	AnnounceCapturedOutput(output string)

	AnnounceSuccesfulSpec(spec *types.SpecSummary)
	AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
	AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)

	AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
	AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)

	AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)

	SummarizeFailures(summaries []*types.SpecSummary)
}
+
+func New(color bool) Stenographer {
+	denoter := "•"
+	if runtime.GOOS == "windows" {
+		denoter = "+"
+	}
+	return &consoleStenographer{
+		color:       color,
+		denoter:     denoter,
+		cursorState: cursorStateTop,
+	}
+}
+
// consoleStenographer is the stdout-backed Stenographer implementation.
type consoleStenographer struct {
	color       bool            // emit ANSI color codes when true
	denoter     string          // per-spec progress character ("•", "+" on Windows)
	cursorState cursorStateType // tracks streaming vs. block output
}

// alternatingColors is cycled through when printing nested container texts.
var alternatingColors = []string{defaultStyle, grayColor}
+
// AnnounceSuite prints the suite banner (or a one-line succinct form) with
// the random seed and whether spec order is fully randomized.
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	if succinct {
		s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
		return
	}
	s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
	s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
	if randomizingAll {
		s.print(0, " - Will randomize all specs")
	}
	s.printNewLine()
}

// AnnounceParallelRun reports this node's share of the specs when running
// as one node of a parallel suite.
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
	if succinct {
		s.print(0, "- node #%d ", node)
		return
	}
	s.println(0,
		"Parallel test node %s/%s. Assigned %s of %s specs.",
		s.colorize(boldStyle, "%d", node),
		s.colorize(boldStyle, "%d", nodes),
		s.colorize(boldStyle, "%d", specsToRun),
		s.colorize(boldStyle, "%d", totalSpecs),
	)
	s.printNewLine()
}

// AnnounceAggregatedParallelRun reports the node count when parallel
// output is being aggregated.
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	if succinct {
		s.print(0, "- %d nodes ", nodes)
		return
	}
	s.println(0,
		"Running in parallel across %s nodes",
		s.colorize(boldStyle, "%d", nodes),
	)
	s.printNewLine()
}

// AnnounceNumberOfSpecs reports how many specs will run out of the total;
// in succinct mode it also switches the cursor into streaming state.
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	if succinct {
		s.print(0, "- %d/%d specs ", specsToRun, total)
		s.stream()
		return
	}
	s.println(0,
		"Will run %s of %s specs",
		s.colorize(boldStyle, "%d", specsToRun),
		s.colorize(boldStyle, "%d", total),
	)

	s.printNewLine()
}

// AnnounceSpecRunCompletion prints the final pass/fail summary with
// per-outcome counts, colored by the suite's overall result.
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	if succinct && summary.SuiteSucceeded {
		s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
		return
	}
	s.printNewLine()
	color := greenColor
	if !summary.SuiteSucceeded {
		color = redColor
	}
	s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))

	status := ""
	if summary.SuiteSucceeded {
		status = s.colorize(boldStyle+greenColor, "SUCCESS!")
	} else {
		status = s.colorize(boldStyle+redColor, "FAIL!")
	}

	s.print(0,
		"%s -- %s | %s | %s | %s ",
		status,
		s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
		s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
		s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
		s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
	)
}
+
// AnnounceSpecWillRun prints the spec's container texts (alternating
// colors), then the spec's own text and code location, as a block.
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	s.startBlock()
	// ComponentTexts[0] is the suite description and the last entry is the
	// spec itself; print only the containers in between.
	for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
		s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
	}

	indentation := 0
	if len(spec.ComponentTexts) > 2 {
		indentation = 1
		s.printNewLine()
	}
	index := len(spec.ComponentTexts) - 1
	s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
	s.printNewLine()
	s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
	s.printNewLine()
	s.midBlock()
}

// AnnounceBeforeSuiteFailure reports a failed BeforeSuite node.
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
}

// AnnounceAfterSuiteFailure reports a failed AfterSuite node.
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
}

// announceSetupFailure prints the failure block for a suite setup/teardown
// node: headline, code location, and failure details.
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	var message string
	switch summary.State {
	case types.SpecStateFailed:
		message = "Failure"
	case types.SpecStatePanicked:
		message = "Panic"
	case types.SpecStateTimedOut:
		message = "Timeout"
	}

	s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)

	s.printNewLine()
	s.printFailure(indentation, summary.State, summary.Failure, fullTrace)

	s.endBlock()
}
+
+func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
+	if output == "" {
+		return
+	}
+
+	s.startBlock()
+	s.println(0, output)
+	s.midBlock()
+}
+
// AnnounceSuccesfulSpec streams a single green progress character.
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
	s.print(0, s.colorize(greenColor, s.denoter))
	s.stream()
}

// AnnounceSuccesfulSlowSpec prints a full block for a passing spec that
// exceeded the slow-spec threshold, including its run time.
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
		"",
		spec,
		succinct,
	)
}

// AnnounceSuccesfulMeasurement prints a block containing the measurement
// report for a passing Measure spec.
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
		s.measurementReport(spec, succinct),
		spec,
		succinct,
	)
}

// AnnouncePendingSpec prints a yellow "P": a full block when noisy,
// otherwise a streamed single character.
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	if noisy {
		s.printBlockWithMessage(
			s.colorize(yellowColor, "P [PENDING]"),
			"",
			spec,
			false,
		)
	} else {
		s.print(0, s.colorize(yellowColor, "P"))
		s.stream()
	}
}

func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
	if succinct || spec.Failure == (types.SpecFailure{}) {
		s.print(0, s.colorize(cyanColor, "S"))
		s.stream()
	} else {
		s.startBlock()
		s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

		indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

		s.printNewLine()
		s.printSkip(indentation, spec.Failure)
		s.endBlock()
	}
}

// AnnounceSpecTimedOut prints the failure block for a timed-out spec.
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
}

// AnnounceSpecPanicked prints the failure block for a panicked spec.
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
}

// AnnounceSpecFailed prints the failure block for a failed spec.
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
}
+
// SummarizeFailures prints a recap of every failing spec (timeout, panic,
// or failure) with its context and failure location.  No-op when nothing
// failed.
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	failingSpecs := []*types.SpecSummary{}

	for _, summary := range summaries {
		if summary.HasFailureState() {
			failingSpecs = append(failingSpecs, summary)
		}
	}

	if len(failingSpecs) == 0 {
		return
	}

	s.printNewLine()
	s.printNewLine()
	plural := "s"
	if len(failingSpecs) == 1 {
		plural = ""
	}
	s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
	for _, summary := range failingSpecs {
		s.printNewLine()
		if summary.HasFailureState() {
			if summary.TimedOut() {
				s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
			} else if summary.Panicked() {
				s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
			} else if summary.Failed() {
				s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
			}
			s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
			s.printNewLine()
			s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
		}
	}
}

// startBlock emits the separator needed before a block, depending on
// whether we were streaming dots or already inside a block.
func (s *consoleStenographer) startBlock() {
	if s.cursorState == cursorStateStreaming {
		s.printNewLine()
		s.printDelimiter()
	} else if s.cursorState == cursorStateMidBlock {
		s.printNewLine()
	}
}

// midBlock marks that a block was left open (no closing delimiter yet).
func (s *consoleStenographer) midBlock() {
	s.cursorState = cursorStateMidBlock
}

// endBlock closes the current block with a delimiter.
func (s *consoleStenographer) endBlock() {
	s.printDelimiter()
	s.cursorState = cursorStateEndBlock
}

// stream marks that single-character progress output is being emitted.
func (s *consoleStenographer) stream() {
	s.cursorState = cursorStateStreaming
}

// printBlockWithMessage prints a delimited block: header, the spec's code
// location context, and an optional extra message.
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
	s.startBlock()
	s.println(0, header)

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)

	if message != "" {
		s.printNewLine()
		s.println(indentation, message)
	}

	s.endBlock()
}

// printSpecFailure prints a delimited failure block: headline (with the
// failing component's context and run time), code locations, and the
// failure details.
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

	s.printNewLine()
	s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
	s.endBlock()
}
+
+func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
+	switch failedComponentType {
+	case types.SpecComponentTypeBeforeSuite:
+		return " in Suite Setup (BeforeSuite)"
+	case types.SpecComponentTypeAfterSuite:
+		return " in Suite Teardown (AfterSuite)"
+	case types.SpecComponentTypeBeforeEach:
+		return " in Spec Setup (BeforeEach)"
+	case types.SpecComponentTypeJustBeforeEach:
+		return " in Spec Setup (JustBeforeEach)"
+	case types.SpecComponentTypeAfterEach:
+		return " in Spec Teardown (AfterEach)"
+	}
+
+	return ""
+}
+
// printSkip prints the message and location of a runtime skip.
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
	s.println(indentation, s.colorize(cyanColor, spec.Message))
	s.printNewLine()
	s.println(indentation, spec.Location.String())
}

// printFailure prints failure details.  Panics always include the
// forwarded panic value and full stack trace; other failures include the
// stack trace only when fullTrace is requested.
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
	if state == types.SpecStatePanicked {
		s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
		s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
		s.println(indentation, failure.Location.String())
		s.printNewLine()
		s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
		s.println(indentation, failure.Location.FullStackTrace)
	} else {
		s.println(indentation, s.colorize(redColor, failure.Message))
		s.printNewLine()
		s.println(indentation, failure.Location.String())
		if fullTrace {
			s.printNewLine()
			s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
			s.println(indentation, failure.Location.FullStackTrace)
		}
	}
}
+
// printSpecContext prints each component text (container nesting) of a
// spec, highlighting the failed component with its block type and code
// location.  It returns the indentation level reached.
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	startIndex := 1
	indentation := 0

	// A single component means there is no suite-description entry to skip.
	if len(componentTexts) == 1 {
		startIndex = 0
	}

	for i := startIndex; i < len(componentTexts); i++ {
		if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
			color := redColor
			if state == types.SpecStateSkipped {
				color = cyanColor
			}
			blockType := ""
			switch failedComponentType {
			case types.SpecComponentTypeBeforeSuite:
				blockType = "BeforeSuite"
			case types.SpecComponentTypeAfterSuite:
				blockType = "AfterSuite"
			case types.SpecComponentTypeBeforeEach:
				blockType = "BeforeEach"
			case types.SpecComponentTypeJustBeforeEach:
				blockType = "JustBeforeEach"
			case types.SpecComponentTypeAfterEach:
				blockType = "AfterEach"
			case types.SpecComponentTypeIt:
				blockType = "It"
			case types.SpecComponentTypeMeasure:
				blockType = "Measurement"
			}
			if succinct {
				s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
			} else {
				s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		} else {
			if succinct {
				s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
			} else {
				s.println(indentation, componentTexts[i])
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		}
		indentation++
	}

	return indentation
}

// printCodeLocationBlock prints the spec context and normalizes the
// returned indentation: succinct output also appends the innermost code
// location and always indents subsequent details by one level.
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)

	if succinct {
		if len(componentTexts) > 0 {
			s.printNewLine()
			s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
		}
		s.printNewLine()
		indentation = 1
	} else {
		indentation--
	}

	return indentation
}
+
// orderedMeasurementKeys returns measurement names sorted by their Order
// field.  NOTE(review): this assumes each measurement.Order is a unique
// index in [0, len(measurements)); out-of-range values would panic —
// confirm against the Measure implementation that assigns Order.
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
	orderedKeys := make([]string, len(measurements))
	for key, measurement := range measurements {
		orderedKeys[measurement.Order] = key
	}
	return orderedKeys
}

// measurementReport renders a spec's measurements as a multi-line string:
// a compact one-line-per-measurement form when succinct, otherwise an
// expanded form with optional Info and per-statistic lines.
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
	if len(spec.Measurements) == 0 {
		return "Found no measurements"
	}

	message := []string{}
	orderedKeys := s.orderedMeasurementKeys(spec.Measurements)

	if succinct {
		message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			message = append(message, fmt.Sprintf("  %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				measurement.SmallestLabel,
				s.colorize(greenColor, "%.3f", measurement.Smallest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, "%.3f", measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, "%.3f", measurement.Largest),
				measurement.Units,
			))
		}
	} else {
		message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			info := ""
			if measurement.Info != nil {
				message = append(message, fmt.Sprintf("%v", measurement.Info))
			}

			message = append(message, fmt.Sprintf("%s:\n%s  %s: %s%s\n  %s: %s%s\n  %s: %s%s ± %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				info,
				measurement.SmallestLabel,
				s.colorize(greenColor, "%.3f", measurement.Smallest),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, "%.3f", measurement.Largest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, "%.3f", measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
				measurement.Units,
			))
		}
	}

	return strings.Join(message, "\n")
}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..657dfe726e258bcccfb6edcd38c0c1a759728810
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
@@ -0,0 +1,92 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+	"fmt"
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+	"io"
+	"strings"
+)
+
+const (
+	messageId = "##teamcity"
+)
+
+type TeamCityReporter struct {
+	writer        io.Writer
+	testSuiteName string
+}
+
+func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
+	return &TeamCityReporter{
+		writer: writer,
+	}
+}
+
+func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+	reporter.testSuiteName = escape(summary.SuiteDescription)
+	fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
+}
+
+func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+	reporter.handleSetupSummary("BeforeSuite", setupSummary)
+}
+
+func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+	reporter.handleSetupSummary("AfterSuite", setupSummary)
+}
+
+func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
+	if setupSummary.State != types.SpecStatePassed {
+		testName := escape(name)
+		fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
+		message := escape(setupSummary.Failure.ComponentCodeLocation.String())
+		details := escape(setupSummary.Failure.Message)
+		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
+		durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
+		fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
+	}
+}
+
+func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
+	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
+	fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
+}
+
+func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
+
+	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
+		message := escape(specSummary.Failure.ComponentCodeLocation.String())
+		details := escape(specSummary.Failure.Message)
+		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
+	}
+	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
+		fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
+	}
+
+	durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
+	fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
+}
+
+func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
+}
+
+func escape(output string) string {
+	output = strings.Replace(output, "|", "||", -1)
+	output = strings.Replace(output, "'", "|'", -1)
+	output = strings.Replace(output, "\n", "|n", -1)
+	output = strings.Replace(output, "\r", "|r", -1)
+	output = strings.Replace(output, "[", "|[", -1)
+	output = strings.Replace(output, "]", "|]", -1)
+	return output
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go
new file mode 100644
index 0000000000000000000000000000000000000000..935a89e136a03d6929a5043a9344b3e77ab621b3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/code_location.go
@@ -0,0 +1,15 @@
+package types
+
+import (
+	"fmt"
+)
+
+type CodeLocation struct {
+	FileName       string
+	LineNumber     int
+	FullStackTrace string
+}
+
+func (codeLocation CodeLocation) String() string {
+	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go
new file mode 100644
index 0000000000000000000000000000000000000000..fdd6ed5bdf85ba45926dd728b3ccd6e8ab36414c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go
@@ -0,0 +1,30 @@
+package types
+
+import (
+	"encoding/json"
+)
+
+type RemoteBeforeSuiteState int
+
+const (
+	RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
+
+	RemoteBeforeSuiteStatePending
+	RemoteBeforeSuiteStatePassed
+	RemoteBeforeSuiteStateFailed
+	RemoteBeforeSuiteStateDisappeared
+)
+
+type RemoteBeforeSuiteData struct {
+	Data  []byte
+	State RemoteBeforeSuiteState
+}
+
+func (r RemoteBeforeSuiteData) ToJSON() []byte {
+	data, _ := json.Marshal(r)
+	return data
+}
+
+type RemoteAfterSuiteData struct {
+	CanRun bool
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..889612e0a7204635cd31e0e11652a80e21750712
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/types.go
@@ -0,0 +1,143 @@
+package types
+
+import "time"
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+
+type SuiteSummary struct {
+	SuiteDescription string
+	SuiteSucceeded   bool
+	SuiteID          string
+
+	NumberOfSpecsBeforeParallelization int
+	NumberOfTotalSpecs                 int
+	NumberOfSpecsThatWillBeRun         int
+	NumberOfPendingSpecs               int
+	NumberOfSkippedSpecs               int
+	NumberOfPassedSpecs                int
+	NumberOfFailedSpecs                int
+	RunTime                            time.Duration
+}
+
+type SpecSummary struct {
+	ComponentTexts         []string
+	ComponentCodeLocations []CodeLocation
+
+	State           SpecState
+	RunTime         time.Duration
+	Failure         SpecFailure
+	IsMeasurement   bool
+	NumberOfSamples int
+	Measurements    map[string]*SpecMeasurement
+
+	CapturedOutput string
+	SuiteID        string
+}
+
+func (s SpecSummary) HasFailureState() bool {
+	return s.State.IsFailure()
+}
+
+func (s SpecSummary) TimedOut() bool {
+	return s.State == SpecStateTimedOut
+}
+
+func (s SpecSummary) Panicked() bool {
+	return s.State == SpecStatePanicked
+}
+
+func (s SpecSummary) Failed() bool {
+	return s.State == SpecStateFailed
+}
+
+func (s SpecSummary) Passed() bool {
+	return s.State == SpecStatePassed
+}
+
+func (s SpecSummary) Skipped() bool {
+	return s.State == SpecStateSkipped
+}
+
+func (s SpecSummary) Pending() bool {
+	return s.State == SpecStatePending
+}
+
+type SetupSummary struct {
+	ComponentType SpecComponentType
+	CodeLocation  CodeLocation
+
+	State   SpecState
+	RunTime time.Duration
+	Failure SpecFailure
+
+	CapturedOutput string
+	SuiteID        string
+}
+
+type SpecFailure struct {
+	Message        string
+	Location       CodeLocation
+	ForwardedPanic string
+
+	ComponentIndex        int
+	ComponentType         SpecComponentType
+	ComponentCodeLocation CodeLocation
+}
+
+type SpecMeasurement struct {
+	Name  string
+	Info  interface{}
+	Order int
+
+	Results []float64
+
+	Smallest     float64
+	Largest      float64
+	Average      float64
+	StdDeviation float64
+
+	SmallestLabel string
+	LargestLabel  string
+	AverageLabel  string
+	Units         string
+}
+
+type SpecState uint
+
+const (
+	SpecStateInvalid SpecState = iota
+
+	SpecStatePending
+	SpecStateSkipped
+	SpecStatePassed
+	SpecStateFailed
+	SpecStatePanicked
+	SpecStateTimedOut
+)
+
+func (state SpecState) IsFailure() bool {
+	return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
+}
+
+type SpecComponentType uint
+
+const (
+	SpecComponentTypeInvalid SpecComponentType = iota
+
+	SpecComponentTypeContainer
+	SpecComponentTypeBeforeSuite
+	SpecComponentTypeAfterSuite
+	SpecComponentTypeBeforeEach
+	SpecComponentTypeJustBeforeEach
+	SpecComponentTypeAfterEach
+	SpecComponentTypeIt
+	SpecComponentTypeMeasure
+)
+
+type FlagType uint
+
+const (
+	FlagTypeNone FlagType = iota
+	FlagTypeFocused
+	FlagTypePending
+)
diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..720c13cba8439e8aa00eebd6288f21d4dca9ffc0
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/.gitignore
@@ -0,0 +1,5 @@
+.DS_Store
+*.test
+.
+.idea
+gomega.iml
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..73d5863e2130b4b7da2695c1634330f0ce6075a2
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+  - 1.4
+  - 1.5
+  - tip
+
+install:
+  - go get -v ./...
+  - go get github.com/onsi/ginkgo
+  - go install github.com/onsi/ginkgo/ginkgo
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..0c5ede5d8280daeb0f7dfd62597b4c33199ecdb1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -0,0 +1,70 @@
+## HEAD
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks.  Can be paired with `Eventually` to safely send a value down a channel with a timeout.
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler.  This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+    - If a registered handler makes a failing assertion `ghttp` will return `500`.
+    - If a registered handler panics, `ghttp` will return `500` *and* fail the test.  This is new behavior that may cause existing code to break.  This code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`.  `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface.  Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher.  Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+    - Provides a flexible fake http server
+    - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+    - Provides a `gbytes.Buffer`
+    - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+    - Provides support for building Go binaries
+    - Wraps and starts `exec.Cmd` commands
+    - Makes it easy to assert against stdout and stderr
+    - Makes it easy to send signals and wait for processes to exit
+    - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
+- `BeTemporally`: like `BeNumerically` but for `time.Time`
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future.  For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internal to `internal`
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9415ee72c17f87d26fc640a1b198ae12c675704c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c825591922562b0e4fbf98f50e1032c2e81ce757
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/README.md
@@ -0,0 +1,17 @@
+![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
+
+[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega)
+
+Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
+
+To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
+
+## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
+
+Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
+
+## License
+
+Gomega is MIT-Licensed
+
+The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution.  goraph has an MIT license.
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec9c91a42f67f9f95594d537339a29ebd39a4776
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -0,0 +1,276 @@
+/*
+Gomega's format package pretty-prints objects.  It explores input objects recursively and generates formatted, indented output with type information.
+*/
+package format
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
+var MaxDepth = uint(10)
+
+/*
+By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
+
+Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
+
+Note that GoString and String don't always have all the information you need to understand why a test failed!
+*/
+var UseStringerRepresentation = false
+
+//The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+	Expected
+		<pretty printed actual>
+	<message>
+		<pretty printed expected>
+
+If expected is omited, then the message looks like:
+
+	Expected
+		<pretty printed actual>
+	<message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+	if len(expected) == 0 {
+		return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+	} else {
+		return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+	}
+}
+
+/*
+Pretty prints the passed in object at the passed in indentation level.
+
+Object recurses into deeply nested objects emitting pretty-printed representations of their components.
+
+Modify format.MaxDepth to control how deep the recursion is allowed to go
+Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
+recursing into the object.
+*/
+func Object(object interface{}, indentation uint) string {
+	indent := strings.Repeat(Indent, int(indentation))
+	value := reflect.ValueOf(object)
+	return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+}
+
+/*
+IndentString takes a string and indents each line by the specified amount.
+*/
+func IndentString(s string, indentation uint) string {
+	components := strings.Split(s, "\n")
+	result := ""
+	indent := strings.Repeat(Indent, int(indentation))
+	for i, component := range components {
+		result += indent + component
+		if i < len(components)-1 {
+			result += "\n"
+		}
+	}
+
+	return result
+}
+
+func formatType(object interface{}) string {
+	t := reflect.TypeOf(object)
+	if t == nil {
+		return "nil"
+	}
+	switch t.Kind() {
+	case reflect.Chan:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+	case reflect.Ptr:
+		return fmt.Sprintf("%T | %p", object, object)
+	case reflect.Slice:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+	case reflect.Map:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d", object, v.Len())
+	default:
+		return fmt.Sprintf("%T", object)
+	}
+}
+
+func formatValue(value reflect.Value, indentation uint) string {
+	if indentation > MaxDepth {
+		return "..."
+	}
+
+	if isNilValue(value) {
+		return "nil"
+	}
+
+	if UseStringerRepresentation {
+		if value.CanInterface() {
+			obj := value.Interface()
+			switch x := obj.(type) {
+			case fmt.GoStringer:
+				return x.GoString()
+			case fmt.Stringer:
+				return x.String()
+			}
+		}
+	}
+
+	switch value.Kind() {
+	case reflect.Bool:
+		return fmt.Sprintf("%v", value.Bool())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return fmt.Sprintf("%v", value.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return fmt.Sprintf("%v", value.Uint())
+	case reflect.Uintptr:
+		return fmt.Sprintf("0x%x", value.Uint())
+	case reflect.Float32, reflect.Float64:
+		return fmt.Sprintf("%v", value.Float())
+	case reflect.Complex64, reflect.Complex128:
+		return fmt.Sprintf("%v", value.Complex())
+	case reflect.Chan:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Func:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Ptr:
+		return formatValue(value.Elem(), indentation)
+	case reflect.Slice:
+		if value.Type().Elem().Kind() == reflect.Uint8 {
+			return formatString(value.Bytes(), indentation)
+		}
+		return formatSlice(value, indentation)
+	case reflect.String:
+		return formatString(value.String(), indentation)
+	case reflect.Array:
+		return formatSlice(value, indentation)
+	case reflect.Map:
+		return formatMap(value, indentation)
+	case reflect.Struct:
+		return formatStruct(value, indentation)
+	case reflect.Interface:
+		return formatValue(value.Elem(), indentation)
+	default:
+		if value.CanInterface() {
+			return fmt.Sprintf("%#v", value.Interface())
+		} else {
+			return fmt.Sprintf("%#v", value)
+		}
+	}
+}
+
+func formatString(object interface{}, indentation uint) string {
+	if indentation == 1 {
+		s := fmt.Sprintf("%s", object)
+		components := strings.Split(s, "\n")
+		result := ""
+		for i, component := range components {
+			if i == 0 {
+				result += component
+			} else {
+				result += Indent + component
+			}
+			if i < len(components)-1 {
+				result += "\n"
+			}
+		}
+
+		return fmt.Sprintf("%s", result)
+	} else {
+		return fmt.Sprintf("%q", object)
+	}
+}
+
+func formatSlice(v reflect.Value, indentation uint) string {
+	l := v.Len()
+	result := make([]string, l)
+	longest := 0
+	for i := 0; i < l; i++ {
+		result[i] = formatValue(v.Index(i), indentation+1)
+		if len(result[i]) > longest {
+			longest = len(result[i])
+		}
+	}
+
+	if longest > longFormThreshold {
+		indenter := strings.Repeat(Indent, int(indentation))
+		return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+	} else {
+		return fmt.Sprintf("[%s]", strings.Join(result, ", "))
+	}
+}
+
+func formatMap(v reflect.Value, indentation uint) string {
+	l := v.Len()
+	result := make([]string, l)
+
+	longest := 0
+	for i, key := range v.MapKeys() {
+		value := v.MapIndex(key)
+		result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1))
+		if len(result[i]) > longest {
+			longest = len(result[i])
+		}
+	}
+
+	if longest > longFormThreshold {
+		indenter := strings.Repeat(Indent, int(indentation))
+		return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+	} else {
+		return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+	}
+}
+
+func formatStruct(v reflect.Value, indentation uint) string {
+	t := v.Type()
+
+	l := v.NumField()
+	result := []string{}
+	longest := 0
+	for i := 0; i < l; i++ {
+		structField := t.Field(i)
+		fieldEntry := v.Field(i)
+		representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
+		result = append(result, representation)
+		if len(representation) > longest {
+			longest = len(representation)
+		}
+	}
+	if longest > longFormThreshold {
+		indenter := strings.Repeat(Indent, int(indentation))
+		return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+	} else {
+		return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+	}
+}
+
+func isNilValue(a reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Invalid:
+		return true
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return a.IsNil()
+	}
+
+	return false
+}
+
+func isNil(a interface{}) bool {
+	if a == nil {
+		return true
+	}
+
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return reflect.ValueOf(a).IsNil()
+	}
+
+	return false
+}
diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go
new file mode 100644
index 0000000000000000000000000000000000000000..8775b8611a0de0f0c8c7f3e08d9164e17eeefff8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go
@@ -0,0 +1,229 @@
+/*
+Package gbytes provides a buffer that supports incrementally detecting input.
+
+You use gbytes.Buffer with the gbytes.Say matcher.  When Say finds a match, it fastforwards the buffer's read cursor to the end of that match.
+
+Subsequent matches against the buffer will only operate against data that appears *after* the read cursor.
+
+The read cursor is an opaque implementation detail that you cannot access.  You should use the Say matcher to sift through the buffer.  You can always
+access the entire buffer's contents with Contents().
+
+*/
+package gbytes
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"regexp"
+	"sync"
+	"time"
+)
+
+/*
+gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher.
+
+You should only use a gbytes.Buffer in test code.  It stores all writes in an in-memory buffer - behavior that is inappropriate for production code!
+*/
+type Buffer struct {
+	contents     []byte
+	readCursor   uint64
+	lock         *sync.Mutex
+	detectCloser chan interface{}
+	closed       bool
+}
+
+/*
+NewBuffer returns a new gbytes.Buffer
+*/
+func NewBuffer() *Buffer {
+	return &Buffer{
+		lock: &sync.Mutex{},
+	}
+}
+
+/*
+BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes
+*/
+func BufferWithBytes(bytes []byte) *Buffer {
+	return &Buffer{
+		lock:     &sync.Mutex{},
+		contents: bytes,
+	}
+}
+
+/*
+Write implements the io.Writer interface
+*/
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.closed {
+		return 0, errors.New("attempt to write to closed buffer")
+	}
+
+	b.contents = append(b.contents, p...)
+	return len(p), nil
+}
+
+/*
+Read implements the io.Reader interface. It advances the
+cursor as it reads.
+
+Returns an error if called after Close.
+*/
+func (b *Buffer) Read(d []byte) (int, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.closed {
+		return 0, errors.New("attempt to read from closed buffer")
+	}
+
+	if uint64(len(b.contents)) <= b.readCursor {
+		return 0, io.EOF
+	}
+
+	n := copy(d, b.contents[b.readCursor:])
+	b.readCursor += uint64(n)
+
+	return n, nil
+}
+
+/*
+Close signifies that the buffer will no longer be written to
+*/
+func (b *Buffer) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.closed = true
+
+	return nil
+}
+
+/*
+Closed returns true if the buffer has been closed
+*/
+func (b *Buffer) Closed() bool {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.closed
+}
+
+/*
+Contents returns all data ever written to the buffer.
+*/
+func (b *Buffer) Contents() []byte {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	contents := make([]byte, len(b.contents))
+	copy(contents, b.contents)
+	return contents
+}
+
+/*
+Detect takes a regular expression and returns a channel.
+
+The channel will receive true the first time data matching the regular expression is written to the buffer.
+The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region.
+
+You typically don't need to use Detect and should use the ghttp.Say matcher instead.  Detect is useful, however, in cases where your code must
+be branch and handle different outputs written to the buffer.
+
+For example, consider a buffer hooked up to the stdout of a client library.  You may (or may not, depending on state outside of your control) need to authenticate the client library.
+
+You could do something like:
+
+select {
+case <-buffer.Detect("You are not logged in"):
+	//log in
+case <-buffer.Detect("Success"):
+	//carry on
+case <-time.After(time.Second):
+	//welp
+}
+buffer.CancelDetects()
+
+You should always call CancelDetects after using Detect.  This will close any channels that have not detected and clean up the goroutines that were spawned to support them.
+
+Finally, you can pass detect a format string followed by variadic arguments.  This will construct the regexp using fmt.Sprintf.
+*/
+func (b *Buffer) Detect(desired string, args ...interface{}) chan bool {
+	formattedRegexp := desired
+	if len(args) > 0 {
+		formattedRegexp = fmt.Sprintf(desired, args...)
+	}
+	re := regexp.MustCompile(formattedRegexp)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.detectCloser == nil {
+		b.detectCloser = make(chan interface{})
+	}
+
+	closer := b.detectCloser
+	response := make(chan bool)
+	go func() {
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+		defer close(response)
+		for {
+			select {
+			case <-ticker.C:
+				b.lock.Lock()
+				data, cursor := b.contents[b.readCursor:], b.readCursor
+				loc := re.FindIndex(data)
+				b.lock.Unlock()
+
+				if loc != nil {
+					response <- true
+					b.lock.Lock()
+					newCursorPosition := cursor + uint64(loc[1])
+					if newCursorPosition >= b.readCursor {
+						b.readCursor = newCursorPosition
+					}
+					b.lock.Unlock()
+					return
+				}
+			case <-closer:
+				return
+			}
+		}
+	}()
+
+	return response
+}
+
+/*
+CancelDetects cancels any pending detects and cleans up their goroutines.  You should always call this when you're done with a set of Detect channels.
+*/
+func (b *Buffer) CancelDetects() {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	close(b.detectCloser)
+	b.detectCloser = nil
+}
+
+func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	unreadBytes := b.contents[b.readCursor:]
+	copyOfUnreadBytes := make([]byte, len(unreadBytes))
+	copy(copyOfUnreadBytes, unreadBytes)
+
+	loc := re.FindIndex(unreadBytes)
+
+	if loc != nil {
+		b.readCursor += uint64(loc[1])
+		return true, copyOfUnreadBytes
+	} else {
+		return false, copyOfUnreadBytes
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce5ebcbfa59f9fd91976ced955d41df0fdd153f4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go
@@ -0,0 +1,105 @@
+package gbytes
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/onsi/gomega/format"
+)
+
+//Objects satisfying the BufferProvider can be used with the Say matcher.
+type BufferProvider interface {
+	Buffer() *Buffer
+}
+
+/*
+Say is a Gomega matcher that operates on gbytes.Buffers:
+
+	Ω(buffer).Should(Say("something"))
+
+will succeed if the unread portion of the buffer matches the regular expression "something".
+
+When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match.
+Thus, subsequent calls to Say will only match against the unread portion of the buffer
+
+Say pairs very well with Eventually.  To asser that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:
+
+	Eventually(buffer, 3).Should(Say("[123]-star"))
+
+Ditto with consistently.  To assert that a buffer does not receive data matching "never-see-this" for 1 second you can:
+
+	Consistently(buffer, 1).ShouldNot(Say("never-see-this"))
+
+In addition to bytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface.
+In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer()
+
+If the buffer is closed, the Say matcher will tell Eventually to abort.
+*/
+func Say(expected string, args ...interface{}) *sayMatcher {
+	formattedRegexp := expected
+	if len(args) > 0 {
+		formattedRegexp = fmt.Sprintf(expected, args...)
+	}
+	return &sayMatcher{
+		re: regexp.MustCompile(formattedRegexp),
+	}
+}
+
+type sayMatcher struct {
+	re              *regexp.Regexp
+	receivedSayings []byte
+}
+
+func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) {
+	var buffer *Buffer
+
+	switch x := actual.(type) {
+	case *Buffer:
+		buffer = x
+	case BufferProvider:
+		buffer = x.Buffer()
+	default:
+		return nil, false
+	}
+
+	return buffer, true
+}
+
+func (m *sayMatcher) Match(actual interface{}) (success bool, err error) {
+	buffer, ok := m.buffer(actual)
+	if !ok {
+		return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	didSay, sayings := buffer.didSay(m.re)
+	m.receivedSayings = sayings
+
+	return didSay, nil
+}
+
+func (m *sayMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf(
+		"Got stuck at:\n%s\nWaiting for:\n%s",
+		format.IndentString(string(m.receivedSayings), 1),
+		format.IndentString(m.re.String(), 1),
+	)
+}
+
+func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf(
+		"Saw:\n%s\nWhich matches the unexpected:\n%s",
+		format.IndentString(string(m.receivedSayings), 1),
+		format.IndentString(m.re.String(), 1),
+	)
+}
+
+func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	switch x := actual.(type) {
+	case *Buffer:
+		return !x.Closed()
+	case BufferProvider:
+		return !x.Buffer().Closed()
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go
new file mode 100644
index 0000000000000000000000000000000000000000..3e9bf9f9478fecf80ddd409bf89e2f7b80e535c3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gexec/build.go
@@ -0,0 +1,78 @@
+package gexec
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"runtime"
+)
+
+var tmpDir string
+
+/*
+Build uses go build to compile the package at packagePath.  The resulting binary is saved off in a temporary directory.
+A path pointing to this binary is returned.
+
+Build uses the $GOPATH set in your environment.  It passes the variadic args on to `go build`.
+*/
+func Build(packagePath string, args ...string) (compiledPath string, err error) {
+	return BuildIn(os.Getenv("GOPATH"), packagePath, args...)
+}
+
+/*
+BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument).
+*/
+func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
+	if len(gopath) == 0 { // validate input before doing any filesystem work
+		return "", errors.New("$GOPATH not provided when building " + packagePath)
+	}
+
+	tmpDir, err := temporaryDirectory() // local tmpDir shadows the lazily-created package-level root dir
+	if err != nil {
+		return "", err
+	}
+
+	executable := filepath.Join(tmpDir, path.Base(packagePath))
+	if runtime.GOOS == "windows" {
+		executable = executable + ".exe"
+	}
+
+	cmdArgs := append([]string{"build"}, args...)
+	cmdArgs = append(cmdArgs, "-o", executable, packagePath)
+
+	build := exec.Command("go", cmdArgs...)
+	build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...)
+
+	output, err := build.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
+	}
+
+	return executable, nil
+}
+
+/*
+You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by
+gexec. In Ginkgo this is typically done in an AfterSuite callback.
+*/
+func CleanupBuildArtifacts() {
+	if tmpDir != "" {
+		os.RemoveAll(tmpDir)
+	}
+}
+
+func temporaryDirectory() (string, error) {
+	var err error
+	if tmpDir == "" {
+		tmpDir, err = ioutil.TempDir("", "gexec_artifacts")
+		if err != nil {
+			return "", err
+		}
+	}
+
+	return ioutil.TempDir(tmpDir, "g")
+}
diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6f432942700595184c860a95ce2e78f3637c72a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go
@@ -0,0 +1,88 @@
+package gexec
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+/*
+The Exit matcher operates on a session:
+
+	Ω(session).Should(Exit(<optional status code>))
+
+Exit passes if the session has already exited.
+
+If no status code is provided, then Exit will succeed if the session has exited regardless of exit code.
+Otherwise, Exit will only succeed if the process has exited with the provided status code.
+
+Note that the process must have already exited.  To wait for a process to exit, use Eventually:
+
+	Eventually(session, 3).Should(Exit(0))
+*/
+func Exit(optionalExitCode ...int) *exitMatcher {
+	exitCode := -1
+	if len(optionalExitCode) > 0 {
+		exitCode = optionalExitCode[0]
+	}
+
+	return &exitMatcher{
+		exitCode: exitCode,
+	}
+}
+
+type exitMatcher struct {
+	exitCode       int
+	didExit        bool
+	actualExitCode int
+}
+
+type Exiter interface {
+	ExitCode() int
+}
+
+func (m *exitMatcher) Match(actual interface{}) (success bool, err error) {
+	exiter, ok := actual.(Exiter)
+	if !ok {
+		return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1))
+	}
+
+	m.actualExitCode = exiter.ExitCode()
+
+	if m.actualExitCode == -1 {
+		return false, nil
+	}
+
+	if m.exitCode == -1 {
+		return true, nil
+	}
+	return m.exitCode == m.actualExitCode, nil
+}
+
+func (m *exitMatcher) FailureMessage(actual interface{}) (message string) {
+	if m.actualExitCode == -1 {
+		return "Expected process to exit.  It did not."
+	} else {
+		return format.Message(m.actualExitCode, "to match exit code:", m.exitCode)
+	}
+}
+
+func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	if m.actualExitCode == -1 {
+		return "you really shouldn't be able to see this!"
+	} else {
+		if m.exitCode == -1 {
+			return "Expected process not to exit.  It did."
+		} else {
+			return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode)
+		}
+	}
+}
+
+func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	session, ok := actual.(*Session)
+	if ok {
+		return session.ExitCode() == -1
+	}
+	return true
+}
diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..05e695abc8df0633d8cd9ed3794bf8157cc75f2b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go
@@ -0,0 +1,53 @@
+package gexec
+
+import (
+	"io"
+	"sync"
+)
+
+/*
+PrefixedWriter wraps an io.Writer, emitting the passed in prefix at the beginning of each new line.
+This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each
+session by passing in a PrefixedWriter:
+
+gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter))
+*/
+type PrefixedWriter struct {
+	prefix        []byte
+	writer        io.Writer
+	lock          *sync.Mutex
+	atStartOfLine bool
+}
+
+func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter {
+	return &PrefixedWriter{
+		prefix:        []byte(prefix),
+		writer:        writer,
+		lock:          &sync.Mutex{},
+		atStartOfLine: true,
+	}
+}
+
+func (w *PrefixedWriter) Write(b []byte) (int, error) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	toWrite := []byte{}
+
+	for _, c := range b {
+		if w.atStartOfLine {
+			toWrite = append(toWrite, w.prefix...)
+		}
+
+		toWrite = append(toWrite, c)
+
+		w.atStartOfLine = c == '\n'
+	}
+
+	_, err := w.writer.Write(toWrite)
+	if err != nil {
+		return 0, err
+	}
+
+	return len(b), nil
+}
diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go
new file mode 100644
index 0000000000000000000000000000000000000000..46e712235d899037479c29cb4eac9cf91e680acb
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gexec/session.go
@@ -0,0 +1,214 @@
+/*
+Package gexec provides support for testing external processes.
+*/
+package gexec
+
+import (
+	"io"
+	"os"
+	"os/exec"
+	"reflect"
+	"sync"
+	"syscall"
+
+	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/gbytes"
+)
+
+const INVALID_EXIT_CODE = 254
+
+type Session struct {
+	//The wrapped command
+	Command *exec.Cmd
+
+	//A *gbytes.Buffer connected to the command's stdout
+	Out *gbytes.Buffer
+
+	//A *gbytes.Buffer connected to the command's stderr
+	Err *gbytes.Buffer
+
+	//A channel that will close when the command exits
+	Exited <-chan struct{}
+
+	lock     *sync.Mutex
+	exitCode int
+}
+
+/*
+Start starts the passed-in *exec.Cmd command.  It wraps the command in a *gexec.Session.
+
+The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err.
+These buffers can be used with the gbytes.Say matcher to match against unread output:
+
+	Ω(session.Out).Should(gbytes.Say("foo-out"))
+	Ω(session.Err).Should(gbytes.Say("foo-err"))
+
+In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer.  This allows you to replace the first line, above, with:
+
+	Ω(session).Should(gbytes.Say("foo-out"))
+
+When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gbytes.Buffers and to the passed-in outWriter/errWriter.
+This is useful for capturing the process's output or logging it to screen.  In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter:
+
+	session, err := Start(command, GinkgoWriter, GinkgoWriter)
+
+This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails.
+
+The session wrapper is responsible for waiting on the *exec.Cmd command.  You *should not* call command.Wait() yourself.
+Instead, to assert that the command has exited you can use the gexec.Exit matcher:
+
+	Ω(session).Should(gexec.Exit())
+
+When the session exits it closes the stdout and stderr gbytes buffers.  This will short circuit any
+Eventuallys waiting for the buffers to Say something.
+*/
+func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) {
+	exited := make(chan struct{})
+
+	session := &Session{
+		Command:  command,
+		Out:      gbytes.NewBuffer(),
+		Err:      gbytes.NewBuffer(),
+		Exited:   exited,
+		lock:     &sync.Mutex{},
+		exitCode: -1,
+	}
+
+	var commandOut, commandErr io.Writer
+
+	commandOut, commandErr = session.Out, session.Err
+
+	if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() {
+		commandOut = io.MultiWriter(commandOut, outWriter)
+	}
+
+	if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() {
+		commandErr = io.MultiWriter(commandErr, errWriter)
+	}
+
+	command.Stdout = commandOut
+	command.Stderr = commandErr
+
+	err := command.Start()
+	if err == nil {
+		go session.monitorForExit(exited)
+	}
+
+	return session, err
+}
+
+/*
+Buffer implements the gbytes.BufferProvider interface and returns s.Out
+This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out:
+
+	Eventually(session).Should(gbytes.Say("foo"))
+*/
+func (s *Session) Buffer() *gbytes.Buffer {
+	return s.Out
+}
+
+/*
+ExitCode returns the wrapped command's exit code.  If the command hasn't exited yet, ExitCode returns -1.
+
+To assert that the command has exited it is more convenient to use the Exit matcher:
+
+	Eventually(s).Should(gexec.Exit())
+
+When the process exits because it has received a particular signal, the exit code will be 128+signal-value
+(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)
+
+*/
+func (s *Session) ExitCode() int {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	return s.exitCode
+}
+
+/*
+Wait waits until the wrapped command exits.  It can be passed an optional timeout.
+If the command does not exit within the timeout, Wait will trigger a test failure.
+
+Wait returns the session, making it possible to chain:
+
+	session.Wait().Out.Contents()
+
+will wait for the command to exit then return the entirety of Out's contents.
+
+Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does.
+*/
+func (s *Session) Wait(timeout ...interface{}) *Session {
+	EventuallyWithOffset(1, s, timeout...).Should(Exit())
+	return s
+}
+
+/*
+Kill sends the running command a SIGKILL signal.  It does not wait for the process to exit.
+
+If the command has already exited, Kill returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Kill() *Session {
+	if s.ExitCode() != -1 {
+		return s
+	}
+	s.Command.Process.Kill()
+	return s
+}
+
+/*
+Interrupt sends the running command a SIGINT signal.  It does not wait for the process to exit.
+
+If the command has already exited, Interrupt returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Interrupt() *Session {
+	return s.Signal(syscall.SIGINT)
+}
+
+/*
+Terminate sends the running command a SIGTERM signal.  It does not wait for the process to exit.
+
+If the command has already exited, Terminate returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Terminate() *Session {
+	return s.Signal(syscall.SIGTERM)
+}
+
+/*
+Signal sends the running command the passed-in signal.  It does not wait for the process to exit.
+
+If the command has already exited, Signal returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Signal(signal os.Signal) *Session {
+	if s.ExitCode() != -1 {
+		return s
+	}
+	s.Command.Process.Signal(signal)
+	return s
+}
+
+func (s *Session) monitorForExit(exited chan<- struct{}) {
+	err := s.Command.Wait()
+	s.lock.Lock()
+	s.Out.Close()
+	s.Err.Close()
+	status := s.Command.ProcessState.Sys().(syscall.WaitStatus)
+	if status.Signaled() {
+		s.exitCode = 128 + int(status.Signal())
+	} else {
+		exitStatus := status.ExitStatus()
+		if exitStatus == -1 && err != nil {
+			exitStatus = INVALID_EXIT_CODE // Wait failed with no usable status; -1 would look "still running" and hang Exit()/Wait()
+		}
+		s.exitCode = exitStatus
+	}
+	s.lock.Unlock()
+
+	close(exited)
+}
diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go
new file mode 100644
index 0000000000000000000000000000000000000000..63ff6919ad341470aa78be3c7503940f5d2ecbf5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/handlers.go
@@ -0,0 +1,313 @@
+package ghttp
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/types"
+)
+
+//CombineHandler takes variadic list of handlers and produces one handler
+//that calls each handler in order.
+func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		for _, handler := range handlers {
+			handler(w, req)
+		}
+	}
+}
+
+//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path
+//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery`
+//
+//For path, you may pass in a string, in which case strict equality will be applied
+//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example)
+func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		Ω(req.Method).Should(Equal(method), "Method mismatch")
+		switch p := path.(type) {
+		case types.GomegaMatcher:
+			Ω(req.URL.Path).Should(p, "Path mismatch")
+		default:
+			Ω(req.URL.Path).Should(Equal(path), "Path mismatch")
+		}
+		if len(rawQuery) > 0 {
+			values, err := url.ParseQuery(rawQuery[0])
+			Ω(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed")
+
+			Ω(req.URL.Query()).Should(Equal(values), "RawQuery mismatch")
+		}
+	}
+}
+
+//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the
+//specified value
+func VerifyContentType(contentType string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		Ω(req.Header.Get("Content-Type")).Should(Equal(contentType))
+	}
+}
+
+//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header
+//matching the passed in username and password
+func VerifyBasicAuth(username string, password string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		auth := req.Header.Get("Authorization")
+		Ω(auth).ShouldNot(Equal(""), "Authorization header must be specified")
+
+		decoded, err := base64.StdEncoding.DecodeString(auth[6:])
+		Ω(err).ShouldNot(HaveOccurred())
+
+		Ω(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch")
+	}
+}
+
+//VerifyHeader returns a handler that verifies the request contains the passed in headers.
+//The passed in header keys are first canonicalized via http.CanonicalHeaderKey.
+//
+//The request must contain *all* the passed in headers, but it is allowed to have additional headers
+//beyond the passed in set.
+func VerifyHeader(header http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		for key, values := range header {
+			key = http.CanonicalHeaderKey(key)
+			Ω(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key)
+		}
+	}
+}
+
+//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values
+//(recall that a `http.Header` is a mapping from string (key) to []string (values))
+//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object.
+func VerifyHeaderKV(key string, values ...string) http.HandlerFunc {
+	return VerifyHeader(http.Header{key: values})
+}
+
+//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array.
+//It does this using Equal().
+func VerifyBody(expectedBody []byte) http.HandlerFunc {
+	return CombineHandlers(
+		func(w http.ResponseWriter, req *http.Request) {
+			body, err := ioutil.ReadAll(req.Body)
+			req.Body.Close()
+			Ω(err).ShouldNot(HaveOccurred())
+			Ω(body).Should(Equal(expectedBody), "Body Mismatch")
+		},
+	)
+}
+
+//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation
+//matching the passed in JSON string.  It does this using Gomega's MatchJSON method
+//
+//VerifyJSON also verifies that the request's content type is application/json
+func VerifyJSON(expectedJSON string) http.HandlerFunc {
+	return CombineHandlers(
+		VerifyContentType("application/json"),
+		func(w http.ResponseWriter, req *http.Request) {
+			body, err := ioutil.ReadAll(req.Body)
+			req.Body.Close()
+			Ω(err).ShouldNot(HaveOccurred())
+			Ω(body).Should(MatchJSON(expectedJSON), "JSON Mismatch")
+		},
+	)
+}
+
+//VerifyJSONRepresenting is similar to VerifyJSON.  Instead of taking a JSON string, however, it
+takes an arbitrary JSON-encodable object and verifies that the request's body is a JSON representation
+//that matches the object
+func VerifyJSONRepresenting(object interface{}) http.HandlerFunc {
+	data, err := json.Marshal(object)
+	Ω(err).ShouldNot(HaveOccurred())
+	return CombineHandlers(
+		VerifyContentType("application/json"),
+		VerifyJSON(string(data)),
+	)
+}
+
+//VerifyForm returns a handler that verifies a request contains the specified form values.
+//
+//The request must contain *all* of the specified values, but it is allowed to have additional
+//form values beyond the passed in set.
+func VerifyForm(values url.Values) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		err := r.ParseForm()
+		Ω(err).ShouldNot(HaveOccurred())
+		for key, vals := range values {
+			Ω(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key)
+		}
+	}
+}
+
+//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values.
+//
+//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object.
+func VerifyFormKV(key string, values ...string) http.HandlerFunc {
+	return VerifyForm(url.Values{key: values})
+}
+
+//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf
+//representation of the passed message.
+//
+//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf
+func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc {
+	return CombineHandlers(
+		VerifyContentType("application/x-protobuf"),
+		func(w http.ResponseWriter, req *http.Request) {
+			body, err := ioutil.ReadAll(req.Body)
+			Ω(err).ShouldNot(HaveOccurred())
+			req.Body.Close()
+
+			expectedType := reflect.TypeOf(expected)
+			actualValuePtr := reflect.New(expectedType.Elem())
+
+			actual, ok := actualValuePtr.Interface().(proto.Message)
+			Ω(ok).Should(BeTrue(), "Message value is not a proto.Message")
+
+			err = proto.Unmarshal(body, actual)
+			Ω(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf")
+
+			Ω(actual).Should(Equal(expected), "ProtoBuf Mismatch")
+		},
+	)
+}
+
+func copyHeader(src http.Header, dst http.Header) {
+	for key, value := range src {
+		dst[key] = value
+	}
+}
+
+/*
+RespondWith returns a handler that responds to a request with the specified status code and body
+
+Body may be a string or []byte
+
+Also, RespondWith can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+*/
+func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		if len(optionalHeader) == 1 {
+			copyHeader(optionalHeader[0], w.Header())
+		}
+		w.WriteHeader(statusCode)
+		switch x := body.(type) {
+		case string:
+			w.Write([]byte(x))
+		case []byte:
+			w.Write(x)
+		default:
+			Ω(body).Should(BeNil(), "Invalid type for body.  Should be string or []byte.")
+		}
+	}
+}
+
+/*
+RespondWithPtr returns a handler that responds to a request with the specified status code and body
+
+Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body allowing different tests
+to share the same setup but specify different status codes and bodies.
+
+Also, RespondWithPtr can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
+*/
+func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		if len(optionalHeader) == 1 {
+			copyHeader(optionalHeader[0], w.Header())
+		}
+		w.WriteHeader(*statusCode)
+		if body != nil {
+			switch x := (body).(type) {
+			case *string:
+				w.Write([]byte(*x))
+			case *[]byte:
+				w.Write(*x)
+			default:
+				Ω(body).Should(BeNil(), "Invalid type for body.  Should be string or []byte.")
+			}
+		}
+	}
+}
+
+/*
+RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body
+containing the JSON-encoding of the passed in object
+
+Also, RespondWithJSONEncoded can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+*/
+func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	data, err := json.Marshal(object)
+	Ω(err).ShouldNot(HaveOccurred())
+
+	var headers http.Header
+	if len(optionalHeader) == 1 {
+		headers = optionalHeader[0]
+	} else {
+		headers = make(http.Header)
+	}
+	if _, found := headers["Content-Type"]; !found {
+		headers["Content-Type"] = []string{"application/json"}
+	}
+	return RespondWith(statusCode, string(data), headers)
+}
+
+/*
+RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer
+to a status code and object.
+
+This allows different tests to share the same setup but specify different status codes and JSON-encoded
+objects.
+
+Also, RespondWithJSONEncodedPtr can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
+*/
+func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		data, err := json.Marshal(object)
+		Ω(err).ShouldNot(HaveOccurred())
+		var headers http.Header
+		if len(optionalHeader) == 1 {
+			headers = optionalHeader[0]
+		} else {
+			headers = make(http.Header)
+		}
+		if _, found := headers["Content-Type"]; !found {
+			headers["Content-Type"] = []string{"application/json"}
+		}
+		copyHeader(headers, w.Header())
+		w.WriteHeader(*statusCode)
+		w.Write(data)
+	}
+}
+
+//RespondWithProto returns a handler that responds to a request with the specified status code and a body
+//containing the protobuf serialization of the provided message.
+//
+//Also, RespondWithProto can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		data, err := proto.Marshal(message)
+		Ω(err).ShouldNot(HaveOccurred())
+
+		var headers http.Header
+		if len(optionalHeader) == 1 {
+			headers = optionalHeader[0]
+		} else {
+			headers = make(http.Header)
+		}
+		if _, found := headers["Content-Type"]; !found {
+			headers["Content-Type"] = []string{"application/x-protobuf"}
+		}
+		copyHeader(headers, w.Header())
+
+		w.WriteHeader(statusCode)
+		w.Write(data)
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
new file mode 100644
index 0000000000000000000000000000000000000000..b2972bc9fb0536b442fbae082eb3a4a80270d10f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
@@ -0,0 +1,3 @@
+package protobuf
+
+//go:generate protoc --go_out=. simple_message.proto
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..c55a48448f24061977292dfa14e86d237b00d297
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
@@ -0,0 +1,55 @@
+// Code generated by protoc-gen-go.
+// source: simple_message.proto
+// DO NOT EDIT!
+
+/*
+Package protobuf is a generated protocol buffer package.
+
+It is generated from these files:
+	simple_message.proto
+
+It has these top-level messages:
+	SimpleMessage
+*/
+package protobuf
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SimpleMessage struct {
+	Description      *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"`
+	Id               *int32  `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+	Metadata         *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SimpleMessage) Reset()         { *m = SimpleMessage{} }
+func (m *SimpleMessage) String() string { return proto.CompactTextString(m) }
+func (*SimpleMessage) ProtoMessage()    {}
+
+func (m *SimpleMessage) GetDescription() string {
+	if m != nil && m.Description != nil {
+		return *m.Description
+	}
+	return ""
+}
+
+func (m *SimpleMessage) GetId() int32 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *SimpleMessage) GetMetadata() string {
+	if m != nil && m.Metadata != nil {
+		return *m.Metadata
+	}
+	return ""
+}
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
new file mode 100644
index 0000000000000000000000000000000000000000..35b7145c247905989b993d0029d28d2a23d3f31b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
@@ -0,0 +1,9 @@
+syntax = "proto2";
+
+package protobuf;
+
+message SimpleMessage {
+    required string description = 1;
+    required int32 id = 2;
+    optional string metadata = 3;
+}
diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server.go b/vendor/github.com/onsi/gomega/ghttp/test_server.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5f537eb5e98012ff100dfb29b05445dabc099b4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/test_server.go
@@ -0,0 +1,379 @@
+/*
+Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports
+registering multiple handlers.  Incoming requests are not routed between the different handlers
+- rather it is merely the order of the handlers that matters.  The first request is handled by the first
+registered handler, the second request by the second handler, etc.
+
+The intent here is to have each handler *verify* that the incoming request is valid.  To accomplish this, ghttp
+also provides a collection of bite-size handlers that each perform one aspect of request verification.  These can
+be composed together and registered with a ghttp server.  The result is an expressive language for describing
+the requests generated by the client under test.
+
+Here's a simple example, note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches.
+A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients
+
+	var _ = Describe("A Sprockets Client", func() {
+		var server *ghttp.Server
+		var client *SprocketClient
+		BeforeEach(func() {
+			server = ghttp.NewServer()
+			client = NewSprocketClient(server.URL(), "skywalker", "tk427")
+		})
+
+		AfterEach(func() {
+			server.Close()
+		})
+
+		Describe("fetching sprockets", func() {
+			var statusCode int
+			var sprockets []Sprocket
+			BeforeEach(func() {
+				statusCode = http.StatusOK
+				sprockets = []Sprocket{}
+				server.AppendHandlers(ghttp.CombineHandlers(
+					ghttp.VerifyRequest("GET", "/sprockets"),
+					ghttp.VerifyBasicAuth("skywalker", "tk427"),
+					ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets),
+				))
+			})
+
+			Context("when requesting all sprockets", func() {
+				Context("when the response is successful", func() {
+					BeforeEach(func() {
+						sprockets = []Sprocket{
+							NewSprocket("Alfalfa"),
+							NewSprocket("Banana"),
+						}
+					})
+
+					It("should return the returned sprockets", func() {
+						Ω(client.Sprockets()).Should(Equal(sprockets))
+					})
+				})
+
+				Context("when the response is missing", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusNotFound
+					})
+
+					It("should return an empty list of sprockets", func() {
+						Ω(client.Sprockets()).Should(BeEmpty())
+					})
+				})
+
+				Context("when the response fails to authenticate", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusUnauthorized
+					})
+
+					It("should return an AuthenticationError error", func() {
+						sprockets, err := client.Sprockets()
+						Ω(sprockets).Should(BeEmpty())
+						Ω(err).Should(MatchError(AuthenticationError))
+					})
+				})
+
+				Context("when the response is a server failure", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusInternalServerError
+					})
+
+					It("should return an InternalError error", func() {
+						sprockets, err := client.Sprockets()
+						Ω(sprockets).Should(BeEmpty())
+						Ω(err).Should(MatchError(InternalError))
+					})
+				})
+			})
+
+			Context("when requesting some sprockets", func() {
+				BeforeEach(func() {
+					sprockets = []Sprocket{
+						NewSprocket("Alfalfa"),
+						NewSprocket("Banana"),
+					}
+
+					server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD"))
+				})
+
+				It("should make the request with a filter", func() {
+					Ω(client.Sprockets("food")).Should(Equal(sprockets))
+				})
+			})
+		})
+	})
+*/
+package ghttp
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"regexp"
+	"strings"
+	"sync"
+
+	. "github.com/onsi/gomega"
+)
+
+func new() *Server {
+	return &Server{
+		AllowUnhandledRequests:     false,
+		UnhandledRequestStatusCode: http.StatusInternalServerError,
+		writeLock:                  &sync.Mutex{},
+	}
+}
+
+type routedHandler struct {
+	method     string
+	pathRegexp *regexp.Regexp
+	path       string
+	handler    http.HandlerFunc
+}
+
+// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server.  The server is started automatically.
+func NewServer() *Server {
+	s := new()
+	s.HTTPTestServer = httptest.NewServer(s)
+	return s
+}
+
+// NewUnstartedServer return a new, unstarted, `*ghttp.Server`.  Useful for specifying a custom listener on `server.HTTPTestServer`.
+func NewUnstartedServer() *Server {
+	s := new()
+	s.HTTPTestServer = httptest.NewUnstartedServer(s)
+	return s
+}
+
+// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server.  The server is started automatically.
+func NewTLSServer() *Server {
+	s := new()
+	s.HTTPTestServer = httptest.NewTLSServer(s)
+	return s
+}
+
+type Server struct {
+	//The underlying httptest server
+	HTTPTestServer *httptest.Server
+
+	//Defaults to false.  If set to true, the Server will allow more requests than there are registered handlers.
+	AllowUnhandledRequests bool
+
+	//The status code returned when receiving an unhandled request.
+	//Defaults to http.StatusInternalServerError.
+	//Only applies if AllowUnhandledRequests is true
+	UnhandledRequestStatusCode int
+
+	//If provided, ghttp will log about each request received to the provided io.Writer
+	//Defaults to nil
+	//If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures
+	Writer io.Writer
+
+	receivedRequests []*http.Request
+	requestHandlers  []http.HandlerFunc
+	routedHandlers   []routedHandler
+
+	writeLock *sync.Mutex
+	calls     int
+}
+
+//Start() starts an unstarted ghttp server.  It is a catastrophic error to call Start more than once (thanks, httptest).
+func (s *Server) Start() {
+	s.HTTPTestServer.Start()
+}
+
+//URL() returns a url that will hit the server
+func (s *Server) URL() string {
+	return s.HTTPTestServer.URL
+}
+
+//Addr() returns the address on which the server is listening.
+func (s *Server) Addr() string {
+	return s.HTTPTestServer.Listener.Addr().String()
+}
+
+//Close() should be called at the end of each test.  It spins down and cleans up the test server.
+func (s *Server) Close() {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	server := s.HTTPTestServer
+	s.HTTPTestServer = nil
+	server.Close()
+}
+
+//ServeHTTP() makes Server an http.Handler
+//When the server receives a request it handles the request in the following order:
+//
+//1. If the request matches a handler registered with RouteToHandler, that handler is called.
+//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order.
+//3. If all registered handlers have been called then:
+//   a) If AllowUnhandledRequests is true, the request will be handled with response code of UnhandledRequestStatusCode
+//   b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed.
+func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	s.writeLock.Lock()
+	defer func() {
+		e := recover()
+		if e != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+
+		//If the handler panics GHTTP will silently succeed.  This is bad™.
+		//To catch this case we need to fail the test if the handler has panicked.
+		//However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed)
+		//then we shouldn't double-report the error as this will confuse people.
+
+		//So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure
+		eAsString, ok := e.(string)
+		if ok && strings.Contains(eAsString, "defer GinkgoRecover()") {
+			return
+		}
+
+		//If we're here, we have to do step 2: assert that the error is nil.  This assertion will
+		//allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo).
+		//Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer!
+		defer func() {
+			recover()
+		}()
+		Ω(e).Should(BeNil(), "Handler Panicked")
+	}()
+
+	if s.Writer != nil {
+		s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL)))
+	}
+
+	s.receivedRequests = append(s.receivedRequests, req)
+	if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok {
+		s.writeLock.Unlock()
+		routedHandler(w, req)
+	} else if s.calls < len(s.requestHandlers) {
+		h := s.requestHandlers[s.calls]
+		s.calls++
+		s.writeLock.Unlock()
+		h(w, req)
+	} else {
+		s.writeLock.Unlock()
+		if s.AllowUnhandledRequests {
+			ioutil.ReadAll(req.Body)
+			req.Body.Close()
+			w.WriteHeader(s.UnhandledRequestStatusCode)
+		} else {
+			Ω(req).Should(BeNil(), "Received Unhandled Request")
+		}
+	}
+}
+
+//ReceivedRequests is an array containing all requests received by the server (both handled and unhandled requests)
+func (s *Server) ReceivedRequests() []*http.Request {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	return s.receivedRequests
+}
+
+//RouteToHandler can be used to register handlers that will always handle requests that match
+//the passed in method and path.
+//
+//The path may be either a string object or a *regexp.Regexp.
+func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	rh := routedHandler{
+		method:  method,
+		handler: handler,
+	}
+
+	switch p := path.(type) {
+	case *regexp.Regexp:
+		rh.pathRegexp = p
+	case string:
+		rh.path = p
+	default:
+		panic("path must be a string or a regular expression")
+	}
+
+	for i, existingRH := range s.routedHandlers {
+		if existingRH.method == method &&
+			reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) &&
+			existingRH.path == rh.path {
+			s.routedHandlers[i] = rh
+			return
+		}
+	}
+	s.routedHandlers = append(s.routedHandlers, rh)
+}
+
+func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) {
+	for _, rh := range s.routedHandlers {
+		if rh.method == method {
+			if rh.pathRegexp != nil {
+				if rh.pathRegexp.Match([]byte(path)) {
+					return rh.handler, true
+				}
+			} else if rh.path == path {
+				return rh.handler, true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+//AppendHandlers will append http.HandlerFuncs to the server's list of registered handlers.  The first incoming request is handled by the first handler, the second by the second, etc...
+func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.requestHandlers = append(s.requestHandlers, handlers...)
+}
+
+//SetHandler overrides the registered handler at the passed in index with the passed in handler
+//This is useful, for example, when a server has been set up in a shared context, but must be tweaked
+//for a particular test.
+func (s *Server) SetHandler(index int, handler http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.requestHandlers[index] = handler
+}
+
+//GetHandler returns the handler registered at the passed in index.
+func (s *Server) GetHandler(index int) http.HandlerFunc {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	return s.requestHandlers[index]
+}
+
+func (s *Server) Reset() {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.HTTPTestServer.CloseClientConnections()
+	s.calls = 0
+	s.receivedRequests = nil
+	s.requestHandlers = nil
+	s.routedHandlers = nil
+}
+
+//WrapHandler combines the passed in handler with the handler registered at the passed in index.
+//This is useful, for example, when a server has been set up in a shared context but must be tweaked
+//for a particular test.
+//
+//If the currently registered handler is A, and the new passed in handler is B then
+//WrapHandler will generate a new handler that first calls A, then calls B, and assign it to index
+func (s *Server) WrapHandler(index int, handler http.HandlerFunc) {
+	existingHandler := s.GetHandler(index)
+	s.SetHandler(index, CombineHandlers(existingHandler, handler))
+}
+
+func (s *Server) CloseClientConnections() {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.HTTPTestServer.CloseClientConnections()
+}
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
new file mode 100644
index 0000000000000000000000000000000000000000..78bd188c07299a56f252143ed0f91bebd7d0ac76
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -0,0 +1,335 @@
+/*
+Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
+
+The godoc documentation describes Gomega's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
+
+Gomega on Github: http://github.com/onsi/gomega
+
+Learn more about Ginkgo online: http://onsi.github.io/ginkgo
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Gomega is MIT-Licensed
+*/
+package gomega
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/onsi/gomega/internal/assertion"
+	"github.com/onsi/gomega/internal/asyncassertion"
+	"github.com/onsi/gomega/internal/testingtsupport"
+	"github.com/onsi/gomega/types"
+)
+
+const GOMEGA_VERSION = "1.0"
+
+const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
+If you're using Ginkgo then you probably forgot to put your assertion in an It().
+Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+`
+
+var globalFailHandler types.GomegaFailHandler
+
+var defaultEventuallyTimeout = time.Second
+var defaultEventuallyPollingInterval = 10 * time.Millisecond
+var defaultConsistentlyDuration = 100 * time.Millisecond
+var defaultConsistentlyPollingInterval = 10 * time.Millisecond
+
+//RegisterFailHandler connects Ginkgo to Gomega.  When a matcher fails
+//the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(handler types.GomegaFailHandler) {
+	globalFailHandler = handler
+}
+
+//RegisterTestingT connects Gomega to Golang's XUnit style
+//Testing.T tests.  You'll need to call this at the top of each XUnit style test:
+//
+// func TestFarmHasCow(t *testing.T) {
+//     RegisterTestingT(t)
+//
+//	   f := farm.New([]string{"Cow", "Horse"})
+//     Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
+//
+// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
+// pass `t` down to the matcher itself).  This means that you cannot run the XUnit style tests
+// in parallel as the global fail handler cannot point to more than one testing.T at a time.
+//
+// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
+func RegisterTestingT(t types.GomegaTestingT) {
+	RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
+}
+
+//InterceptGomegaFailures runs a given callback and returns an array of
+//failure messages generated by any Gomega assertions within the callback.
+//
+//This is accomplished by temporarily replacing the *global* fail handler
+//with a fail handler that simply annotates failures.  The original fail handler
+//is reset when InterceptGomegaFailures returns.
+//
+//This is most useful when testing custom matchers, but can also be used to check
+//on a value using a Gomega assertion without causing a test failure.
+func InterceptGomegaFailures(f func()) []string {
+	originalHandler := globalFailHandler
+	failures := []string{}
+	RegisterFailHandler(func(message string, callerSkip ...int) {
+		failures = append(failures, message)
+	})
+	f()
+	RegisterFailHandler(originalHandler)
+	return failures
+}
+
+//Ω wraps an actual value allowing assertions to be made on it:
+//	Ω("foo").Should(Equal("foo"))
+//
+//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//  func MyAmazingThing() (int, error)
+//
+//Then:
+//    Ω(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+//Expect wraps an actual value allowing assertions to be made on it:
+//	Expect("foo").To(Equal("foo"))
+//
+//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//  func MyAmazingThing() (int, error)
+//
+//Then:
+//    Expect(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+//    ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
+//this is used to modify the call-stack offset when computing line numbers.
+//
+//This is most useful in helper functions that make assertions.  If you want Gomega's
+//error message to refer to the calling line in the test (as opposed to the line in the helper function)
+//set the first argument of `ExpectWithOffset` appropriately.
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	return assertion.New(actual, globalFailHandler, offset, extra...)
+}
+
+//Eventually wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically until it passes or a timeout occurs.
+//
+//Both the timeout and polling interval are configurable as optional arguments:
+//The first optional argument is the timeout
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers.  In the
+//last case they are interpreted as seconds.
+//
+//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
+//then Eventually will call the function periodically and try the matcher against the function's first return value.
+//
+//Example:
+//
+//    Eventually(func() int {
+//        return thingImPolling.Count()
+//    }).Should(BeNumerically(">=", 17))
+//
+//Note that this example could be rewritten:
+//
+//    Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
+//
+//If the function returns more than one value, then Eventually will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+//
+//For example, consider a method that returns a value and an error:
+//    func FetchFromDB() (string, error)
+//
+//Then
+//    Eventually(FetchFromDB).Should(Equal("hasselhoff"))
+//
+//Will pass only if the returned error is nil and the returned string passes the matcher.
+//
+//Eventually's default timeout is 1 second, and its default polling interval is 10ms
+func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	return EventuallyWithOffset(0, actual, intervals...)
+}
+
+//EventuallyWithOffset operates like Eventually but takes an additional
+//initial argument to indicate an offset in the call stack.  This is useful when building helper
+//functions that contain matchers.  To learn more, read about `ExpectWithOffset`.
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultEventuallyTimeout
+	pollingInterval := defaultEventuallyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Consistently wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically and is required to pass for a period of time.
+//
+//Both the total time and polling interval are configurable as optional arguments:
+//The first optional argument is the duration that Consistently will run for
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers.  In the
+//last case they are interpreted as seconds.
+//
+//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
+//then Consistently will call the function periodically and try the matcher against the function's first return value.
+//
+//If the function returns more than one value, then Consistently will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
+//
+//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
+//For example, you want to assert that a goroutine does *not* send data down a channel.  In this case, you could:
+//
+//  Consistently(channel).ShouldNot(Receive())
+//
+//Consistently's default duration is 100ms, and its default polling interval is 10ms
+func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	return ConsistentlyWithOffset(0, actual, intervals...)
+}
+
+//ConsistentlyWithOffset operates like Consistently but takes an additional
+//initial argument to indicate an offset in the call stack.  This is useful when building helper
+//functions that contain matchers.  To learn more, read about `ExpectWithOffset`.
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultConsistentlyDuration
+	pollingInterval := defaultConsistentlyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Set the default timeout duration for Eventually.  Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+func SetDefaultEventuallyTimeout(t time.Duration) {
+	defaultEventuallyTimeout = t
+}
+
+//Set the default polling interval for Eventually.
+func SetDefaultEventuallyPollingInterval(t time.Duration) {
+	defaultEventuallyPollingInterval = t
+}
+
+//Set the default duration for Consistently.  Consistently will verify that your condition is satisfied for this long.
+func SetDefaultConsistentlyDuration(t time.Duration) {
+	defaultConsistentlyDuration = t
+}
+
+//Set the default polling interval for Consistently.
+func SetDefaultConsistentlyPollingInterval(t time.Duration) {
+	defaultConsistentlyPollingInterval = t
+}
+
+//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+//the matcher passed to the Should and ShouldNot methods.
+//
+//Both Should and ShouldNot take a variadic optionalDescription argument.  This is passed on to
+//fmt.Sprintf() and is used to annotate failure messages.  This allows you to make your failure messages more
+//descriptive
+//
+//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//  Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//  Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
+type GomegaAsyncAssertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
+//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+//
+//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+//though this is not enforced.
+//
+//All methods take a variadic optionalDescription argument.  This is passed on to fmt.Sprintf()
+//and is used to annotate failure messages.
+//
+//All methods return a bool that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//   Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type GomegaAssertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+
+	To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
+type OmegaMatcher types.GomegaMatcher
+
+func toDuration(input interface{}) time.Duration {
+	duration, ok := input.(time.Duration)
+	if ok {
+		return duration
+	}
+
+	value := reflect.ValueOf(input)
+	kind := reflect.TypeOf(input).Kind()
+
+	if reflect.Int <= kind && kind <= reflect.Int64 {
+		return time.Duration(value.Int()) * time.Second
+	} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
+		return time.Duration(value.Uint()) * time.Second
+	} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
+		return time.Duration(value.Float() * float64(time.Second))
+	} else if reflect.String == kind {
+		duration, err := time.ParseDuration(value.String())
+		if err != nil {
+			panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
+		}
+		return duration
+	}
+
+	panic(fmt.Sprintf("%v is not a valid interval.  Must be time.Duration, parsable duration string or a number.", input))
+}
diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
new file mode 100644
index 0000000000000000000000000000000000000000..b73673f21ed9113b8ed1d30f49a99cbf66d8afca
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
@@ -0,0 +1,98 @@
+package assertion
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/types"
+)
+
+type Assertion struct {
+	actualInput interface{}
+	fail        types.GomegaFailHandler
+	offset      int
+	extra       []interface{}
+}
+
+func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
+	return &Assertion{
+		actualInput: actualInput,
+		fail:        fail,
+		offset:      offset,
+		extra:       extra,
+	}
+}
+
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+	switch len(optionalDescription) {
+	case 0:
+		return ""
+	default:
+		return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+	}
+}
+
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+	matches, err := matcher.Match(assertion.actualInput)
+	description := assertion.buildDescription(optionalDescription...)
+	if err != nil {
+		assertion.fail(description+err.Error(), 2+assertion.offset)
+		return false
+	}
+	if matches != desiredMatch {
+		var message string
+		if desiredMatch {
+			message = matcher.FailureMessage(assertion.actualInput)
+		} else {
+			message = matcher.NegatedFailureMessage(assertion.actualInput)
+		}
+		assertion.fail(description+message, 2+assertion.offset)
+		return false
+	}
+
+	return true
+}
+
+func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
+	success, message := vetExtras(assertion.extra)
+	if success {
+		return true
+	}
+
+	description := assertion.buildDescription(optionalDescription...)
+	assertion.fail(description+message, 2+assertion.offset)
+	return false
+}
+
+func vetExtras(extras []interface{}) (bool, string) {
+	for i, extra := range extras {
+		if extra != nil {
+			zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
+			if !reflect.DeepEqual(zeroValue, extra) {
+				message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+				return false, message
+			}
+		}
+	}
+	return true, ""
+}
diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
new file mode 100644
index 0000000000000000000000000000000000000000..bce0853006a942bec52572e1bcbd22c12b53085a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
@@ -0,0 +1,189 @@
+package asyncassertion
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/onsi/gomega/internal/oraclematcher"
+	"github.com/onsi/gomega/types"
+)
+
+type AsyncAssertionType uint
+
+const (
+	AsyncAssertionTypeEventually AsyncAssertionType = iota
+	AsyncAssertionTypeConsistently
+)
+
+type AsyncAssertion struct {
+	asyncType       AsyncAssertionType
+	actualInput     interface{}
+	timeoutInterval time.Duration
+	pollingInterval time.Duration
+	fail            types.GomegaFailHandler
+	offset          int
+}
+
+func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
+	actualType := reflect.TypeOf(actualInput)
+	if actualType.Kind() == reflect.Func {
+		if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
+			panic("Expected a function with no arguments and one or more return values.")
+		}
+	}
+
+	return &AsyncAssertion{
+		asyncType:       asyncType,
+		actualInput:     actualInput,
+		fail:            fail,
+		timeoutInterval: timeoutInterval,
+		pollingInterval: pollingInterval,
+		offset:          offset,
+	}
+}
+
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+	switch len(optionalDescription) {
+	case 0:
+		return ""
+	default:
+		return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+	}
+}
+
+func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
+	actualType := reflect.TypeOf(assertion.actualInput)
+	return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
+}
+
+func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
+	if assertion.actualInputIsAFunction() {
+		values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
+
+		extras := []interface{}{}
+		for _, value := range values[1:] {
+			extras = append(extras, value.Interface())
+		}
+
+		success, message := vetExtras(extras)
+
+		if !success {
+			return nil, errors.New(message)
+		}
+
+		return values[0].Interface(), nil
+	}
+
+	return assertion.actualInput, nil
+}
+
+func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
+	if assertion.actualInputIsAFunction() {
+		return true
+	}
+
+	return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
+}
+
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+	timer := time.Now()
+	timeout := time.After(assertion.timeoutInterval)
+
+	description := assertion.buildDescription(optionalDescription...)
+
+	var matches bool
+	var err error
+	mayChange := true
+	value, err := assertion.pollActual()
+	if err == nil {
+		mayChange = assertion.matcherMayChange(matcher, value)
+		matches, err = matcher.Match(value)
+	}
+
+	fail := func(preamble string) {
+		errMsg := ""
+		message := ""
+		if err != nil {
+			errMsg = "Error: " + err.Error()
+		} else {
+			if desiredMatch {
+				message = matcher.FailureMessage(value)
+			} else {
+				message = matcher.NegatedFailureMessage(value)
+			}
+		}
+		assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
+	}
+
+	if assertion.asyncType == AsyncAssertionTypeEventually {
+		for {
+			if err == nil && matches == desiredMatch {
+				return true
+			}
+
+			if !mayChange {
+				fail("No future change is possible.  Bailing out early")
+				return false
+			}
+
+			select {
+			case <-time.After(assertion.pollingInterval):
+				value, err = assertion.pollActual()
+				if err == nil {
+					mayChange = assertion.matcherMayChange(matcher, value)
+					matches, err = matcher.Match(value)
+				}
+			case <-timeout:
+				fail("Timed out")
+				return false
+			}
+		}
+	} else if assertion.asyncType == AsyncAssertionTypeConsistently {
+		for {
+			if !(err == nil && matches == desiredMatch) {
+				fail("Failed")
+				return false
+			}
+
+			if !mayChange {
+				return true
+			}
+
+			select {
+			case <-time.After(assertion.pollingInterval):
+				value, err = assertion.pollActual()
+				if err == nil {
+					mayChange = assertion.matcherMayChange(matcher, value)
+					matches, err = matcher.Match(value)
+				}
+			case <-timeout:
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func vetExtras(extras []interface{}) (bool, string) {
+	for i, extra := range extras {
+		if extra != nil {
+			zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
+			if !reflect.DeepEqual(zeroValue, extra) {
+				message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+				return false, message
+			}
+		}
+	}
+	return true, ""
+}
diff --git a/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..6e351a7de5799db444418187dc68feaa5ebad542
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
@@ -0,0 +1,23 @@
+package fakematcher
+
+import "fmt"
+
+type FakeMatcher struct {
+	ReceivedActual  interface{}
+	MatchesToReturn bool
+	ErrToReturn     error
+}
+
+func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) {
+	matcher.ReceivedActual = actual
+
+	return matcher.MatchesToReturn, matcher.ErrToReturn
+}
+
+func (matcher *FakeMatcher) FailureMessage(actual interface{}) string {
+	return fmt.Sprintf("positive: %v", actual)
+}
+
+func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string {
+	return fmt.Sprintf("negative: %v", actual)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..66cad88a1fbf7e68c581326b62131030b4223d39
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
@@ -0,0 +1,25 @@
+package oraclematcher
+
+import "github.com/onsi/gomega/types"
+
+/*
+GomegaMatchers that also match the OracleMatcher interface can convey information about
+whether or not their result will change upon future attempts.
+
+This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
+
+For example, a process' exit code can never change.  So, gexec's Exit matcher returns `true`
+for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
+*/
+type OracleMatcher interface {
+	MatchMayChangeInTheFuture(actual interface{}) bool
+}
+
+func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
+	oracleMatcher, ok := matcher.(OracleMatcher)
+	if !ok {
+		return true
+	}
+
+	return oracleMatcher.MatchMayChangeInTheFuture(value)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
new file mode 100644
index 0000000000000000000000000000000000000000..7871fd43953e651c42ccb2eaea728f3a4996d568
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
@@ -0,0 +1,40 @@
+package testingtsupport
+
+import (
+	"regexp"
+	"runtime/debug"
+	"strings"
+
+	"github.com/onsi/gomega/types"
+)
+
+type gomegaTestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler {
+	return func(message string, callerSkip ...int) {
+		skip := 1
+		if len(callerSkip) > 0 {
+			skip = callerSkip[0]
+		}
+		stackTrace := pruneStack(string(debug.Stack()), skip)
+		t.Errorf("\n%s\n%s", stackTrace, message)
+	}
+}
+
+func pruneStack(fullStackTrace string, skip int) string {
+	stack := strings.Split(fullStackTrace, "\n")
+	if len(stack) > 2*(skip+1) {
+		stack = stack[2*(skip+1):]
+	}
+	prunedStack := []string{}
+	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+	for i := 0; i < len(stack)/2; i++ {
+		if !re.Match([]byte(stack[i*2])) {
+			prunedStack = append(prunedStack, stack[i*2])
+			prunedStack = append(prunedStack, stack[i*2+1])
+		}
+	}
+	return strings.Join(prunedStack, "\n")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
new file mode 100644
index 0000000000000000000000000000000000000000..b6110c4b85c6632a0381b3d51fa234ef273e454e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -0,0 +1,393 @@
+package gomega
+
+import (
+	"time"
+
+	"github.com/onsi/gomega/matchers"
+	"github.com/onsi/gomega/types"
+)
+
+//Equal uses reflect.DeepEqual to compare actual with expected.  Equal is strict about
+//types when performing comparisons.
+//It is an error for both actual and expected to be nil.  Use BeNil() instead.
+func Equal(expected interface{}) types.GomegaMatcher {
+	return &matchers.EqualMatcher{
+		Expected: expected,
+	}
+}
+
+//BeEquivalentTo is more lax than Equal, allowing equality between different types.
+//This is done by converting actual to have the type of expected before
+//attempting equality with reflect.DeepEqual.
+//It is an error for actual and expected to be nil.  Use BeNil() instead.
+func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+	return &matchers.BeEquivalentToMatcher{
+		Expected: expected,
+	}
+}
+
+//BeNil succeeds if actual is nil
+func BeNil() types.GomegaMatcher {
+	return &matchers.BeNilMatcher{}
+}
+
+//BeTrue succeeds if actual is true
+func BeTrue() types.GomegaMatcher {
+	return &matchers.BeTrueMatcher{}
+}
+
+//BeFalse succeeds if actual is false
+func BeFalse() types.GomegaMatcher {
+	return &matchers.BeFalseMatcher{}
+}
+
+//HaveOccurred succeeds if actual is a non-nil error
+//The typical Go error checking pattern looks like:
+//    err := SomethingThatMightFail()
+//    Ω(err).ShouldNot(HaveOccurred())
+func HaveOccurred() types.GomegaMatcher {
+	return &matchers.HaveOccurredMatcher{}
+}
+
+//Succeed passes if actual is a nil error
+//Succeed is intended to be used with functions that return a single error value. Instead of
+//    err := SomethingThatMightFail()
+//    Ω(err).ShouldNot(HaveOccurred())
+//
+//You can write:
+//    Ω(SomethingThatMightFail()).Should(Succeed())
+//
+//It is a mistake to use Succeed with a function that has multiple return values.  Gomega's Ω and Expect
+//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
+//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
+func Succeed() types.GomegaMatcher {
+	return &matchers.SucceedMatcher{}
+}
+
+//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
+//
+//These are valid use-cases:
+//  Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
+//  Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
+//
+//It is an error for err to be nil or an object that does not implement the Error interface
+func MatchError(expected interface{}) types.GomegaMatcher {
+	return &matchers.MatchErrorMatcher{
+		Expected: expected,
+	}
+}
+
+//BeClosed succeeds if actual is a closed channel.
+//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
+//
+//In order to check whether or not the channel is closed, Gomega must try to read from the channel
+//(even in the `ShouldNot(BeClosed())` case).  You should keep this in mind if you wish to make subsequent assertions about
+//values coming down the channel.
+//
+//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+//
+//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+func BeClosed() types.GomegaMatcher {
+	return &matchers.BeClosedMatcher{}
+}
+
+//Receive succeeds if there is a value to be received on actual.
+//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+//
+//Receive returns immediately and never blocks:
+//
+//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+//
+//If you have a go-routine running in the background that will write to channel `c` you can:
+//    Eventually(c).Should(Receive())
+//
+//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//
+//A similar use-case is to assert that no go-routine writes to a channel (for a period of time).  You can do this with `Consistently`:
+//    Consistently(c).ShouldNot(Receive())
+//
+//You can pass `Receive` a matcher.  If you do so, it will match the received object against the matcher.  For example:
+//    Ω(c).Should(Receive(Equal("foo")))
+//
+//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+//Passing Receive a matcher is especially useful when paired with Eventually:
+//
+//    Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//    var myThing thing
+//    Eventually(thingChan).Should(Receive(&myThing))
+//    Ω(myThing.Sprocket).Should(Equal("foo"))
+//    Ω(myThing.IsValid()).Should(BeTrue())
+func Receive(args ...interface{}) types.GomegaMatcher {
+	var arg interface{}
+	if len(args) > 0 {
+		arg = args[0]
+	}
+
+	return &matchers.ReceiveMatcher{
+		Arg: arg,
+	}
+}
+
+//BeSent succeeds if a value can be sent to actual.
+//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
+//In addition, actual must not be closed.
+//
+//BeSent never blocks:
+//
+//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately
+//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive  before Eventually's timeout
+//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
+//Of course, the value is actually sent to the channel.  The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+	return &matchers.BeSentMatcher{
+		Arg: arg,
+	}
+}
+
+//MatchRegexp succeeds if actual is a string or stringer that matches the
+//passed-in regexp.  Optional arguments can be provided to construct a regexp
+//via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.MatchRegexpMatcher{
+		Regexp: regexp,
+		Args:   args,
+	}
+}
+
+//ContainSubstring succeeds if actual is a string or stringer that contains the
+//passed-in regexp.  Optional arguments can be provided to construct the substring
+//via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.ContainSubstringMatcher{
+		Substr: substr,
+		Args:   args,
+	}
+}
+
+//HavePrefix succeeds if actual is a string or stringer that contains the
+//passed-in string as a prefix.  Optional arguments can be provided to construct
+//via fmt.Sprintf().
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HavePrefixMatcher{
+		Prefix: prefix,
+		Args:   args,
+	}
+}
+
+//HaveSuffix succeeds if actual is a string or stringer that contains the
+//passed-in string as a suffix.  Optional arguments can be provided to construct
+//via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HaveSuffixMatcher{
+		Suffix: suffix,
+		Args:   args,
+	}
+}
+
+//MatchJSON succeeds if actual is a string or stringer of JSON that matches
+//the expected JSON.  The JSONs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+	return &matchers.MatchJSONMatcher{
+		JSONToMatch: json,
+	}
+}
+
+//BeEmpty succeeds if actual is empty.  Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+	return &matchers.BeEmptyMatcher{}
+}
+
+//HaveLen succeeds if actual has the passed-in length.  Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+	return &matchers.HaveLenMatcher{
+		Count: count,
+	}
+}
+
+//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+	return &matchers.BeZeroMatcher{}
+}
+
+//ContainElement succeeds if actual contains the passed in element.
+//By default ContainElement() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+//Actual must be an array, slice or map.
+//For maps, ContainElement searches through the map's values.
+func ContainElement(element interface{}) types.GomegaMatcher {
+	return &matchers.ContainElementMatcher{
+		Element: element,
+	}
+}
+
+//ConsistOf succeeds if actual contains preciely the elements passed into the matcher.  The ordering of the elements does not matter.
+//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead.  Here are some examples:
+//
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map.  For maps, ConsistOf matches against the map's values.
+//
+//You typically pass variadic arguments to ConsistOf (as in the examples above).  However, if you need to pass in a slice you can provided that it
+//is the only element passed in to ConsistOf:
+//
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.ConsistOfMatcher{
+		Elements: elements,
+	}
+}
+
+//HaveKey succeeds if actual is a map with the passed in key.
+//By default HaveKey uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+	return &matchers.HaveKeyMatcher{
+		Key: key,
+	}
+}
+
+//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+//By default HaveKeyWithValue uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+	return &matchers.HaveKeyWithValueMatcher{
+		Key:   key,
+		Value: value,
+	}
+}
+
+//BeNumerically performs numerical assertions in a type-agnostic way.
+//Actual and expected should be numbers, though the specific type of
+//number is irrelevant (floa32, float64, uint8, etc...).
+//
+//There are six, self-explanatory, supported comparators:
+//    Ω(1.0).Should(BeNumerically("==", 1))
+//    Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
+//    Ω(1.0).Should(BeNumerically(">", 0.9))
+//    Ω(1.0).Should(BeNumerically(">=", 1.0))
+//    Ω(1.0).Should(BeNumerically("<", 3))
+//    Ω(1.0).Should(BeNumerically("<=", 1.0))
+func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+	return &matchers.BeNumericallyMatcher{
+		Comparator: comparator,
+		CompareTo:  compareTo,
+	}
+}
+
+//BeTemporally compares time.Time's like BeNumerically
+//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+//    Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
+//    Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
+	return &matchers.BeTemporallyMatcher{
+		Comparator: comparator,
+		CompareTo:  compareTo,
+		Threshold:  threshold,
+	}
+}
+
+//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+//It will return an error when one of the values is nil.
+//	  Ω(0).Should(BeAssignableToTypeOf(0))         // Same values
+//	  Ω(5).Should(BeAssignableToTypeOf(-1))        // different values same type
+//	  Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+//    Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+	return &matchers.AssignableToTypeOfMatcher{
+		Expected: expected,
+	}
+}
+
+//Panic succeeds if actual is a function that, when invoked, panics.
+//Actual must be a function that takes no arguments and returns no results.
+func Panic() types.GomegaMatcher {
+	return &matchers.PanicMatcher{}
+}
+
+//BeAnExistingFile succeeds if a file exists.
+//Actual must be a string representing the abs path to the file being checked.
+func BeAnExistingFile() types.GomegaMatcher {
+	return &matchers.BeAnExistingFileMatcher{}
+}
+
+//BeARegularFile succeeds iff a file exists and is a regular file.
+//Actual must be a string representing the abs path to the file being checked.
+func BeARegularFile() types.GomegaMatcher {
+	return &matchers.BeARegularFileMatcher{}
+}
+
+//BeADirectory succeeds iff a file exists and is a directory.
+//Actual must be a string representing the abs path to the file being checked.
+func BeADirectory() types.GomegaMatcher {
+	return &matchers.BeADirectoryMatcher{}
+}
+
+//And succeeds only if all of the given matchers succeed.
+//The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//  Expect("hi").To(And(HaveLen(2), Equal("hi"))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
+	return &matchers.AndMatcher{Matchers: ms}
+}
+
+//SatisfyAll is an alias for And().
+//  Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+	return And(matchers...)
+}
+
+//Or succeeds if any of the given matchers succeed.
+//The matchers are tried in order and will return immediately upon the first successful match.
+//  Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
+	return &matchers.OrMatcher{Matchers: ms}
+}
+
+//SatisfyAny is an alias for Or().
+//  Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+	return Or(matchers...)
+}
+
+//Not negates the given matcher; it succeeds if the given matcher fails.
+//  Expect(1).To(Not(Equal(2))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
+	return &matchers.NotMatcher{Matcher: matcher}
+}
+
+//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+//The given transform must be a function of one parameter that returns one value.
+//  var plus1 = func(i int) int { return i + 1 }
+//  Expect(1).To(WithTransform(plus1, Equal(2))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+	return matchers.NewWithTransformMatcher(transform, matcher)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
new file mode 100644
index 0000000000000000000000000000000000000000..94c42a7db713eb705a28379bbb55708243cd36bc
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/and.go
@@ -0,0 +1,64 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/internal/oraclematcher"
+	"github.com/onsi/gomega/types"
+)
+
+type AndMatcher struct {
+	Matchers []types.GomegaMatcher
+
+	// state
+	firstFailedMatcher types.GomegaMatcher
+}
+
+func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+	m.firstFailedMatcher = nil
+	for _, matcher := range m.Matchers {
+		success, err := matcher.Match(actual)
+		if !success || err != nil {
+			m.firstFailedMatcher = matcher
+			return false, err
+		}
+	}
+	return true, nil
+}
+
+func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+	return m.firstFailedMatcher.FailureMessage(actual)
+}
+
+func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	// not the most beautiful list of matchers, but not bad either...
+	return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
+}
+
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	/*
+		Example with 3 matchers: A, B, C
+
+		Match evaluates them: T, F, <?>  => F
+		So match is currently F, what should MatchMayChangeInTheFuture() return?
+		Seems like it only depends on B, since currently B MUST change to allow the result to become T
+
+		Match eval: T, T, T  => T
+		So match is currently T, what should MatchMayChangeInTheFuture() return?
+		Seems to depend on ANY of them being able to change to F.
+	*/
+
+	if m.firstFailedMatcher == nil {
+		// so all matchers succeeded.. Any one of them changing would change the result.
+		for _, matcher := range m.Matchers {
+			if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+				return true
+			}
+		}
+		return false // none of were going to change
+	} else {
+		// one of the matchers failed.. it must be able to change in order to affect the result
+		return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..89a1fc2116b65663fff82b0877e206169862fecd
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -0,0 +1,31 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type AssignableToTypeOfMatcher struct {
+	Expected interface{}
+}
+
+func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+	if actual == nil || matcher.Expected == nil {
+		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead.  This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+	}
+
+	actualType := reflect.TypeOf(actual)
+	expectedType := reflect.TypeOf(matcher.Expected)
+
+	return actualType.AssignableTo(expectedType), nil
+}
+
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+	return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
+}
+
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+	return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b6975e41e6e94bd9eb5898b9d5dae4182f22e79
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/onsi/gomega/format"
+)
+
+type notADirectoryError struct {
+	os.FileInfo
+}
+
+func (t notADirectoryError) Error() string {
+	fileInfo := os.FileInfo(t)
+	switch {
+	case fileInfo.Mode().IsRegular():
+		return "file is a regular file"
+	default:
+		return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+	}
+}
+
+type BeADirectoryMatcher struct {
+	expected interface{}
+	err      error
+}
+
+func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+	actualFilename, ok := actual.(string)
+	if !ok {
+		return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
+	}
+
+	fileInfo, err := os.Stat(actualFilename)
+	if err != nil {
+		matcher.err = err
+		return false, nil
+	}
+
+	if !fileInfo.Mode().IsDir() {
+		matcher.err = notADirectoryError{fileInfo}
+		return false, nil
+	}
+	return true, nil
+}
+
+func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
+}
+
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("not be a directory"))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..e239131fb616db2d8f9231de3ae708734c269cda
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/onsi/gomega/format"
+)
+
+type notARegularFileError struct {
+	os.FileInfo
+}
+
+func (t notARegularFileError) Error() string {
+	fileInfo := os.FileInfo(t)
+	switch {
+	case fileInfo.IsDir():
+		return "file is a directory"
+	default:
+		return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+	}
+}
+
+type BeARegularFileMatcher struct {
+	expected interface{}
+	err      error
+}
+
+func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+	actualFilename, ok := actual.(string)
+	if !ok {
+		return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
+	}
+
+	fileInfo, err := os.Stat(actualFilename)
+	if err != nil {
+		matcher.err = err
+		return false, nil
+	}
+
+	if !fileInfo.Mode().IsRegular() {
+		matcher.err = notARegularFileError{fileInfo}
+		return false, nil
+	}
+	return true, nil
+}
+
+func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
+}
+
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("not be a regular file"))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..d42eba2234463f7f4114d3fa3b194485327f8e7b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -0,0 +1,38 @@
+package matchers
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/onsi/gomega/format"
+)
+
+type BeAnExistingFileMatcher struct {
+	expected interface{}
+}
+
+func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+	actualFilename, ok := actual.(string)
+	if !ok {
+		return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
+	}
+
+	if _, err = os.Stat(actualFilename); err != nil {
+		switch {
+		case os.IsNotExist(err):
+			return false, nil
+		default:
+			return false, err
+		}
+	}
+
+	return true, nil
+}
+
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("to exist"))
+}
+
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("not to exist"))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1b499597d34fd326784aa23b1e82f066f5a116f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type BeClosedMatcher struct {
+}
+
+func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isChan(actual) {
+		return false, fmt.Errorf("BeClosed matcher expects a channel.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	channelType := reflect.TypeOf(actual)
+	channelValue := reflect.ValueOf(actual)
+
+	if channelType.ChanDir() == reflect.SendDir {
+		return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	winnerIndex, _, open := reflect.Select([]reflect.SelectCase{
+		reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
+		reflect.SelectCase{Dir: reflect.SelectDefault},
+	})
+
+	var closed bool
+	if winnerIndex == 0 {
+		closed = !open
+	} else if winnerIndex == 1 {
+		closed = false
+	}
+
+	return closed, nil
+}
+
+func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be closed")
+}
+
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be open")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..55bdd7d15dfe17accc52ea3e9770e037e4dc1b0a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -0,0 +1,26 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type BeEmptyMatcher struct {
+}
+
+func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+	length, ok := lengthOf(actual)
+	if !ok {
+		return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	return length == 0, nil
+}
+
+func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be empty")
+}
+
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be empty")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..32a0c3108a48f35bfb25496a76b218875e6b5201
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -0,0 +1,33 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type BeEquivalentToMatcher struct {
+	Expected interface{}
+}
+
+func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+	if actual == nil && matcher.Expected == nil {
+		return false, fmt.Errorf("Both actual and expected must not be nil.")
+	}
+
+	convertedActual := actual
+
+	if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) {
+		convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface()
+	}
+
+	return reflect.DeepEqual(convertedActual, matcher.Expected), nil
+}
+
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be equivalent to", matcher.Expected)
+}
+
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be equivalent to", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b224cbbc6451ea3766d585783f1066de10a7e7e
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -0,0 +1,25 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type BeFalseMatcher struct {
+}
+
+func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isBool(actual) {
+		return false, fmt.Errorf("Expected a boolean.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	return actual == false, nil
+}
+
+func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be false")
+}
+
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be false")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ee84fe1bcf1fcae1ba57b5e6c00211c67cf90b8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -0,0 +1,18 @@
+package matchers
+
+import "github.com/onsi/gomega/format"
+
+type BeNilMatcher struct {
+}
+
+func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+	return isNil(actual), nil
+}
+
+func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be nil")
+}
+
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be nil")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..52f83fe3f3910cd97265f6d81e80a21a45faa9ab
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -0,0 +1,119 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"math"
+)
+
+type BeNumericallyMatcher struct {
+	Comparator string
+	CompareTo  []interface{}
+}
+
+func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0])
+}
+
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0])
+}
+
+func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+	if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
+		return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments.  Got:\n%s", format.Object(matcher.CompareTo, 1))
+	}
+	if !isNumber(actual) {
+		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(actual, 1))
+	}
+	if !isNumber(matcher.CompareTo[0]) {
+		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[0], 1))
+	}
+	if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
+		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[0], 1))
+	}
+
+	switch matcher.Comparator {
+	case "==", "~", ">", ">=", "<", "<=":
+	default:
+		return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+	}
+
+	if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
+		var secondOperand float64 = 1e-8
+		if len(matcher.CompareTo) == 2 {
+			secondOperand = toFloat(matcher.CompareTo[1])
+		}
+		success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
+	} else if isInteger(actual) {
+		var secondOperand int64 = 0
+		if len(matcher.CompareTo) == 2 {
+			secondOperand = toInteger(matcher.CompareTo[1])
+		}
+		success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
+	} else if isUnsignedInteger(actual) {
+		var secondOperand uint64 = 0
+		if len(matcher.CompareTo) == 2 {
+			secondOperand = toUnsignedInteger(matcher.CompareTo[1])
+		}
+		success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
+	} else {
+		return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
+	}
+
+	return success, nil
+}
+
+func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
+	switch matcher.Comparator {
+	case "==", "~":
+		diff := actual - compareTo
+		return -threshold <= diff && diff <= threshold
+	case ">":
+		return (actual > compareTo)
+	case ">=":
+		return (actual >= compareTo)
+	case "<":
+		return (actual < compareTo)
+	case "<=":
+		return (actual <= compareTo)
+	}
+	return false
+}
+
+func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
+	switch matcher.Comparator {
+	case "==", "~":
+		if actual < compareTo {
+			actual, compareTo = compareTo, actual
+		}
+		return actual-compareTo <= threshold
+	case ">":
+		return (actual > compareTo)
+	case ">=":
+		return (actual >= compareTo)
+	case "<":
+		return (actual < compareTo)
+	case "<=":
+		return (actual <= compareTo)
+	}
+	return false
+}
+
+func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
+	switch matcher.Comparator {
+	case "~":
+		return math.Abs(actual-compareTo) <= threshold
+	case "==":
+		return (actual == compareTo)
+	case ">":
+		return (actual > compareTo)
+	case ">=":
+		return (actual >= compareTo)
+	case "<":
+		return (actual < compareTo)
+	case "<=":
+		return (actual <= compareTo)
+	}
+	return false
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7c32233ec429fc4c5de37187efbac49c19b8d0c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
@@ -0,0 +1,71 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type BeSentMatcher struct {
+	Arg           interface{}
+	channelClosed bool
+}
+
+func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isChan(actual) {
+		return false, fmt.Errorf("BeSent expects a channel.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	channelType := reflect.TypeOf(actual)
+	channelValue := reflect.ValueOf(actual)
+
+	if channelType.ChanDir() == reflect.RecvDir {
+		return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	argType := reflect.TypeOf(matcher.Arg)
+	assignable := argType.AssignableTo(channelType.Elem())
+
+	if !assignable {
+		return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1))
+	}
+
+	argValue := reflect.ValueOf(matcher.Arg)
+
+	defer func() {
+		if e := recover(); e != nil {
+			success = false
+			err = fmt.Errorf("Cannot send to a closed channel")
+			matcher.channelClosed = true
+		}
+	}()
+
+	winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{
+		reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue},
+		reflect.SelectCase{Dir: reflect.SelectDefault},
+	})
+
+	var didSend bool
+	if winnerIndex == 0 {
+		didSend = true
+	}
+
+	return didSend, nil
+}
+
+func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	if !isChan(actual) {
+		return false
+	}
+
+	return !matcher.channelClosed
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..abda4eb1e7b6968a482546dc759e1568f4d7ea7f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"time"
+)
+
+type BeTemporallyMatcher struct {
+	Comparator string
+	CompareTo  time.Time
+	Threshold  []time.Duration
+}
+
+func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+	// predicate to test for time.Time type
+	isTime := func(t interface{}) bool {
+		_, ok := t.(time.Time)
+		return ok
+	}
+
+	if !isTime(actual) {
+		return false, fmt.Errorf("Expected a time.Time.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	switch matcher.Comparator {
+	case "==", "~", ">", ">=", "<", "<=":
+	default:
+		return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+	}
+
+	var threshold = time.Millisecond
+	if len(matcher.Threshold) == 1 {
+		threshold = matcher.Threshold[0]
+	}
+
+	return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil
+}
+
+func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) {
+	switch matcher.Comparator {
+	case "==":
+		return actual.Equal(compareTo)
+	case "~":
+		diff := actual.Sub(compareTo)
+		return -threshold <= diff && diff <= threshold
+	case ">":
+		return actual.After(compareTo)
+	case ">=":
+		return !actual.Before(compareTo)
+	case "<":
+		return actual.Before(compareTo)
+	case "<=":
+		return !actual.After(compareTo)
+	}
+	return false
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..1275e5fc9d804b01ede59dba0f28cc124cb977d5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -0,0 +1,25 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type BeTrueMatcher struct {
+}
+
+func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isBool(actual) {
+		return false, fmt.Errorf("Expected a boolean.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	return actual.(bool), nil
+}
+
+func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be true")
+}
+
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be true")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..b39c9144be7d75eae3c975a0d39a1379408af286
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type BeZeroMatcher struct {
+}
+
+func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+	if actual == nil {
+		return true, nil
+	}
+	zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+
+	return reflect.DeepEqual(zeroValue, actual), nil
+
+}
+
+func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be zero-valued")
+}
+
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be zero-valued")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b0e0886842a16da02bb1f332c3f4abee93beed4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -0,0 +1,80 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
+)
+
+type ConsistOfMatcher struct {
+	Elements []interface{}
+}
+
+func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isArrayOrSlice(actual) && !isMap(actual) {
+		return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	elements := matcher.Elements
+	if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
+		elements = []interface{}{}
+		value := reflect.ValueOf(matcher.Elements[0])
+		for i := 0; i < value.Len(); i++ {
+			elements = append(elements, value.Index(i).Interface())
+		}
+	}
+
+	matchers := []interface{}{}
+	for _, element := range elements {
+		matcher, isMatcher := element.(omegaMatcher)
+		if !isMatcher {
+			matcher = &EqualMatcher{Expected: element}
+		}
+		matchers = append(matchers, matcher)
+	}
+
+	values := matcher.valuesOf(actual)
+
+	if len(values) != len(matchers) {
+		return false, nil
+	}
+
+	neighbours := func(v, m interface{}) (bool, error) {
+		match, err := m.(omegaMatcher).Match(v)
+		return match && err == nil, nil
+	}
+
+	bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
+	if err != nil {
+		return false, err
+	}
+
+	return len(bipartiteGraph.LargestMatching()) == len(values), nil
+}
+
+func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
+	value := reflect.ValueOf(actual)
+	values := []interface{}{}
+	if isMap(actual) {
+		keys := value.MapKeys()
+		for i := 0; i < value.Len(); i++ {
+			values = append(values, value.MapIndex(keys[i]).Interface())
+		}
+	} else {
+		for i := 0; i < value.Len(); i++ {
+			values = append(values, value.Index(i).Interface())
+		}
+	}
+
+	return values
+}
+
+func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to consist of", matcher.Elements)
+}
+
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to consist of", matcher.Elements)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..4159335d0d5aecb8c2efc27b6bee1b2ef5f2a692
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -0,0 +1,56 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type ContainElementMatcher struct {
+	Element interface{}
+}
+
+func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isArrayOrSlice(actual) && !isMap(actual) {
+		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+	if !elementIsMatcher {
+		elemMatcher = &EqualMatcher{Expected: matcher.Element}
+	}
+
+	value := reflect.ValueOf(actual)
+	var keys []reflect.Value
+	if isMap(actual) {
+		keys = value.MapKeys()
+	}
+	var lastError error
+	for i := 0; i < value.Len(); i++ {
+		var success bool
+		var err error
+		if isMap(actual) {
+			success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
+		} else {
+			success, err = elemMatcher.Match(value.Index(i).Interface())
+		}
+		if err != nil {
+			lastError = err
+			continue
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, lastError
+}
+
+func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e7608921ac4f05eafffcf7d83c1d232bb5f4621
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -0,0 +1,37 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"strings"
+)
+
+type ContainSubstringMatcher struct {
+	Substr string
+	Args   []interface{}
+}
+
+func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+	actualString, ok := toString(actual)
+	if !ok {
+		return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	return strings.Contains(actualString, matcher.stringToMatch()), nil
+}
+
+func (matcher *ContainSubstringMatcher) stringToMatch() string {
+	stringToMatch := matcher.Substr
+	if len(matcher.Args) > 0 {
+		stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
+	}
+	return stringToMatch
+}
+
+func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to contain substring", matcher.stringToMatch())
+}
+
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain substring", matcher.stringToMatch())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..d186597379b908a8070d9270a10e3e51dfa94d18
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type EqualMatcher struct {
+	Expected interface{}
+}
+
+func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+	if actual == nil && matcher.Expected == nil {
+		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead.  This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+	}
+	return reflect.DeepEqual(actual, matcher.Expected), nil
+}
+
+func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to equal", matcher.Expected)
+}
+
+func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to equal", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..5701ba6e24cfae830fea929e543a2b49e0037efb
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -0,0 +1,53 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type HaveKeyMatcher struct {
+	Key interface{}
+}
+
+func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isMap(actual) {
+		return false, fmt.Errorf("HaveKey matcher expects a map.  Got:%s", format.Object(actual, 1))
+	}
+
+	keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+	if !keyIsMatcher {
+		keyMatcher = &EqualMatcher{Expected: matcher.Key}
+	}
+
+	keys := reflect.ValueOf(actual).MapKeys()
+	for i := 0; i < len(keys); i++ {
+		success, err := keyMatcher.Match(keys[i].Interface())
+		if err != nil {
+			return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+	switch matcher.Key.(type) {
+	case omegaMatcher:
+		return format.Message(actual, "to have key matching", matcher.Key)
+	default:
+		return format.Message(actual, "to have key", matcher.Key)
+	}
+}
+
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	switch matcher.Key.(type) {
+	case omegaMatcher:
+		return format.Message(actual, "not to have key matching", matcher.Key)
+	default:
+		return format.Message(actual, "not to have key", matcher.Key)
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..464ac187e9084a107e0512854adb503903294445
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -0,0 +1,73 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type HaveKeyWithValueMatcher struct {
+	Key   interface{}
+	Value interface{}
+}
+
+func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isMap(actual) {
+		return false, fmt.Errorf("HaveKeyWithValue matcher expects a map.  Got:%s", format.Object(actual, 1))
+	}
+
+	keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+	if !keyIsMatcher {
+		keyMatcher = &EqualMatcher{Expected: matcher.Key}
+	}
+
+	valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher)
+	if !valueIsMatcher {
+		valueMatcher = &EqualMatcher{Expected: matcher.Value}
+	}
+
+	keys := reflect.ValueOf(actual).MapKeys()
+	for i := 0; i < len(keys); i++ {
+		success, err := keyMatcher.Match(keys[i].Interface())
+		if err != nil {
+			return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error())
+		}
+		if success {
+			actualValue := reflect.ValueOf(actual).MapIndex(keys[i])
+			success, err := valueMatcher.Match(actualValue.Interface())
+			if err != nil {
+				return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
+			}
+			return success, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+	str := "to have {key: value}"
+	if _, ok := matcher.Key.(omegaMatcher); ok {
+		str += " matching"
+	} else if _, ok := matcher.Value.(omegaMatcher); ok {
+		str += " matching"
+	}
+
+	expect := make(map[interface{}]interface{}, 1)
+	expect[matcher.Key] = matcher.Value
+	return format.Message(actual, str, expect)
+}
+
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	kStr := "not to have key"
+	if _, ok := matcher.Key.(omegaMatcher); ok {
+		kStr = "not to have key matching"
+	}
+
+	vStr := "or that key's value not be"
+	if _, ok := matcher.Value.(omegaMatcher); ok {
+		vStr = "or to have that key's value not matching"
+	}
+
+	return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..a1837755701ba909aace0494f52389498247cc46
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type HaveLenMatcher struct {
+	Count int
+}
+
+func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+	length, ok := lengthOf(actual)
+	if !ok {
+		return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	return length == matcher.Count, nil
+}
+
+func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
+}
+
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..ebdd71786d87a0a7d86462d386a84db75f0f1868
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -0,0 +1,33 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type HaveOccurredMatcher struct {
+}
+
+func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+	// is purely nil?
+	if actual == nil {
+		return false, nil
+	}
+
+	// must be an 'error' type
+	if !isError(actual) {
+		return false, fmt.Errorf("Expected an error-type.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	// must be non-nil (or a pointer to a non-nil)
+	return !isNil(actual), nil
+}
+
+func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected an error to have occurred.  Got:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b63a89997b36f6a1326328f5509e6252b36a3d6
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -0,0 +1,35 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type HavePrefixMatcher struct {
+	Prefix string
+	Args   []interface{}
+}
+
+func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+	actualString, ok := toString(actual)
+	if !ok {
+		return false, fmt.Errorf("HavePrefix matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+	}
+	prefix := matcher.prefix()
+	return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil
+}
+
+func (matcher *HavePrefixMatcher) prefix() string {
+	if len(matcher.Args) > 0 {
+		return fmt.Sprintf(matcher.Prefix, matcher.Args...)
+	}
+	return matcher.Prefix
+}
+
+func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to have prefix", matcher.prefix())
+}
+
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to have prefix", matcher.prefix())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..afc78fc901902b3de307c4373aeb0527914f5d75
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -0,0 +1,35 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+)
+
+type HaveSuffixMatcher struct {
+	Suffix string
+	Args   []interface{}
+}
+
+func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+	actualString, ok := toString(actual)
+	if !ok {
+		return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+	}
+	suffix := matcher.suffix()
+	return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil
+}
+
+func (matcher *HaveSuffixMatcher) suffix() string {
+	if len(matcher.Args) > 0 {
+		return fmt.Sprintf(matcher.Suffix, matcher.Args...)
+	}
+	return matcher.Suffix
+}
+
+func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to have suffix", matcher.suffix())
+}
+
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to have suffix", matcher.suffix())
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..03cdf04588860ed5938be7396dba28632965bc2f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -0,0 +1,50 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type MatchErrorMatcher struct {
+	Expected interface{}
+}
+
+func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) {
+	if isNil(actual) {
+		return false, fmt.Errorf("Expected an error, got nil")
+	}
+
+	if !isError(actual) {
+		return false, fmt.Errorf("Expected an error.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	actualErr := actual.(error)
+
+	if isString(matcher.Expected) {
+		return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil
+	}
+
+	if isError(matcher.Expected) {
+		return reflect.DeepEqual(actualErr, matcher.Expected), nil
+	}
+
+	var subMatcher omegaMatcher
+	var hasSubMatcher bool
+	if matcher.Expected != nil {
+		subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher)
+		if hasSubMatcher {
+			return subMatcher.Match(actualErr.Error())
+		}
+	}
+
+	return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings.  Got:\n%s", format.Object(matcher.Expected, 1))
+}
+
+func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to match error", matcher.Expected)
+}
+
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to match error", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..efc5e154592838f0adb9fcade415a13361bfe9c8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -0,0 +1,61 @@
+package matchers
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type MatchJSONMatcher struct {
+	JSONToMatch interface{}
+}
+
+func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+	actualString, expectedString, err := matcher.prettyPrint(actual)
+	if err != nil {
+		return false, err
+	}
+
+	var aval interface{}
+	var eval interface{}
+
+	// this is guarded by prettyPrint
+	json.Unmarshal([]byte(actualString), &aval)
+	json.Unmarshal([]byte(expectedString), &eval)
+
+	return reflect.DeepEqual(aval, eval), nil
+}
+
+func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+	actualString, expectedString, _ := matcher.prettyPrint(actual)
+	return format.Message(actualString, "to match JSON of", expectedString)
+}
+
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	actualString, expectedString, _ := matcher.prettyPrint(actual)
+	return format.Message(actualString, "not to match JSON of", expectedString)
+}
+
+func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+	actualString, aok := toString(actual)
+	expectedString, eok := toString(matcher.JSONToMatch)
+
+	if !(aok && eok) {
+		return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	abuf := new(bytes.Buffer)
+	ebuf := new(bytes.Buffer)
+
+	if err := json.Indent(abuf, []byte(actualString), "", "  "); err != nil {
+		return "", "", err
+	}
+
+	if err := json.Indent(ebuf, []byte(expectedString), "", "  "); err != nil {
+		return "", "", err
+	}
+
+	return abuf.String(), ebuf.String(), nil
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ca79a15be247893f6e5215a8d4bc86a12e7f902
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"regexp"
+)
+
+type MatchRegexpMatcher struct {
+	Regexp string
+	Args   []interface{}
+}
+
+func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+	actualString, ok := toString(actual)
+	if !ok {
+		return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
+	}
+
+	match, err := regexp.Match(matcher.regexp(), []byte(actualString))
+	if err != nil {
+		return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error())
+	}
+
+	return match, nil
+}
+
+func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) regexp() string {
+	re := matcher.Regexp
+	if len(matcher.Args) > 0 {
+		re = fmt.Sprintf(matcher.Regexp, matcher.Args...)
+	}
+	return re
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c91670bd9bbc6191fc9754a15f6d76e7902f9fe
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/not.go
@@ -0,0 +1,30 @@
+package matchers
+
+import (
+	"github.com/onsi/gomega/internal/oraclematcher"
+	"github.com/onsi/gomega/types"
+)
+
+type NotMatcher struct {
+	Matcher types.GomegaMatcher
+}
+
+func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+	success, err := m.Matcher.Match(actual)
+	if err != nil {
+		return false, err
+	}
+	return !success, nil
+}
+
+func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+	return m.Matcher.NegatedFailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return m.Matcher.FailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go
new file mode 100644
index 0000000000000000000000000000000000000000..3bf7998001d5e56f525782acd6c4ee9965fca877
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/or.go
@@ -0,0 +1,67 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/internal/oraclematcher"
+	"github.com/onsi/gomega/types"
+)
+
+type OrMatcher struct {
+	Matchers []types.GomegaMatcher
+
+	// state
+	firstSuccessfulMatcher types.GomegaMatcher
+}
+
+func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+	m.firstSuccessfulMatcher = nil
+	for _, matcher := range m.Matchers {
+		success, err := matcher.Match(actual)
+		if err != nil {
+			return false, err
+		}
+		if success {
+			m.firstSuccessfulMatcher = matcher
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+	// not the most beautiful list of matchers, but not bad either...
+	return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
+}
+
+func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
+}
+
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	/*
+		Example with 3 matchers: A, B, C
+
+		Match evaluates them: F, T, <?>  => T
+		So match is currently T, what should MatchMayChangeInTheFuture() return?
+		Seems like it only depends on B, since currently B MUST change to allow the result to become F
+
+		Match eval: F, F, F  => F
+		So match is currently F, what should MatchMayChangeInTheFuture() return?
+		Seems to depend on ANY of them being able to change to T.
+	*/
+
+	if m.firstSuccessfulMatcher != nil {
+		// one of the matchers succeeded.. it must be able to change in order to affect the result
+		return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+	} else {
+		// so all matchers failed.. Any one of them changing would change the result.
+		for _, matcher := range m.Matchers {
+			if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+				return true
+			}
+		}
+		return false // none of were going to change
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..75ab251bce94b8cf6bb0e88b3195cb8fda250eda
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+	"fmt"
+	"github.com/onsi/gomega/format"
+	"reflect"
+)
+
+type PanicMatcher struct{}
+
+func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+	if actual == nil {
+		return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
+	}
+
+	actualType := reflect.TypeOf(actual)
+	if actualType.Kind() != reflect.Func {
+		return false, fmt.Errorf("PanicMatcher expects a function.  Got:\n%s", format.Object(actual, 1))
+	}
+	if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) {
+		return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	success = false
+	defer func() {
+		if e := recover(); e != nil {
+			success = true
+		}
+	}()
+
+	reflect.ValueOf(actual).Call([]reflect.Value{})
+
+	return
+}
+
+func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to panic")
+}
+
+func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to panic")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c2cda519215feb978997720c35a2ef57d8de8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -0,0 +1,126 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type ReceiveMatcher struct {
+	Arg           interface{}
+	receivedValue reflect.Value
+	channelClosed bool
+}
+
+func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isChan(actual) {
+		return false, fmt.Errorf("ReceiveMatcher expects a channel.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	channelType := reflect.TypeOf(actual)
+	channelValue := reflect.ValueOf(actual)
+
+	if channelType.ChanDir() == reflect.SendDir {
+		return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	var subMatcher omegaMatcher
+	var hasSubMatcher bool
+
+	if matcher.Arg != nil {
+		subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
+		if !hasSubMatcher {
+			argType := reflect.TypeOf(matcher.Arg)
+			if argType.Kind() != reflect.Ptr {
+				return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
+			}
+
+			assignable := channelType.Elem().AssignableTo(argType.Elem())
+			if !assignable {
+				return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1))
+			}
+		}
+	}
+
+	winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
+		reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
+		reflect.SelectCase{Dir: reflect.SelectDefault},
+	})
+
+	var closed bool
+	var didReceive bool
+	if winnerIndex == 0 {
+		closed = !open
+		didReceive = open
+	}
+	matcher.channelClosed = closed
+
+	if closed {
+		return false, nil
+	}
+
+	if hasSubMatcher {
+		if didReceive {
+			matcher.receivedValue = value
+			return subMatcher.Match(matcher.receivedValue.Interface())
+		} else {
+			return false, nil
+		}
+	}
+
+	if didReceive {
+		if matcher.Arg != nil {
+			outValue := reflect.ValueOf(matcher.Arg)
+			reflect.Indirect(outValue).Set(value)
+		}
+
+		return true, nil
+	} else {
+		return false, nil
+	}
+}
+
+func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
+	subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+
+	closedAddendum := ""
+	if matcher.channelClosed {
+		closedAddendum = " The channel is closed."
+	}
+
+	if hasSubMatcher {
+		if matcher.receivedValue.IsValid() {
+			return subMatcher.FailureMessage(matcher.receivedValue.Interface())
+		}
+		return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+	} else {
+		return format.Message(actual, "to receive something."+closedAddendum)
+	}
+}
+
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+
+	closedAddendum := ""
+	if matcher.channelClosed {
+		closedAddendum = " The channel is closed."
+	}
+
+	if hasSubMatcher {
+		if matcher.receivedValue.IsValid() {
+			return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface())
+		}
+		return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+	} else {
+		return format.Message(actual, "not to receive anything."+closedAddendum)
+	}
+}
+
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+	if !isChan(actual) {
+		return false
+	}
+
+	return !matcher.channelClosed
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..721ed5529bc6fe03adb89e25944e515517052f69
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -0,0 +1,33 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type SucceedMatcher struct {
+}
+
+func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+	// is purely nil?
+	if actual == nil {
+		return true, nil
+	}
+
+	// must be an 'error' type
+	if !isError(actual) {
+		return false, fmt.Errorf("Expected an error-type.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	// must be nil (or a pointer to a nil)
+	return isNil(actual), nil
+}
+
+func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+}
+
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return "Expected failure, but got no error."
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8edd8175abe7281dab05e59007f41eb2678256e3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2014 Amit Kumar Gupta
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..119d21ef3178bcb53ca1c765efea8d59404c61e5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -0,0 +1,41 @@
+package bipartitegraph
+
+import "errors"
+import "fmt"
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+import . "github.com/onsi/gomega/matchers/support/goraph/edge"
+
+type BipartiteGraph struct {
+	Left  NodeOrderedSet
+	Right NodeOrderedSet
+	Edges EdgeSet
+}
+
+func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+	left := NodeOrderedSet{}
+	for i, _ := range leftValues {
+		left = append(left, Node{i})
+	}
+
+	right := NodeOrderedSet{}
+	for j, _ := range rightValues {
+		right = append(right, Node{j + len(left)})
+	}
+
+	edges := EdgeSet{}
+	for i, leftValue := range leftValues {
+		for j, rightValue := range rightValues {
+			neighbours, err := neighbours(leftValue, rightValue)
+			if err != nil {
+				return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error()))
+			}
+
+			if neighbours {
+				edges = append(edges, Edge{left[i], right[j]})
+			}
+		}
+	}
+
+	return &BipartiteGraph{left, right, edges}, nil
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
new file mode 100644
index 0000000000000000000000000000000000000000..32529c51131480c4c5e75cf994c92dac9295fae6
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
@@ -0,0 +1,161 @@
+package bipartitegraph
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+import . "github.com/onsi/gomega/matchers/support/goraph/edge"
+import "github.com/onsi/gomega/matchers/support/goraph/util"
+
+func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) {
+	paths := bg.maximalDisjointSLAPCollection(matching)
+
+	for len(paths) > 0 {
+		for _, path := range paths {
+			matching = matching.SymmetricDifference(path)
+		}
+		paths = bg.maximalDisjointSLAPCollection(matching)
+	}
+
+	return
+}
+
+func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) {
+	guideLayers := bg.createSLAPGuideLayers(matching)
+	if len(guideLayers) == 0 {
+		return
+	}
+
+	used := make(map[Node]bool)
+
+	for _, u := range guideLayers[len(guideLayers)-1] {
+		slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used)
+		if found {
+			for _, edge := range slap {
+				used[edge.Node1] = true
+				used[edge.Node2] = true
+			}
+			result = append(result, slap)
+		}
+	}
+
+	return
+}
+
+func (bg *BipartiteGraph) findDisjointSLAP(
+	start Node,
+	matching EdgeSet,
+	guideLayers []NodeOrderedSet,
+	used map[Node]bool,
+) ([]Edge, bool) {
+	return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used)
+}
+
+func (bg *BipartiteGraph) findDisjointSLAPHelper(
+	currentNode Node,
+	currentSLAP EdgeSet,
+	currentLevel int,
+	matching EdgeSet,
+	guideLayers []NodeOrderedSet,
+	used map[Node]bool,
+) (EdgeSet, bool) {
+	used[currentNode] = true
+
+	if currentLevel == 0 {
+		return currentSLAP, true
+	}
+
+	for _, nextNode := range guideLayers[currentLevel-1] {
+		if used[nextNode] {
+			continue
+		}
+
+		edge, found := bg.Edges.FindByNodes(currentNode, nextNode)
+		if !found {
+			continue
+		}
+
+		if matching.Contains(edge) == util.Odd(currentLevel) {
+			continue
+		}
+
+		currentSLAP = append(currentSLAP, edge)
+		slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used)
+		if found {
+			return slap, true
+		}
+		currentSLAP = currentSLAP[:len(currentSLAP)-1]
+	}
+
+	used[currentNode] = false
+	return nil, false
+}
+
+func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) {
+	used := make(map[Node]bool)
+	currentLayer := NodeOrderedSet{}
+
+	for _, node := range bg.Left {
+		if matching.Free(node) {
+			used[node] = true
+			currentLayer = append(currentLayer, node)
+		}
+	}
+
+	if len(currentLayer) == 0 {
+		return []NodeOrderedSet{}
+	} else {
+		guideLayers = append(guideLayers, currentLayer)
+	}
+
+	done := false
+
+	for !done {
+		lastLayer := currentLayer
+		currentLayer = NodeOrderedSet{}
+
+		if util.Odd(len(guideLayers)) {
+			for _, leftNode := range lastLayer {
+				for _, rightNode := range bg.Right {
+					if used[rightNode] {
+						continue
+					}
+
+					edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+					if !found || matching.Contains(edge) {
+						continue
+					}
+
+					currentLayer = append(currentLayer, rightNode)
+					used[rightNode] = true
+
+					if matching.Free(rightNode) {
+						done = true
+					}
+				}
+			}
+		} else {
+			for _, rightNode := range lastLayer {
+				for _, leftNode := range bg.Left {
+					if used[leftNode] {
+						continue
+					}
+
+					edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+					if !found || !matching.Contains(edge) {
+						continue
+					}
+
+					currentLayer = append(currentLayer, leftNode)
+					used[leftNode] = true
+				}
+			}
+
+		}
+
+		if len(currentLayer) == 0 {
+			return []NodeOrderedSet{}
+		} else {
+			guideLayers = append(guideLayers, currentLayer)
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
new file mode 100644
index 0000000000000000000000000000000000000000..4fd15cc06944bf6a070f15e6b8991a1fd6d6d6cb
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
@@ -0,0 +1,61 @@
+package edge
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+
+type Edge struct {
+	Node1 Node
+	Node2 Node
+}
+
+type EdgeSet []Edge
+
+func (ec EdgeSet) Free(node Node) bool {
+	for _, e := range ec {
+		if e.Node1 == node || e.Node2 == node {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (ec EdgeSet) Contains(edge Edge) bool {
+	for _, e := range ec {
+		if e == edge {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) {
+	for _, e := range ec {
+		if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) {
+			return e, true
+		}
+	}
+
+	return Edge{}, false
+}
+
+func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet {
+	edgesToInclude := make(map[Edge]bool)
+
+	for _, e := range ec {
+		edgesToInclude[e] = true
+	}
+
+	for _, e := range ec2 {
+		edgesToInclude[e] = !edgesToInclude[e]
+	}
+
+	result := EdgeSet{}
+	for e, include := range edgesToInclude {
+		if include {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
new file mode 100644
index 0000000000000000000000000000000000000000..800c2ea8caf30f01877bb3140a81c8866f86a92c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -0,0 +1,7 @@
+package node
+
+type Node struct {
+	Id int
+}
+
+type NodeOrderedSet []Node
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..d76a1ee00a2119a652e7271158c2f0a2e77ab632
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go
@@ -0,0 +1,7 @@
+package util
+
+import "math"
+
+func Odd(n int) bool {
+	return math.Mod(float64(n), 2.0) == 1.0
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef9b44835ea382f693604393a960a965394b9740
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -0,0 +1,165 @@
+/*
+Gomega matchers
+
+This package implements the Gomega matchers and does not typically need to be imported.
+See the docs for Gomega for documentation on the matchers
+
+http://onsi.github.io/gomega/
+*/
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type omegaMatcher interface {
+	Match(actual interface{}) (success bool, err error)
+	FailureMessage(actual interface{}) (message string)
+	NegatedFailureMessage(actual interface{}) (message string)
+}
+
+func isBool(a interface{}) bool {
+	return reflect.TypeOf(a).Kind() == reflect.Bool
+}
+
+func isNumber(a interface{}) bool {
+	if a == nil {
+		return false
+	}
+	kind := reflect.TypeOf(a).Kind()
+	return reflect.Int <= kind && kind <= reflect.Float64
+}
+
+func isInteger(a interface{}) bool {
+	kind := reflect.TypeOf(a).Kind()
+	return reflect.Int <= kind && kind <= reflect.Int64
+}
+
+func isUnsignedInteger(a interface{}) bool {
+	kind := reflect.TypeOf(a).Kind()
+	return reflect.Uint <= kind && kind <= reflect.Uint64
+}
+
+func isFloat(a interface{}) bool {
+	kind := reflect.TypeOf(a).Kind()
+	return reflect.Float32 <= kind && kind <= reflect.Float64
+}
+
+func toInteger(a interface{}) int64 {
+	if isInteger(a) {
+		return reflect.ValueOf(a).Int()
+	} else if isUnsignedInteger(a) {
+		return int64(reflect.ValueOf(a).Uint())
+	} else if isFloat(a) {
+		return int64(reflect.ValueOf(a).Float())
+	} else {
+		panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+	}
+}
+
+func toUnsignedInteger(a interface{}) uint64 {
+	if isInteger(a) {
+		return uint64(reflect.ValueOf(a).Int())
+	} else if isUnsignedInteger(a) {
+		return reflect.ValueOf(a).Uint()
+	} else if isFloat(a) {
+		return uint64(reflect.ValueOf(a).Float())
+	} else {
+		panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+	}
+}
+
+func toFloat(a interface{}) float64 {
+	if isInteger(a) {
+		return float64(reflect.ValueOf(a).Int())
+	} else if isUnsignedInteger(a) {
+		return float64(reflect.ValueOf(a).Uint())
+	} else if isFloat(a) {
+		return reflect.ValueOf(a).Float()
+	} else {
+		panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+	}
+}
+
+func isError(a interface{}) bool {
+	_, ok := a.(error)
+	return ok
+}
+
+func isChan(a interface{}) bool {
+	if isNil(a) {
+		return false
+	}
+	return reflect.TypeOf(a).Kind() == reflect.Chan
+}
+
+func isMap(a interface{}) bool {
+	if a == nil {
+		return false
+	}
+	return reflect.TypeOf(a).Kind() == reflect.Map
+}
+
+func isArrayOrSlice(a interface{}) bool {
+	if a == nil {
+		return false
+	}
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Array, reflect.Slice:
+		return true
+	default:
+		return false
+	}
+}
+
+func isString(a interface{}) bool {
+	if a == nil {
+		return false
+	}
+	return reflect.TypeOf(a).Kind() == reflect.String
+}
+
+func toString(a interface{}) (string, bool) {
+	aString, isString := a.(string)
+	if isString {
+		return aString, true
+	}
+
+	aBytes, isBytes := a.([]byte)
+	if isBytes {
+		return string(aBytes), true
+	}
+
+	aStringer, isStringer := a.(fmt.Stringer)
+	if isStringer {
+		return aStringer.String(), true
+	}
+
+	return "", false
+}
+
+func lengthOf(a interface{}) (int, bool) {
+	if a == nil {
+		return 0, false
+	}
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice:
+		return reflect.ValueOf(a).Len(), true
+	default:
+		return 0, false
+	}
+}
+
+func isNil(a interface{}) bool {
+	if a == nil {
+		return true
+	}
+
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return reflect.ValueOf(a).IsNil()
+	}
+
+	return false
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
new file mode 100644
index 0000000000000000000000000000000000000000..8e58d8a0fb78d88f1bcfb05a4219922a63eb27b0
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -0,0 +1,72 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/internal/oraclematcher"
+	"github.com/onsi/gomega/types"
+)
+
+type WithTransformMatcher struct {
+	// input
+	Transform interface{} // must be a function of one parameter that returns one value
+	Matcher   types.GomegaMatcher
+
+	// cached value
+	transformArgType reflect.Type
+
+	// state
+	transformedValue interface{}
+}
+
+func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+	if transform == nil {
+		panic("transform function cannot be nil")
+	}
+	txType := reflect.TypeOf(transform)
+	if txType.NumIn() != 1 {
+		panic("transform function must have 1 argument")
+	}
+	if txType.NumOut() != 1 {
+		panic("transform function must have 1 return value")
+	}
+
+	return &WithTransformMatcher{
+		Transform:        transform,
+		Matcher:          matcher,
+		transformArgType: reflect.TypeOf(transform).In(0),
+	}
+}
+
+func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+	// return error if actual's type is incompatible with Transform function's argument type
+	actualType := reflect.TypeOf(actual)
+	if !actualType.AssignableTo(m.transformArgType) {
+		return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType)
+	}
+
+	// call the Transform function with `actual`
+	fn := reflect.ValueOf(m.Transform)
+	result := fn.Call([]reflect.Value{reflect.ValueOf(actual)})
+	m.transformedValue = result[0].Interface() // expect exactly one value
+
+	return m.Matcher.Match(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+	return m.Matcher.FailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+	return m.Matcher.NegatedFailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+	// TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
+	//
+	// Querying the next matcher is fine if the transformer always will return the same value.
+	// But if the transformer is non-deterministic and returns a different value each time, then there
+	// is no point in querying the next matcher, since it can only comment on the last transformed value.
+	return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue)
+}
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..1c632ade2913563e00f8c0749a0d9beb52bb3b4c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -0,0 +1,17 @@
+package types
+
+type GomegaFailHandler func(message string, callerSkip ...int)
+
+//A simple *testing.T interface wrapper
+type GomegaTestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+//All Gomega matchers must implement the GomegaMatcher interface
+//
+//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers
+type GomegaMatcher interface {
+	Match(actual interface{}) (success bool, err error)
+	FailureMessage(actual interface{}) (message string)
+	NegatedFailureMessage(actual interface{}) (message string)
+}
diff --git a/vendor/github.com/tendermint/tendermint/LICENSE b/vendor/github.com/tendermint/tendermint/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..36ca31af6462685e6ea19a48fc79a929b6e69332
--- /dev/null
+++ b/vendor/github.com/tendermint/tendermint/LICENSE
@@ -0,0 +1,626 @@
+Tendermint Core
+Copyright (C) 2015 Tendermint
+
+
+
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/tendermint/tendermint/consensus/state.go b/vendor/github.com/tendermint/tendermint/consensus/state.go
index f5a0b636ddf99aee67681dfb2436d71ff2265d0d..2015866438b71f2b2329ce103488a56c51c97c6f 100644
--- a/vendor/github.com/tendermint/tendermint/consensus/state.go
+++ b/vendor/github.com/tendermint/tendermint/consensus/state.go
@@ -819,11 +819,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
 	}
 
 	// Mempool validated transactions
-	txs, err := cs.mempool.Reap()
-	if err != nil {
-		log.Warn("createProposalBlock: Error getting proposal txs", "error", err)
-		return nil, nil
-	}
+	txs := cs.mempool.Reap()
 
 	block = &types.Block{
 		Header: &types.Header{
diff --git a/vendor/github.com/tendermint/tendermint/mempool/mempool.go b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
index 95b91d3273b74a7466a019d40108772197b3f82e..2af9bc0e1632bfcb7db49b1aa5f055567a9fb719 100644
--- a/vendor/github.com/tendermint/tendermint/mempool/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/mempool/mempool.go
@@ -1,9 +1,11 @@
 package mempool
 
 import (
+	"bytes"
 	"container/list"
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"github.com/tendermint/go-clist"
 	. "github.com/tendermint/go-common"
@@ -36,16 +38,21 @@ Garbage collection of old elements from mempool.txs is handlde via
 the DetachPrev() call, which makes old elements not reachable by
 peer broadcastTxRoutine() automatically garbage collected.
 
+TODO: Better handle tmsp client errors. (make it automatically handle connection errors)
+
 */
 
 const cacheSize = 100000
 
 type Mempool struct {
-	proxyMtx     sync.Mutex
-	proxyAppConn proxy.AppConn
-	txs          *clist.CList // concurrent linked-list of good txs
-	counter      int64        // simple incrementing counter
-	height       int          // the last block Update()'d to
+	proxyMtx      sync.Mutex
+	proxyAppConn  proxy.AppConn
+	txs           *clist.CList    // concurrent linked-list of good txs
+	counter       int64           // simple incrementing counter
+	height        int             // the last block Update()'d to
+	rechecking    int32           // for re-checking filtered txs on Update()
+	recheckCursor *clist.CElement // next expected response
+	recheckEnd    *clist.CElement // re-checking stops here
 
 	// Keep a cache of already-seen txs.
 	// This reduces the pressure on the proxyApp.
@@ -55,10 +62,13 @@ type Mempool struct {
 
 func NewMempool(proxyAppConn proxy.AppConn) *Mempool {
 	mempool := &Mempool{
-		proxyAppConn: proxyAppConn,
-		txs:          clist.New(),
-		counter:      0,
-		height:       0,
+		proxyAppConn:  proxyAppConn,
+		txs:           clist.New(),
+		counter:       0,
+		height:        0,
+		rechecking:    0,
+		recheckCursor: nil,
+		recheckEnd:    nil,
 
 		cacheMap:  make(map[string]struct{}, cacheSize),
 		cacheList: list.New(),
@@ -102,6 +112,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
 	mem.cacheList.PushBack(tx)
 	// END CACHE
 
+	// NOTE: proxyAppConn may error if tx buffer is full
 	if err = mem.proxyAppConn.Error(); err != nil {
 		return err
 	}
@@ -115,6 +126,14 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
 
 // TMSP callback function
 func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
+	if mem.recheckCursor == nil {
+		mem.resCbNormal(req, res)
+	} else {
+		mem.resCbRecheck(req, res)
+	}
+}
+
+func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
 	switch res.Type {
 	case tmsp.MessageType_CheckTx:
 		if res.Code == tmsp.CodeType_OK {
@@ -134,14 +153,47 @@ func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
 	}
 }
 
+func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
+	switch res.Type {
+	case tmsp.MessageType_CheckTx:
+		memTx := mem.recheckCursor.Value.(*mempoolTx)
+		if !bytes.Equal(req.Data, memTx.tx) {
+			PanicSanity(Fmt("Unexpected tx response from proxy during recheck\n"+
+				"Expected %X, got %X", req.Data, memTx.tx))
+		}
+		if res.Code == tmsp.CodeType_OK {
+			// Good, nothing to do.
+		} else {
+			// Tx became invalidated due to newly committed block.
+			mem.txs.Remove(mem.recheckCursor)
+			mem.recheckCursor.DetachPrev()
+		}
+		if mem.recheckCursor == mem.recheckEnd {
+			mem.recheckCursor = nil
+		} else {
+			mem.recheckCursor = mem.recheckCursor.Next()
+		}
+		if mem.recheckCursor == nil {
+			// Done!
+			atomic.StoreInt32(&mem.rechecking, 0)
+		}
+	default:
+		// ignore other messages
+	}
+}
+
 // Get the valid transactions remaining
-func (mem *Mempool) Reap() ([]types.Tx, error) {
+func (mem *Mempool) Reap() []types.Tx {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()
 
-	txs := mem.collectTxs()
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
+	}
 
-	return txs, nil
+	txs := mem.collectTxs()
+	return txs
 }
 
 func (mem *Mempool) collectTxs() []types.Tx {
@@ -156,7 +208,7 @@ func (mem *Mempool) collectTxs() []types.Tx {
 // Tell mempool that these txs were committed.
 // Mempool will discard these txs.
 // NOTE: this should be called *after* block is committed by consensus.
-func (mem *Mempool) Update(height int, txs []types.Tx) error {
+func (mem *Mempool) Update(height int, txs []types.Tx) {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()
 
@@ -168,11 +220,15 @@ func (mem *Mempool) Update(height int, txs []types.Tx) error {
 
 	// Set height
 	mem.height = height
-
 	// Remove transactions that are already in txs.
-	mem.filterTxs(txsMap)
-
-	return nil
+	goodTxs := mem.filterTxs(txsMap)
+	// Recheck mempool txs
+	// TODO: make optional
+	mem.recheckTxs(goodTxs)
+
+	// At this point, mem.txs are being rechecked.
+	// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
+	// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 }
 
 func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
@@ -191,6 +247,23 @@ func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
 	return goodTxs
 }
 
+// NOTE: pass in goodTxs because mem.txs can mutate concurrently.
+func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {
+	if len(goodTxs) == 0 {
+		return
+	}
+	atomic.StoreInt32(&mem.rechecking, 1)
+	mem.recheckCursor = mem.txs.Front()
+	mem.recheckEnd = mem.txs.Back()
+
+	// Push txs to proxyAppConn
+	// NOTE: resCb() may be called concurrently.
+	for _, tx := range goodTxs {
+		mem.proxyAppConn.CheckTxAsync(tx)
+	}
+	mem.proxyAppConn.FlushAsync()
+}
+
 //--------------------------------------------------------------------------------
 
 // A transaction that successfully ran
diff --git a/vendor/github.com/tendermint/tendermint/node/node.go b/vendor/github.com/tendermint/tendermint/node/node.go
index 11d8730e9f89cf07cb9344e5dd5ec5ffb601f805..d344292825d2aef79eaf135a9863fd3f44af9105 100644
--- a/vendor/github.com/tendermint/tendermint/node/node.go
+++ b/vendor/github.com/tendermint/tendermint/node/node.go
@@ -25,7 +25,7 @@ import (
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 	"github.com/tendermint/tendermint/version"
-	"github.com/tendermint/tmsp/example/golang"
+	"github.com/tendermint/tmsp/example/dummy"
 )
 
 import _ "net/http/pprof"
@@ -230,7 +230,7 @@ func makeNodeInfo(sw *p2p.Switch, privKey crypto.PrivKeyEd25519) *p2p.NodeInfo {
 func getProxyApp(addr string, hash []byte) (proxyAppConn proxy.AppConn) {
 	// use local app (for testing)
 	if addr == "local" {
-		app := example.NewDummyApplication()
+		app := dummy.NewDummyApplication()
 		mtx := new(sync.Mutex)
 		proxyAppConn = proxy.NewLocalAppConn(mtx, app)
 	} else {
@@ -242,7 +242,7 @@ func getProxyApp(addr string, hash []byte) (proxyAppConn proxy.AppConn) {
 	}
 
 	// Check the hash
-	currentHash, _, err := proxyAppConn.GetHashSync()
+	currentHash, _, err := proxyAppConn.CommitSync()
 	if err != nil {
 		PanicCrisis(Fmt("Error in getting proxyAppConn hash: %v", err))
 	}
diff --git a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
index 391254fe17489fe2a29f8166688e8ce832e1d85f..2e19394a709b39bf477613692c5e649f264b20f4 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/app_conn.go
@@ -12,10 +12,10 @@ type AppConn interface {
 	FlushAsync() *tmspcli.ReqRes
 	AppendTxAsync(tx []byte) *tmspcli.ReqRes
 	CheckTxAsync(tx []byte) *tmspcli.ReqRes
-	GetHashAsync() *tmspcli.ReqRes
+	CommitAsync() *tmspcli.ReqRes
 	SetOptionAsync(key string, value string) *tmspcli.ReqRes
 
 	InfoSync() (info string, err error)
 	FlushSync() error
-	GetHashSync() (hash []byte, log string, err error)
+	CommitSync() (hash []byte, log string, err error)
 }
diff --git a/vendor/github.com/tendermint/tendermint/proxy/local_app_conn.go b/vendor/github.com/tendermint/tendermint/proxy/local_app_conn.go
index 713f7f38db20920ac44359e3c201e5d4e76f769b..001c81ebfbc194f5befccf0c78b181a04111afce 100644
--- a/vendor/github.com/tendermint/tendermint/proxy/local_app_conn.go
+++ b/vendor/github.com/tendermint/tendermint/proxy/local_app_conn.go
@@ -76,13 +76,13 @@ func (app *localAppConn) CheckTxAsync(tx []byte) *tmspcli.ReqRes {
 	return nil // TODO maybe create a ReqRes
 }
 
-func (app *localAppConn) GetHashAsync() *tmspcli.ReqRes {
+func (app *localAppConn) CommitAsync() *tmspcli.ReqRes {
 	app.mtx.Lock()
-	hash, log := app.Application.GetHash()
+	hash, log := app.Application.Commit()
 	app.mtx.Unlock()
 	app.Callback(
-		tmsp.RequestGetHash(),
-		tmsp.ResponseGetHash(hash, log),
+		tmsp.RequestCommit(),
+		tmsp.ResponseCommit(hash, log),
 	)
 	return nil // TODO maybe create a ReqRes
 }
@@ -98,9 +98,9 @@ func (app *localAppConn) FlushSync() error {
 	return nil
 }
 
-func (app *localAppConn) GetHashSync() (hash []byte, log string, err error) {
+func (app *localAppConn) CommitSync() (hash []byte, log string, err error) {
 	app.mtx.Lock()
-	hash, log = app.Application.GetHash()
+	hash, log = app.Application.Commit()
 	app.mtx.Unlock()
 	return hash, log, nil
 }
diff --git a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
index 7befedbf8a82c0826adc17bba496d2e5e6113f3a..61bf08a5652f7e83598452844b4046bfb7a4e18b 100644
--- a/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
+++ b/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go
@@ -36,6 +36,6 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
 }
 
 func UnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
-	txs, err := mempoolReactor.Mempool.Reap()
-	return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, err
+	txs := mempoolReactor.Mempool.Reap()
+	return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
 }
diff --git a/vendor/github.com/tendermint/tendermint/state/execution.go b/vendor/github.com/tendermint/tendermint/state/execution.go
index 7c56e7216024348676ad4fde0278a744f0566fd2..1a81e6e66779b8a6803cb5ace849950b98fb28db 100644
--- a/vendor/github.com/tendermint/tendermint/state/execution.go
+++ b/vendor/github.com/tendermint/tendermint/state/execution.go
@@ -84,13 +84,13 @@ func (s *State) execBlockOnProxyApp(evsw *events.EventSwitch, proxyAppConn proxy
 			return err
 		}
 	}
-	hash, logStr, err := proxyAppConn.GetHashSync()
+	hash, logStr, err := proxyAppConn.CommitSync()
 	if err != nil {
 		log.Warn("Error computing proxyAppConn hash", "error", err)
 		return err
 	}
 	if logStr != "" {
-		log.Debug("GetHash.Log: " + logStr)
+		log.Debug("Commit.Log: " + logStr)
 	}
 	log.Info(Fmt("ExecBlock got %v valid txs and %v invalid txs", validTxs, invalidTxs))
 
diff --git a/vendor/github.com/tendermint/tmsp/client/client.go b/vendor/github.com/tendermint/tmsp/client/client.go
index adb2c4632255ce3e32a5c1d7dfe83fb79d33788e..80fbc39658e9dc590b0e79a1ec6158e0d09623f1 100644
--- a/vendor/github.com/tendermint/tmsp/client/client.go
+++ b/vendor/github.com/tendermint/tmsp/client/client.go
@@ -207,8 +207,8 @@ func (cli *TMSPClient) CheckTxAsync(tx []byte) *ReqRes {
 	return cli.queueRequest(types.RequestCheckTx(tx))
 }
 
-func (cli *TMSPClient) GetHashAsync() *ReqRes {
-	return cli.queueRequest(types.RequestGetHash())
+func (cli *TMSPClient) CommitAsync() *ReqRes {
+	return cli.queueRequest(types.RequestCommit())
 }
 
 func (cli *TMSPClient) QueryAsync(query []byte) *ReqRes {
@@ -217,6 +217,11 @@ func (cli *TMSPClient) QueryAsync(query []byte) *ReqRes {
 
 //----------------------------------------
 
+func (cli *TMSPClient) FlushSync() error {
+	cli.queueRequest(types.RequestFlush()).Wait()
+	return cli.err
+}
+
 func (cli *TMSPClient) InfoSync() (info string, err error) {
 	reqres := cli.queueRequest(types.RequestInfo())
 	cli.FlushSync()
@@ -226,9 +231,13 @@ func (cli *TMSPClient) InfoSync() (info string, err error) {
 	return string(reqres.Response.Data), nil
 }
 
-func (cli *TMSPClient) FlushSync() error {
-	cli.queueRequest(types.RequestFlush()).Wait()
-	return cli.err
+func (cli *TMSPClient) SetOptionSync(key string, value string) (log string, err error) {
+	reqres := cli.queueRequest(types.RequestSetOption(key, value))
+	cli.FlushSync()
+	if cli.err != nil {
+		return "", cli.err
+	}
+	return reqres.Response.Log, nil
 }
 
 func (cli *TMSPClient) AppendTxSync(tx []byte) (code types.CodeType, result []byte, log string, err error) {
@@ -251,8 +260,8 @@ func (cli *TMSPClient) CheckTxSync(tx []byte) (code types.CodeType, result []byt
 	return res.Code, res.Data, res.Log, nil
 }
 
-func (cli *TMSPClient) GetHashSync() (hash []byte, log string, err error) {
-	reqres := cli.queueRequest(types.RequestGetHash())
+func (cli *TMSPClient) CommitSync() (hash []byte, log string, err error) {
+	reqres := cli.queueRequest(types.RequestCommit())
 	cli.FlushSync()
 	if cli.err != nil {
 		return nil, "", cli.err
diff --git a/vendor/github.com/tendermint/tmsp/example/golang/dummy.go b/vendor/github.com/tendermint/tmsp/example/dummy/dummy.go
similarity index 93%
rename from vendor/github.com/tendermint/tmsp/example/golang/dummy.go
rename to vendor/github.com/tendermint/tmsp/example/dummy/dummy.go
index 2f1e7b9058a0eda18e52f52a81e552ae6d376dd2..dc4441d9cba54530a4b4ae2fa095a0fe4a31a0c5 100644
--- a/vendor/github.com/tendermint/tmsp/example/golang/dummy.go
+++ b/vendor/github.com/tendermint/tmsp/example/dummy/dummy.go
@@ -1,4 +1,4 @@
-package example
+package dummy
 
 import (
 	"strings"
@@ -42,7 +42,7 @@ func (app *DummyApplication) CheckTx(tx []byte) (code types.CodeType, result []b
 	return types.CodeType_OK, nil, ""
 }
 
-func (app *DummyApplication) GetHash() (hash []byte, log string) {
+func (app *DummyApplication) Commit() (hash []byte, log string) {
 	hash = app.state.Hash()
 	return hash, ""
 }
diff --git a/vendor/github.com/tendermint/tmsp/example/golang/counter.go b/vendor/github.com/tendermint/tmsp/example/golang/counter.go
deleted file mode 100644
index 1d1081df74692f8728441281805f94f83ed75f2b..0000000000000000000000000000000000000000
--- a/vendor/github.com/tendermint/tmsp/example/golang/counter.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package example
-
-import (
-	"encoding/binary"
-	"fmt"
-
-	. "github.com/tendermint/go-common"
-	"github.com/tendermint/tmsp/types"
-)
-
-type CounterApplication struct {
-	hashCount int
-	txCount   int
-	serial    bool
-}
-
-func NewCounterApplication(serial bool) *CounterApplication {
-	return &CounterApplication{serial: serial}
-}
-
-func (app *CounterApplication) Info() string {
-	return Fmt("hashes:%v, txs:%v", app.hashCount, app.txCount)
-}
-
-func (app *CounterApplication) SetOption(key string, value string) (log string) {
-	if key == "serial" && value == "on" {
-		app.serial = true
-	}
-	return ""
-}
-
-func (app *CounterApplication) AppendTx(tx []byte) (code types.CodeType, result []byte, log string) {
-	if app.serial {
-		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(tx):], tx)
-		txValue := binary.BigEndian.Uint64(tx8)
-		if txValue != uint64(app.txCount) {
-			return types.CodeType_BadNonce, nil, fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)
-		}
-	}
-	app.txCount += 1
-	return types.CodeType_OK, nil, ""
-}
-
-func (app *CounterApplication) CheckTx(tx []byte) (code types.CodeType, result []byte, log string) {
-	if app.serial {
-		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(tx):], tx)
-		txValue := binary.BigEndian.Uint64(tx8)
-		if txValue < uint64(app.txCount) {
-			return types.CodeType_BadNonce, nil, fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)
-		}
-	}
-	return types.CodeType_OK, nil, ""
-}
-
-func (app *CounterApplication) GetHash() (hash []byte, log string) {
-	app.hashCount += 1
-
-	if app.txCount == 0 {
-		return nil, ""
-	} else {
-		hash := make([]byte, 8)
-		binary.BigEndian.PutUint64(hash, uint64(app.txCount))
-		return hash, ""
-	}
-}
-
-func (app *CounterApplication) Query(query []byte) (code types.CodeType, result []byte, log string) {
-	return types.CodeType_OK, nil, fmt.Sprintf("Query is not supported")
-}
diff --git a/vendor/github.com/tendermint/tmsp/server/server.go b/vendor/github.com/tendermint/tmsp/server/server.go
index d5d1a12491a2f2cb9d309edf41a34041c13682ea..933aa4c6e2550660393a0eed4c8d98b9a9cb0833 100644
--- a/vendor/github.com/tendermint/tmsp/server/server.go
+++ b/vendor/github.com/tendermint/tmsp/server/server.go
@@ -112,9 +112,9 @@ func handleRequest(app types.Application, req *types.Request, responses chan<- *
 	case types.MessageType_CheckTx:
 		code, result, logStr := app.CheckTx(req.Data)
 		responses <- types.ResponseCheckTx(code, result, logStr)
-	case types.MessageType_GetHash:
-		hash, logStr := app.GetHash()
-		responses <- types.ResponseGetHash(hash, logStr)
+	case types.MessageType_Commit:
+		hash, logStr := app.Commit()
+		responses <- types.ResponseCommit(hash, logStr)
 	case types.MessageType_Query:
 		code, result, logStr := app.Query(req.Data)
 		responses <- types.ResponseQuery(code, result, logStr)
diff --git a/vendor/github.com/tendermint/tmsp/types/application.go b/vendor/github.com/tendermint/tmsp/types/application.go
index 8f27282f58965358e7e1986ea030410a566897c8..d0379df0fc1064c881d82db8ada1347d53d5cd4a 100644
--- a/vendor/github.com/tendermint/tmsp/types/application.go
+++ b/vendor/github.com/tendermint/tmsp/types/application.go
@@ -15,7 +15,7 @@ type Application interface {
 	CheckTx(tx []byte) (code CodeType, result []byte, log string)
 
 	// Return the application Merkle root hash
-	GetHash() (hash []byte, log string)
+	Commit() (hash []byte, log string)
 
 	// Query for state
 	Query(query []byte) (code CodeType, result []byte, log string)
diff --git a/vendor/github.com/tendermint/tmsp/types/messages.go b/vendor/github.com/tendermint/tmsp/types/messages.go
index 736745bfade416215ca3b3e7d3d2696b0bf29f43..49c02beab5e2e0cada20e7e86e79a47053f3a8c7 100644
--- a/vendor/github.com/tendermint/tmsp/types/messages.go
+++ b/vendor/github.com/tendermint/tmsp/types/messages.go
@@ -48,9 +48,9 @@ func RequestCheckTx(txBytes []byte) *Request {
 	}
 }
 
-func RequestGetHash() *Request {
+func RequestCommit() *Request {
 	return &Request{
-		Type: MessageType_GetHash,
+		Type: MessageType_Commit,
 	}
 }
 
@@ -115,9 +115,9 @@ func ResponseCheckTx(code CodeType, result []byte, log string) *Response {
 	}
 }
 
-func ResponseGetHash(hash []byte, log string) *Response {
+func ResponseCommit(hash []byte, log string) *Response {
 	return &Response{
-		Type: MessageType_GetHash,
+		Type: MessageType_Commit,
 		Data: hash,
 		Log:  log,
 	}
diff --git a/vendor/github.com/tendermint/tmsp/types/types.pb.go b/vendor/github.com/tendermint/tmsp/types/types.pb.go
index 8e86ed287d573ae878ec10f4917062d5a9f46d37..ec1bb40dc696705d6df6da9d829505355a782d80 100644
--- a/vendor/github.com/tendermint/tmsp/types/types.pb.go
+++ b/vendor/github.com/tendermint/tmsp/types/types.pb.go
@@ -34,7 +34,7 @@ const (
 	MessageType_Exception   MessageType = 5
 	MessageType_AppendTx    MessageType = 17
 	MessageType_CheckTx     MessageType = 18
-	MessageType_GetHash     MessageType = 19
+	MessageType_Commit      MessageType = 19
 	MessageType_Query       MessageType = 20
 )
 
@@ -47,7 +47,7 @@ var MessageType_name = map[int32]string{
 	5:  "Exception",
 	17: "AppendTx",
 	18: "CheckTx",
-	19: "GetHash",
+	19: "Commit",
 	20: "Query",
 }
 var MessageType_value = map[string]int32{
@@ -59,7 +59,7 @@ var MessageType_value = map[string]int32{
 	"Exception":   5,
 	"AppendTx":    17,
 	"CheckTx":     18,
-	"GetHash":     19,
+	"Commit":      19,
 	"Query":       20,
 }
 
@@ -143,31 +143,31 @@ func init() {
 }
 
 var fileDescriptor0 = []byte{
-	// 405 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x92, 0xdf, 0x6e, 0xd3, 0x30,
-	0x14, 0xc6, 0x49, 0x9b, 0xf4, 0xcf, 0x69, 0xd7, 0xb9, 0x87, 0x22, 0xe5, 0x12, 0x0d, 0x09, 0xa1,
-	0x5d, 0x0c, 0x69, 0x3c, 0xc1, 0x98, 0x3a, 0xa8, 0x10, 0x9b, 0x08, 0xdb, 0x03, 0x18, 0xe7, 0xb4,
-	0x8d, 0x1a, 0x8e, 0x43, 0x6c, 0xc3, 0xca, 0x43, 0x70, 0xc3, 0x73, 0xf0, 0x8e, 0xd8, 0x4e, 0x27,
-	0x8d, 0x6b, 0x6e, 0xa2, 0xf3, 0x7d, 0xf6, 0xf9, 0xce, 0xcf, 0x8e, 0x61, 0x6e, 0xf7, 0x0d, 0x99,
-	0xd7, 0xf1, 0x7b, 0xd6, 0xb4, 0xda, 0x6a, 0xcc, 0xa2, 0x38, 0xf9, 0x0a, 0xc3, 0x82, 0xbe, 0x39,
-	0x32, 0x16, 0x5f, 0x42, 0x1a, 0xbc, 0x3c, 0x79, 0x9e, 0xbc, 0x9a, 0x9d, 0xe3, 0x59, 0xb7, 0xfb,
-	0x23, 0x19, 0x23, 0x37, 0x74, 0xeb, 0x45, 0x11, 0xd7, 0x11, 0x21, 0x2d, 0xa5, 0x95, 0x79, 0xcf,
-	0xef, 0x9b, 0x16, 0xb1, 0x46, 0x01, 0xfd, 0x1d, 0xed, 0xf3, 0xbe, 0xb7, 0xc6, 0x45, 0x28, 0x71,
-	0x01, 0xd9, 0x77, 0x59, 0x3b, 0xca, 0xd3, 0xe8, 0x75, 0xe2, 0xe4, 0x77, 0x02, 0xa3, 0x82, 0x4c,
-	0xa3, 0xd9, 0xd0, 0x7f, 0x0d, 0x7c, 0x01, 0xa9, 0xd2, 0x25, 0xc5, 0x89, 0xb3, 0xf3, 0xe3, 0x43,
-	0xef, 0xa5, 0xb7, 0xba, 0xc6, 0xb0, 0x18, 0x18, 0xa8, 0x6d, 0x75, 0xfb, 0xc0, 0x10, 0x45, 0x60,
-	0xad, 0xf5, 0x26, 0xcf, 0x3a, 0x56, 0x5f, 0x9e, 0xfe, 0x4a, 0x60, 0xf2, 0x68, 0x2c, 0x1e, 0xc3,
-	0xe4, 0xda, 0xd5, 0xf5, 0xc1, 0x12, 0x4f, 0x70, 0x04, 0xe9, 0x52, 0x6d, 0xb5, 0x48, 0x70, 0x0c,
-	0xd9, 0x55, 0xed, 0xcc, 0x56, 0xf4, 0x82, 0xb9, 0xe2, 0xb5, 0x16, 0x7d, 0x3c, 0x82, 0xf1, 0x67,
-	0xb2, 0x37, 0x8d, 0xad, 0x34, 0x8b, 0x34, 0xc8, 0xe5, 0xbd, 0xa2, 0x4e, 0x66, 0x38, 0x85, 0xd1,
-	0x45, 0xd3, 0x10, 0x97, 0xb7, 0xf7, 0x62, 0x8e, 0x13, 0x18, 0x5e, 0x6e, 0x49, 0xed, 0xbc, 0xc0,
-	0x20, 0xde, 0x91, 0x7d, 0x2f, 0x7d, 0xde, 0xd3, 0x10, 0xfd, 0xc9, 0x51, 0xbb, 0x17, 0x8b, 0xd3,
-	0x3f, 0xfe, 0x9a, 0x1e, 0xce, 0x82, 0x03, 0xe8, 0xdd, 0x7c, 0xf0, 0x10, 0x73, 0x38, 0x5a, 0xb1,
-	0xa5, 0x96, 0x65, 0xbd, 0x0c, 0x07, 0xf1, 0x34, 0x02, 0xa6, 0x77, 0x2c, 0x9d, 0xdd, 0xea, 0xb6,
-	0xfa, 0x49, 0xa5, 0x87, 0x5a, 0x80, 0x58, 0xb1, 0x71, 0xeb, 0x75, 0xa5, 0x2a, 0x62, 0x7b, 0x45,
-	0x64, 0x3c, 0x20, 0xc2, 0xec, 0x8e, 0x77, 0xac, 0x7f, 0xf0, 0xe1, 0x67, 0x7b, 0x4a, 0x1f, 0xb7,
-	0x64, 0x7f, 0x4d, 0x15, 0x6f, 0xba, 0xb8, 0x48, 0xfa, 0x56, 0x96, 0xd7, 0x9a, 0x15, 0x89, 0xc1,
-	0xa3, 0xa6, 0x0b, 0xa5, 0xb4, 0x63, 0x2b, 0x86, 0xf8, 0x0c, 0xe6, 0xff, 0xc4, 0x3b, 0x2e, 0x8d,
-	0x18, 0x7d, 0x19, 0xc4, 0x37, 0xf5, 0xe6, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x90, 0x25,
-	0x05, 0x68, 0x02, 0x00, 0x00,
+	// 406 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x92, 0x5f, 0x6e, 0xd3, 0x40,
+	0x10, 0xc6, 0x71, 0x62, 0xe7, 0xcf, 0x24, 0x4d, 0x37, 0x43, 0x90, 0xfc, 0x88, 0x8a, 0x84, 0x50,
+	0x1f, 0x8a, 0x54, 0x4e, 0x50, 0xa2, 0x54, 0x8a, 0x10, 0xad, 0x30, 0xed, 0x01, 0xcc, 0x7a, 0x12,
+	0x5b, 0x71, 0x66, 0x8d, 0x77, 0x17, 0x1a, 0xee, 0xc0, 0x13, 0xe7, 0xe0, 0x8e, 0xec, 0xae, 0x53,
+	0xa9, 0x3c, 0xf7, 0xc5, 0x9a, 0xef, 0xdb, 0x9d, 0x99, 0xdf, 0x8c, 0x17, 0xe6, 0xe6, 0xd0, 0x90,
+	0x7e, 0x1f, 0xbe, 0x17, 0x4d, 0xab, 0x8c, 0xc2, 0x24, 0x88, 0xb3, 0x3d, 0x0c, 0x33, 0xfa, 0x6e,
+	0x49, 0x1b, 0x7c, 0x0b, 0xb1, 0xf7, 0xd2, 0xe8, 0x75, 0xf4, 0x6e, 0x76, 0x89, 0x17, 0xdd, 0xed,
+	0xcf, 0xa4, 0x75, 0xbe, 0xa5, 0x3b, 0x27, 0xb2, 0x70, 0x8e, 0x08, 0x71, 0x91, 0x9b, 0x3c, 0xed,
+	0xb9, 0x7b, 0xd3, 0x2c, 0xc4, 0x28, 0xa0, 0xbf, 0xa3, 0x43, 0xda, 0x77, 0xd6, 0x38, 0xf3, 0x21,
+	0x2e, 0x20, 0xf9, 0x91, 0xd7, 0x96, 0xd2, 0x38, 0x78, 0x9d, 0x38, 0xfb, 0x13, 0xc1, 0x28, 0x23,
+	0xdd, 0x28, 0xd6, 0xf4, 0xac, 0x86, 0x6f, 0x20, 0x96, 0xaa, 0xa0, 0xd0, 0x71, 0x76, 0x79, 0x7a,
+	0xcc, 0x5d, 0x3a, 0xab, 0x4b, 0xf4, 0x87, 0x9e, 0x81, 0xda, 0x56, 0xb5, 0x8f, 0x0c, 0x41, 0x78,
+	0xd6, 0x5a, 0x6d, 0xd3, 0xa4, 0x63, 0x75, 0xe1, 0xf9, 0xef, 0x08, 0x26, 0x4f, 0xda, 0xe2, 0x29,
+	0x4c, 0x6e, 0x6c, 0x5d, 0x1f, 0x2d, 0xf1, 0x02, 0x47, 0x10, 0xaf, 0x64, 0xa9, 0x44, 0x84, 0x63,
+	0x48, 0xae, 0x6b, 0xab, 0x4b, 0xd1, 0xf3, 0xe6, 0x9a, 0x37, 0x4a, 0xf4, 0xf1, 0x04, 0xc6, 0x5f,
+	0xc9, 0xdc, 0x36, 0xa6, 0x52, 0x2c, 0x62, 0x2f, 0x57, 0x0f, 0x92, 0x3a, 0x99, 0xe0, 0x14, 0x46,
+	0x57, 0x4d, 0x43, 0x5c, 0xdc, 0x3d, 0x88, 0x39, 0x4e, 0x60, 0xb8, 0x2c, 0x49, 0xee, 0x9c, 0x70,
+	0x83, 0xc1, 0x60, 0xa9, 0xf6, 0xfb, 0xca, 0x88, 0x97, 0xbe, 0xf2, 0x17, 0x4b, 0xed, 0x41, 0x2c,
+	0xce, 0xff, 0xba, 0x2d, 0x3d, 0x8e, 0x82, 0x03, 0xe8, 0xdd, 0x7e, 0x72, 0x0c, 0x73, 0x38, 0x59,
+	0xb3, 0xa1, 0x96, 0xf3, 0x7a, 0xe5, 0xe7, 0x70, 0x30, 0x02, 0xa6, 0xf7, 0x9c, 0x5b, 0x53, 0xaa,
+	0xb6, 0xfa, 0x45, 0x85, 0x63, 0x5a, 0x80, 0x58, 0xb3, 0xb6, 0x9b, 0x4d, 0x25, 0x2b, 0x62, 0x73,
+	0x4d, 0xa4, 0x1d, 0x1f, 0xc2, 0xec, 0x9e, 0x77, 0xac, 0x7e, 0xf2, 0xf1, 0x5f, 0x3b, 0x48, 0x57,
+	0x6e, 0xc5, 0x6e, 0x4b, 0x15, 0x6f, 0xbb, 0x72, 0x01, 0xf4, 0x63, 0x5e, 0xdc, 0x28, 0x96, 0x24,
+	0x06, 0x4f, 0x92, 0xae, 0xa4, 0x54, 0x96, 0x8d, 0x18, 0xe2, 0x2b, 0x98, 0xff, 0x57, 0xde, 0x72,
+	0xa1, 0xc5, 0xe8, 0xdb, 0x20, 0x3c, 0xa9, 0x0f, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x41, 0x3b,
+	0x4f, 0x97, 0x67, 0x02, 0x00, 0x00,
 }
diff --git a/vendor/github.com/tendermint/tmsp/types/types.proto b/vendor/github.com/tendermint/tmsp/types/types.proto
index f14276d1d235e495cae7ca02f41ee5630511046c..f9da8e7d9170ff9a3fc69d49c4957df6dbd74d4e 100644
--- a/vendor/github.com/tendermint/tmsp/types/types.proto
+++ b/vendor/github.com/tendermint/tmsp/types/types.proto
@@ -16,7 +16,7 @@ enum MessageType {
   Exception  = 0x05;
   AppendTx   = 0x11;
   CheckTx    = 0x12;
-  GetHash    = 0x13;
+  Commit     = 0x13;
   Query      = 0x14;
 }
 
diff --git a/vendor/github.com/tylerb/graceful/.gitignore b/vendor/github.com/tylerb/graceful/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..836562412fe8a44fa99a515eeff68d2bc1a86daa
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/tylerb/graceful/LICENSE b/vendor/github.com/tylerb/graceful/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..a4f2f281ba0ae71f99177d0d789eabb3b0298a46
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/tylerb/graceful/README.md b/vendor/github.com/tylerb/graceful/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..531249a73ef7d72bdf0c8eb01bfc3b5e2238f919
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/README.md
@@ -0,0 +1,137 @@
+graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](http://godoc.org/github.com/tylerb/graceful) [![Build Status](https://drone.io/github.com/tylerb/graceful/status.png)](https://drone.io/github.com/tylerb/graceful/latest) [![Coverage Status](https://coveralls.io/repos/tylerb/graceful/badge.svg?branch=dronedebug)](https://coveralls.io/r/tylerb/graceful?branch=dronedebug) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tylerb/graceful?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+========
+
+Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers.
+
+## Installation
+
+To install, simply execute:
+
+```
+go get gopkg.in/tylerb/graceful.v1
+```
+
+I am using [gopkg.in](http://labix.org/gopkg.in) to control releases.
+
+## Usage
+
+Using Graceful is easy. Simply create your http.Handler and pass it to the `Run` function:
+
+```go
+package main
+
+import (
+  "gopkg.in/tylerb/graceful.v1"
+  "net/http"
+  "fmt"
+  "time"
+)
+
+func main() {
+  mux := http.NewServeMux()
+  mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+    fmt.Fprintf(w, "Welcome to the home page!")
+  })
+
+  graceful.Run(":3001",10*time.Second,mux)
+}
+```
+
+Another example, using [Negroni](https://github.com/codegangsta/negroni), functions in much the same manner:
+
+```go
+package main
+
+import (
+  "github.com/codegangsta/negroni"
+  "gopkg.in/tylerb/graceful.v1"
+  "net/http"
+  "fmt"
+  "time"
+)
+
+func main() {
+  mux := http.NewServeMux()
+  mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+    fmt.Fprintf(w, "Welcome to the home page!")
+  })
+
+  n := negroni.Classic()
+  n.UseHandler(mux)
+  //n.Run(":3000")
+  graceful.Run(":3001",10*time.Second,n)
+}
+```
+
+In addition to Run there are the http.Server counterparts ListenAndServe, ListenAndServeTLS and Serve, which allow you to configure HTTPS, custom timeouts and error handling.
+Graceful may also be used by instantiating its Server type directly, which embeds an http.Server:
+
+```go
+mux := // ...
+
+srv := &graceful.Server{
+  Timeout: 10 * time.Second,
+
+  Server: &http.Server{
+    Addr: ":1234",
+    Handler: mux,
+  },
+}
+
+srv.ListenAndServe()
+```
+
+This form allows you to set the ConnState callback, which works in the same way as in http.Server:
+
+```go
+mux := // ...
+
+srv := &graceful.Server{
+  Timeout: 10 * time.Second,
+
+  ConnState: func(conn net.Conn, state http.ConnState) {
+    // conn has a new state
+  },
+
+  Server: &http.Server{
+    Addr: ":1234",
+    Handler: mux,
+  },
+}
+
+srv.ListenAndServe()
+```
+
+## Behaviour
+
+When Graceful is sent a SIGINT or SIGTERM (possibly from ^C or a kill command), it:
+
+1. Disables keepalive connections.
+2. Closes the listening socket, allowing another process to listen on that port immediately.
+3. Starts a timer of `timeout` duration to give active requests a chance to finish.
+4. When timeout expires, closes all active connections.
+5. Closes the `stopChan`, waking up any blocking goroutines.
+6. Returns from the function, allowing the server to terminate.
+
+## Notes
+
+If the `timeout` argument to `Run` is 0, the server never times out, allowing all active requests to complete.
+
+If you wish to stop the server in some way other than an OS signal, you may call the `Stop()` function.
+This function stops the server, gracefully, using the new timeout value you provide. The `StopChan()` function
+returns a channel on which you can block while waiting for the server to stop. This channel will be closed when
+the server is stopped, allowing your execution to proceed. Multiple goroutines can block on this channel at the
+same time and all will be signalled when stopping is complete.
+
+## Contributing
+
+If you would like to contribute, please:
+
+1. Create a GitHub issue regarding the contribution. Features and bugs should be discussed beforehand.
+2. Fork the repository.
+3. Create a pull request with your solution. This pull request should reference and close the issues (Fix #2).
+
+All pull requests should:
+
+1. Pass [gometalinter -t .](https://github.com/alecthomas/gometalinter) with no warnings.
+2. Be `go fmt` formatted.
diff --git a/vendor/github.com/tylerb/graceful/graceful.go b/vendor/github.com/tylerb/graceful/graceful.go
new file mode 100644
index 0000000000000000000000000000000000000000..c0693ee36602641f575ed3cddba23a77d5e05b2d
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/graceful.go
@@ -0,0 +1,372 @@
+package graceful
+
+import (
+	"crypto/tls"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/netutil"
+)
+
+// Server wraps an http.Server with graceful connection handling.
+// It may be used directly in the same way as http.Server, or may
+// be constructed with the global functions in this package.
+//
+// Example:
+//	srv := &graceful.Server{
+//		Timeout: 5 * time.Second,
+//		Server: &http.Server{Addr: ":1234", Handler: handler},
+//	}
+//	srv.ListenAndServe()
+type Server struct {
+	*http.Server
+
+	// Timeout is the duration to allow outstanding requests to survive
+	// before forcefully terminating them.
+	Timeout time.Duration
+
+	// Limit the number of outstanding requests
+	ListenLimit int
+
+	// ConnState specifies an optional callback function that is
+	// called when a client connection changes state. This is a proxy
+	// to the underlying http.Server's ConnState, and the original
+	// must not be set directly.
+	ConnState func(net.Conn, http.ConnState)
+
+	// BeforeShutdown is an optional callback function that is called
+	// before the listener is closed.
+	BeforeShutdown func()
+
+	// ShutdownInitiated is an optional callback function that is called
+	// when shutdown is initiated. It can be used to notify the client
+	// side of long lived connections (e.g. websockets) to reconnect.
+	ShutdownInitiated func()
+
+	// NoSignalHandling prevents graceful from automatically shutting down
+	// on SIGINT and SIGTERM. If set to true, you must shut down the server
+	// manually with Stop().
+	NoSignalHandling bool
+
+	// interrupt signals the listener to stop serving connections,
+	// and the server to shut down.
+	interrupt chan os.Signal
+
+	// stopLock is used to protect against concurrent calls to Stop
+	stopLock sync.Mutex
+
+	// stopChan is the channel on which callers may block while waiting for
+	// the server to stop.
+	stopChan chan struct{}
+
+	// chanLock is used to protect access to the various channel constructors.
+	chanLock sync.RWMutex
+
+	// connections holds all connections managed by graceful
+	connections map[net.Conn]struct{}
+}
+
+// Run serves the http.Handler with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func Run(addr string, timeout time.Duration, n http.Handler) {
+	srv := &Server{
+		Timeout: timeout,
+		Server:  &http.Server{Addr: addr, Handler: n},
+	}
+
+	if err := srv.ListenAndServe(); err != nil {
+		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
+			logger := log.New(os.Stdout, "[graceful] ", 0)
+			logger.Fatal(err)
+		}
+	}
+
+}
+
+// RunWithErr is an alternative version of Run function which can return error.
+//
+// Unlike Run this version will not exit the program if an error is encountered but will
+// return it instead.
+func RunWithErr(addr string, timeout time.Duration, n http.Handler) error {
+	srv := &Server{
+		Timeout: timeout,
+		Server:  &http.Server{Addr: addr, Handler: n},
+	}
+
+	return srv.ListenAndServe()
+}
+
+// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func ListenAndServe(server *http.Server, timeout time.Duration) error {
+	srv := &Server{Timeout: timeout, Server: server}
+	return srv.ListenAndServe()
+}
+
+// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled.
+func (srv *Server) ListenAndServe() error {
+	// Create the listener so we can control their lifetime
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":http"
+	}
+	l, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+
+	return srv.Serve(l)
+}
+
+// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func ListenAndServeTLS(server *http.Server, certFile, keyFile string, timeout time.Duration) error {
+	srv := &Server{Timeout: timeout, Server: server}
+	return srv.ListenAndServeTLS(certFile, keyFile)
+}
+
+// ListenTLS is a convenience method that creates an https listener using the
+// provided cert and key files. Use this method if you need access to the
+// listener object directly. When ready, pass it to the Serve method.
+func (srv *Server) ListenTLS(certFile, keyFile string) (net.Listener, error) {
+	// Create the listener ourselves so we can control its lifetime
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":https"
+	}
+
+	config := &tls.Config{}
+	if srv.TLSConfig != nil {
+		*config = *srv.TLSConfig
+	}
+	if config.NextProtos == nil {
+		config.NextProtos = []string{"http/1.1"}
+	}
+
+	var err error
+	config.Certificates = make([]tls.Certificate, 1)
+	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := net.Listen("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+
+	tlsListener := tls.NewListener(conn, config)
+	return tlsListener, nil
+}
+
+// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled.
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+	l, err := srv.ListenTLS(certFile, keyFile)
+	if err != nil {
+		return err
+	}
+
+	return srv.Serve(l)
+}
+
+// ListenAndServeTLSConfig can be used with an existing TLS config and is equivalent to
+// http.Server.ListenAndServeTLS with graceful shutdown enabled,
+func (srv *Server) ListenAndServeTLSConfig(config *tls.Config) error {
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":https"
+	}
+
+	conn, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+
+	tlsListener := tls.NewListener(conn, config)
+	return srv.Serve(tlsListener)
+}
+
+// Serve is equivalent to http.Server.Serve with graceful shutdown enabled.
+//
+// timeout is the duration to wait until killing active requests and stopping the server.
+// If timeout is 0, the server never times out. It waits for all active requests to finish.
+func Serve(server *http.Server, l net.Listener, timeout time.Duration) error {
+	srv := &Server{Timeout: timeout, Server: server}
+	return srv.Serve(l)
+}
+
+// Serve is equivalent to http.Server.Serve with graceful shutdown enabled.
+func (srv *Server) Serve(listener net.Listener) error {
+
+	if srv.ListenLimit != 0 {
+		listener = netutil.LimitListener(listener, srv.ListenLimit)
+	}
+
+	// Track connection state
+	add := make(chan net.Conn)
+	remove := make(chan net.Conn)
+
+	srv.Server.ConnState = func(conn net.Conn, state http.ConnState) {
+		switch state {
+		case http.StateNew:
+			add <- conn
+		case http.StateClosed, http.StateHijacked:
+			remove <- conn
+		}
+		if srv.ConnState != nil {
+			srv.ConnState(conn, state)
+		}
+	}
+
+	// Manage open connections
+	shutdown := make(chan chan struct{})
+	kill := make(chan struct{})
+	go srv.manageConnections(add, remove, shutdown, kill)
+
+	interrupt := srv.interruptChan()
+	// Set up the interrupt handler
+	if !srv.NoSignalHandling {
+		signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
+	}
+	quitting := make(chan struct{})
+	go srv.handleInterrupt(interrupt, quitting, listener)
+
+	// Serve with graceful listener.
+	// Execution blocks here until listener.Close() is called, above.
+	err := srv.Server.Serve(listener)
+	if err != nil {
+		// If the underlying listening is closed, Serve returns an error
+		// complaining about listening on a closed socket. This is expected, so
+		// let's ignore the error if we are the ones who explicitly closed the
+		// socket.
+		select {
+		case <-quitting:
+			err = nil
+		default:
+		}
+	}
+
+	srv.shutdown(shutdown, kill)
+
+	return err
+}
+
+// Stop instructs the type to halt operations and close
+// the stop channel when it is finished.
+//
+// timeout is grace period for which to wait before shutting
+// down the server. The timeout value passed here will override the
+// timeout given when constructing the server, as this is an explicit
+// command to stop the server.
+func (srv *Server) Stop(timeout time.Duration) {
+	srv.stopLock.Lock()
+	srv.Timeout = timeout
+	interrupt := srv.interruptChan()
+	interrupt <- syscall.SIGINT
+	srv.stopLock.Unlock()
+}
+
+// StopChan gets the stop channel which will block until
+// stopping has completed, at which point it is closed.
+// Callers should never close the stop channel.
+func (srv *Server) StopChan() <-chan struct{} {
+	srv.chanLock.Lock()
+	if srv.stopChan == nil {
+		srv.stopChan = make(chan struct{})
+	}
+	srv.chanLock.Unlock()
+	return srv.stopChan
+}
+
+func (srv *Server) manageConnections(add, remove chan net.Conn, shutdown chan chan struct{}, kill chan struct{}) {
+	var done chan struct{}
+	srv.connections = map[net.Conn]struct{}{}
+	for {
+		select {
+		case conn := <-add:
+			srv.connections[conn] = struct{}{}
+		case conn := <-remove:
+			delete(srv.connections, conn)
+			if done != nil && len(srv.connections) == 0 {
+				done <- struct{}{}
+				return
+			}
+		case done = <-shutdown:
+			if len(srv.connections) == 0 {
+				done <- struct{}{}
+				return
+			}
+		case <-kill:
+			for k := range srv.connections {
+				_ = k.Close() // nothing to do here if it errors
+			}
+			return
+		}
+	}
+}
+
+func (srv *Server) interruptChan() chan os.Signal {
+	srv.chanLock.Lock()
+	if srv.interrupt == nil {
+		srv.interrupt = make(chan os.Signal, 1)
+	}
+	srv.chanLock.Unlock()
+
+	return srv.interrupt
+}
+
+func (srv *Server) handleInterrupt(interrupt chan os.Signal, quitting chan struct{}, listener net.Listener) {
+	<-interrupt
+
+	if srv.BeforeShutdown != nil {
+		srv.BeforeShutdown()
+	}
+
+	close(quitting)
+	srv.SetKeepAlivesEnabled(false)
+	_ = listener.Close() // we are shutting down anyway. ignore error.
+
+	if srv.ShutdownInitiated != nil {
+		srv.ShutdownInitiated()
+	}
+
+	srv.stopLock.Lock()
+	signal.Stop(interrupt)
+	close(interrupt)
+	srv.interrupt = nil
+	srv.stopLock.Unlock()
+}
+
+func (srv *Server) shutdown(shutdown chan chan struct{}, kill chan struct{}) {
+	// Request done notification
+	done := make(chan struct{})
+	shutdown <- done
+
+	if srv.Timeout > 0 {
+		select {
+		case <-done:
+		case <-time.After(srv.Timeout):
+			close(kill)
+		}
+	} else {
+		<-done
+	}
+	// Close the stopChan to wake up any blocked goroutines.
+	srv.chanLock.Lock()
+	if srv.stopChan != nil {
+		close(srv.stopChan)
+	}
+	srv.chanLock.Unlock()
+}
diff --git a/vendor/github.com/tylerb/graceful/tests/main.go b/vendor/github.com/tylerb/graceful/tests/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c8fa204605104c47d7e982b48c27c0a05ff60e5
--- /dev/null
+++ b/vendor/github.com/tylerb/graceful/tests/main.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/codegangsta/negroni"
+	"github.com/tylerb/graceful"
+)
+
+func main() {
+
+	var wg sync.WaitGroup
+
+	wg.Add(3)
+	go func() {
+		n := negroni.New()
+		fmt.Println("Launching server on :3000")
+		graceful.Run(":3000", 0, n)
+		fmt.Println("Terminated server on :3000")
+		wg.Done()
+	}()
+	go func() {
+		n := negroni.New()
+		fmt.Println("Launching server on :3001")
+		graceful.Run(":3001", 0, n)
+		fmt.Println("Terminated server on :3001")
+		wg.Done()
+	}()
+	go func() {
+		n := negroni.New()
+		fmt.Println("Launching server on :3002")
+		graceful.Run(":3002", 0, n)
+		fmt.Println("Terminated server on :3002")
+		wg.Done()
+	}()
+	fmt.Println("Press ctrl+c. All servers should terminate.")
+	wg.Wait()
+
+}